Compare commits
108 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| eee13c42a4 | |||
| 65b48c0f84 | |||
| 30cdf29e52 | |||
| b1d2bb6d71 | |||
| 94bad2a956 | |||
| 111714c7ed | |||
| 1f9c516c5c | |||
| 3645c65bb2 | |||
| d3d2d2fb1e | |||
| 086fb1f5d5 | |||
| a73a535a5b | |||
| 96e01c3018 | |||
| 6b10a0359b | |||
| b62583ad4b | |||
| 3d6727a2cb | |||
| b163962a14 | |||
| 9aca4ea27c | |||
| b0c10131ea | |||
| c8c7e13e96 | |||
| 249b7ea38e | |||
| 1333096e25 | |||
| 54bc9324dd | |||
| 109e5f2b19 | |||
| ee176b4207 | |||
| 7e1e16be33 | |||
| 5cfe8082ce | |||
| 6b7f672834 | |||
| c55f6252a1 | |||
| 842553b677 | |||
| 05a771ba77 | |||
| 70a0d42e65 | |||
| 6b1d73b466 | |||
| d7b9596145 | |||
| 7a0aa1a40a | |||
| 4d8ea43e11 | |||
| 6fddae98f6 | |||
| e33fbb6087 | |||
| 2337393e13 | |||
| d7357a1b0a | |||
| afbf1af29c | |||
| 1d834c747c | |||
| a80edec752 | |||
| 2a6473e8c6 | |||
| 1f9c0b9b7f | |||
| 5494afabd8 | |||
| 07c6e86a50 | |||
| b543821a1c | |||
| e8b7045a9b | |||
| 2571089528 | |||
| 1fb733fe1e | |||
| 8990a107a0 | |||
| 53ceea918b | |||
| 19d24075da | |||
| d017c27eaf | |||
| 0bab4a2042 | |||
| f3cd74d9d8 | |||
| e3b4099c9d | |||
| fa2481c650 | |||
| 2c0ffdd590 | |||
| e8fa04404f | |||
| f11a8086b0 | |||
| 95b3bc9c7a | |||
| 93b000776f | |||
| e6fbf501ac | |||
| d3036d569e | |||
| d0f7bbc3bd | |||
| ceacb1e61e | |||
| 7ca6c77d22 | |||
| 1b5170700a | |||
| 5007fa4d5f | |||
| 58e335594a | |||
| 1800122cb4 | |||
| a2ab7e6519 | |||
| d167a977ef | |||
| 3507ddc3cf | |||
| 1873687492 | |||
| 43176a74a0 | |||
| 8dfe488cdf | |||
| 6035e45cb8 | |||
| a31e476623 | |||
| e5c3d151bb | |||
| 6ccd20d45f | |||
| a5bc0eb37d | |||
| e98ee5e33d | |||
| 45e08aa9f6 | |||
| 456c0bced9 | |||
| 193e4bd73b | |||
| edcee32ab9 | |||
| 2549fc71fa | |||
| c60c373bc9 | |||
| 25a0c807cb | |||
| fabb0b8344 | |||
| b84bb43a07 | |||
| 15885f8b36 | |||
| 6b1adb8b12 | |||
| 110dcbbb54 | |||
| 541f00b903 | |||
| 8aa9e9acc3 | |||
| d9e39ab5b1 | |||
| 683a7c0957 | |||
| a4296cbbc4 | |||
| efd98bd93a | |||
| 62fa0e8caa | |||
| 953a6159a4 | |||
| 11e17b3de9 | |||
| 549bb95bea | |||
| e3f78500e7 | |||
| 2265df51b4 |
@@ -181,7 +181,7 @@ jobs:
|
||||
echo "LINT_CACHE_DIR=$dir" >> "$GITHUB_ENV"
|
||||
|
||||
- name: golangci-lint cache
|
||||
uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2
|
||||
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
|
||||
with:
|
||||
path: |
|
||||
${{ env.LINT_CACHE_DIR }}
|
||||
@@ -241,7 +241,9 @@ jobs:
|
||||
|
||||
lint-actions:
|
||||
needs: changes
|
||||
if: needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
# Only run this job if changes to CI workflow files are detected. This job
|
||||
# can flake as it reaches out to GitHub to check referenced actions.
|
||||
if: needs.changes.outputs.ci == 'true'
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
@@ -1184,7 +1186,7 @@ jobs:
|
||||
persist-credentials: false
|
||||
|
||||
- name: GHCR Login
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
@@ -1391,7 +1393,7 @@ jobs:
|
||||
id: attest_main
|
||||
if: github.ref == 'refs/heads/main'
|
||||
continue-on-error: true
|
||||
uses: actions/attest@7667f588f2f73a90cea6c7ac70e78266c4f76616 # v3.1.0
|
||||
uses: actions/attest@e59cbc1ad1ac2d59339667419eb8cdde6eb61e3d # v3.2.0
|
||||
with:
|
||||
subject-name: "ghcr.io/coder/coder-preview:main"
|
||||
predicate-type: "https://slsa.dev/provenance/v1"
|
||||
@@ -1428,7 +1430,7 @@ jobs:
|
||||
id: attest_latest
|
||||
if: github.ref == 'refs/heads/main'
|
||||
continue-on-error: true
|
||||
uses: actions/attest@7667f588f2f73a90cea6c7ac70e78266c4f76616 # v3.1.0
|
||||
uses: actions/attest@e59cbc1ad1ac2d59339667419eb8cdde6eb61e3d # v3.2.0
|
||||
with:
|
||||
subject-name: "ghcr.io/coder/coder-preview:latest"
|
||||
predicate-type: "https://slsa.dev/provenance/v1"
|
||||
@@ -1465,7 +1467,7 @@ jobs:
|
||||
id: attest_version
|
||||
if: github.ref == 'refs/heads/main'
|
||||
continue-on-error: true
|
||||
uses: actions/attest@7667f588f2f73a90cea6c7ac70e78266c4f76616 # v3.1.0
|
||||
uses: actions/attest@e59cbc1ad1ac2d59339667419eb8cdde6eb61e3d # v3.2.0
|
||||
with:
|
||||
subject-name: "ghcr.io/coder/coder-preview:${{ steps.build-docker.outputs.tag }}"
|
||||
predicate-type: "https://slsa.dev/provenance/v1"
|
||||
|
||||
@@ -76,7 +76,7 @@ jobs:
|
||||
persist-credentials: false
|
||||
|
||||
- name: GHCR Login
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
|
||||
@@ -160,34 +160,41 @@ jobs:
|
||||
# Build context based on trigger type
|
||||
case "${TRIGGER_TYPE}" in
|
||||
new_pr)
|
||||
CONTEXT="This is a NEW PR. Perform a thorough documentation review."
|
||||
CONTEXT="This is a NEW PR. Perform initial documentation review."
|
||||
;;
|
||||
pr_updated)
|
||||
CONTEXT="This PR was UPDATED with new commits. Only comment if the changes affect documentation needs or address previous feedback."
|
||||
CONTEXT="This PR was UPDATED with new commits. Check if previous feedback was addressed or if new doc needs arose."
|
||||
;;
|
||||
label_requested)
|
||||
CONTEXT="A documentation review was REQUESTED via label. Perform a thorough documentation review."
|
||||
CONTEXT="A documentation review was REQUESTED via label. Perform a thorough review."
|
||||
;;
|
||||
ready_for_review)
|
||||
CONTEXT="This PR was marked READY FOR REVIEW (converted from draft). Perform a thorough documentation review."
|
||||
CONTEXT="This PR was marked READY FOR REVIEW. Perform a thorough review."
|
||||
;;
|
||||
manual)
|
||||
CONTEXT="This is a MANUAL review request. Perform a thorough documentation review."
|
||||
CONTEXT="This is a MANUAL review request. Perform a thorough review."
|
||||
;;
|
||||
*)
|
||||
CONTEXT="Perform a thorough documentation review."
|
||||
CONTEXT="Perform a documentation review."
|
||||
;;
|
||||
esac
|
||||
|
||||
# Build task prompt with PR-specific context
|
||||
# Build task prompt with sticky comment logic
|
||||
TASK_PROMPT="Use the doc-check skill to review PR #${PR_NUMBER} in coder/coder.
|
||||
|
||||
${CONTEXT}
|
||||
|
||||
Use \`gh\` to get PR details, diff, and all comments. Check for previous doc-check comments (from coder-doc-check) and only post a new comment if it adds value.
|
||||
Use \`gh\` to get PR details, diff, and all comments. Look for an existing doc-check comment containing \`<!-- doc-check-sticky -->\` - if one exists, you'll update it instead of creating a new one.
|
||||
|
||||
**Do not comment if no documentation changes are needed.**
|
||||
|
||||
If a sticky comment already exists, compare your current findings against it:
|
||||
- Check off \`[x]\` items that are now addressed
|
||||
- Strikethrough items no longer needed (e.g., code was reverted)
|
||||
- Add new unchecked \`[ ]\` items for newly discovered needs
|
||||
- If an item is checked but you can't verify the docs were added, add a warning note below it
|
||||
- If nothing meaningful changed, don't update the comment at all
|
||||
|
||||
## Comment format
|
||||
|
||||
Use this structure (only include relevant sections):
|
||||
@@ -195,18 +202,21 @@ jobs:
|
||||
\`\`\`
|
||||
## Documentation Check
|
||||
|
||||
### Previous Feedback
|
||||
[For re-reviews only: Addressed | Partially addressed | Not yet addressed]
|
||||
|
||||
### Updates Needed
|
||||
- [ ] \`docs/path/file.md\` - [what needs to change]
|
||||
- [ ] \`docs/path/file.md\` - What needs to change
|
||||
- [x] \`docs/other/file.md\` - This was addressed
|
||||
- ~~\`docs/removed.md\` - No longer needed~~ *(reverted in abc123)*
|
||||
|
||||
### New Documentation Needed
|
||||
- [ ] \`docs/suggested/path.md\` - [what should be documented]
|
||||
- [ ] \`docs/suggested/path.md\` - What should be documented
|
||||
> ⚠️ *Checked but no corresponding documentation changes found in this PR*
|
||||
|
||||
---
|
||||
*Automated review via [Coder Tasks](https://coder.com/docs/ai-coder/tasks)*
|
||||
\`\`\`"
|
||||
<!-- doc-check-sticky -->
|
||||
\`\`\`
|
||||
|
||||
The \`<!-- doc-check-sticky -->\` marker must be at the end so future runs can find and update this comment."
|
||||
|
||||
# Output the prompt
|
||||
{
|
||||
|
||||
@@ -48,7 +48,7 @@ jobs:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Docker login
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
|
||||
@@ -42,7 +42,7 @@ jobs:
|
||||
# on version 2.29 and above.
|
||||
nix_version: "2.28.5"
|
||||
|
||||
- uses: nix-community/cache-nix-action@106bba72ed8e29c8357661199511ef07790175e9 # v7.0.1
|
||||
- uses: nix-community/cache-nix-action@7df957e333c1e5da7721f60227dbba6d06080569 # v7.0.2
|
||||
with:
|
||||
# restore and save a cache using this key
|
||||
primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
|
||||
@@ -82,7 +82,7 @@ jobs:
|
||||
|
||||
- name: Login to DockerHub
|
||||
if: github.ref == 'refs/heads/main'
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
|
||||
@@ -248,7 +248,7 @@ jobs:
|
||||
uses: ./.github/actions/setup-sqlc
|
||||
|
||||
- name: GHCR Login
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
|
||||
@@ -233,7 +233,7 @@ jobs:
|
||||
cat "$CODER_RELEASE_NOTES_FILE"
|
||||
|
||||
- name: Docker Login
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
@@ -448,7 +448,7 @@ jobs:
|
||||
id: attest_base
|
||||
if: ${{ !inputs.dry_run && steps.image-base-tag.outputs.tag != '' }}
|
||||
continue-on-error: true
|
||||
uses: actions/attest@7667f588f2f73a90cea6c7ac70e78266c4f76616 # v3.1.0
|
||||
uses: actions/attest@e59cbc1ad1ac2d59339667419eb8cdde6eb61e3d # v3.2.0
|
||||
with:
|
||||
subject-name: ${{ steps.image-base-tag.outputs.tag }}
|
||||
predicate-type: "https://slsa.dev/provenance/v1"
|
||||
@@ -564,7 +564,7 @@ jobs:
|
||||
id: attest_main
|
||||
if: ${{ !inputs.dry_run }}
|
||||
continue-on-error: true
|
||||
uses: actions/attest@7667f588f2f73a90cea6c7ac70e78266c4f76616 # v3.1.0
|
||||
uses: actions/attest@e59cbc1ad1ac2d59339667419eb8cdde6eb61e3d # v3.2.0
|
||||
with:
|
||||
subject-name: ${{ steps.build_docker.outputs.multiarch_image }}
|
||||
predicate-type: "https://slsa.dev/provenance/v1"
|
||||
@@ -608,7 +608,7 @@ jobs:
|
||||
id: attest_latest
|
||||
if: ${{ !inputs.dry_run && steps.build_docker.outputs.created_latest_tag == 'true' }}
|
||||
continue-on-error: true
|
||||
uses: actions/attest@7667f588f2f73a90cea6c7ac70e78266c4f76616 # v3.1.0
|
||||
uses: actions/attest@e59cbc1ad1ac2d59339667419eb8cdde6eb61e3d # v3.2.0
|
||||
with:
|
||||
subject-name: ${{ steps.latest_tag.outputs.tag }}
|
||||
predicate-type: "https://slsa.dev/provenance/v1"
|
||||
|
||||
@@ -938,6 +938,7 @@ coderd/apidoc/.gen: \
|
||||
coderd/rbac/object_gen.go \
|
||||
.swaggo \
|
||||
scripts/apidocgen/generate.sh \
|
||||
scripts/apidocgen/swaginit/main.go \
|
||||
$(wildcard scripts/apidocgen/postprocess/*) \
|
||||
$(wildcard scripts/apidocgen/markdown-template/*)
|
||||
./scripts/apidocgen/generate.sh
|
||||
|
||||
Generated
+71
-2
@@ -1,9 +1,9 @@
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: .. (interfaces: ContainerCLI,DevcontainerCLI)
|
||||
// Source: .. (interfaces: ContainerCLI,DevcontainerCLI,SubAgentClient)
|
||||
//
|
||||
// Generated by this command:
|
||||
//
|
||||
// mockgen -destination ./acmock.go -package acmock .. ContainerCLI,DevcontainerCLI
|
||||
// mockgen -destination ./acmock.go -package acmock .. ContainerCLI,DevcontainerCLI,SubAgentClient
|
||||
//
|
||||
|
||||
// Package acmock is a generated GoMock package.
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
|
||||
agentcontainers "github.com/coder/coder/v2/agent/agentcontainers"
|
||||
codersdk "github.com/coder/coder/v2/codersdk"
|
||||
uuid "github.com/google/uuid"
|
||||
gomock "go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
@@ -216,3 +217,71 @@ func (mr *MockDevcontainerCLIMockRecorder) Up(ctx, workspaceFolder, configPath a
|
||||
varargs := append([]any{ctx, workspaceFolder, configPath}, opts...)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Up", reflect.TypeOf((*MockDevcontainerCLI)(nil).Up), varargs...)
|
||||
}
|
||||
|
||||
// MockSubAgentClient is a mock of SubAgentClient interface.
|
||||
type MockSubAgentClient struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockSubAgentClientMockRecorder
|
||||
isgomock struct{}
|
||||
}
|
||||
|
||||
// MockSubAgentClientMockRecorder is the mock recorder for MockSubAgentClient.
|
||||
type MockSubAgentClientMockRecorder struct {
|
||||
mock *MockSubAgentClient
|
||||
}
|
||||
|
||||
// NewMockSubAgentClient creates a new mock instance.
|
||||
func NewMockSubAgentClient(ctrl *gomock.Controller) *MockSubAgentClient {
|
||||
mock := &MockSubAgentClient{ctrl: ctrl}
|
||||
mock.recorder = &MockSubAgentClientMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockSubAgentClient) EXPECT() *MockSubAgentClientMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// Create mocks base method.
|
||||
func (m *MockSubAgentClient) Create(ctx context.Context, agent agentcontainers.SubAgent) (agentcontainers.SubAgent, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Create", ctx, agent)
|
||||
ret0, _ := ret[0].(agentcontainers.SubAgent)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// Create indicates an expected call of Create.
|
||||
func (mr *MockSubAgentClientMockRecorder) Create(ctx, agent any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockSubAgentClient)(nil).Create), ctx, agent)
|
||||
}
|
||||
|
||||
// Delete mocks base method.
|
||||
func (m *MockSubAgentClient) Delete(ctx context.Context, id uuid.UUID) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Delete", ctx, id)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Delete indicates an expected call of Delete.
|
||||
func (mr *MockSubAgentClientMockRecorder) Delete(ctx, id any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockSubAgentClient)(nil).Delete), ctx, id)
|
||||
}
|
||||
|
||||
// List mocks base method.
|
||||
func (m *MockSubAgentClient) List(ctx context.Context) ([]agentcontainers.SubAgent, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "List", ctx)
|
||||
ret0, _ := ret[0].([]agentcontainers.SubAgent)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// List indicates an expected call of List.
|
||||
func (mr *MockSubAgentClientMockRecorder) List(ctx any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockSubAgentClient)(nil).List), ctx)
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Package acmock contains a mock implementation of agentcontainers.Lister for use in tests.
|
||||
package acmock
|
||||
|
||||
//go:generate mockgen -destination ./acmock.go -package acmock .. ContainerCLI,DevcontainerCLI
|
||||
//go:generate mockgen -destination ./acmock.go -package acmock .. ContainerCLI,DevcontainerCLI,SubAgentClient
|
||||
|
||||
@@ -562,12 +562,9 @@ func (api *API) discoverDevcontainersInProject(projectPath string) error {
|
||||
api.broadcastUpdatesLocked()
|
||||
|
||||
if dc.Status == codersdk.WorkspaceAgentDevcontainerStatusStarting {
|
||||
api.asyncWg.Add(1)
|
||||
go func() {
|
||||
defer api.asyncWg.Done()
|
||||
|
||||
api.asyncWg.Go(func() {
|
||||
_ = api.CreateDevcontainer(dc.WorkspaceFolder, dc.ConfigPath)
|
||||
}()
|
||||
})
|
||||
}
|
||||
}
|
||||
api.mu.Unlock()
|
||||
@@ -1627,16 +1624,25 @@ func (api *API) cleanupSubAgents(ctx context.Context) error {
|
||||
api.mu.Lock()
|
||||
defer api.mu.Unlock()
|
||||
|
||||
injected := make(map[uuid.UUID]bool, len(api.injectedSubAgentProcs))
|
||||
// Collect all subagent IDs that should be kept:
|
||||
// 1. Subagents currently tracked by injectedSubAgentProcs
|
||||
// 2. Subagents referenced by known devcontainers from the manifest
|
||||
var keep []uuid.UUID
|
||||
for _, proc := range api.injectedSubAgentProcs {
|
||||
injected[proc.agent.ID] = true
|
||||
keep = append(keep, proc.agent.ID)
|
||||
}
|
||||
for _, dc := range api.knownDevcontainers {
|
||||
if dc.SubagentID.Valid {
|
||||
keep = append(keep, dc.SubagentID.UUID)
|
||||
}
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, defaultOperationTimeout)
|
||||
defer cancel()
|
||||
|
||||
var errs []error
|
||||
for _, agent := range agents {
|
||||
if injected[agent.ID] {
|
||||
if slices.Contains(keep, agent.ID) {
|
||||
continue
|
||||
}
|
||||
client := *api.subAgentClient.Load()
|
||||
@@ -1647,10 +1653,11 @@ func (api *API) cleanupSubAgents(ctx context.Context) error {
|
||||
slog.F("agent_id", agent.ID),
|
||||
slog.F("agent_name", agent.Name),
|
||||
)
|
||||
errs = append(errs, xerrors.Errorf("delete agent %s (%s): %w", agent.Name, agent.ID, err))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
// maybeInjectSubAgentIntoContainerLocked injects a subagent into a dev
|
||||
@@ -2001,7 +2008,20 @@ func (api *API) maybeInjectSubAgentIntoContainerLocked(ctx context.Context, dc c
|
||||
// logger.Warn(ctx, "set CAP_NET_ADMIN on agent binary failed", slog.Error(err))
|
||||
// }
|
||||
|
||||
deleteSubAgent := proc.agent.ID != uuid.Nil && maybeRecreateSubAgent && !proc.agent.EqualConfig(subAgentConfig)
|
||||
// Only delete and recreate subagents that were dynamically created
|
||||
// (ID == uuid.Nil). Terraform-defined subagents (subAgentConfig.ID !=
|
||||
// uuid.Nil) must not be deleted because they have attached resources
|
||||
// managed by terraform.
|
||||
isTerraformManaged := subAgentConfig.ID != uuid.Nil
|
||||
configHasChanged := !proc.agent.EqualConfig(subAgentConfig)
|
||||
|
||||
logger.Debug(ctx, "checking if sub agent should be deleted",
|
||||
slog.F("is_terraform_managed", isTerraformManaged),
|
||||
slog.F("maybe_recreate_sub_agent", maybeRecreateSubAgent),
|
||||
slog.F("config_has_changed", configHasChanged),
|
||||
)
|
||||
|
||||
deleteSubAgent := !isTerraformManaged && maybeRecreateSubAgent && configHasChanged
|
||||
if deleteSubAgent {
|
||||
logger.Debug(ctx, "deleting existing subagent for recreation", slog.F("agent_id", proc.agent.ID))
|
||||
client := *api.subAgentClient.Load()
|
||||
@@ -2012,11 +2032,23 @@ func (api *API) maybeInjectSubAgentIntoContainerLocked(ctx context.Context, dc c
|
||||
proc.agent = SubAgent{} // Clear agent to signal that we need to create a new one.
|
||||
}
|
||||
|
||||
if proc.agent.ID == uuid.Nil {
|
||||
logger.Debug(ctx, "creating new subagent",
|
||||
slog.F("directory", subAgentConfig.Directory),
|
||||
slog.F("display_apps", subAgentConfig.DisplayApps),
|
||||
)
|
||||
// Re-create (upsert) terraform-managed subagents when the config
|
||||
// changes so that display apps and other settings are updated
|
||||
// without deleting the agent.
|
||||
recreateTerraformSubAgent := isTerraformManaged && maybeRecreateSubAgent && configHasChanged
|
||||
|
||||
if proc.agent.ID == uuid.Nil || recreateTerraformSubAgent {
|
||||
if recreateTerraformSubAgent {
|
||||
logger.Debug(ctx, "updating existing subagent",
|
||||
slog.F("directory", subAgentConfig.Directory),
|
||||
slog.F("display_apps", subAgentConfig.DisplayApps),
|
||||
)
|
||||
} else {
|
||||
logger.Debug(ctx, "creating new subagent",
|
||||
slog.F("directory", subAgentConfig.Directory),
|
||||
slog.F("display_apps", subAgentConfig.DisplayApps),
|
||||
)
|
||||
}
|
||||
|
||||
// Create new subagent record in the database to receive the auth token.
|
||||
// If we get a unique constraint violation, try with expanded names that
|
||||
|
||||
@@ -437,7 +437,11 @@ func (m *fakeSubAgentClient) Create(ctx context.Context, agent agentcontainers.S
|
||||
}
|
||||
}
|
||||
|
||||
agent.ID = uuid.New()
|
||||
// Only generate a new ID if one wasn't provided. Terraform-defined
|
||||
// subagents have pre-existing IDs that should be preserved.
|
||||
if agent.ID == uuid.Nil {
|
||||
agent.ID = uuid.New()
|
||||
}
|
||||
agent.AuthToken = uuid.New()
|
||||
if m.agents == nil {
|
||||
m.agents = make(map[uuid.UUID]agentcontainers.SubAgent)
|
||||
@@ -1035,6 +1039,30 @@ func TestAPI(t *testing.T) {
|
||||
wantStatus: []int{http.StatusAccepted, http.StatusConflict},
|
||||
wantBody: []string{"Devcontainer recreation initiated", "is currently starting and cannot be restarted"},
|
||||
},
|
||||
{
|
||||
name: "Terraform-defined devcontainer can be rebuilt",
|
||||
devcontainerID: devcontainerID1.String(),
|
||||
setupDevcontainers: []codersdk.WorkspaceAgentDevcontainer{
|
||||
{
|
||||
ID: devcontainerID1,
|
||||
Name: "test-devcontainer-terraform",
|
||||
WorkspaceFolder: workspaceFolder1,
|
||||
ConfigPath: configPath1,
|
||||
Status: codersdk.WorkspaceAgentDevcontainerStatusRunning,
|
||||
Container: &devContainer1,
|
||||
SubagentID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
|
||||
},
|
||||
},
|
||||
lister: &fakeContainerCLI{
|
||||
containers: codersdk.WorkspaceAgentListContainersResponse{
|
||||
Containers: []codersdk.WorkspaceAgentContainer{devContainer1},
|
||||
},
|
||||
arch: "<none>",
|
||||
},
|
||||
devcontainerCLI: &fakeDevcontainerCLI{},
|
||||
wantStatus: []int{http.StatusAccepted, http.StatusConflict},
|
||||
wantBody: []string{"Devcontainer recreation initiated", "is currently starting and cannot be restarted"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
@@ -1449,14 +1477,6 @@ func TestAPI(t *testing.T) {
|
||||
)
|
||||
}
|
||||
|
||||
api := agentcontainers.NewAPI(logger, apiOpts...)
|
||||
|
||||
api.Start()
|
||||
defer api.Close()
|
||||
|
||||
r := chi.NewRouter()
|
||||
r.Mount("/", api.Routes())
|
||||
|
||||
var (
|
||||
agentRunningCh chan struct{}
|
||||
stopAgentCh chan struct{}
|
||||
@@ -1473,6 +1493,14 @@ func TestAPI(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
api := agentcontainers.NewAPI(logger, apiOpts...)
|
||||
|
||||
api.Start()
|
||||
defer api.Close()
|
||||
|
||||
r := chi.NewRouter()
|
||||
r.Mount("/", api.Routes())
|
||||
|
||||
tickerTrap.MustWait(ctx).MustRelease(ctx)
|
||||
tickerTrap.Close()
|
||||
|
||||
@@ -2490,6 +2518,338 @@ func TestAPI(t *testing.T) {
|
||||
assert.Empty(t, fakeSAC.agents)
|
||||
})
|
||||
|
||||
t.Run("SubAgentCleanupPreservesTerraformDefined", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
// Given: A terraform-defined agent and devcontainer that should be preserved
|
||||
terraformAgentID = uuid.New()
|
||||
terraformAgentToken = uuid.New()
|
||||
terraformAgent = agentcontainers.SubAgent{
|
||||
ID: terraformAgentID,
|
||||
Name: "terraform-defined-agent",
|
||||
Directory: "/workspace",
|
||||
AuthToken: terraformAgentToken,
|
||||
}
|
||||
terraformDevcontainer = codersdk.WorkspaceAgentDevcontainer{
|
||||
ID: uuid.New(),
|
||||
Name: "terraform-devcontainer",
|
||||
WorkspaceFolder: "/workspace/project",
|
||||
SubagentID: uuid.NullUUID{UUID: terraformAgentID, Valid: true},
|
||||
}
|
||||
|
||||
// Given: An orphaned agent that should be cleaned up
|
||||
orphanedAgentID = uuid.New()
|
||||
orphanedAgentToken = uuid.New()
|
||||
orphanedAgent = agentcontainers.SubAgent{
|
||||
ID: orphanedAgentID,
|
||||
Name: "orphaned-agent",
|
||||
Directory: "/tmp",
|
||||
AuthToken: orphanedAgentToken,
|
||||
}
|
||||
|
||||
ctx = testutil.Context(t, testutil.WaitMedium)
|
||||
logger = slog.Make()
|
||||
mClock = quartz.NewMock(t)
|
||||
mCCLI = acmock.NewMockContainerCLI(gomock.NewController(t))
|
||||
|
||||
fakeSAC = &fakeSubAgentClient{
|
||||
logger: logger.Named("fakeSubAgentClient"),
|
||||
agents: map[uuid.UUID]agentcontainers.SubAgent{
|
||||
terraformAgentID: terraformAgent,
|
||||
orphanedAgentID: orphanedAgent,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
mCCLI.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{
|
||||
Containers: []codersdk.WorkspaceAgentContainer{},
|
||||
}, nil).AnyTimes()
|
||||
|
||||
mClock.Set(time.Now()).MustWait(ctx)
|
||||
tickerTrap := mClock.Trap().TickerFunc("updaterLoop")
|
||||
|
||||
api := agentcontainers.NewAPI(logger,
|
||||
agentcontainers.WithClock(mClock),
|
||||
agentcontainers.WithContainerCLI(mCCLI),
|
||||
agentcontainers.WithSubAgentClient(fakeSAC),
|
||||
agentcontainers.WithDevcontainerCLI(&fakeDevcontainerCLI{}),
|
||||
agentcontainers.WithDevcontainers([]codersdk.WorkspaceAgentDevcontainer{terraformDevcontainer}, nil),
|
||||
)
|
||||
api.Start()
|
||||
defer api.Close()
|
||||
|
||||
tickerTrap.MustWait(ctx).MustRelease(ctx)
|
||||
tickerTrap.Close()
|
||||
|
||||
// When: We advance the clock, allowing cleanup to occur
|
||||
_, aw := mClock.AdvanceNext()
|
||||
aw.MustWait(ctx)
|
||||
|
||||
// Then: The orphaned agent should be deleted
|
||||
assert.Contains(t, fakeSAC.deleted, orphanedAgentID, "orphaned agent should be deleted")
|
||||
|
||||
// And: The terraform-defined agent should not be deleted
|
||||
assert.NotContains(t, fakeSAC.deleted, terraformAgentID, "terraform-defined agent should be preserved")
|
||||
assert.Len(t, fakeSAC.agents, 1, "only terraform agent should remain")
|
||||
assert.Contains(t, fakeSAC.agents, terraformAgentID, "terraform agent should still exist")
|
||||
})
|
||||
|
||||
t.Run("TerraformDefinedSubAgentNotRecreatedOnConfigChange", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Dev Container tests are not supported on Windows (this test uses mocks but fails due to Windows paths)")
|
||||
}
|
||||
|
||||
var (
|
||||
logger = slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
|
||||
mCtrl = gomock.NewController(t)
|
||||
|
||||
// Given: A terraform-defined devcontainer with a pre-assigned subagent ID.
|
||||
terraformAgentID = uuid.New()
|
||||
terraformContainer = codersdk.WorkspaceAgentContainer{
|
||||
ID: "test-container-id",
|
||||
FriendlyName: "test-container",
|
||||
Image: "test-image",
|
||||
Running: true,
|
||||
CreatedAt: time.Now(),
|
||||
Labels: map[string]string{
|
||||
agentcontainers.DevcontainerLocalFolderLabel: "/workspace/project",
|
||||
agentcontainers.DevcontainerConfigFileLabel: "/workspace/project/.devcontainer/devcontainer.json",
|
||||
},
|
||||
}
|
||||
terraformDevcontainer = codersdk.WorkspaceAgentDevcontainer{
|
||||
ID: uuid.New(),
|
||||
Name: "terraform-devcontainer",
|
||||
WorkspaceFolder: "/workspace/project",
|
||||
ConfigPath: "/workspace/project/.devcontainer/devcontainer.json",
|
||||
SubagentID: uuid.NullUUID{UUID: terraformAgentID, Valid: true},
|
||||
}
|
||||
|
||||
fCCLI = &fakeContainerCLI{
|
||||
containers: codersdk.WorkspaceAgentListContainersResponse{
|
||||
Containers: []codersdk.WorkspaceAgentContainer{terraformContainer},
|
||||
},
|
||||
arch: runtime.GOARCH,
|
||||
}
|
||||
|
||||
fDCCLI = &fakeDevcontainerCLI{
|
||||
upID: terraformContainer.ID,
|
||||
readConfig: agentcontainers.DevcontainerConfig{
|
||||
MergedConfiguration: agentcontainers.DevcontainerMergedConfiguration{
|
||||
Customizations: agentcontainers.DevcontainerMergedCustomizations{
|
||||
Coder: []agentcontainers.CoderCustomization{{
|
||||
Apps: []agentcontainers.SubAgentApp{{Slug: "app1"}},
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
mSAC = acmock.NewMockSubAgentClient(mCtrl)
|
||||
closed bool
|
||||
)
|
||||
|
||||
mSAC.EXPECT().List(gomock.Any()).Return([]agentcontainers.SubAgent{}, nil).AnyTimes()
|
||||
|
||||
// EXPECT: Create is called twice with the terraform-defined ID:
|
||||
// once for the initial creation and once after the rebuild with
|
||||
// config changes (upsert).
|
||||
mSAC.EXPECT().Create(gomock.Any(), gomock.Any()).DoAndReturn(
|
||||
func(_ context.Context, agent agentcontainers.SubAgent) (agentcontainers.SubAgent, error) {
|
||||
assert.Equal(t, terraformAgentID, agent.ID, "agent should have terraform-defined ID")
|
||||
agent.AuthToken = uuid.New()
|
||||
return agent, nil
|
||||
},
|
||||
).Times(2)
|
||||
|
||||
// EXPECT: Delete may be called during Close, but not before.
|
||||
mSAC.EXPECT().Delete(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, _ uuid.UUID) error {
|
||||
assert.True(t, closed, "Delete should only be called after Close, not during recreation")
|
||||
return nil
|
||||
}).AnyTimes()
|
||||
|
||||
api := agentcontainers.NewAPI(logger,
|
||||
agentcontainers.WithContainerCLI(fCCLI),
|
||||
agentcontainers.WithDevcontainerCLI(fDCCLI),
|
||||
agentcontainers.WithDevcontainers(
|
||||
[]codersdk.WorkspaceAgentDevcontainer{terraformDevcontainer},
|
||||
[]codersdk.WorkspaceAgentScript{{ID: terraformDevcontainer.ID, LogSourceID: uuid.New()}},
|
||||
),
|
||||
agentcontainers.WithSubAgentClient(mSAC),
|
||||
agentcontainers.WithSubAgentURL("test-subagent-url"),
|
||||
agentcontainers.WithWatcher(watcher.NewNoop()),
|
||||
)
|
||||
api.Start()
|
||||
|
||||
// Given: We create the devcontainer for the first time.
|
||||
err := api.CreateDevcontainer(terraformDevcontainer.WorkspaceFolder, terraformDevcontainer.ConfigPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
// When: The container is recreated (new container ID) with config changes.
|
||||
terraformContainer.ID = "new-container-id"
|
||||
fCCLI.containers.Containers = []codersdk.WorkspaceAgentContainer{terraformContainer}
|
||||
fDCCLI.upID = terraformContainer.ID
|
||||
fDCCLI.readConfig.MergedConfiguration.Customizations.Coder = []agentcontainers.CoderCustomization{{
|
||||
Apps: []agentcontainers.SubAgentApp{{Slug: "app2"}}, // Changed app triggers recreation logic.
|
||||
}}
|
||||
|
||||
err = api.CreateDevcontainer(terraformDevcontainer.WorkspaceFolder, terraformDevcontainer.ConfigPath, agentcontainers.WithRemoveExistingContainer())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Then: Mock expectations verify that Create was called once and Delete was not called during recreation.
|
||||
closed = true
|
||||
api.Close()
|
||||
})
|
||||
|
||||
// Verify that rebuilding a terraform-defined devcontainer via the
|
||||
// HTTP API does not delete the sub agent. The sub agent should be
|
||||
// preserved (Create called again with the same terraform ID) and
|
||||
// display app changes should be picked up.
|
||||
t.Run("TerraformDefinedSubAgentRebuildViaHTTP", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Dev Container tests are not supported on Windows (this test uses mocks but fails due to Windows paths)")
|
||||
}
|
||||
|
||||
var (
|
||||
ctx = testutil.Context(t, testutil.WaitMedium)
|
||||
logger = slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
|
||||
mCtrl = gomock.NewController(t)
|
||||
|
||||
terraformAgentID = uuid.New()
|
||||
containerID = "test-container-id"
|
||||
|
||||
terraformContainer = codersdk.WorkspaceAgentContainer{
|
||||
ID: containerID,
|
||||
FriendlyName: "test-container",
|
||||
Image: "test-image",
|
||||
Running: true,
|
||||
CreatedAt: time.Now(),
|
||||
Labels: map[string]string{
|
||||
agentcontainers.DevcontainerLocalFolderLabel: "/workspace/project",
|
||||
agentcontainers.DevcontainerConfigFileLabel: "/workspace/project/.devcontainer/devcontainer.json",
|
||||
},
|
||||
}
|
||||
terraformDevcontainer = codersdk.WorkspaceAgentDevcontainer{
|
||||
ID: uuid.New(),
|
||||
Name: "terraform-devcontainer",
|
||||
WorkspaceFolder: "/workspace/project",
|
||||
ConfigPath: "/workspace/project/.devcontainer/devcontainer.json",
|
||||
SubagentID: uuid.NullUUID{UUID: terraformAgentID, Valid: true},
|
||||
}
|
||||
|
||||
fCCLI = &fakeContainerCLI{
|
||||
containers: codersdk.WorkspaceAgentListContainersResponse{
|
||||
Containers: []codersdk.WorkspaceAgentContainer{terraformContainer},
|
||||
},
|
||||
arch: runtime.GOARCH,
|
||||
}
|
||||
|
||||
fDCCLI = &fakeDevcontainerCLI{
|
||||
upID: containerID,
|
||||
readConfig: agentcontainers.DevcontainerConfig{
|
||||
MergedConfiguration: agentcontainers.DevcontainerMergedConfiguration{
|
||||
Customizations: agentcontainers.DevcontainerMergedCustomizations{
|
||||
Coder: []agentcontainers.CoderCustomization{{
|
||||
DisplayApps: map[codersdk.DisplayApp]bool{
|
||||
codersdk.DisplayAppSSH: true,
|
||||
codersdk.DisplayAppWebTerminal: true,
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
mSAC = acmock.NewMockSubAgentClient(mCtrl)
|
||||
closed bool
|
||||
|
||||
createCalled = make(chan agentcontainers.SubAgent, 2)
|
||||
)
|
||||
|
||||
mSAC.EXPECT().List(gomock.Any()).Return([]agentcontainers.SubAgent{}, nil).AnyTimes()
|
||||
|
||||
// Create should be called twice: once for the initial injection
|
||||
// and once after the rebuild picks up the new container.
|
||||
mSAC.EXPECT().Create(gomock.Any(), gomock.Any()).DoAndReturn(
|
||||
func(_ context.Context, agent agentcontainers.SubAgent) (agentcontainers.SubAgent, error) {
|
||||
assert.Equal(t, terraformAgentID, agent.ID, "agent should always use terraform-defined ID")
|
||||
agent.AuthToken = uuid.New()
|
||||
createCalled <- agent
|
||||
return agent, nil
|
||||
},
|
||||
).Times(2)
|
||||
|
||||
// Delete must only be called during Close, never during rebuild.
|
||||
mSAC.EXPECT().Delete(gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, _ uuid.UUID) error {
|
||||
assert.True(t, closed, "Delete should only be called after Close, not during rebuild")
|
||||
return nil
|
||||
}).AnyTimes()
|
||||
|
||||
api := agentcontainers.NewAPI(logger,
|
||||
agentcontainers.WithContainerCLI(fCCLI),
|
||||
agentcontainers.WithDevcontainerCLI(fDCCLI),
|
||||
agentcontainers.WithDevcontainers(
|
||||
[]codersdk.WorkspaceAgentDevcontainer{terraformDevcontainer},
|
||||
[]codersdk.WorkspaceAgentScript{{ID: terraformDevcontainer.ID, LogSourceID: uuid.New()}},
|
||||
),
|
||||
agentcontainers.WithSubAgentClient(mSAC),
|
||||
agentcontainers.WithSubAgentURL("test-subagent-url"),
|
||||
agentcontainers.WithWatcher(watcher.NewNoop()),
|
||||
)
|
||||
api.Start()
|
||||
defer func() {
|
||||
closed = true
|
||||
api.Close()
|
||||
}()
|
||||
|
||||
r := chi.NewRouter()
|
||||
r.Mount("/", api.Routes())
|
||||
|
||||
// Perform the initial devcontainer creation directly to set up
|
||||
// the subagent (mirrors the TerraformDefinedSubAgentNotRecreatedOnConfigChange
|
||||
// test pattern).
|
||||
err := api.CreateDevcontainer(terraformDevcontainer.WorkspaceFolder, terraformDevcontainer.ConfigPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
initialAgent := testutil.RequireReceive(ctx, t, createCalled)
|
||||
assert.Equal(t, terraformAgentID, initialAgent.ID)
|
||||
|
||||
// Simulate container rebuild: new container ID, changed display apps.
|
||||
newContainerID := "new-container-id"
|
||||
terraformContainer.ID = newContainerID
|
||||
fCCLI.containers.Containers = []codersdk.WorkspaceAgentContainer{terraformContainer}
|
||||
fDCCLI.upID = newContainerID
|
||||
fDCCLI.readConfig.MergedConfiguration.Customizations.Coder = []agentcontainers.CoderCustomization{{
|
||||
DisplayApps: map[codersdk.DisplayApp]bool{
|
||||
codersdk.DisplayAppSSH: true,
|
||||
codersdk.DisplayAppWebTerminal: true,
|
||||
codersdk.DisplayAppVSCodeDesktop: true,
|
||||
codersdk.DisplayAppVSCodeInsiders: true,
|
||||
},
|
||||
}}
|
||||
|
||||
// Issue the rebuild request via the HTTP API.
|
||||
req := httptest.NewRequest(http.MethodPost, "/devcontainers/"+terraformDevcontainer.ID.String()+"/recreate", nil).
|
||||
WithContext(ctx)
|
||||
rec := httptest.NewRecorder()
|
||||
r.ServeHTTP(rec, req)
|
||||
require.Equal(t, http.StatusAccepted, rec.Code)
|
||||
|
||||
// Wait for the post-rebuild injection to complete.
|
||||
rebuiltAgent := testutil.RequireReceive(ctx, t, createCalled)
|
||||
assert.Equal(t, terraformAgentID, rebuiltAgent.ID, "rebuilt agent should preserve terraform ID")
|
||||
|
||||
// Verify that the display apps were updated.
|
||||
assert.Contains(t, rebuiltAgent.DisplayApps, codersdk.DisplayAppVSCodeDesktop,
|
||||
"rebuilt agent should include updated display apps")
|
||||
assert.Contains(t, rebuiltAgent.DisplayApps, codersdk.DisplayAppVSCodeInsiders,
|
||||
"rebuilt agent should include updated display apps")
|
||||
})
|
||||
|
||||
t.Run("Error", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
||||
@@ -24,10 +24,12 @@ type SubAgent struct {
|
||||
DisplayApps []codersdk.DisplayApp
|
||||
}
|
||||
|
||||
// CloneConfig makes a copy of SubAgent without ID and AuthToken. The
|
||||
// name is inherited from the devcontainer.
|
||||
// CloneConfig makes a copy of SubAgent using configuration from the
|
||||
// devcontainer. The ID is inherited from dc.SubagentID if present, and
|
||||
// the name is inherited from the devcontainer. AuthToken is not copied.
|
||||
func (s SubAgent) CloneConfig(dc codersdk.WorkspaceAgentDevcontainer) SubAgent {
|
||||
return SubAgent{
|
||||
ID: dc.SubagentID.UUID,
|
||||
Name: dc.Name,
|
||||
Directory: s.Directory,
|
||||
Architecture: s.Architecture,
|
||||
@@ -190,6 +192,11 @@ func (a *subAgentAPIClient) List(ctx context.Context) ([]SubAgent, error) {
|
||||
func (a *subAgentAPIClient) Create(ctx context.Context, agent SubAgent) (_ SubAgent, err error) {
|
||||
a.logger.Debug(ctx, "creating sub agent", slog.F("name", agent.Name), slog.F("directory", agent.Directory))
|
||||
|
||||
var id []byte
|
||||
if agent.ID != uuid.Nil {
|
||||
id = agent.ID[:]
|
||||
}
|
||||
|
||||
displayApps := make([]agentproto.CreateSubAgentRequest_DisplayApp, 0, len(agent.DisplayApps))
|
||||
for _, displayApp := range agent.DisplayApps {
|
||||
var app agentproto.CreateSubAgentRequest_DisplayApp
|
||||
@@ -228,6 +235,7 @@ func (a *subAgentAPIClient) Create(ctx context.Context, agent SubAgent) (_ SubAg
|
||||
OperatingSystem: agent.OperatingSystem,
|
||||
DisplayApps: displayApps,
|
||||
Apps: apps,
|
||||
Id: id,
|
||||
})
|
||||
if err != nil {
|
||||
return SubAgent{}, err
|
||||
|
||||
@@ -306,3 +306,128 @@ func TestSubAgentClient_CreateWithDisplayApps(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestSubAgent_CloneConfig(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("CopiesIDFromDevcontainer", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
subAgent := agentcontainers.SubAgent{
|
||||
ID: uuid.New(),
|
||||
Name: "original-name",
|
||||
Directory: "/workspace",
|
||||
Architecture: "amd64",
|
||||
OperatingSystem: "linux",
|
||||
DisplayApps: []codersdk.DisplayApp{codersdk.DisplayAppVSCodeDesktop},
|
||||
Apps: []agentcontainers.SubAgentApp{{Slug: "app1"}},
|
||||
}
|
||||
expectedID := uuid.MustParse("550e8400-e29b-41d4-a716-446655440000")
|
||||
dc := codersdk.WorkspaceAgentDevcontainer{
|
||||
Name: "devcontainer-name",
|
||||
SubagentID: uuid.NullUUID{UUID: expectedID, Valid: true},
|
||||
}
|
||||
|
||||
cloned := subAgent.CloneConfig(dc)
|
||||
|
||||
assert.Equal(t, expectedID, cloned.ID)
|
||||
assert.Equal(t, dc.Name, cloned.Name)
|
||||
assert.Equal(t, subAgent.Directory, cloned.Directory)
|
||||
assert.Zero(t, cloned.AuthToken, "AuthToken should not be copied")
|
||||
})
|
||||
|
||||
t.Run("HandlesNilSubagentID", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
subAgent := agentcontainers.SubAgent{
|
||||
ID: uuid.New(),
|
||||
Name: "original-name",
|
||||
Directory: "/workspace",
|
||||
Architecture: "amd64",
|
||||
OperatingSystem: "linux",
|
||||
}
|
||||
dc := codersdk.WorkspaceAgentDevcontainer{
|
||||
Name: "devcontainer-name",
|
||||
SubagentID: uuid.NullUUID{Valid: false},
|
||||
}
|
||||
|
||||
cloned := subAgent.CloneConfig(dc)
|
||||
|
||||
assert.Equal(t, uuid.Nil, cloned.ID)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSubAgent_EqualConfig(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
base := agentcontainers.SubAgent{
|
||||
ID: uuid.New(),
|
||||
Name: "test-agent",
|
||||
Directory: "/workspace",
|
||||
Architecture: "amd64",
|
||||
OperatingSystem: "linux",
|
||||
DisplayApps: []codersdk.DisplayApp{codersdk.DisplayAppVSCodeDesktop},
|
||||
Apps: []agentcontainers.SubAgentApp{
|
||||
{Slug: "test-app", DisplayName: "Test App"},
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
modify func(*agentcontainers.SubAgent)
|
||||
wantEqual bool
|
||||
}{
|
||||
{
|
||||
name: "identical",
|
||||
modify: func(s *agentcontainers.SubAgent) {},
|
||||
wantEqual: true,
|
||||
},
|
||||
{
|
||||
name: "different ID",
|
||||
modify: func(s *agentcontainers.SubAgent) { s.ID = uuid.New() },
|
||||
wantEqual: true,
|
||||
},
|
||||
{
|
||||
name: "different Name",
|
||||
modify: func(s *agentcontainers.SubAgent) { s.Name = "different-name" },
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different Directory",
|
||||
modify: func(s *agentcontainers.SubAgent) { s.Directory = "/different/path" },
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different Architecture",
|
||||
modify: func(s *agentcontainers.SubAgent) { s.Architecture = "arm64" },
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different OperatingSystem",
|
||||
modify: func(s *agentcontainers.SubAgent) { s.OperatingSystem = "windows" },
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different DisplayApps",
|
||||
modify: func(s *agentcontainers.SubAgent) { s.DisplayApps = []codersdk.DisplayApp{codersdk.DisplayAppSSH} },
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different Apps",
|
||||
modify: func(s *agentcontainers.SubAgent) {
|
||||
s.Apps = []agentcontainers.SubAgentApp{{Slug: "different-app", DisplayName: "Different App"}}
|
||||
},
|
||||
wantEqual: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
modified := base
|
||||
tt.modify(&modified)
|
||||
assert.Equal(t, tt.wantEqual, base.EqualConfig(modified))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/hashicorp/go-reap"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
)
|
||||
|
||||
type Option func(o *options)
|
||||
@@ -34,8 +36,15 @@ func WithCatchSignals(sigs ...os.Signal) Option {
|
||||
}
|
||||
}
|
||||
|
||||
func WithLogger(logger slog.Logger) Option {
|
||||
return func(o *options) {
|
||||
o.Logger = logger
|
||||
}
|
||||
}
|
||||
|
||||
type options struct {
|
||||
ExecArgs []string
|
||||
PIDs reap.PidCh
|
||||
CatchSignals []os.Signal
|
||||
Logger slog.Logger
|
||||
}
|
||||
|
||||
@@ -3,12 +3,15 @@
|
||||
package reaper
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/hashicorp/go-reap"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
)
|
||||
|
||||
// IsInitProcess returns true if the current process's PID is 1.
|
||||
@@ -16,7 +19,7 @@ func IsInitProcess() bool {
|
||||
return os.Getpid() == 1
|
||||
}
|
||||
|
||||
func catchSignals(pid int, sigs []os.Signal) {
|
||||
func catchSignals(logger slog.Logger, pid int, sigs []os.Signal) {
|
||||
if len(sigs) == 0 {
|
||||
return
|
||||
}
|
||||
@@ -25,10 +28,19 @@ func catchSignals(pid int, sigs []os.Signal) {
|
||||
signal.Notify(sc, sigs...)
|
||||
defer signal.Stop(sc)
|
||||
|
||||
logger.Info(context.Background(), "reaper catching signals",
|
||||
slog.F("signals", sigs),
|
||||
slog.F("child_pid", pid),
|
||||
)
|
||||
|
||||
for {
|
||||
s := <-sc
|
||||
sig, ok := s.(syscall.Signal)
|
||||
if ok {
|
||||
logger.Info(context.Background(), "reaper caught signal, killing child process",
|
||||
slog.F("signal", sig.String()),
|
||||
slog.F("child_pid", pid),
|
||||
)
|
||||
_ = syscall.Kill(pid, sig)
|
||||
}
|
||||
}
|
||||
@@ -78,7 +90,7 @@ func ForkReap(opt ...Option) (int, error) {
|
||||
return 1, xerrors.Errorf("fork exec: %w", err)
|
||||
}
|
||||
|
||||
go catchSignals(pid, opts.CatchSignals)
|
||||
go catchSignals(opts.Logger, pid, opts.CatchSignals)
|
||||
|
||||
var wstatus syscall.WaitStatus
|
||||
_, err = syscall.Wait4(pid, &wstatus, 0, nil)
|
||||
|
||||
+44
-16
@@ -9,6 +9,7 @@ import (
|
||||
"net/http/pprof"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
@@ -130,6 +131,7 @@ func workspaceAgent() *serpent.Command {
|
||||
|
||||
sinks = append(sinks, sloghuman.Sink(logWriter))
|
||||
logger := inv.Logger.AppendSinks(sinks...).Leveled(slog.LevelDebug)
|
||||
logger = logger.Named("reaper")
|
||||
|
||||
logger.Info(ctx, "spawning reaper process")
|
||||
// Do not start a reaper on the child process. It's important
|
||||
@@ -139,31 +141,19 @@ func workspaceAgent() *serpent.Command {
|
||||
exitCode, err := reaper.ForkReap(
|
||||
reaper.WithExecArgs(args...),
|
||||
reaper.WithCatchSignals(StopSignals...),
|
||||
reaper.WithLogger(logger),
|
||||
)
|
||||
if err != nil {
|
||||
logger.Error(ctx, "agent process reaper unable to fork", slog.Error(err))
|
||||
return xerrors.Errorf("fork reap: %w", err)
|
||||
}
|
||||
|
||||
logger.Info(ctx, "reaper child process exited", slog.F("exit_code", exitCode))
|
||||
logger.Info(ctx, "child process exited, propagating exit code",
|
||||
slog.F("exit_code", exitCode),
|
||||
)
|
||||
return ExitError(exitCode, nil)
|
||||
}
|
||||
|
||||
// Handle interrupt signals to allow for graceful shutdown,
|
||||
// note that calling stopNotify disables the signal handler
|
||||
// and the next interrupt will terminate the program (you
|
||||
// probably want cancel instead).
|
||||
//
|
||||
// Note that we don't want to handle these signals in the
|
||||
// process that runs as PID 1, that's why we do this after
|
||||
// the reaper forked.
|
||||
ctx, stopNotify := inv.SignalNotifyContext(ctx, StopSignals...)
|
||||
defer stopNotify()
|
||||
|
||||
// DumpHandler does signal handling, so we call it after the
|
||||
// reaper.
|
||||
go DumpHandler(ctx, "agent")
|
||||
|
||||
logWriter := &clilog.LumberjackWriteCloseFixer{Writer: &lumberjack.Logger{
|
||||
Filename: filepath.Join(logDir, "coder-agent.log"),
|
||||
MaxSize: 5, // MB
|
||||
@@ -176,6 +166,21 @@ func workspaceAgent() *serpent.Command {
|
||||
sinks = append(sinks, sloghuman.Sink(logWriter))
|
||||
logger := inv.Logger.AppendSinks(sinks...).Leveled(slog.LevelDebug)
|
||||
|
||||
// Handle interrupt signals to allow for graceful shutdown,
|
||||
// note that calling stopNotify disables the signal handler
|
||||
// and the next interrupt will terminate the program (you
|
||||
// probably want cancel instead).
|
||||
//
|
||||
// Note that we also handle these signals in the
|
||||
// process that runs as PID 1, mainly to forward it to the agent child
|
||||
// so that it can shutdown gracefully.
|
||||
ctx, stopNotify := logSignalNotifyContext(ctx, logger, StopSignals...)
|
||||
defer stopNotify()
|
||||
|
||||
// DumpHandler does signal handling, so we call it after the
|
||||
// reaper.
|
||||
go DumpHandler(ctx, "agent")
|
||||
|
||||
version := buildinfo.Version()
|
||||
logger.Info(ctx, "agent is starting now",
|
||||
slog.F("url", agentAuth.agentURL),
|
||||
@@ -565,3 +570,26 @@ func urlPort(u string) (int, error) {
|
||||
}
|
||||
return -1, xerrors.Errorf("invalid port: %s", u)
|
||||
}
|
||||
|
||||
// logSignalNotifyContext is like signal.NotifyContext but logs the received
|
||||
// signal before canceling the context.
|
||||
func logSignalNotifyContext(parent context.Context, logger slog.Logger, signals ...os.Signal) (context.Context, context.CancelFunc) {
|
||||
ctx, cancel := context.WithCancelCause(parent)
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, signals...)
|
||||
|
||||
go func() {
|
||||
select {
|
||||
case sig := <-c:
|
||||
logger.Info(ctx, "agent received signal", slog.F("signal", sig.String()))
|
||||
cancel(xerrors.Errorf("signal: %s", sig.String()))
|
||||
case <-ctx.Done():
|
||||
logger.Info(ctx, "ctx canceled, stopping signal handler")
|
||||
}
|
||||
}()
|
||||
|
||||
return ctx, func() {
|
||||
cancel(context.Canceled)
|
||||
signal.Stop(c)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,6 +23,7 @@ func (r *RootCmd) organizations() *serpent.Command {
|
||||
},
|
||||
Children: []*serpent.Command{
|
||||
r.showOrganization(orgContext),
|
||||
r.listOrganizations(),
|
||||
r.createOrganization(),
|
||||
r.deleteOrganization(orgContext),
|
||||
r.organizationMembers(orgContext),
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
@@ -58,6 +59,48 @@ func TestCurrentOrganization(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestOrganizationList(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
orgID := uuid.New()
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
switch {
|
||||
case r.Method == http.MethodGet && r.URL.Path == "/api/v2/organizations":
|
||||
_ = json.NewEncoder(w).Encode([]codersdk.Organization{
|
||||
{
|
||||
MinimalOrganization: codersdk.MinimalOrganization{
|
||||
ID: orgID,
|
||||
Name: "my-org",
|
||||
DisplayName: "My Org",
|
||||
},
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
},
|
||||
})
|
||||
default:
|
||||
t.Errorf("unexpected request: %s %s", r.Method, r.URL.Path)
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
client := codersdk.New(must(url.Parse(server.URL)))
|
||||
inv, root := clitest.New(t, "organizations", "list")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
|
||||
require.NoError(t, inv.Run())
|
||||
require.Contains(t, buf.String(), "my-org")
|
||||
require.Contains(t, buf.String(), "My Org")
|
||||
require.Contains(t, buf.String(), orgID.String())
|
||||
})
|
||||
}
|
||||
|
||||
func TestOrganizationDelete(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
||||
@@ -0,0 +1,53 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
|
||||
func (r *RootCmd) listOrganizations() *serpent.Command {
|
||||
formatter := cliui.NewOutputFormatter(
|
||||
cliui.TableFormat([]codersdk.Organization{}, []string{"name", "display name", "id", "default"}),
|
||||
cliui.JSONFormat(),
|
||||
)
|
||||
|
||||
cmd := &serpent.Command{
|
||||
Use: "list",
|
||||
Short: "List all organizations",
|
||||
Long: "List all organizations. Requires a role which grants ResourceOrganization: read.",
|
||||
Aliases: []string{"ls"},
|
||||
Middleware: serpent.Chain(
|
||||
serpent.RequireNArgs(0),
|
||||
),
|
||||
Handler: func(inv *serpent.Invocation) error {
|
||||
client, err := r.InitClient(inv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
organizations, err := client.Organizations(inv.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
out, err := formatter.Format(inv.Context(), organizations)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if out == "" {
|
||||
cliui.Infof(inv.Stderr, "No organizations found.")
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintln(inv.Stdout, out)
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
formatter.AttachOptions(&cmd.Options)
|
||||
return cmd
|
||||
}
|
||||
+1
-1
@@ -2174,7 +2174,7 @@ func startBuiltinPostgres(ctx context.Context, cfg config.Root, logger slog.Logg
|
||||
// existing database
|
||||
retryPortDiscovery := errors.Is(err, os.ErrNotExist) && testing.Testing()
|
||||
if retryPortDiscovery {
|
||||
maxAttempts = 3
|
||||
maxAttempts = 10
|
||||
}
|
||||
|
||||
var startErr error
|
||||
|
||||
+24
-19
@@ -2244,6 +2244,7 @@ type runServerOpts struct {
|
||||
waitForSnapshot bool
|
||||
telemetryDisabled bool
|
||||
waitForTelemetryDisabledCheck bool
|
||||
name string
|
||||
}
|
||||
|
||||
func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
|
||||
@@ -2266,25 +2267,23 @@ func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
|
||||
"--cache-dir", cacheDir,
|
||||
"--log-filter", ".*",
|
||||
)
|
||||
finished := make(chan bool, 2)
|
||||
inv.Logger = inv.Logger.Named(opts.name)
|
||||
|
||||
errChan := make(chan error, 1)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
pty := ptytest.New(t).Named(opts.name).Attach(inv)
|
||||
go func() {
|
||||
errChan <- inv.WithContext(ctx).Run()
|
||||
finished <- true
|
||||
// close the pty here so that we can start tearing down resources. This test creates multiple servers with
|
||||
// associated ptys. There is a `t.Cleanup()` that does this, but it waits until the whole test is complete.
|
||||
_ = pty.Close()
|
||||
}()
|
||||
go func() {
|
||||
defer func() {
|
||||
finished <- true
|
||||
}()
|
||||
if opts.waitForSnapshot {
|
||||
pty.ExpectMatchContext(testutil.Context(t, testutil.WaitLong), "submitted snapshot")
|
||||
}
|
||||
if opts.waitForTelemetryDisabledCheck {
|
||||
pty.ExpectMatchContext(testutil.Context(t, testutil.WaitLong), "finished telemetry status check")
|
||||
}
|
||||
}()
|
||||
<-finished
|
||||
|
||||
if opts.waitForSnapshot {
|
||||
pty.ExpectMatchContext(testutil.Context(t, testutil.WaitLong), "submitted snapshot")
|
||||
}
|
||||
if opts.waitForTelemetryDisabledCheck {
|
||||
pty.ExpectMatchContext(testutil.Context(t, testutil.WaitLong), "finished telemetry status check")
|
||||
}
|
||||
return errChan, cancelFunc
|
||||
}
|
||||
waitForShutdown := func(t *testing.T, errChan chan error) error {
|
||||
@@ -2298,7 +2297,9 @@ func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
|
||||
errChan, cancelFunc := runServer(t, runServerOpts{telemetryDisabled: true, waitForTelemetryDisabledCheck: true})
|
||||
errChan, cancelFunc := runServer(t, runServerOpts{
|
||||
telemetryDisabled: true, waitForTelemetryDisabledCheck: true, name: "0disabled",
|
||||
})
|
||||
cancelFunc()
|
||||
require.NoError(t, waitForShutdown(t, errChan))
|
||||
|
||||
@@ -2306,7 +2307,7 @@ func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
|
||||
require.Empty(t, deployment)
|
||||
require.Empty(t, snapshot)
|
||||
|
||||
errChan, cancelFunc = runServer(t, runServerOpts{waitForSnapshot: true})
|
||||
errChan, cancelFunc = runServer(t, runServerOpts{waitForSnapshot: true, name: "1enabled"})
|
||||
cancelFunc()
|
||||
require.NoError(t, waitForShutdown(t, errChan))
|
||||
// we expect to see a deployment and a snapshot twice:
|
||||
@@ -2325,7 +2326,9 @@ func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
errChan, cancelFunc = runServer(t, runServerOpts{telemetryDisabled: true, waitForTelemetryDisabledCheck: true})
|
||||
errChan, cancelFunc = runServer(t, runServerOpts{
|
||||
telemetryDisabled: true, waitForTelemetryDisabledCheck: true, name: "2disabled",
|
||||
})
|
||||
cancelFunc()
|
||||
require.NoError(t, waitForShutdown(t, errChan))
|
||||
|
||||
@@ -2341,7 +2344,9 @@ func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
|
||||
t.Fatalf("timed out waiting for snapshot")
|
||||
}
|
||||
|
||||
errChan, cancelFunc = runServer(t, runServerOpts{telemetryDisabled: true, waitForTelemetryDisabledCheck: true})
|
||||
errChan, cancelFunc = runServer(t, runServerOpts{
|
||||
telemetryDisabled: true, waitForTelemetryDisabledCheck: true, name: "3disabled",
|
||||
})
|
||||
cancelFunc()
|
||||
require.NoError(t, waitForShutdown(t, errChan))
|
||||
// Since telemetry is disabled and we've already sent a snapshot, we expect no
|
||||
|
||||
-58
@@ -24,7 +24,6 @@ import (
|
||||
"github.com/gofrs/flock"
|
||||
"github.com/google/uuid"
|
||||
"github.com/mattn/go-isatty"
|
||||
"github.com/shirou/gopsutil/v4/process"
|
||||
"github.com/spf13/afero"
|
||||
gossh "golang.org/x/crypto/ssh"
|
||||
gosshagent "golang.org/x/crypto/ssh/agent"
|
||||
@@ -85,9 +84,6 @@ func (r *RootCmd) ssh() *serpent.Command {
|
||||
|
||||
containerName string
|
||||
containerUser string
|
||||
|
||||
// Used in tests to simulate the parent exiting.
|
||||
testForcePPID int64
|
||||
)
|
||||
cmd := &serpent.Command{
|
||||
Annotations: workspaceCommand,
|
||||
@@ -179,24 +175,6 @@ func (r *RootCmd) ssh() *serpent.Command {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
// When running as a ProxyCommand (stdio mode), monitor the parent process
|
||||
// and exit if it dies to avoid leaving orphaned processes. This is
|
||||
// particularly important when editors like VSCode/Cursor spawn SSH
|
||||
// connections and then crash or are killed - we don't want zombie
|
||||
// `coder ssh` processes accumulating.
|
||||
// Note: using gopsutil to check the parent process as this handles
|
||||
// windows processes as well in a standard way.
|
||||
if stdio {
|
||||
ppid := int32(os.Getppid()) // nolint:gosec
|
||||
checkParentInterval := 10 * time.Second // Arbitrary interval to not be too frequent
|
||||
if testForcePPID > 0 {
|
||||
ppid = int32(testForcePPID) // nolint:gosec
|
||||
checkParentInterval = 100 * time.Millisecond // Shorter interval for testing
|
||||
}
|
||||
ctx, cancel = watchParentContext(ctx, quartz.NewReal(), ppid, process.PidExistsWithContext, checkParentInterval)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
// Prevent unnecessary logs from the stdlib from messing up the TTY.
|
||||
// See: https://github.com/coder/coder/issues/13144
|
||||
log.SetOutput(io.Discard)
|
||||
@@ -797,12 +775,6 @@ func (r *RootCmd) ssh() *serpent.Command {
|
||||
Value: serpent.BoolOf(&forceNewTunnel),
|
||||
Hidden: true,
|
||||
},
|
||||
{
|
||||
Flag: "test.force-ppid",
|
||||
Description: "Override the parent process ID to simulate a different parent process. ONLY USE THIS IN TESTS.",
|
||||
Value: serpent.Int64Of(&testForcePPID),
|
||||
Hidden: true,
|
||||
},
|
||||
sshDisableAutostartOption(serpent.BoolOf(&disableAutostart)),
|
||||
}
|
||||
return cmd
|
||||
@@ -1690,33 +1662,3 @@ func normalizeWorkspaceInput(input string) string {
|
||||
return input // Fallback
|
||||
}
|
||||
}
|
||||
|
||||
// watchParentContext returns a context that is canceled when the parent process
|
||||
// dies. It polls using the provided clock and checks if the parent is alive
|
||||
// using the provided pidExists function.
|
||||
func watchParentContext(ctx context.Context, clock quartz.Clock, originalPPID int32, pidExists func(context.Context, int32) (bool, error), interval time.Duration) (context.Context, context.CancelFunc) {
|
||||
ctx, cancel := context.WithCancel(ctx) // intentionally shadowed
|
||||
|
||||
go func() {
|
||||
ticker := clock.NewTicker(interval)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
alive, err := pidExists(ctx, originalPPID)
|
||||
// If we get an error checking the parent process (e.g., permission
|
||||
// denied, the process is in an unknown state), we assume the parent
|
||||
// is still alive to avoid disrupting the SSH connection. We only
|
||||
// cancel when we definitively know the parent is gone (alive=false, err=nil).
|
||||
if !alive && err == nil {
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return ctx, cancel
|
||||
}
|
||||
|
||||
@@ -312,102 +312,6 @@ type fakeCloser struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func TestWatchParentContext(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("CancelsWhenParentDies", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
mClock := quartz.NewMock(t)
|
||||
trap := mClock.Trap().NewTicker()
|
||||
defer trap.Close()
|
||||
|
||||
parentAlive := true
|
||||
childCtx, cancel := watchParentContext(ctx, mClock, 1234, func(context.Context, int32) (bool, error) {
|
||||
return parentAlive, nil
|
||||
}, testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
// Wait for the ticker to be created
|
||||
trap.MustWait(ctx).MustRelease(ctx)
|
||||
|
||||
// When: we simulate parent death and advance the clock
|
||||
parentAlive = false
|
||||
mClock.AdvanceNext()
|
||||
|
||||
// Then: The context should be canceled
|
||||
_ = testutil.TryReceive(ctx, t, childCtx.Done())
|
||||
})
|
||||
|
||||
t.Run("DoesNotCancelWhenParentAlive", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
mClock := quartz.NewMock(t)
|
||||
trap := mClock.Trap().NewTicker()
|
||||
defer trap.Close()
|
||||
|
||||
childCtx, cancel := watchParentContext(ctx, mClock, 1234, func(context.Context, int32) (bool, error) {
|
||||
return true, nil // Parent always alive
|
||||
}, testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
// Wait for the ticker to be created
|
||||
trap.MustWait(ctx).MustRelease(ctx)
|
||||
|
||||
// When: we advance the clock several times with the parent alive
|
||||
for range 3 {
|
||||
mClock.AdvanceNext()
|
||||
}
|
||||
|
||||
// Then: context should not be canceled
|
||||
require.NoError(t, childCtx.Err())
|
||||
})
|
||||
|
||||
t.Run("RespectsParentContext", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx, cancelParent := context.WithCancel(context.Background())
|
||||
mClock := quartz.NewMock(t)
|
||||
|
||||
childCtx, cancel := watchParentContext(ctx, mClock, 1234, func(context.Context, int32) (bool, error) {
|
||||
return true, nil
|
||||
}, testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
// When: we cancel the parent context
|
||||
cancelParent()
|
||||
|
||||
// Then: The context should be canceled
|
||||
require.ErrorIs(t, childCtx.Err(), context.Canceled)
|
||||
})
|
||||
|
||||
t.Run("DoesNotCancelOnError", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
mClock := quartz.NewMock(t)
|
||||
trap := mClock.Trap().NewTicker()
|
||||
defer trap.Close()
|
||||
|
||||
// Simulate an error checking parent status (e.g., permission denied).
|
||||
// We should not cancel the context in this case to avoid disrupting
|
||||
// the SSH connection.
|
||||
childCtx, cancel := watchParentContext(ctx, mClock, 1234, func(context.Context, int32) (bool, error) {
|
||||
return false, xerrors.New("permission denied")
|
||||
}, testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
// Wait for the ticker to be created
|
||||
trap.MustWait(ctx).MustRelease(ctx)
|
||||
|
||||
// When: we advance clock several times
|
||||
for range 3 {
|
||||
mClock.AdvanceNext()
|
||||
}
|
||||
|
||||
// Context should NOT be canceled since we got an error (not a definitive "not alive")
|
||||
require.NoError(t, childCtx.Err(), "context was canceled even though pidExists returned an error")
|
||||
})
|
||||
}
|
||||
|
||||
func (c *fakeCloser) Close() error {
|
||||
*c.closes = append(*c.closes, c)
|
||||
return c.err
|
||||
|
||||
-101
@@ -1122,107 +1122,6 @@ func TestSSH(t *testing.T) {
|
||||
}
|
||||
})
|
||||
|
||||
// This test ensures that the SSH session exits when the parent process dies.
|
||||
t.Run("StdioExitOnParentDeath", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitSuperLong)
|
||||
defer cancel()
|
||||
|
||||
// sleepStart -> agentReady -> sessionStarted -> sleepKill -> sleepDone -> cmdDone
|
||||
sleepStart := make(chan int)
|
||||
agentReady := make(chan struct{})
|
||||
sessionStarted := make(chan struct{})
|
||||
sleepKill := make(chan struct{})
|
||||
sleepDone := make(chan struct{})
|
||||
|
||||
// Start a sleep process which we will pretend is the parent.
|
||||
go func() {
|
||||
sleepCmd := exec.Command("sleep", "infinity")
|
||||
if !assert.NoError(t, sleepCmd.Start(), "failed to start sleep command") {
|
||||
return
|
||||
}
|
||||
sleepStart <- sleepCmd.Process.Pid
|
||||
defer close(sleepDone)
|
||||
<-sleepKill
|
||||
sleepCmd.Process.Kill()
|
||||
_ = sleepCmd.Wait()
|
||||
}()
|
||||
|
||||
client, workspace, agentToken := setupWorkspaceForAgent(t)
|
||||
go func() {
|
||||
defer close(agentReady)
|
||||
_ = agenttest.New(t, client.URL, agentToken)
|
||||
coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).WaitFor(coderdtest.AgentsReady)
|
||||
}()
|
||||
|
||||
clientOutput, clientInput := io.Pipe()
|
||||
serverOutput, serverInput := io.Pipe()
|
||||
defer func() {
|
||||
for _, c := range []io.Closer{clientOutput, clientInput, serverOutput, serverInput} {
|
||||
_ = c.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
// Start a connection to the agent once it's ready
|
||||
go func() {
|
||||
<-agentReady
|
||||
conn, channels, requests, err := ssh.NewClientConn(&testutil.ReaderWriterConn{
|
||||
Reader: serverOutput,
|
||||
Writer: clientInput,
|
||||
}, "", &ssh.ClientConfig{
|
||||
// #nosec
|
||||
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
|
||||
})
|
||||
if !assert.NoError(t, err, "failed to create SSH client connection") {
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
sshClient := ssh.NewClient(conn, channels, requests)
|
||||
defer sshClient.Close()
|
||||
|
||||
session, err := sshClient.NewSession()
|
||||
if !assert.NoError(t, err, "failed to create SSH session") {
|
||||
return
|
||||
}
|
||||
close(sessionStarted)
|
||||
<-sleepDone
|
||||
// Ref: https://github.com/coder/internal/issues/1289
|
||||
// This may return either a nil error or io.EOF.
|
||||
// There is an inherent race here:
|
||||
// 1. Sleep process is killed -> sleepDone is closed.
|
||||
// 2. watchParentContext detects parent death, cancels context,
|
||||
// causing SSH session teardown.
|
||||
// 3. We receive from sleepDone and attempt to call session.Close()
|
||||
// Now either:
|
||||
// a. Session teardown completes before we call Close(), resulting in io.EOF
|
||||
// b. We call Close() first, resulting in a nil error.
|
||||
_ = session.Close()
|
||||
}()
|
||||
|
||||
// Wait for our "parent" process to start
|
||||
sleepPid := testutil.RequireReceive(ctx, t, sleepStart)
|
||||
// Wait for the agent to be ready
|
||||
testutil.SoftTryReceive(ctx, t, agentReady)
|
||||
inv, root := clitest.New(t, "ssh", "--stdio", workspace.Name, "--test.force-ppid", fmt.Sprintf("%d", sleepPid))
|
||||
clitest.SetupConfig(t, client, root)
|
||||
inv.Stdin = clientOutput
|
||||
inv.Stdout = serverInput
|
||||
inv.Stderr = io.Discard
|
||||
|
||||
// Start the command
|
||||
clitest.Start(t, inv.WithContext(ctx))
|
||||
|
||||
// Wait for a session to be established
|
||||
testutil.SoftTryReceive(ctx, t, sessionStarted)
|
||||
// Now kill the fake "parent"
|
||||
close(sleepKill)
|
||||
// The sleep process should exit
|
||||
testutil.SoftTryReceive(ctx, t, sleepDone)
|
||||
// And then the command should exit. This is tracked by clitest.Start.
|
||||
})
|
||||
|
||||
t.Run("ForwardAgent", func(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Test not supported on windows")
|
||||
|
||||
@@ -10,6 +10,7 @@ USAGE:
|
||||
SUBCOMMANDS:
|
||||
create Create a new organization.
|
||||
delete Delete an organization
|
||||
list List all organizations
|
||||
members Manage organization members
|
||||
roles Manage organization roles.
|
||||
settings Manage organization settings.
|
||||
|
||||
@@ -0,0 +1,21 @@
|
||||
coder v0.0.0-devel
|
||||
|
||||
USAGE:
|
||||
coder organizations list [flags]
|
||||
|
||||
List all organizations
|
||||
|
||||
Aliases: ls
|
||||
|
||||
List all organizations. Requires a role which grants ResourceOrganization:
|
||||
read.
|
||||
|
||||
OPTIONS:
|
||||
-c, --column [id|name|display name|icon|description|created at|updated at|default] (default: name,display name,id,default)
|
||||
Columns to display in table output.
|
||||
|
||||
-o, --output table|json (default: table)
|
||||
Output format.
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
@@ -89,6 +89,7 @@ type Options struct {
|
||||
PublishWorkspaceAgentLogsUpdateFn func(ctx context.Context, workspaceAgentID uuid.UUID, msg agentsdk.LogsNotifyMessage)
|
||||
NetworkTelemetryHandler func(batch []*tailnetproto.TelemetryEvent)
|
||||
BoundaryUsageTracker *boundaryusage.Tracker
|
||||
LifecycleMetrics *LifecycleMetrics
|
||||
|
||||
AccessURL *url.URL
|
||||
AppHostname string
|
||||
@@ -170,6 +171,7 @@ func New(opts Options, workspace database.Workspace) *API {
|
||||
Database: opts.Database,
|
||||
Log: opts.Log,
|
||||
PublishWorkspaceUpdateFn: api.publishWorkspaceUpdate,
|
||||
Metrics: opts.LifecycleMetrics,
|
||||
}
|
||||
|
||||
api.AppsAPI = &AppsAPI{
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -31,7 +32,9 @@ type LifecycleAPI struct {
|
||||
Log slog.Logger
|
||||
PublishWorkspaceUpdateFn func(context.Context, *database.WorkspaceAgent, wspubsub.WorkspaceEventKind) error
|
||||
|
||||
TimeNowFn func() time.Time // defaults to dbtime.Now()
|
||||
TimeNowFn func() time.Time // defaults to dbtime.Now()
|
||||
Metrics *LifecycleMetrics
|
||||
emitMetricsOnce sync.Once
|
||||
}
|
||||
|
||||
func (a *LifecycleAPI) now() time.Time {
|
||||
@@ -125,6 +128,17 @@ func (a *LifecycleAPI) UpdateLifecycle(ctx context.Context, req *agentproto.Upda
|
||||
}
|
||||
}
|
||||
|
||||
// Emit build duration metric when agent transitions to a terminal startup state.
|
||||
// We only emit once per agent connection to avoid duplicate metrics.
|
||||
switch lifecycleState {
|
||||
case database.WorkspaceAgentLifecycleStateReady,
|
||||
database.WorkspaceAgentLifecycleStateStartTimeout,
|
||||
database.WorkspaceAgentLifecycleStateStartError:
|
||||
a.emitMetricsOnce.Do(func() {
|
||||
a.emitBuildDurationMetric(ctx, workspaceAgent.ResourceID)
|
||||
})
|
||||
}
|
||||
|
||||
return req.Lifecycle, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -9,12 +9,14 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/mock/gomock"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
|
||||
agentproto "github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/coderd/agentapi"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest/promhelp"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbmock"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
@@ -22,6 +24,10 @@ import (
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
// fullMetricName is the fully-qualified Prometheus metric name
|
||||
// (namespace + name) used for gathering in tests.
|
||||
const fullMetricName = "coderd_" + agentapi.BuildDurationMetricName
|
||||
|
||||
func TestUpdateLifecycle(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -30,6 +36,12 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
someTime = dbtime.Time(someTime)
|
||||
now := dbtime.Now()
|
||||
|
||||
// Fixed times for build duration metric assertions.
|
||||
// The expected duration is exactly 90 seconds.
|
||||
buildCreatedAt := dbtime.Time(time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC))
|
||||
agentReadyAt := dbtime.Time(time.Date(2025, 1, 1, 0, 1, 30, 0, time.UTC))
|
||||
expectedDuration := agentReadyAt.Sub(buildCreatedAt).Seconds() // 90.0
|
||||
|
||||
var (
|
||||
workspaceID = uuid.New()
|
||||
agentCreated = database.WorkspaceAgent{
|
||||
@@ -105,6 +117,19 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
Valid: true,
|
||||
},
|
||||
}).Return(nil)
|
||||
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agentStarting.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
|
||||
CreatedAt: buildCreatedAt,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
TemplateName: "test-template",
|
||||
OrganizationName: "test-org",
|
||||
IsPrebuild: false,
|
||||
AllAgentsReady: true,
|
||||
LastAgentReadyAt: agentReadyAt,
|
||||
WorstStatus: "success",
|
||||
}, nil)
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := agentapi.NewLifecycleMetrics(reg)
|
||||
|
||||
api := &agentapi.LifecycleAPI{
|
||||
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
|
||||
@@ -113,6 +138,7 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
WorkspaceID: workspaceID,
|
||||
Database: dbM,
|
||||
Log: testutil.Logger(t),
|
||||
Metrics: metrics,
|
||||
// Test that nil publish fn works.
|
||||
PublishWorkspaceUpdateFn: nil,
|
||||
}
|
||||
@@ -122,6 +148,16 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lifecycle, resp)
|
||||
|
||||
got := promhelp.HistogramValue(t, reg, fullMetricName, prometheus.Labels{
|
||||
"template_name": "test-template",
|
||||
"organization_name": "test-org",
|
||||
"transition": "start",
|
||||
"status": "success",
|
||||
"is_prebuild": "false",
|
||||
})
|
||||
require.Equal(t, uint64(1), got.GetSampleCount())
|
||||
require.Equal(t, expectedDuration, got.GetSampleSum())
|
||||
})
|
||||
|
||||
// This test jumps from CREATING to READY, skipping STARTED. Both the
|
||||
@@ -147,8 +183,21 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
Valid: true,
|
||||
},
|
||||
}).Return(nil)
|
||||
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agentCreated.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
|
||||
CreatedAt: buildCreatedAt,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
TemplateName: "test-template",
|
||||
OrganizationName: "test-org",
|
||||
IsPrebuild: false,
|
||||
AllAgentsReady: true,
|
||||
LastAgentReadyAt: agentReadyAt,
|
||||
WorstStatus: "success",
|
||||
}, nil)
|
||||
|
||||
publishCalled := false
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := agentapi.NewLifecycleMetrics(reg)
|
||||
|
||||
api := &agentapi.LifecycleAPI{
|
||||
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
|
||||
return agentCreated, nil
|
||||
@@ -156,6 +205,7 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
WorkspaceID: workspaceID,
|
||||
Database: dbM,
|
||||
Log: testutil.Logger(t),
|
||||
Metrics: metrics,
|
||||
PublishWorkspaceUpdateFn: func(ctx context.Context, agent *database.WorkspaceAgent, kind wspubsub.WorkspaceEventKind) error {
|
||||
publishCalled = true
|
||||
return nil
|
||||
@@ -168,6 +218,16 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lifecycle, resp)
|
||||
require.True(t, publishCalled)
|
||||
|
||||
got := promhelp.HistogramValue(t, reg, fullMetricName, prometheus.Labels{
|
||||
"template_name": "test-template",
|
||||
"organization_name": "test-org",
|
||||
"transition": "start",
|
||||
"status": "success",
|
||||
"is_prebuild": "false",
|
||||
})
|
||||
require.Equal(t, uint64(1), got.GetSampleCount())
|
||||
require.Equal(t, expectedDuration, got.GetSampleSum())
|
||||
})
|
||||
|
||||
t.Run("NoTimeSpecified", func(t *testing.T) {
|
||||
@@ -194,6 +254,19 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
Valid: true,
|
||||
},
|
||||
})
|
||||
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agentCreated.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
|
||||
CreatedAt: buildCreatedAt,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
TemplateName: "test-template",
|
||||
OrganizationName: "test-org",
|
||||
IsPrebuild: false,
|
||||
AllAgentsReady: true,
|
||||
LastAgentReadyAt: agentReadyAt,
|
||||
WorstStatus: "success",
|
||||
}, nil)
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := agentapi.NewLifecycleMetrics(reg)
|
||||
|
||||
api := &agentapi.LifecycleAPI{
|
||||
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
|
||||
@@ -202,6 +275,7 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
WorkspaceID: workspaceID,
|
||||
Database: dbM,
|
||||
Log: testutil.Logger(t),
|
||||
Metrics: metrics,
|
||||
PublishWorkspaceUpdateFn: nil,
|
||||
TimeNowFn: func() time.Time {
|
||||
return now
|
||||
@@ -213,6 +287,16 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lifecycle, resp)
|
||||
|
||||
got := promhelp.HistogramValue(t, reg, fullMetricName, prometheus.Labels{
|
||||
"template_name": "test-template",
|
||||
"organization_name": "test-org",
|
||||
"transition": "start",
|
||||
"status": "success",
|
||||
"is_prebuild": "false",
|
||||
})
|
||||
require.Equal(t, uint64(1), got.GetSampleCount())
|
||||
require.Equal(t, expectedDuration, got.GetSampleSum())
|
||||
})
|
||||
|
||||
t.Run("AllStates", func(t *testing.T) {
|
||||
@@ -228,6 +312,9 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
dbM := dbmock.NewMockStore(gomock.NewController(t))
|
||||
|
||||
var publishCalled int64
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := agentapi.NewLifecycleMetrics(reg)
|
||||
|
||||
api := &agentapi.LifecycleAPI{
|
||||
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
|
||||
return agent, nil
|
||||
@@ -235,6 +322,7 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
WorkspaceID: workspaceID,
|
||||
Database: dbM,
|
||||
Log: testutil.Logger(t),
|
||||
Metrics: metrics,
|
||||
PublishWorkspaceUpdateFn: func(ctx context.Context, agent *database.WorkspaceAgent, kind wspubsub.WorkspaceEventKind) error {
|
||||
atomic.AddInt64(&publishCalled, 1)
|
||||
return nil
|
||||
@@ -277,6 +365,20 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
ReadyAt: expectedReadyAt,
|
||||
}).Times(1).Return(nil)
|
||||
|
||||
// The first ready state triggers the build duration metric query.
|
||||
if state == agentproto.Lifecycle_READY || state == agentproto.Lifecycle_START_TIMEOUT || state == agentproto.Lifecycle_START_ERROR {
|
||||
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agent.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
|
||||
CreatedAt: someTime,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
TemplateName: "test-template",
|
||||
OrganizationName: "test-org",
|
||||
IsPrebuild: false,
|
||||
AllAgentsReady: true,
|
||||
LastAgentReadyAt: stateNow,
|
||||
WorstStatus: "success",
|
||||
}, nil).MaxTimes(1)
|
||||
}
|
||||
|
||||
resp, err := api.UpdateLifecycle(context.Background(), &agentproto.UpdateLifecycleRequest{
|
||||
Lifecycle: lifecycle,
|
||||
})
|
||||
@@ -322,6 +424,164 @@ func TestUpdateLifecycle(t *testing.T) {
|
||||
require.Nil(t, resp)
|
||||
require.False(t, publishCalled)
|
||||
})
|
||||
|
||||
// Test that metric is NOT emitted when not all agents are ready (multi-agent case).
|
||||
t.Run("MetricNotEmittedWhenNotAllAgentsReady", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
lifecycle := &agentproto.Lifecycle{
|
||||
State: agentproto.Lifecycle_READY,
|
||||
ChangedAt: timestamppb.New(now),
|
||||
}
|
||||
|
||||
dbM := dbmock.NewMockStore(gomock.NewController(t))
|
||||
dbM.EXPECT().UpdateWorkspaceAgentLifecycleStateByID(gomock.Any(), gomock.Any()).Return(nil)
|
||||
// Return AllAgentsReady = false to simulate multi-agent case where not all are ready.
|
||||
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agentStarting.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
|
||||
CreatedAt: someTime,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
TemplateName: "test-template",
|
||||
OrganizationName: "test-org",
|
||||
IsPrebuild: false,
|
||||
AllAgentsReady: false, // Not all agents ready yet
|
||||
LastAgentReadyAt: time.Time{}, // No ready time yet
|
||||
WorstStatus: "success",
|
||||
}, nil)
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := agentapi.NewLifecycleMetrics(reg)
|
||||
|
||||
api := &agentapi.LifecycleAPI{
|
||||
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
|
||||
return agentStarting, nil
|
||||
},
|
||||
WorkspaceID: workspaceID,
|
||||
Database: dbM,
|
||||
Log: testutil.Logger(t),
|
||||
Metrics: metrics,
|
||||
PublishWorkspaceUpdateFn: nil,
|
||||
}
|
||||
|
||||
resp, err := api.UpdateLifecycle(context.Background(), &agentproto.UpdateLifecycleRequest{
|
||||
Lifecycle: lifecycle,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lifecycle, resp)
|
||||
|
||||
require.Nil(t, promhelp.MetricValue(t, reg, fullMetricName, prometheus.Labels{
|
||||
"template_name": "test-template",
|
||||
"organization_name": "test-org",
|
||||
"transition": "start",
|
||||
"status": "success",
|
||||
"is_prebuild": "false",
|
||||
}), "metric should not be emitted when not all agents are ready")
|
||||
})
|
||||
|
||||
// Test that prebuild label is "true" when owner is prebuild system user.
|
||||
t.Run("PrebuildLabelTrue", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
lifecycle := &agentproto.Lifecycle{
|
||||
State: agentproto.Lifecycle_READY,
|
||||
ChangedAt: timestamppb.New(now),
|
||||
}
|
||||
|
||||
dbM := dbmock.NewMockStore(gomock.NewController(t))
|
||||
dbM.EXPECT().UpdateWorkspaceAgentLifecycleStateByID(gomock.Any(), gomock.Any()).Return(nil)
|
||||
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agentStarting.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
|
||||
CreatedAt: buildCreatedAt,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
TemplateName: "test-template",
|
||||
OrganizationName: "test-org",
|
||||
IsPrebuild: true, // Prebuild workspace
|
||||
AllAgentsReady: true,
|
||||
LastAgentReadyAt: agentReadyAt,
|
||||
WorstStatus: "success",
|
||||
}, nil)
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := agentapi.NewLifecycleMetrics(reg)
|
||||
|
||||
api := &agentapi.LifecycleAPI{
|
||||
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
|
||||
return agentStarting, nil
|
||||
},
|
||||
WorkspaceID: workspaceID,
|
||||
Database: dbM,
|
||||
Log: testutil.Logger(t),
|
||||
Metrics: metrics,
|
||||
PublishWorkspaceUpdateFn: nil,
|
||||
}
|
||||
|
||||
resp, err := api.UpdateLifecycle(context.Background(), &agentproto.UpdateLifecycleRequest{
|
||||
Lifecycle: lifecycle,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lifecycle, resp)
|
||||
|
||||
got := promhelp.HistogramValue(t, reg, fullMetricName, prometheus.Labels{
|
||||
"template_name": "test-template",
|
||||
"organization_name": "test-org",
|
||||
"transition": "start",
|
||||
"status": "success",
|
||||
"is_prebuild": "true",
|
||||
})
|
||||
require.Equal(t, uint64(1), got.GetSampleCount())
|
||||
require.Equal(t, expectedDuration, got.GetSampleSum())
|
||||
})
|
||||
|
||||
// Test worst status is used when one agent has an error.
|
||||
t.Run("WorstStatusError", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
lifecycle := &agentproto.Lifecycle{
|
||||
State: agentproto.Lifecycle_READY,
|
||||
ChangedAt: timestamppb.New(now),
|
||||
}
|
||||
|
||||
dbM := dbmock.NewMockStore(gomock.NewController(t))
|
||||
dbM.EXPECT().UpdateWorkspaceAgentLifecycleStateByID(gomock.Any(), gomock.Any()).Return(nil)
|
||||
dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), agentStarting.ResourceID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{
|
||||
CreatedAt: buildCreatedAt,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
TemplateName: "test-template",
|
||||
OrganizationName: "test-org",
|
||||
IsPrebuild: false,
|
||||
AllAgentsReady: true,
|
||||
LastAgentReadyAt: agentReadyAt,
|
||||
WorstStatus: "error", // One agent had an error
|
||||
}, nil)
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := agentapi.NewLifecycleMetrics(reg)
|
||||
|
||||
api := &agentapi.LifecycleAPI{
|
||||
AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
|
||||
return agentStarting, nil
|
||||
},
|
||||
WorkspaceID: workspaceID,
|
||||
Database: dbM,
|
||||
Log: testutil.Logger(t),
|
||||
Metrics: metrics,
|
||||
PublishWorkspaceUpdateFn: nil,
|
||||
}
|
||||
|
||||
resp, err := api.UpdateLifecycle(context.Background(), &agentproto.UpdateLifecycleRequest{
|
||||
Lifecycle: lifecycle,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lifecycle, resp)
|
||||
|
||||
got := promhelp.HistogramValue(t, reg, fullMetricName, prometheus.Labels{
|
||||
"template_name": "test-template",
|
||||
"organization_name": "test-org",
|
||||
"transition": "start",
|
||||
"status": "error",
|
||||
"is_prebuild": "false",
|
||||
})
|
||||
require.Equal(t, uint64(1), got.GetSampleCount())
|
||||
require.Equal(t, expectedDuration, got.GetSampleSum())
|
||||
})
|
||||
}
|
||||
|
||||
func TestUpdateStartup(t *testing.T) {
|
||||
|
||||
@@ -0,0 +1,97 @@
|
||||
package agentapi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
)
|
||||
|
||||
// BuildDurationMetricName is the short name for the end-to-end
|
||||
// workspace build duration histogram. The full metric name is
|
||||
// prefixed with the namespace "coderd_".
|
||||
const BuildDurationMetricName = "template_workspace_build_duration_seconds"
|
||||
|
||||
// LifecycleMetrics contains Prometheus metrics for the lifecycle API.
|
||||
type LifecycleMetrics struct {
|
||||
BuildDuration *prometheus.HistogramVec
|
||||
}
|
||||
|
||||
// NewLifecycleMetrics creates and registers all lifecycle-related
|
||||
// Prometheus metrics.
|
||||
//
|
||||
// The build duration histogram tracks the end-to-end duration from
|
||||
// workspace build creation to agent ready, by template. It is
|
||||
// recorded by the coderd replica handling the agent's connection
|
||||
// when the last agent reports ready. In multi-replica deployments,
|
||||
// each replica only has observations for agents it handles.
|
||||
//
|
||||
// The "is_prebuild" label distinguishes prebuild creation (background,
|
||||
// no user waiting) from user-initiated builds (regular workspace
|
||||
// creation or prebuild claims).
|
||||
func NewLifecycleMetrics(reg prometheus.Registerer) *LifecycleMetrics {
|
||||
m := &LifecycleMetrics{
|
||||
BuildDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Namespace: "coderd",
|
||||
Name: BuildDurationMetricName,
|
||||
Help: "Duration from workspace build creation to agent ready, by template.",
|
||||
Buckets: []float64{
|
||||
1, // 1s
|
||||
10,
|
||||
30,
|
||||
60, // 1min
|
||||
60 * 5,
|
||||
60 * 10,
|
||||
60 * 30, // 30min
|
||||
60 * 60, // 1hr
|
||||
},
|
||||
NativeHistogramBucketFactor: 1.1,
|
||||
NativeHistogramMaxBucketNumber: 100,
|
||||
NativeHistogramMinResetDuration: time.Hour,
|
||||
}, []string{"template_name", "organization_name", "transition", "status", "is_prebuild"}),
|
||||
}
|
||||
reg.MustRegister(m.BuildDuration)
|
||||
return m
|
||||
}
|
||||
|
||||
// emitBuildDurationMetric records the end-to-end workspace build
|
||||
// duration from build creation to when all agents are ready.
|
||||
func (a *LifecycleAPI) emitBuildDurationMetric(ctx context.Context, resourceID uuid.UUID) {
|
||||
if a.Metrics == nil {
|
||||
return
|
||||
}
|
||||
|
||||
buildInfo, err := a.Database.GetWorkspaceBuildMetricsByResourceID(ctx, resourceID)
|
||||
if err != nil {
|
||||
a.Log.Warn(ctx, "failed to get build info for metrics", slog.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
// Wait until all agents have reached a terminal startup state.
|
||||
if !buildInfo.AllAgentsReady {
|
||||
return
|
||||
}
|
||||
|
||||
// LastAgentReadyAt is the MAX(ready_at) across all agents. Since
|
||||
// we only get here when AllAgentsReady is true, this should always
|
||||
// be valid.
|
||||
if buildInfo.LastAgentReadyAt.IsZero() {
|
||||
a.Log.Warn(ctx, "last_agent_ready_at is unexpectedly zero",
|
||||
slog.F("last_agent_ready_at", buildInfo.LastAgentReadyAt))
|
||||
return
|
||||
}
|
||||
|
||||
duration := buildInfo.LastAgentReadyAt.Sub(buildInfo.CreatedAt).Seconds()
|
||||
|
||||
a.Metrics.BuildDuration.WithLabelValues(
|
||||
buildInfo.TemplateName,
|
||||
buildInfo.OrganizationName,
|
||||
string(buildInfo.Transition),
|
||||
buildInfo.WorstStatus,
|
||||
strconv.FormatBool(buildInfo.IsPrebuild),
|
||||
).Observe(duration)
|
||||
}
|
||||
+81
-4
@@ -977,10 +977,27 @@ func (api *API) authAndDoWithTaskAppClient(
|
||||
ctx := r.Context()
|
||||
|
||||
if task.Status != database.TaskStatusActive {
|
||||
return httperror.NewResponseError(http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Task status must be active.",
|
||||
Detail: fmt.Sprintf("Task status is %q, it must be %q to interact with the task.", task.Status, codersdk.TaskStatusActive),
|
||||
})
|
||||
// Return 409 Conflict for valid requests blocked by current state
|
||||
// (pending/initializing are transitional, paused requires resume).
|
||||
// Return 400 Bad Request for error/unknown states.
|
||||
switch task.Status {
|
||||
case database.TaskStatusPending, database.TaskStatusInitializing:
|
||||
return httperror.NewResponseError(http.StatusConflict, codersdk.Response{
|
||||
Message: fmt.Sprintf("Task is %s.", task.Status),
|
||||
Detail: "The task is resuming. Wait for the task to become active before sending messages.",
|
||||
})
|
||||
case database.TaskStatusPaused:
|
||||
return httperror.NewResponseError(http.StatusConflict, codersdk.Response{
|
||||
Message: "Task is paused.",
|
||||
Detail: "Resume the task to send messages.",
|
||||
})
|
||||
default:
|
||||
// Default handler for error and unknown status.
|
||||
return httperror.NewResponseError(http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Task must be active.",
|
||||
Detail: fmt.Sprintf("Task status is %q, it must be %q to interact with the task.", task.Status, codersdk.TaskStatusActive),
|
||||
})
|
||||
}
|
||||
}
|
||||
if !task.WorkspaceID.Valid {
|
||||
return httperror.NewResponseError(http.StatusBadRequest, codersdk.Response{
|
||||
@@ -1227,3 +1244,63 @@ func (api *API) postWorkspaceAgentTaskLogSnapshot(rw http.ResponseWriter, r *htt
|
||||
|
||||
rw.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
// @Summary Pause task
|
||||
// @ID pause-task
|
||||
// @Security CoderSessionToken
|
||||
// @Accept json
|
||||
// @Tags Tasks
|
||||
// @Param user path string true "Username, user ID, or 'me' for the authenticated user"
|
||||
// @Param task path string true "Task ID" format(uuid)
|
||||
// @Success 202 {object} codersdk.PauseTaskResponse
|
||||
// @Router /tasks/{user}/{task}/pause [post]
|
||||
func (api *API) pauseTask(rw http.ResponseWriter, r *http.Request) {
|
||||
var (
|
||||
ctx = r.Context()
|
||||
apiKey = httpmw.APIKey(r)
|
||||
task = httpmw.TaskParam(r)
|
||||
)
|
||||
|
||||
if !task.WorkspaceID.Valid {
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Task does not have a workspace.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
workspace, err := api.Database.GetWorkspaceByID(ctx, task.WorkspaceID.UUID)
|
||||
if err != nil {
|
||||
if httpapi.Is404Error(err) {
|
||||
httpapi.ResourceNotFound(rw)
|
||||
return
|
||||
}
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error fetching task workspace.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
buildReq := codersdk.CreateWorkspaceBuildRequest{
|
||||
Transition: codersdk.WorkspaceTransitionStop,
|
||||
Reason: codersdk.CreateWorkspaceBuildReasonTaskManualPause,
|
||||
}
|
||||
build, err := api.postWorkspaceBuildsInternal(
|
||||
ctx,
|
||||
apiKey,
|
||||
workspace,
|
||||
buildReq,
|
||||
func(action policy.Action, object rbac.Objecter) bool {
|
||||
return api.Authorize(r, action, object)
|
||||
},
|
||||
audit.WorkspaceBuildBaggageFromRequest(r),
|
||||
)
|
||||
if err != nil {
|
||||
httperror.WriteWorkspaceBuildError(ctx, rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
httpapi.Write(ctx, rw, http.StatusAccepted, codersdk.PauseTaskResponse{
|
||||
WorkspaceBuild: &build,
|
||||
})
|
||||
}
|
||||
|
||||
+654
-62
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
agentapisdk "github.com/coder/agentapi-sdk-go"
|
||||
"github.com/coder/coder/v2/agent"
|
||||
@@ -26,10 +27,14 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtestutil"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/coderd/database/pubsub"
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
"github.com/coder/coder/v2/coderd/notifications"
|
||||
"github.com/coder/coder/v2/coderd/notifications/notificationstest"
|
||||
"github.com/coder/coder/v2/coderd/rbac"
|
||||
"github.com/coder/coder/v2/coderd/rbac/policy"
|
||||
"github.com/coder/coder/v2/coderd/util/slice"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
@@ -39,6 +44,96 @@ import (
|
||||
"github.com/coder/quartz"
|
||||
)
|
||||
|
||||
// createTaskInState is a helper to create a task in the desired state.
|
||||
// It returns a function that takes context, test, and status, and returns the task ID.
|
||||
// The caller is responsible for setting up the database, owner, and user.
|
||||
func createTaskInState(db database.Store, ownerSubject rbac.Subject, ownerOrgID, userID uuid.UUID) func(context.Context, *testing.T, database.TaskStatus) uuid.UUID {
|
||||
return func(ctx context.Context, t *testing.T, status database.TaskStatus) uuid.UUID {
|
||||
ctx = dbauthz.As(ctx, ownerSubject)
|
||||
|
||||
builder := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: ownerOrgID,
|
||||
OwnerID: userID,
|
||||
}).
|
||||
WithTask(database.TaskTable{
|
||||
OrganizationID: ownerOrgID,
|
||||
OwnerID: userID,
|
||||
}, nil)
|
||||
|
||||
switch status {
|
||||
case database.TaskStatusPending:
|
||||
builder = builder.Pending()
|
||||
case database.TaskStatusInitializing:
|
||||
builder = builder.Starting()
|
||||
case database.TaskStatusPaused:
|
||||
builder = builder.Seed(database.WorkspaceBuild{
|
||||
Transition: database.WorkspaceTransitionStop,
|
||||
})
|
||||
case database.TaskStatusError:
|
||||
// For error state, create a completed build then manipulate app health.
|
||||
default:
|
||||
require.Fail(t, "unsupported task status in test helper", "status: %s", status)
|
||||
}
|
||||
|
||||
resp := builder.Do()
|
||||
taskID := resp.Task.ID
|
||||
|
||||
// Post-process by manipulating agent and app state.
|
||||
if status == database.TaskStatusError {
|
||||
// First, set agent to ready state so agent_status returns 'active'.
|
||||
// This ensures the cascade reaches app_status.
|
||||
err := db.UpdateWorkspaceAgentLifecycleStateByID(ctx, database.UpdateWorkspaceAgentLifecycleStateByIDParams{
|
||||
ID: resp.Agents[0].ID,
|
||||
LifecycleState: database.WorkspaceAgentLifecycleStateReady,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Then set workspace app health to unhealthy to trigger error state.
|
||||
apps, err := db.GetWorkspaceAppsByAgentID(ctx, resp.Agents[0].ID)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, apps, 1, "expected exactly one app for task")
|
||||
|
||||
err = db.UpdateWorkspaceAppHealthByID(ctx, database.UpdateWorkspaceAppHealthByIDParams{
|
||||
ID: apps[0].ID,
|
||||
Health: database.WorkspaceAppHealthUnhealthy,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
return taskID
|
||||
}
|
||||
}
|
||||
|
||||
type aiTaskStoreWrapper struct {
|
||||
database.Store
|
||||
getWorkspaceByID func(ctx context.Context, id uuid.UUID) (database.Workspace, error)
|
||||
insertWorkspaceBuild func(ctx context.Context, arg database.InsertWorkspaceBuildParams) error
|
||||
}
|
||||
|
||||
func (s aiTaskStoreWrapper) GetWorkspaceByID(ctx context.Context, id uuid.UUID) (database.Workspace, error) {
|
||||
if s.getWorkspaceByID != nil {
|
||||
return s.getWorkspaceByID(ctx, id)
|
||||
}
|
||||
return s.Store.GetWorkspaceByID(ctx, id)
|
||||
}
|
||||
|
||||
func (s aiTaskStoreWrapper) InsertWorkspaceBuild(ctx context.Context, arg database.InsertWorkspaceBuildParams) error {
|
||||
if s.insertWorkspaceBuild != nil {
|
||||
return s.insertWorkspaceBuild(ctx, arg)
|
||||
}
|
||||
return s.Store.InsertWorkspaceBuild(ctx, arg)
|
||||
}
|
||||
|
||||
func (s aiTaskStoreWrapper) InTx(fn func(database.Store) error, opts *database.TxOptions) error {
|
||||
return s.Store.InTx(func(tx database.Store) error {
|
||||
return fn(aiTaskStoreWrapper{
|
||||
Store: tx,
|
||||
getWorkspaceByID: s.getWorkspaceByID,
|
||||
insertWorkspaceBuild: s.insertWorkspaceBuild,
|
||||
})
|
||||
}, opts)
|
||||
}
|
||||
|
||||
func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -398,6 +493,144 @@ func TestTasks(t *testing.T) {
|
||||
require.NoError(t, err, "should be possible to delete a task with no workspace")
|
||||
})
|
||||
|
||||
t.Run("SnapshotCleanupOnDeletion", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
template := createAITemplate(t, client, user)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
userObj, err := client.User(ctx, user.UserID.String())
|
||||
require.NoError(t, err)
|
||||
userSubject := coderdtest.AuthzUserSubject(userObj)
|
||||
|
||||
task, err := client.CreateTask(ctx, "me", codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: "delete me with snapshot",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
ws, err := client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
|
||||
|
||||
// Create a snapshot for the task.
|
||||
snapshotJSON := `{"format":"agentapi","data":{"messages":[{"role":"user","content":"test"}]}}`
|
||||
err = db.UpsertTaskSnapshot(dbauthz.As(ctx, userSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: task.ID,
|
||||
LogSnapshot: json.RawMessage(snapshotJSON),
|
||||
LogSnapshotCreatedAt: dbtime.Now(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify snapshot exists.
|
||||
_, err = db.GetTaskSnapshot(dbauthz.As(ctx, userSubject), task.ID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Delete the task.
|
||||
err = client.DeleteTask(ctx, "me", task.ID)
|
||||
require.NoError(t, err, "delete task request should be accepted")
|
||||
|
||||
// Verify snapshot no longer exists.
|
||||
_, err = db.GetTaskSnapshot(dbauthz.As(ctx, userSubject), task.ID)
|
||||
require.ErrorIs(t, err, sql.ErrNoRows, "snapshot should be deleted with task")
|
||||
})
|
||||
|
||||
t.Run("DeletionWithoutSnapshot", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
template := createAITemplate(t, client, user)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
userObj, err := client.User(ctx, user.UserID.String())
|
||||
require.NoError(t, err)
|
||||
userSubject := coderdtest.AuthzUserSubject(userObj)
|
||||
|
||||
task, err := client.CreateTask(ctx, "me", codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: "delete me without snapshot",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
ws, err := client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
|
||||
|
||||
// Verify no snapshot exists.
|
||||
_, err = db.GetTaskSnapshot(dbauthz.As(ctx, userSubject), task.ID)
|
||||
require.ErrorIs(t, err, sql.ErrNoRows, "snapshot should not exist initially")
|
||||
|
||||
// Delete the task (should succeed even without snapshot).
|
||||
err = client.DeleteTask(ctx, "me", task.ID)
|
||||
require.NoError(t, err, "delete task should succeed even without snapshot")
|
||||
})
|
||||
|
||||
t.Run("PreservesOtherTaskSnapshots", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
template := createAITemplate(t, client, user)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
userObj, err := client.User(ctx, user.UserID.String())
|
||||
require.NoError(t, err)
|
||||
userSubject := coderdtest.AuthzUserSubject(userObj)
|
||||
|
||||
// Create task A.
|
||||
taskA, err := client.CreateTask(ctx, "me", codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: "task A",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
wsA, err := client.Workspace(ctx, taskA.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, wsA.LatestBuild.ID)
|
||||
|
||||
// Create task B.
|
||||
taskB, err := client.CreateTask(ctx, "me", codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: "task B",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
wsB, err := client.Workspace(ctx, taskB.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, wsB.LatestBuild.ID)
|
||||
|
||||
// Create snapshots for both tasks.
|
||||
snapshotJSONA := `{"format":"agentapi","data":{"messages":[{"role":"user","content":"task A"}]}}`
|
||||
err = db.UpsertTaskSnapshot(dbauthz.As(ctx, userSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskA.ID,
|
||||
LogSnapshot: json.RawMessage(snapshotJSONA),
|
||||
LogSnapshotCreatedAt: dbtime.Now(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
snapshotJSONB := `{"format":"agentapi","data":{"messages":[{"role":"user","content":"task B"}]}}`
|
||||
err = db.UpsertTaskSnapshot(dbauthz.As(ctx, userSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskB.ID,
|
||||
LogSnapshot: json.RawMessage(snapshotJSONB),
|
||||
LogSnapshotCreatedAt: dbtime.Now(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Delete task A.
|
||||
err = client.DeleteTask(ctx, "me", taskA.ID)
|
||||
require.NoError(t, err, "delete task A should succeed")
|
||||
|
||||
// Verify task A's snapshot is removed.
|
||||
_, err = db.GetTaskSnapshot(dbauthz.As(ctx, userSubject), taskA.ID)
|
||||
require.ErrorIs(t, err, sql.ErrNoRows, "task A snapshot should be deleted")
|
||||
|
||||
// Verify task B's snapshot still exists.
|
||||
_, err = db.GetTaskSnapshot(dbauthz.As(ctx, userSubject), taskB.ID)
|
||||
require.NoError(t, err, "task B snapshot should still exist")
|
||||
})
|
||||
|
||||
t.Run("DeletingTaskWorkspaceDeletesTask", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -591,6 +824,94 @@ func TestTasks(t *testing.T) {
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
require.Equal(t, http.StatusNotFound, sdkErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("SendToNonActiveStates", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
|
||||
ownerUser, err := client.User(ctx, owner.UserID.String())
|
||||
require.NoError(t, err)
|
||||
ownerSubject := coderdtest.AuthzUserSubject(ownerUser)
|
||||
|
||||
// Create a regular user for task ownership.
|
||||
_, user := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
|
||||
createTask := createTaskInState(db, ownerSubject, owner.OrganizationID, user.ID)
|
||||
|
||||
t.Run("Paused", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPaused)
|
||||
|
||||
err := client.TaskSend(ctx, "me", taskID, codersdk.TaskSendRequest{
|
||||
Input: "Hello",
|
||||
})
|
||||
|
||||
var sdkErr *codersdk.Error
|
||||
require.Error(t, err)
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
require.Equal(t, http.StatusConflict, sdkErr.StatusCode())
|
||||
require.Contains(t, sdkErr.Message, "paused")
|
||||
require.Contains(t, sdkErr.Detail, "Resume")
|
||||
})
|
||||
|
||||
t.Run("Initializing", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusInitializing)
|
||||
|
||||
err := client.TaskSend(ctx, "me", taskID, codersdk.TaskSendRequest{
|
||||
Input: "Hello",
|
||||
})
|
||||
|
||||
var sdkErr *codersdk.Error
|
||||
require.Error(t, err)
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
require.Equal(t, http.StatusConflict, sdkErr.StatusCode())
|
||||
require.Contains(t, sdkErr.Message, "initializing")
|
||||
require.Contains(t, sdkErr.Detail, "resuming")
|
||||
})
|
||||
|
||||
t.Run("Pending", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPending)
|
||||
|
||||
err := client.TaskSend(ctx, "me", taskID, codersdk.TaskSendRequest{
|
||||
Input: "Hello",
|
||||
})
|
||||
|
||||
var sdkErr *codersdk.Error
|
||||
require.Error(t, err)
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
require.Equal(t, http.StatusConflict, sdkErr.StatusCode())
|
||||
require.Contains(t, sdkErr.Message, "pending")
|
||||
require.Contains(t, sdkErr.Detail, "resuming")
|
||||
})
|
||||
|
||||
t.Run("Error", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTask(ctx, t, database.TaskStatusError)
|
||||
|
||||
err := client.TaskSend(ctx, "me", taskID, codersdk.TaskSendRequest{
|
||||
Input: "Hello",
|
||||
})
|
||||
|
||||
var sdkErr *codersdk.Error
|
||||
require.Error(t, err)
|
||||
require.ErrorAs(t, err, &sdkErr)
|
||||
require.Equal(t, http.StatusBadRequest, sdkErr.StatusCode())
|
||||
require.Contains(t, sdkErr.Message, "must be active")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("Logs", func(t *testing.T) {
|
||||
@@ -737,61 +1058,7 @@ func TestTasks(t *testing.T) {
|
||||
// Create a regular user to test snapshot access.
|
||||
client, user := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID)
|
||||
|
||||
// Helper to create a task in the desired state.
|
||||
createTaskInState := func(ctx context.Context, t *testing.T, status database.TaskStatus) uuid.UUID {
|
||||
ctx = dbauthz.As(ctx, ownerSubject)
|
||||
|
||||
builder := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: owner.OrganizationID,
|
||||
OwnerID: user.ID,
|
||||
}).
|
||||
WithTask(database.TaskTable{
|
||||
OrganizationID: owner.OrganizationID,
|
||||
OwnerID: user.ID,
|
||||
}, nil)
|
||||
|
||||
switch status {
|
||||
case database.TaskStatusPending:
|
||||
builder = builder.Pending()
|
||||
case database.TaskStatusInitializing:
|
||||
builder = builder.Starting()
|
||||
case database.TaskStatusPaused:
|
||||
builder = builder.Seed(database.WorkspaceBuild{
|
||||
Transition: database.WorkspaceTransitionStop,
|
||||
})
|
||||
case database.TaskStatusError:
|
||||
// For error state, create a completed build then manipulate app health.
|
||||
default:
|
||||
require.Fail(t, "unsupported task status in test helper", "status: %s", status)
|
||||
}
|
||||
|
||||
resp := builder.Do()
|
||||
taskID := resp.Task.ID
|
||||
|
||||
// Post-process by manipulating agent and app state.
|
||||
if status == database.TaskStatusError {
|
||||
// First, set agent to ready state so agent_status returns 'active'.
|
||||
// This ensures the cascade reaches app_status.
|
||||
err := db.UpdateWorkspaceAgentLifecycleStateByID(ctx, database.UpdateWorkspaceAgentLifecycleStateByIDParams{
|
||||
ID: resp.Agents[0].ID,
|
||||
LifecycleState: database.WorkspaceAgentLifecycleStateReady,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Then set workspace app health to unhealthy to trigger error state.
|
||||
apps, err := db.GetWorkspaceAppsByAgentID(ctx, resp.Agents[0].ID)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, apps, 1, "expected exactly one app for task")
|
||||
|
||||
err = db.UpdateWorkspaceAppHealthByID(ctx, database.UpdateWorkspaceAppHealthByIDParams{
|
||||
ID: apps[0].ID,
|
||||
Health: database.WorkspaceAppHealthUnhealthy,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
return taskID
|
||||
}
|
||||
createTask := createTaskInState(db, ownerSubject, owner.OrganizationID, user.ID)
|
||||
|
||||
// Prepare snapshot data used across tests.
|
||||
snapshotMessages := []agentapisdk.Message{
|
||||
@@ -853,7 +1120,7 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusPending)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPending)
|
||||
|
||||
err := db.UpsertTaskSnapshot(dbauthz.As(ctx, ownerSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskID,
|
||||
@@ -871,7 +1138,7 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusInitializing)
|
||||
taskID := createTask(ctx, t, database.TaskStatusInitializing)
|
||||
|
||||
err := db.UpsertTaskSnapshot(dbauthz.As(ctx, ownerSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskID,
|
||||
@@ -889,7 +1156,7 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusPaused)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPaused)
|
||||
|
||||
err := db.UpsertTaskSnapshot(dbauthz.As(ctx, ownerSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskID,
|
||||
@@ -907,7 +1174,7 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusPending)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPending)
|
||||
|
||||
logsResp, err := client.TaskLogs(ctx, "me", taskID)
|
||||
require.NoError(t, err)
|
||||
@@ -921,7 +1188,7 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusPending)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPending)
|
||||
|
||||
invalidEnvelope := coderd.TaskLogSnapshotEnvelope{
|
||||
Format: "unknown-format",
|
||||
@@ -950,7 +1217,7 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusPending)
|
||||
taskID := createTask(ctx, t, database.TaskStatusPending)
|
||||
|
||||
err := db.UpsertTaskSnapshot(dbauthz.As(ctx, ownerSubject), database.UpsertTaskSnapshotParams{
|
||||
TaskID: taskID,
|
||||
@@ -971,7 +1238,7 @@ func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
taskID := createTaskInState(ctx, t, database.TaskStatusError)
|
||||
taskID := createTask(ctx, t, database.TaskStatusError)
|
||||
|
||||
_, err := client.TaskLogs(ctx, "me", taskID)
|
||||
require.Error(t, err)
|
||||
@@ -2189,3 +2456,328 @@ func TestPostWorkspaceAgentTaskSnapshot(t *testing.T) {
|
||||
require.Equal(t, http.StatusUnauthorized, res.StatusCode)
|
||||
})
|
||||
}
|
||||
|
||||
func TestPauseTask(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
setupClient := func(t *testing.T, db database.Store, ps pubsub.Pubsub, authorizer rbac.Authorizer) *codersdk.Client {
|
||||
t.Helper()
|
||||
client, _, _ := coderdtest.NewWithAPI(t, &coderdtest.Options{
|
||||
Database: db,
|
||||
Pubsub: ps,
|
||||
Authorizer: authorizer,
|
||||
})
|
||||
return client
|
||||
}
|
||||
|
||||
setupWorkspaceTask := func(t *testing.T, db database.Store, user codersdk.CreateFirstUserResponse) (database.Task, uuid.UUID) {
|
||||
t.Helper()
|
||||
workspaceBuild := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).WithTask(database.TaskTable{
|
||||
Prompt: "pause me",
|
||||
}, nil).Do()
|
||||
return workspaceBuild.Task, workspaceBuild.Workspace.ID
|
||||
}
|
||||
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
ProvisionGraph: []*proto.Response{
|
||||
{Type: &proto.Response_Graph{Graph: &proto.GraphComplete{
|
||||
HasAiTasks: true,
|
||||
}}},
|
||||
},
|
||||
})
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
|
||||
task, err := client.CreateTask(ctx, codersdk.Me, codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: "pause me",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.True(t, task.WorkspaceID.Valid)
|
||||
|
||||
workspace, err := client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
resp, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
require.NoError(t, err)
|
||||
build := *resp.WorkspaceBuild
|
||||
require.NotNil(t, build)
|
||||
require.Equal(t, codersdk.WorkspaceTransitionStop, build.Transition)
|
||||
require.Equal(t, task.WorkspaceID.UUID, build.WorkspaceID)
|
||||
require.Equal(t, workspace.LatestBuild.BuildNumber+1, build.BuildNumber)
|
||||
require.Equal(t, string(codersdk.CreateWorkspaceBuildReasonTaskManualPause), string(build.Reason))
|
||||
})
|
||||
|
||||
t.Run("Non-owner role access", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
client := setupClient(t, db, ps, nil)
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
roles []rbac.RoleIdentifier
|
||||
expectedStatus int
|
||||
}{
|
||||
{
|
||||
name: "org_member",
|
||||
expectedStatus: http.StatusNotFound,
|
||||
},
|
||||
{
|
||||
name: "org_admin",
|
||||
roles: []rbac.RoleIdentifier{rbac.ScopedRoleOrgAdmin(owner.OrganizationID)},
|
||||
expectedStatus: http.StatusAccepted,
|
||||
},
|
||||
{
|
||||
name: "sitewide_member",
|
||||
roles: []rbac.RoleIdentifier{rbac.RoleMember()},
|
||||
expectedStatus: http.StatusNotFound,
|
||||
},
|
||||
{
|
||||
name: "sitewide_admin",
|
||||
roles: []rbac.RoleIdentifier{rbac.RoleOwner()},
|
||||
expectedStatus: http.StatusAccepted,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
task, _ := setupWorkspaceTask(t, db, owner)
|
||||
userClient, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID, tc.roles...)
|
||||
|
||||
resp, err := userClient.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
if tc.expectedStatus == http.StatusAccepted {
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp.WorkspaceBuild)
|
||||
require.NotEqual(t, uuid.Nil, resp.WorkspaceBuild.ID)
|
||||
return
|
||||
}
|
||||
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, tc.expectedStatus, apiErr.StatusCode())
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Task not found", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
_ = coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
_, err := client.PauseTask(ctx, codersdk.Me, uuid.New())
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusNotFound, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Task lookup forbidden", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
auth := &coderdtest.FakeAuthorizer{
|
||||
ConditionalReturn: func(_ context.Context, _ rbac.Subject, action policy.Action, object rbac.Object) error {
|
||||
if action == policy.ActionRead && object.Type == rbac.ResourceTask.Type {
|
||||
return rbac.UnauthorizedError{}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
client := setupClient(t, db, ps, auth)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
task, _ := setupWorkspaceTask(t, db, user)
|
||||
|
||||
_, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusNotFound, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Workspace lookup forbidden", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
auth := &coderdtest.FakeAuthorizer{
|
||||
ConditionalReturn: func(_ context.Context, _ rbac.Subject, action policy.Action, object rbac.Object) error {
|
||||
if action == policy.ActionRead && object.Type == rbac.ResourceWorkspace.Type {
|
||||
return rbac.UnauthorizedError{}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
client := setupClient(t, db, ps, auth)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
task, _ := setupWorkspaceTask(t, db, user)
|
||||
|
||||
_, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusNotFound, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("No Workspace for Task", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
client := setupClient(t, db, ps, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
workspaceBuild := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).Do()
|
||||
task := dbgen.Task(t, db, database.TaskTable{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
TemplateVersionID: workspaceBuild.Build.TemplateVersionID,
|
||||
Prompt: "no workspace",
|
||||
})
|
||||
|
||||
_, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusInternalServerError, apiErr.StatusCode())
|
||||
require.Equal(t, "Task does not have a workspace.", apiErr.Message)
|
||||
})
|
||||
|
||||
t.Run("Workspace not found", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
var workspaceID uuid.UUID
|
||||
wrapped := aiTaskStoreWrapper{
|
||||
Store: db,
|
||||
getWorkspaceByID: func(ctx context.Context, id uuid.UUID) (database.Workspace, error) {
|
||||
if id == workspaceID && id != uuid.Nil {
|
||||
return database.Workspace{}, sql.ErrNoRows
|
||||
}
|
||||
return db.GetWorkspaceByID(ctx, id)
|
||||
},
|
||||
}
|
||||
client := setupClient(t, wrapped, ps, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
task, workspaceIDValue := setupWorkspaceTask(t, db, user)
|
||||
workspaceID = workspaceIDValue
|
||||
|
||||
_, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusNotFound, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Workspace lookup internal error", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
var workspaceID uuid.UUID
|
||||
wrapped := aiTaskStoreWrapper{
|
||||
Store: db,
|
||||
getWorkspaceByID: func(ctx context.Context, id uuid.UUID) (database.Workspace, error) {
|
||||
if id == workspaceID && id != uuid.Nil {
|
||||
return database.Workspace{}, xerrors.New("boom")
|
||||
}
|
||||
return db.GetWorkspaceByID(ctx, id)
|
||||
},
|
||||
}
|
||||
client := setupClient(t, wrapped, ps, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
task, workspaceIDValue := setupWorkspaceTask(t, db, user)
|
||||
workspaceID = workspaceIDValue
|
||||
|
||||
_, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusInternalServerError, apiErr.StatusCode())
|
||||
require.Equal(t, "Internal error fetching task workspace.", apiErr.Message)
|
||||
})
|
||||
|
||||
t.Run("Build Forbidden", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
auth := &coderdtest.FakeAuthorizer{
|
||||
ConditionalReturn: func(_ context.Context, _ rbac.Subject, action policy.Action, object rbac.Object) error {
|
||||
if action == policy.ActionWorkspaceStop && object.Type == rbac.ResourceWorkspace.Type {
|
||||
return rbac.UnauthorizedError{}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
client := setupClient(t, db, ps, auth)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
task, _ := setupWorkspaceTask(t, db, user)
|
||||
|
||||
_, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusForbidden, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Job already in progress", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
client := setupClient(t, db, ps, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
workspaceBuild := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).
|
||||
WithTask(database.TaskTable{
|
||||
Prompt: "pause me",
|
||||
}, nil).
|
||||
Starting().
|
||||
Do()
|
||||
|
||||
_, err := client.PauseTask(ctx, codersdk.Me, workspaceBuild.Task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusConflict, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Build Internal Error", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
wrapped := aiTaskStoreWrapper{
|
||||
Store: db,
|
||||
insertWorkspaceBuild: func(ctx context.Context, arg database.InsertWorkspaceBuildParams) error {
|
||||
return xerrors.New("insert failed")
|
||||
},
|
||||
}
|
||||
client := setupClient(t, wrapped, ps, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
task, _ := setupWorkspaceTask(t, db, user)
|
||||
|
||||
_, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusInternalServerError, apiErr.StatusCode())
|
||||
})
|
||||
}
|
||||
|
||||
Generated
+98
-11
@@ -5824,6 +5824,48 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/tasks/{user}/{task}/pause": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Tasks"
|
||||
],
|
||||
"summary": "Pause task",
|
||||
"operationId": "pause-task",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Username, user ID, or 'me' for the authenticated user",
|
||||
"name": "user",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Task ID",
|
||||
"name": "task",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"202": {
|
||||
"description": "Accepted",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.PauseTaskResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/tasks/{user}/{task}/send": {
|
||||
"post": {
|
||||
"security": [
|
||||
@@ -14102,14 +14144,16 @@ const docTemplate = `{
|
||||
"cli",
|
||||
"ssh_connection",
|
||||
"vscode_connection",
|
||||
"jetbrains_connection"
|
||||
"jetbrains_connection",
|
||||
"task_manual_pause"
|
||||
],
|
||||
"x-enum-varnames": [
|
||||
"CreateWorkspaceBuildReasonDashboard",
|
||||
"CreateWorkspaceBuildReasonCLI",
|
||||
"CreateWorkspaceBuildReasonSSHConnection",
|
||||
"CreateWorkspaceBuildReasonVSCodeConnection",
|
||||
"CreateWorkspaceBuildReasonJetbrainsConnection"
|
||||
"CreateWorkspaceBuildReasonJetbrainsConnection",
|
||||
"CreateWorkspaceBuildReasonTaskManualPause"
|
||||
]
|
||||
},
|
||||
"codersdk.CreateWorkspaceBuildRequest": {
|
||||
@@ -14143,7 +14187,8 @@ const docTemplate = `{
|
||||
"cli",
|
||||
"ssh_connection",
|
||||
"vscode_connection",
|
||||
"jetbrains_connection"
|
||||
"jetbrains_connection",
|
||||
"task_manual_pause"
|
||||
],
|
||||
"allOf": [
|
||||
{
|
||||
@@ -14901,6 +14946,16 @@ const docTemplate = `{
|
||||
"ExperimentWorkspaceSharing": "Enables updating workspace ACLs for sharing with users and groups.",
|
||||
"ExperimentWorkspaceUsage": "Enables the new workspace usage tracking."
|
||||
},
|
||||
"x-enum-descriptions": [
|
||||
"This isn't used for anything.",
|
||||
"This should not be taken out of experiments until we have redesigned the feature.",
|
||||
"Sends notifications via SMTP and webhooks following certain events.",
|
||||
"Enables the new workspace usage tracking.",
|
||||
"Enables web push notifications through the browser.",
|
||||
"Enables OAuth2 provider functionality.",
|
||||
"Enables the MCP HTTP server functionality.",
|
||||
"Enables updating workspace ACLs for sharing with users and groups."
|
||||
],
|
||||
"x-enum-varnames": [
|
||||
"ExperimentExample",
|
||||
"ExperimentAutoFillParameters",
|
||||
@@ -17004,6 +17059,14 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.PauseTaskResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"workspace_build": {
|
||||
"$ref": "#/definitions/codersdk.WorkspaceBuild"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.Permission": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -18813,6 +18876,10 @@ const docTemplate = `{
|
||||
"description": {
|
||||
"type": "string"
|
||||
},
|
||||
"disable_module_cache": {
|
||||
"description": "DisableModuleCache disables the use of cached Terraform modules during\nprovisioning.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"display_name": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -19769,6 +19836,10 @@ const docTemplate = `{
|
||||
"description": "DisableEveryoneGroupAccess allows optionally disabling the default\nbehavior of granting the 'everyone' group access to use the template.\nIf this is set to true, the template will not be available to all users,\nand must be explicitly granted to users or groups in the permissions settings\nof the template.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"disable_module_cache": {
|
||||
"description": "DisableModuleCache disables the using of cached Terraform modules during\nprovisioning. It is recommended not to disable this.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"display_name": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -20819,6 +20890,14 @@ const docTemplate = `{
|
||||
}
|
||||
]
|
||||
},
|
||||
"subagent_id": {
|
||||
"format": "uuid",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/uuid.NullUUID"
|
||||
}
|
||||
]
|
||||
},
|
||||
"workspace_folder": {
|
||||
"type": "string"
|
||||
}
|
||||
@@ -21487,10 +21566,12 @@ const docTemplate = `{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"p50": {
|
||||
"type": "number"
|
||||
"type": "number",
|
||||
"format": "float64"
|
||||
},
|
||||
"p95": {
|
||||
"type": "number"
|
||||
"type": "number",
|
||||
"format": "float64"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -21876,10 +21957,12 @@ const docTemplate = `{
|
||||
]
|
||||
},
|
||||
"recv": {
|
||||
"type": "integer"
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"sent": {
|
||||
"type": "integer"
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -22506,21 +22589,24 @@ const docTemplate = `{
|
||||
"description": "keyed by DERP Region ID",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "integer"
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
}
|
||||
},
|
||||
"regionV4Latency": {
|
||||
"description": "keyed by DERP Region ID",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "integer"
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
}
|
||||
},
|
||||
"regionV6Latency": {
|
||||
"description": "keyed by DERP Region ID",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "integer"
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
}
|
||||
},
|
||||
"udp": {
|
||||
@@ -22763,7 +22849,8 @@ const docTemplate = `{
|
||||
"description": "RegionScore scales latencies of DERP regions by a given scaling\nfactor when determining which region to use as the home\n(\"preferred\") DERP. Scores in the range (0, 1) will cause this\nregion to be proportionally more preferred, and scores in the range\n(1, ∞) will penalize a region.\n\nIf a region is not present in this map, it is treated as having a\nscore of 1.0.\n\nScores should not be 0 or negative; such scores will be ignored.\n\nA nil map means no change from the previous value (if any); an empty\nnon-nil map can be sent to reset all scores back to 1.0.",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "number"
|
||||
"type": "number",
|
||||
"format": "float64"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Generated
+94
-11
@@ -5147,6 +5147,44 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/tasks/{user}/{task}/pause": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"consumes": ["application/json"],
|
||||
"tags": ["Tasks"],
|
||||
"summary": "Pause task",
|
||||
"operationId": "pause-task",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Username, user ID, or 'me' for the authenticated user",
|
||||
"name": "user",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Task ID",
|
||||
"name": "task",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"202": {
|
||||
"description": "Accepted",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.PauseTaskResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/tasks/{user}/{task}/send": {
|
||||
"post": {
|
||||
"security": [
|
||||
@@ -12662,14 +12700,16 @@
|
||||
"cli",
|
||||
"ssh_connection",
|
||||
"vscode_connection",
|
||||
"jetbrains_connection"
|
||||
"jetbrains_connection",
|
||||
"task_manual_pause"
|
||||
],
|
||||
"x-enum-varnames": [
|
||||
"CreateWorkspaceBuildReasonDashboard",
|
||||
"CreateWorkspaceBuildReasonCLI",
|
||||
"CreateWorkspaceBuildReasonSSHConnection",
|
||||
"CreateWorkspaceBuildReasonVSCodeConnection",
|
||||
"CreateWorkspaceBuildReasonJetbrainsConnection"
|
||||
"CreateWorkspaceBuildReasonJetbrainsConnection",
|
||||
"CreateWorkspaceBuildReasonTaskManualPause"
|
||||
]
|
||||
},
|
||||
"codersdk.CreateWorkspaceBuildRequest": {
|
||||
@@ -12699,7 +12739,8 @@
|
||||
"cli",
|
||||
"ssh_connection",
|
||||
"vscode_connection",
|
||||
"jetbrains_connection"
|
||||
"jetbrains_connection",
|
||||
"task_manual_pause"
|
||||
],
|
||||
"allOf": [
|
||||
{
|
||||
@@ -13442,6 +13483,16 @@
|
||||
"ExperimentWorkspaceSharing": "Enables updating workspace ACLs for sharing with users and groups.",
|
||||
"ExperimentWorkspaceUsage": "Enables the new workspace usage tracking."
|
||||
},
|
||||
"x-enum-descriptions": [
|
||||
"This isn't used for anything.",
|
||||
"This should not be taken out of experiments until we have redesigned the feature.",
|
||||
"Sends notifications via SMTP and webhooks following certain events.",
|
||||
"Enables the new workspace usage tracking.",
|
||||
"Enables web push notifications through the browser.",
|
||||
"Enables OAuth2 provider functionality.",
|
||||
"Enables the MCP HTTP server functionality.",
|
||||
"Enables updating workspace ACLs for sharing with users and groups."
|
||||
],
|
||||
"x-enum-varnames": [
|
||||
"ExperimentExample",
|
||||
"ExperimentAutoFillParameters",
|
||||
@@ -15467,6 +15518,14 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.PauseTaskResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"workspace_build": {
|
||||
"$ref": "#/definitions/codersdk.WorkspaceBuild"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.Permission": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -17208,6 +17267,10 @@
|
||||
"description": {
|
||||
"type": "string"
|
||||
},
|
||||
"disable_module_cache": {
|
||||
"description": "DisableModuleCache disables the use of cached Terraform modules during\nprovisioning.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"display_name": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -18118,6 +18181,10 @@
|
||||
"description": "DisableEveryoneGroupAccess allows optionally disabling the default\nbehavior of granting the 'everyone' group access to use the template.\nIf this is set to true, the template will not be available to all users,\nand must be explicitly granted to users or groups in the permissions settings\nof the template.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"disable_module_cache": {
|
||||
"description": "DisableModuleCache disables the using of cached Terraform modules during\nprovisioning. It is recommended not to disable this.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"display_name": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -19123,6 +19190,14 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"subagent_id": {
|
||||
"format": "uuid",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/uuid.NullUUID"
|
||||
}
|
||||
]
|
||||
},
|
||||
"workspace_folder": {
|
||||
"type": "string"
|
||||
}
|
||||
@@ -19736,10 +19811,12 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"p50": {
|
||||
"type": "number"
|
||||
"type": "number",
|
||||
"format": "float64"
|
||||
},
|
||||
"p95": {
|
||||
"type": "number"
|
||||
"type": "number",
|
||||
"format": "float64"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -20104,10 +20181,12 @@
|
||||
]
|
||||
},
|
||||
"recv": {
|
||||
"type": "integer"
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"sent": {
|
||||
"type": "integer"
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -20690,21 +20769,24 @@
|
||||
"description": "keyed by DERP Region ID",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "integer"
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
}
|
||||
},
|
||||
"regionV4Latency": {
|
||||
"description": "keyed by DERP Region ID",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "integer"
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
}
|
||||
},
|
||||
"regionV6Latency": {
|
||||
"description": "keyed by DERP Region ID",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "integer"
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
}
|
||||
},
|
||||
"udp": {
|
||||
@@ -20941,7 +21023,8 @@
|
||||
"description": "RegionScore scales latencies of DERP regions by a given scaling\nfactor when determining which region to use as the home\n(\"preferred\") DERP. Scores in the range (0, 1) will cause this\nregion to be proportionally more preferred, and scores in the range\n(1, ∞) will penalize a region.\n\nIf a region is not present in this map, it is treated as having a\nscore of 1.0.\n\nScores should not be 0 or negative; such scores will be ignored.\n\nA nil map means no change from the previous value (if any); an empty\nnon-nil map can be sent to reset all scores back to 1.0.",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "number"
|
||||
"type": "number",
|
||||
"format": "float64"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -95,15 +95,26 @@ func (t *Tracker) FlushToDB(ctx context.Context, db database.Store, replicaID uu
|
||||
t.mu.Unlock()
|
||||
|
||||
//nolint:gocritic // This is the actual package doing boundary usage tracking.
|
||||
_, err := db.UpsertBoundaryUsageStats(dbauthz.AsBoundaryUsageTracker(ctx), database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replicaID,
|
||||
UniqueWorkspacesCount: workspaceCount, // cumulative, for UPDATE
|
||||
UniqueUsersCount: userCount, // cumulative, for UPDATE
|
||||
UniqueWorkspacesDelta: workspaceDelta, // delta, for INSERT
|
||||
UniqueUsersDelta: userDelta, // delta, for INSERT
|
||||
AllowedRequests: allowed,
|
||||
DeniedRequests: denied,
|
||||
})
|
||||
authCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
err := db.InTx(func(tx database.Store) error {
|
||||
// The advisory lock ensures a clean period cutover by preventing
|
||||
// this upsert from racing with the aggregate+delete in
|
||||
// GetAndResetBoundaryUsageSummary. Without it, upserted data
|
||||
// could be lost or miscounted across periods.
|
||||
if err := tx.AcquireLock(authCtx, database.LockIDBoundaryUsageStats); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err := tx.UpsertBoundaryUsageStats(authCtx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replicaID,
|
||||
UniqueWorkspacesCount: workspaceCount, // cumulative, for UPDATE
|
||||
UniqueUsersCount: userCount, // cumulative, for UPDATE
|
||||
UniqueWorkspacesDelta: workspaceDelta, // delta, for INSERT
|
||||
UniqueUsersDelta: userDelta, // delta, for INSERT
|
||||
AllowedRequests: allowed,
|
||||
DeniedRequests: denied,
|
||||
})
|
||||
return err
|
||||
}, nil)
|
||||
|
||||
// Always reset cumulative counts to prevent unbounded memory growth (e.g.
|
||||
// if the DB is unreachable). Copy delta maps to preserve any Track() calls
|
||||
|
||||
@@ -45,7 +45,7 @@ func TestTracker_Track_Single(t *testing.T) {
|
||||
|
||||
// Verify the data was written correctly.
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(1), summary.UniqueUsers)
|
||||
@@ -73,7 +73,7 @@ func TestTracker_Track_DuplicateWorkspaceUser(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces, "should be 1 unique workspace")
|
||||
require.Equal(t, int64(1), summary.UniqueUsers, "should be 1 unique user")
|
||||
@@ -102,7 +102,7 @@ func TestTracker_Track_MultipleWorkspacesUsers(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(3), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(2), summary.UniqueUsers)
|
||||
@@ -140,7 +140,7 @@ func TestTracker_Track_Concurrent(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(numGoroutines), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(numGoroutines), summary.UniqueUsers)
|
||||
@@ -175,7 +175,7 @@ func TestTracker_FlushToDB_Accumulates(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(1), summary.UniqueUsers)
|
||||
@@ -202,7 +202,7 @@ func TestTracker_FlushToDB_NewPeriod(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Simulate telemetry reset (new period).
|
||||
err = db.ResetBoundaryUsageStats(boundaryCtx)
|
||||
_, err = db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Track new data.
|
||||
@@ -215,7 +215,7 @@ func TestTracker_FlushToDB_NewPeriod(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// The summary should only contain the new data after reset.
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces, "should only count new workspace")
|
||||
require.Equal(t, int64(1), summary.UniqueUsers, "should only count new user")
|
||||
@@ -237,7 +237,7 @@ func TestTracker_FlushToDB_NoActivity(t *testing.T) {
|
||||
|
||||
// Verify nothing was written to DB.
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(0), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(0), summary.AllowedRequests)
|
||||
@@ -265,7 +265,7 @@ func TestUpsertBoundaryUsageStats_Insert(t *testing.T) {
|
||||
require.True(t, newPeriod, "should return true for insert")
|
||||
|
||||
// Verify INSERT used the delta values, not cumulative.
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(5), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(3), summary.UniqueUsers)
|
||||
@@ -301,7 +301,7 @@ func TestUpsertBoundaryUsageStats_Update(t *testing.T) {
|
||||
require.False(t, newPeriod, "should return false for update")
|
||||
|
||||
// Verify UPDATE used cumulative values.
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(8), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(5), summary.UniqueUsers)
|
||||
@@ -309,7 +309,7 @@ func TestUpsertBoundaryUsageStats_Update(t *testing.T) {
|
||||
require.Equal(t, int64(10+20), summary.DeniedRequests)
|
||||
}
|
||||
|
||||
func TestGetBoundaryUsageSummary_MultipleReplicas(t *testing.T) {
|
||||
func TestGetAndResetBoundaryUsageSummary_MultipleReplicas(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
@@ -347,7 +347,7 @@ func TestGetBoundaryUsageSummary_MultipleReplicas(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify aggregation (SUM of all replicas).
|
||||
@@ -357,13 +357,13 @@ func TestGetBoundaryUsageSummary_MultipleReplicas(t *testing.T) {
|
||||
require.Equal(t, int64(45), summary.DeniedRequests) // 10 + 15 + 20
|
||||
}
|
||||
|
||||
func TestGetBoundaryUsageSummary_Empty(t *testing.T) {
|
||||
func TestGetAndResetBoundaryUsageSummary_Empty(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := dbauthz.AsBoundaryUsageTracker(context.Background())
|
||||
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
|
||||
// COALESCE should return 0 for all columns.
|
||||
@@ -373,7 +373,7 @@ func TestGetBoundaryUsageSummary_Empty(t *testing.T) {
|
||||
require.Equal(t, int64(0), summary.DeniedRequests)
|
||||
}
|
||||
|
||||
func TestResetBoundaryUsageStats(t *testing.T) {
|
||||
func TestGetAndResetBoundaryUsageSummary_DeletesData(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
@@ -391,61 +391,19 @@ func TestResetBoundaryUsageStats(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Verify data exists.
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, summary.AllowedRequests, int64(0))
|
||||
|
||||
// Reset.
|
||||
err = db.ResetBoundaryUsageStats(ctx)
|
||||
// Should return the summary AND delete all data.
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1+2+3+4+5), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(10+20+30+40+50), summary.AllowedRequests)
|
||||
|
||||
// Verify all data is gone.
|
||||
summary, err = db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
summary, err = db.GetAndResetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(0), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(0), summary.AllowedRequests)
|
||||
}
|
||||
|
||||
func TestDeleteBoundaryUsageStatsByReplicaID(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
ctx := dbauthz.AsBoundaryUsageTracker(context.Background())
|
||||
|
||||
replica1 := uuid.New()
|
||||
replica2 := uuid.New()
|
||||
|
||||
// Insert stats for 2 replicas. Delta fields are used for INSERT.
|
||||
_, err := db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replica1,
|
||||
UniqueWorkspacesDelta: 10,
|
||||
UniqueUsersDelta: 5,
|
||||
AllowedRequests: 100,
|
||||
DeniedRequests: 10,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = db.UpsertBoundaryUsageStats(ctx, database.UpsertBoundaryUsageStatsParams{
|
||||
ReplicaID: replica2,
|
||||
UniqueWorkspacesDelta: 20,
|
||||
UniqueUsersDelta: 10,
|
||||
AllowedRequests: 200,
|
||||
DeniedRequests: 20,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Delete replica1's stats.
|
||||
err = db.DeleteBoundaryUsageStatsByReplicaID(ctx, replica1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify only replica2's stats remain.
|
||||
summary, err := db.GetBoundaryUsageSummary(ctx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(20), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(200), summary.AllowedRequests)
|
||||
}
|
||||
|
||||
func TestTracker_TelemetryCycle(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -477,8 +435,8 @@ func TestTracker_TelemetryCycle(t *testing.T) {
|
||||
require.NoError(t, tracker2.FlushToDB(ctx, db, replica2))
|
||||
require.NoError(t, tracker3.FlushToDB(ctx, db, replica3))
|
||||
|
||||
// Telemetry aggregates.
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
// Telemetry aggregates and resets (simulating telemetry report sent).
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify aggregation.
|
||||
@@ -487,15 +445,12 @@ func TestTracker_TelemetryCycle(t *testing.T) {
|
||||
require.Equal(t, int64(105), summary.AllowedRequests) // 25 + 75 + 5
|
||||
require.Equal(t, int64(15), summary.DeniedRequests) // 3 + 12 + 0
|
||||
|
||||
// Telemetry resets stats (simulating telemetry report sent).
|
||||
require.NoError(t, db.ResetBoundaryUsageStats(boundaryCtx))
|
||||
|
||||
// Next flush from trackers should detect new period.
|
||||
tracker1.Track(uuid.New(), uuid.New(), 1, 0)
|
||||
require.NoError(t, tracker1.FlushToDB(ctx, db, replica1))
|
||||
|
||||
// Verify trackers reset their in-memory state.
|
||||
summary, err = db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err = db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(1), summary.AllowedRequests)
|
||||
@@ -513,30 +468,24 @@ func TestTracker_FlushToDB_NoStaleDataAfterReset(t *testing.T) {
|
||||
workspaceID := uuid.New()
|
||||
ownerID := uuid.New()
|
||||
|
||||
// Track some data, flush, and verify.
|
||||
// Track some data and flush.
|
||||
tracker.Track(workspaceID, ownerID, 10, 5)
|
||||
err := tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
// Simulate telemetry reset (new period) - this also verifies the data.
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(10), summary.AllowedRequests)
|
||||
|
||||
// Simulate telemetry reset (new period).
|
||||
err = db.ResetBoundaryUsageStats(boundaryCtx)
|
||||
require.NoError(t, err)
|
||||
summary, err = db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(0), summary.AllowedRequests)
|
||||
|
||||
// Flush again without any new Track() calls. This should not write stale
|
||||
// data back to the DB.
|
||||
err = tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Summary should be empty (no stale data written).
|
||||
summary, err = db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err = db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(0), summary.UniqueWorkspaces)
|
||||
require.Equal(t, int64(0), summary.UniqueUsers)
|
||||
@@ -582,7 +531,7 @@ func TestTracker_ConcurrentFlushAndTrack(t *testing.T) {
|
||||
|
||||
// Verify stats are non-negative.
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.GreaterOrEqual(t, summary.AllowedRequests, int64(0))
|
||||
require.GreaterOrEqual(t, summary.DeniedRequests, int64(0))
|
||||
@@ -597,6 +546,17 @@ type trackDuringUpsertDB struct {
|
||||
userID uuid.UUID
|
||||
}
|
||||
|
||||
func (s *trackDuringUpsertDB) InTx(fn func(database.Store) error, opts *database.TxOptions) error {
|
||||
return s.Store.InTx(func(tx database.Store) error {
|
||||
return fn(&trackDuringUpsertDB{
|
||||
Store: tx,
|
||||
tracker: s.tracker,
|
||||
workspaceID: s.workspaceID,
|
||||
userID: s.userID,
|
||||
})
|
||||
}, opts)
|
||||
}
|
||||
|
||||
func (s *trackDuringUpsertDB) UpsertBoundaryUsageStats(ctx context.Context, arg database.UpsertBoundaryUsageStatsParams) (bool, error) {
|
||||
s.tracker.Track(s.workspaceID, s.userID, 20, 10)
|
||||
return s.Store.UpsertBoundaryUsageStats(ctx, arg)
|
||||
@@ -626,17 +586,12 @@ func TestTracker_TrackDuringFlush(t *testing.T) {
|
||||
err := tracker.FlushToDB(ctx, trackingDB, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify first flush only wrote the initial data.
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(10), summary.AllowedRequests)
|
||||
|
||||
// The second flush should include the Track() call that happened during the
|
||||
// first flush's DB operation.
|
||||
// Second flush captures the Track() that happened during the first flush.
|
||||
err = tracker.FlushToDB(ctx, db, replicaID)
|
||||
require.NoError(t, err)
|
||||
|
||||
summary, err = db.GetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
// Verify both flushes are in the summary.
|
||||
summary, err := db.GetAndResetBoundaryUsageSummary(boundaryCtx, 60000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(10+20), summary.AllowedRequests)
|
||||
require.Equal(t, int64(5+10), summary.DeniedRequests)
|
||||
|
||||
@@ -0,0 +1,20 @@
|
||||
Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc.
|
||||
|
||||
MIT License
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
@@ -0,0 +1,440 @@
|
||||
// Package cachecompress creates a compressed cache of static files based on an http.FS. It is modified from
|
||||
// https://github.com/go-chi/chi Compressor middleware. See the LICENSE file in this directory for copyright
|
||||
// information.
|
||||
package cachecompress
|
||||
|
||||
import (
|
||||
"compress/flate"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
)
|
||||
|
||||
type cacheKey struct {
|
||||
encoding string
|
||||
urlPath string
|
||||
}
|
||||
|
||||
func (c cacheKey) filePath(cacheDir string) string {
|
||||
// URLs can have slashes or other characters we don't want the file system interpreting. So we just encode the path
|
||||
// to a flat base64 filename.
|
||||
filename := base64.URLEncoding.EncodeToString([]byte(c.urlPath))
|
||||
return filepath.Join(cacheDir, c.encoding, filename)
|
||||
}
|
||||
|
||||
func getCacheKey(encoding string, r *http.Request) cacheKey {
|
||||
return cacheKey{
|
||||
encoding: encoding,
|
||||
urlPath: r.URL.Path,
|
||||
}
|
||||
}
|
||||
|
||||
type ref struct {
|
||||
key cacheKey
|
||||
done chan struct{}
|
||||
err chan error
|
||||
}
|
||||
|
||||
// Compressor represents a set of encoding configurations.
|
||||
type Compressor struct {
|
||||
logger slog.Logger
|
||||
// The mapping of encoder names to encoder functions.
|
||||
encoders map[string]EncoderFunc
|
||||
// The mapping of pooled encoders to pools.
|
||||
pooledEncoders map[string]*sync.Pool
|
||||
// The list of encoders in order of decreasing precedence.
|
||||
encodingPrecedence []string
|
||||
level int // The compression level.
|
||||
cacheDir string
|
||||
orig http.FileSystem
|
||||
|
||||
mu sync.Mutex
|
||||
cache map[cacheKey]ref
|
||||
}
|
||||
|
||||
// NewCompressor creates a new Compressor that will handle encoding responses.
|
||||
//
|
||||
// The level should be one of the ones defined in the flate package.
|
||||
// The types are the content types that are allowed to be compressed.
|
||||
func NewCompressor(logger slog.Logger, level int, cacheDir string, orig http.FileSystem) *Compressor {
|
||||
c := &Compressor{
|
||||
logger: logger.Named("cachecompress"),
|
||||
level: level,
|
||||
encoders: make(map[string]EncoderFunc),
|
||||
pooledEncoders: make(map[string]*sync.Pool),
|
||||
cacheDir: cacheDir,
|
||||
orig: orig,
|
||||
cache: make(map[cacheKey]ref),
|
||||
}
|
||||
|
||||
// Set the default encoders. The precedence order uses the reverse
|
||||
// ordering that the encoders were added. This means adding new encoders
|
||||
// will move them to the front of the order.
|
||||
//
|
||||
// TODO:
|
||||
// lzma: Opera.
|
||||
// sdch: Chrome, Android. Gzip output + dictionary header.
|
||||
// br: Brotli, see https://github.com/go-chi/chi/pull/326
|
||||
|
||||
// HTTP 1.1 "deflate" (RFC 2616) stands for DEFLATE data (RFC 1951)
|
||||
// wrapped with zlib (RFC 1950). The zlib wrapper uses Adler-32
|
||||
// checksum compared to CRC-32 used in "gzip" and thus is faster.
|
||||
//
|
||||
// But.. some old browsers (MSIE, Safari 5.1) incorrectly expect
|
||||
// raw DEFLATE data only, without the mentioned zlib wrapper.
|
||||
// Because of this major confusion, most modern browsers try it
|
||||
// both ways, first looking for zlib headers.
|
||||
// Quote by Mark Adler: http://stackoverflow.com/a/9186091/385548
|
||||
//
|
||||
// The list of browsers having problems is quite big, see:
|
||||
// http://zoompf.com/blog/2012/02/lose-the-wait-http-compression
|
||||
// https://web.archive.org/web/20120321182910/http://www.vervestudios.co/projects/compression-tests/results
|
||||
//
|
||||
// That's why we prefer gzip over deflate. It's just more reliable
|
||||
// and not significantly slower than deflate.
|
||||
c.SetEncoder("deflate", encoderDeflate)
|
||||
|
||||
// TODO: Exception for old MSIE browsers that can't handle non-HTML?
|
||||
// https://zoompf.com/blog/2012/02/lose-the-wait-http-compression
|
||||
c.SetEncoder("gzip", encoderGzip)
|
||||
|
||||
// NOTE: Not implemented, intentionally:
|
||||
// case "compress": // LZW. Deprecated.
|
||||
// case "bzip2": // Too slow on-the-fly.
|
||||
// case "zopfli": // Too slow on-the-fly.
|
||||
// case "xz": // Too slow on-the-fly.
|
||||
return c
|
||||
}
|
||||
|
||||
// SetEncoder can be used to set the implementation of a compression algorithm.
|
||||
//
|
||||
// The encoding should be a standardized identifier. See:
|
||||
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding
|
||||
//
|
||||
// For example, add the Brotli algorithm:
|
||||
//
|
||||
// import brotli_enc "gopkg.in/kothar/brotli-go.v0/enc"
|
||||
//
|
||||
// compressor := middleware.NewCompressor(5, "text/html")
|
||||
// compressor.SetEncoder("br", func(w io.Writer, level int) io.Writer {
|
||||
// params := brotli_enc.NewBrotliParams()
|
||||
// params.SetQuality(level)
|
||||
// return brotli_enc.NewBrotliWriter(params, w)
|
||||
// })
|
||||
func (c *Compressor) SetEncoder(encoding string, fn EncoderFunc) {
|
||||
encoding = strings.ToLower(encoding)
|
||||
if encoding == "" {
|
||||
panic("the encoding can not be empty")
|
||||
}
|
||||
if fn == nil {
|
||||
panic("attempted to set a nil encoder function")
|
||||
}
|
||||
|
||||
// If we are adding a new encoder that is already registered, we have to
|
||||
// clear that one out first.
|
||||
delete(c.pooledEncoders, encoding)
|
||||
delete(c.encoders, encoding)
|
||||
|
||||
// If the encoder supports Resetting (IoReseterWriter), then it can be pooled.
|
||||
encoder := fn(io.Discard, c.level)
|
||||
if _, ok := encoder.(ioResetterWriter); ok {
|
||||
pool := &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return fn(io.Discard, c.level)
|
||||
},
|
||||
}
|
||||
c.pooledEncoders[encoding] = pool
|
||||
}
|
||||
// If the encoder is not in the pooledEncoders, add it to the normal encoders.
|
||||
if _, ok := c.pooledEncoders[encoding]; !ok {
|
||||
c.encoders[encoding] = fn
|
||||
}
|
||||
|
||||
for i, v := range c.encodingPrecedence {
|
||||
if v == encoding {
|
||||
c.encodingPrecedence = append(c.encodingPrecedence[:i], c.encodingPrecedence[i+1:]...)
|
||||
}
|
||||
}
|
||||
|
||||
c.encodingPrecedence = append([]string{encoding}, c.encodingPrecedence...)
|
||||
}
|
||||
|
||||
// ServeHTTP returns the response from the orig file system, compressed if possible.
|
||||
func (c *Compressor) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
encoding := c.selectEncoder(r.Header)
|
||||
|
||||
// we can only serve a cached response if all the following:
|
||||
// 1. they requested an encoding we support
|
||||
// 2. they are requesting the whole file, not a range
|
||||
// 3. the method is GET
|
||||
if encoding == "" || r.Header.Get("Range") != "" || r.Method != "GET" {
|
||||
http.FileServer(c.orig).ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Whether we should serve a cached response also depends in a fairly complex way on the path and request
|
||||
// headers. In particular, we don't need a cached response for non-existing files/directories, and should not serve
|
||||
// a cached response if the correct Etag for the file is provided. This logic is all handled by the http.FileServer,
|
||||
// and we don't want to reimplement it here. So, what we'll do is send a HEAD request to the http.FileServer to see
|
||||
// what it would do.
|
||||
headReq := r.Clone(r.Context())
|
||||
headReq.Method = http.MethodHead
|
||||
headRW := &compressResponseWriter{
|
||||
w: io.Discard,
|
||||
headers: make(http.Header),
|
||||
}
|
||||
// deep-copy the headers already set on the response. This includes things like ETags.
|
||||
for key, values := range w.Header() {
|
||||
for _, value := range values {
|
||||
headRW.headers.Add(key, value)
|
||||
}
|
||||
}
|
||||
http.FileServer(c.orig).ServeHTTP(headRW, headReq)
|
||||
if headRW.code != http.StatusOK {
|
||||
// again, fall back to the file server. This is often a 404 Not Found, or a 304 Not Modified if they provided
|
||||
// the correct ETag.
|
||||
http.FileServer(c.orig).ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
cref := c.getRef(encoding, r)
|
||||
c.serveRef(w, r, headRW.headers, cref)
|
||||
}
|
||||
|
||||
func (c *Compressor) serveRef(w http.ResponseWriter, r *http.Request, headers http.Header, cref ref) {
|
||||
select {
|
||||
case <-r.Context().Done():
|
||||
w.WriteHeader(http.StatusServiceUnavailable)
|
||||
return
|
||||
case <-cref.done:
|
||||
cachePath := cref.key.filePath(c.cacheDir)
|
||||
cacheFile, err := os.Open(cachePath)
|
||||
if err != nil {
|
||||
c.logger.Error(context.Background(), "failed to open compressed cache file",
|
||||
slog.F("cache_path", cachePath), slog.F("url_path", cref.key.urlPath), slog.Error(err))
|
||||
// fall back to uncompressed
|
||||
http.FileServer(c.orig).ServeHTTP(w, r)
|
||||
}
|
||||
defer cacheFile.Close()
|
||||
|
||||
// we need to remove or modify the Content-Length, if any, set by the FileServer because it will be for
|
||||
// uncompressed data and wrong.
|
||||
info, err := cacheFile.Stat()
|
||||
if err != nil {
|
||||
c.logger.Error(context.Background(), "failed to stat compressed cache file",
|
||||
slog.F("cache_path", cachePath), slog.F("url_path", cref.key.urlPath), slog.Error(err))
|
||||
headers.Del("Content-Length")
|
||||
} else {
|
||||
headers.Set("Content-Length", fmt.Sprintf("%d", info.Size()))
|
||||
}
|
||||
|
||||
for key, values := range headers {
|
||||
for _, value := range values {
|
||||
w.Header().Add(key, value)
|
||||
}
|
||||
}
|
||||
w.Header().Set("Content-Encoding", cref.key.encoding)
|
||||
w.Header().Add("Vary", "Accept-Encoding")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, err = io.Copy(w, cacheFile)
|
||||
if err != nil {
|
||||
// most commonly, the writer will hang up before we are done.
|
||||
c.logger.Debug(context.Background(), "failed to write compressed cache file", slog.Error(err))
|
||||
}
|
||||
return
|
||||
case <-cref.err:
|
||||
// fall back to uncompressed
|
||||
http.FileServer(c.orig).ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Compressor) getRef(encoding string, r *http.Request) ref {
|
||||
ck := getCacheKey(encoding, r)
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
cref, ok := c.cache[ck]
|
||||
if ok {
|
||||
return cref
|
||||
}
|
||||
// we are the first to encode
|
||||
cref = ref{
|
||||
key: ck,
|
||||
|
||||
done: make(chan struct{}),
|
||||
err: make(chan error),
|
||||
}
|
||||
c.cache[ck] = cref
|
||||
go c.compress(context.Background(), encoding, cref, r)
|
||||
return cref
|
||||
}
|
||||
|
||||
func (c *Compressor) compress(ctx context.Context, encoding string, cref ref, r *http.Request) {
|
||||
cachePath := cref.key.filePath(c.cacheDir)
|
||||
var err error
|
||||
// we want to handle closing either cref.done or cref.err in a defer at the bottom of the stack so that the encoder
|
||||
// and cache file are both closed first (higher in the defer stack). This prevents data races where waiting HTTP
|
||||
// handlers start reading the file before all the data has been flushed.
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if rErr := os.Remove(cachePath); rErr != nil {
|
||||
// nolint: gocritic // best effort, just debug log any errors
|
||||
c.logger.Debug(ctx, "failed to remove cache file",
|
||||
slog.F("main_err", err), slog.F("remove_err", rErr), slog.F("cache_path", cachePath))
|
||||
}
|
||||
c.mu.Lock()
|
||||
delete(c.cache, cref.key)
|
||||
c.mu.Unlock()
|
||||
close(cref.err)
|
||||
return
|
||||
}
|
||||
close(cref.done)
|
||||
}()
|
||||
|
||||
cacheDir := filepath.Dir(cachePath)
|
||||
err = os.MkdirAll(cacheDir, 0o700)
|
||||
if err != nil {
|
||||
c.logger.Error(ctx, "failed to create cache directory", slog.F("cache_dir", cacheDir))
|
||||
return
|
||||
}
|
||||
|
||||
// We will truncate and overwrite any existing files. This is important in the case that we get restarted
|
||||
// with the same cache dir, possibly with different source files.
|
||||
cacheFile, err := os.OpenFile(cachePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
|
||||
if err != nil {
|
||||
c.logger.Error(ctx, "failed to open compression cache file",
|
||||
slog.F("path", cachePath), slog.Error(err))
|
||||
return
|
||||
}
|
||||
defer cacheFile.Close()
|
||||
encoder, cleanup := c.getEncoder(encoding, cacheFile)
|
||||
if encoder == nil {
|
||||
// can only hit this if there is a programming error
|
||||
c.logger.Critical(ctx, "got nil encoder", slog.F("encoding", encoding))
|
||||
err = xerrors.New("nil encoder")
|
||||
return
|
||||
}
|
||||
defer cleanup()
|
||||
defer encoder.Close() // ensures we flush, needs to be called before cleanup(), so we defer after it.
|
||||
|
||||
cw := &compressResponseWriter{
|
||||
w: encoder,
|
||||
headers: make(http.Header), // ignored
|
||||
}
|
||||
http.FileServer(c.orig).ServeHTTP(cw, r)
|
||||
if cw.code != http.StatusOK {
|
||||
// log at debug because this is likely just a 404
|
||||
c.logger.Debug(ctx, "file server failed to serve",
|
||||
slog.F("encoding", encoding), slog.F("url_path", cref.key.urlPath), slog.F("http_code", cw.code))
|
||||
// mark the error so that we clean up correctly
|
||||
err = xerrors.New("file server failed to serve")
|
||||
return
|
||||
}
|
||||
// success!
|
||||
}
|
||||
|
||||
// selectEncoder returns the name of the encoder
|
||||
func (c *Compressor) selectEncoder(h http.Header) string {
|
||||
header := h.Get("Accept-Encoding")
|
||||
|
||||
// Parse the names of all accepted algorithms from the header.
|
||||
accepted := strings.Split(strings.ToLower(header), ",")
|
||||
|
||||
// Find supported encoder by accepted list by precedence
|
||||
for _, name := range c.encodingPrecedence {
|
||||
if matchAcceptEncoding(accepted, name) {
|
||||
return name
|
||||
}
|
||||
}
|
||||
|
||||
// No encoder found to match the accepted encoding
|
||||
return ""
|
||||
}
|
||||
|
||||
// getEncoder returns a writer that encodes and writes to the provided writer, and a cleanup func.
|
||||
func (c *Compressor) getEncoder(name string, w io.Writer) (io.WriteCloser, func()) {
|
||||
if pool, ok := c.pooledEncoders[name]; ok {
|
||||
encoder, typeOK := pool.Get().(ioResetterWriter)
|
||||
if !typeOK {
|
||||
return nil, nil
|
||||
}
|
||||
cleanup := func() {
|
||||
pool.Put(encoder)
|
||||
}
|
||||
encoder.Reset(w)
|
||||
return encoder, cleanup
|
||||
}
|
||||
if fn, ok := c.encoders[name]; ok {
|
||||
return fn(w, c.level), func() {}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func matchAcceptEncoding(accepted []string, encoding string) bool {
|
||||
for _, v := range accepted {
|
||||
if strings.Contains(v, encoding) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// An EncoderFunc is a function that wraps the provided io.Writer with a
|
||||
// streaming compression algorithm and returns it.
|
||||
//
|
||||
// In case of failure, the function should return nil.
|
||||
type EncoderFunc func(w io.Writer, level int) io.WriteCloser
|
||||
|
||||
// Interface for types that allow resetting io.Writers.
|
||||
type ioResetterWriter interface {
|
||||
io.WriteCloser
|
||||
Reset(w io.Writer)
|
||||
}
|
||||
|
||||
func encoderGzip(w io.Writer, level int) io.WriteCloser {
|
||||
gw, err := gzip.NewWriterLevel(w, level)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return gw
|
||||
}
|
||||
|
||||
func encoderDeflate(w io.Writer, level int) io.WriteCloser {
|
||||
dw, err := flate.NewWriter(w, level)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return dw
|
||||
}
|
||||
|
||||
type compressResponseWriter struct {
|
||||
w io.Writer
|
||||
headers http.Header
|
||||
code int
|
||||
}
|
||||
|
||||
func (cw *compressResponseWriter) Header() http.Header {
|
||||
return cw.headers
|
||||
}
|
||||
|
||||
func (cw *compressResponseWriter) WriteHeader(code int) {
|
||||
cw.code = code
|
||||
}
|
||||
|
||||
func (cw *compressResponseWriter) Write(p []byte) (int, error) {
|
||||
if cw.code == 0 {
|
||||
cw.code = http.StatusOK
|
||||
}
|
||||
return cw.w.Write(p)
|
||||
}
|
||||
@@ -0,0 +1,227 @@
|
||||
package cachecompress
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/flate"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
func TestCompressorEncodings(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
path string
|
||||
expectedEncoding string
|
||||
acceptedEncodings []string
|
||||
}{
|
||||
{
|
||||
name: "no expected encodings due to no accepted encodings",
|
||||
path: "/file.html",
|
||||
acceptedEncodings: nil,
|
||||
expectedEncoding: "",
|
||||
},
|
||||
{
|
||||
name: "gzip is only encoding",
|
||||
path: "/file.html",
|
||||
acceptedEncodings: []string{"gzip"},
|
||||
expectedEncoding: "gzip",
|
||||
},
|
||||
{
|
||||
name: "gzip is preferred over deflate",
|
||||
path: "/file.html",
|
||||
acceptedEncodings: []string{"gzip", "deflate"},
|
||||
expectedEncoding: "gzip",
|
||||
},
|
||||
{
|
||||
name: "deflate is used",
|
||||
path: "/file.html",
|
||||
acceptedEncodings: []string{"deflate"},
|
||||
expectedEncoding: "deflate",
|
||||
},
|
||||
{
|
||||
name: "nop is preferred",
|
||||
path: "/file.html",
|
||||
acceptedEncodings: []string{"nop, gzip, deflate"},
|
||||
expectedEncoding: "nop",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
logger := testutil.Logger(t)
|
||||
tempDir := t.TempDir()
|
||||
cacheDir := filepath.Join(tempDir, "cache")
|
||||
err := os.MkdirAll(cacheDir, 0o700)
|
||||
require.NoError(t, err)
|
||||
srcDir := filepath.Join(tempDir, "src")
|
||||
err = os.MkdirAll(srcDir, 0o700)
|
||||
require.NoError(t, err)
|
||||
err = os.WriteFile(filepath.Join(srcDir, "file.html"), []byte("textstring"), 0o600)
|
||||
require.NoError(t, err)
|
||||
|
||||
compressor := NewCompressor(logger, 5, cacheDir, http.FS(os.DirFS(srcDir)))
|
||||
if len(compressor.encoders) != 0 || len(compressor.pooledEncoders) != 2 {
|
||||
t.Errorf("gzip and deflate should be pooled")
|
||||
}
|
||||
logger.Debug(context.Background(), "started compressor")
|
||||
|
||||
compressor.SetEncoder("nop", func(w io.Writer, _ int) io.WriteCloser {
|
||||
return nopEncoder{w}
|
||||
})
|
||||
|
||||
if len(compressor.encoders) != 1 {
|
||||
t.Errorf("nop encoder should be stored in the encoders map")
|
||||
}
|
||||
|
||||
ts := httptest.NewServer(compressor)
|
||||
defer ts.Close()
|
||||
// ctx := testutil.Context(t, testutil.WaitShort)
|
||||
ctx := context.Background()
|
||||
header, respString := testRequestWithAcceptedEncodings(ctx, t, ts, "GET", tc.path, tc.acceptedEncodings...)
|
||||
if respString != "textstring" {
|
||||
t.Errorf("response text doesn't match; expected:%q, got:%q", "textstring", respString)
|
||||
}
|
||||
if got := header.Get("Content-Encoding"); got != tc.expectedEncoding {
|
||||
t.Errorf("expected encoding %q but got %q", tc.expectedEncoding, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testRequestWithAcceptedEncodings(ctx context.Context, t *testing.T, ts *httptest.Server, method, path string, encodings ...string) (http.Header, string) {
|
||||
req, err := http.NewRequestWithContext(ctx, method, ts.URL+path, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return nil, ""
|
||||
}
|
||||
if len(encodings) > 0 {
|
||||
encodingsString := strings.Join(encodings, ",")
|
||||
req.Header.Set("Accept-Encoding", encodingsString)
|
||||
}
|
||||
|
||||
transport := http.DefaultTransport.(*http.Transport).Clone()
|
||||
transport.DisableCompression = true // prevent automatically setting gzip
|
||||
|
||||
resp, err := (&http.Client{Transport: transport}).Do(req)
|
||||
require.NoError(t, err)
|
||||
|
||||
respBody := decodeResponseBody(t, resp)
|
||||
defer resp.Body.Close()
|
||||
|
||||
return resp.Header, respBody
|
||||
}
|
||||
|
||||
func decodeResponseBody(t *testing.T, resp *http.Response) string {
|
||||
var reader io.ReadCloser
|
||||
t.Logf("encoding: '%s'", resp.Header.Get("Content-Encoding"))
|
||||
rawBody, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
t.Logf("raw body: %x", rawBody)
|
||||
switch resp.Header.Get("Content-Encoding") {
|
||||
case "gzip":
|
||||
var err error
|
||||
reader, err = gzip.NewReader(bytes.NewReader(rawBody))
|
||||
require.NoError(t, err)
|
||||
case "deflate":
|
||||
reader = flate.NewReader(bytes.NewReader(rawBody))
|
||||
default:
|
||||
return string(rawBody)
|
||||
}
|
||||
respBody, err := io.ReadAll(reader)
|
||||
require.NoError(t, err, "failed to read response body: %T %+v", err, err)
|
||||
err = reader.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
return string(respBody)
|
||||
}
|
||||
|
||||
type nopEncoder struct {
|
||||
io.Writer
|
||||
}
|
||||
|
||||
func (nopEncoder) Close() error { return nil }
|
||||
|
||||
// nolint: tparallel // we want to assert the state of the cache, so run synchronously
|
||||
func TestCompressorHeadings(t *testing.T) {
|
||||
t.Parallel()
|
||||
logger := testutil.Logger(t)
|
||||
tempDir := t.TempDir()
|
||||
cacheDir := filepath.Join(tempDir, "cache")
|
||||
err := os.MkdirAll(cacheDir, 0o700)
|
||||
require.NoError(t, err)
|
||||
srcDir := filepath.Join(tempDir, "src")
|
||||
err = os.MkdirAll(srcDir, 0o700)
|
||||
require.NoError(t, err)
|
||||
err = os.WriteFile(filepath.Join(srcDir, "file.html"), []byte("textstring"), 0o600)
|
||||
require.NoError(t, err)
|
||||
|
||||
compressor := NewCompressor(logger, 5, cacheDir, http.FS(os.DirFS(srcDir)))
|
||||
|
||||
ts := httptest.NewServer(compressor)
|
||||
defer ts.Close()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
path string
|
||||
}{
|
||||
{
|
||||
name: "exists",
|
||||
path: "/file.html",
|
||||
},
|
||||
{
|
||||
name: "not found",
|
||||
path: "/missing.html",
|
||||
},
|
||||
{
|
||||
name: "not found directory",
|
||||
path: "/a_directory/",
|
||||
},
|
||||
}
|
||||
|
||||
// nolint: paralleltest // we want to assert the state of the cache, so run synchronously
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
req := httptest.NewRequestWithContext(ctx, "GET", tc.path, nil)
|
||||
|
||||
// request directly from http.FileServer as our baseline response
|
||||
respROrig := httptest.NewRecorder()
|
||||
http.FileServer(http.Dir(srcDir)).ServeHTTP(respROrig, req)
|
||||
respOrig := respROrig.Result()
|
||||
|
||||
req.Header.Add("Accept-Encoding", "gzip")
|
||||
// serve twice so that we go thru cache hit and cache miss code
|
||||
for range 2 {
|
||||
respRec := httptest.NewRecorder()
|
||||
compressor.ServeHTTP(respRec, req)
|
||||
respComp := respRec.Result()
|
||||
|
||||
require.Equal(t, respOrig.StatusCode, respComp.StatusCode)
|
||||
for key, values := range respOrig.Header {
|
||||
if key == "Content-Length" {
|
||||
continue // we don't get length on compressed responses
|
||||
}
|
||||
require.Equal(t, values, respComp.Header[key])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
// only the cache hit should leave a file around
|
||||
files, err := os.ReadDir(srcDir)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, files, 1)
|
||||
}
|
||||
+18
-21
@@ -21,11 +21,9 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/andybalholm/brotli"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
"github.com/google/uuid"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/collectors"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
@@ -44,6 +42,7 @@ import (
|
||||
"cdr.dev/slog/v3"
|
||||
agentproto "github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/buildinfo"
|
||||
"github.com/coder/coder/v2/coderd/agentapi"
|
||||
"github.com/coder/coder/v2/coderd/agentapi/metadatabatcher"
|
||||
_ "github.com/coder/coder/v2/coderd/apidoc" // Used for swagger docs.
|
||||
"github.com/coder/coder/v2/coderd/appearance"
|
||||
@@ -462,10 +461,6 @@ func New(options *Options) *API {
|
||||
if siteCacheDir != "" {
|
||||
siteCacheDir = filepath.Join(siteCacheDir, "site")
|
||||
}
|
||||
binFS, binHashes, err := site.ExtractOrReadBinFS(siteCacheDir, site.FS())
|
||||
if err != nil {
|
||||
panic(xerrors.Errorf("read site bin failed: %w", err))
|
||||
}
|
||||
|
||||
metricsCache := metricscache.New(
|
||||
options.Database,
|
||||
@@ -658,9 +653,8 @@ func New(options *Options) *API {
|
||||
WebPushPublicKey: api.WebpushDispatcher.PublicKey(),
|
||||
Telemetry: api.Telemetry.Enabled(),
|
||||
}
|
||||
api.SiteHandler = site.New(&site.Options{
|
||||
BinFS: binFS,
|
||||
BinHashes: binHashes,
|
||||
api.SiteHandler, err = site.New(&site.Options{
|
||||
CacheDir: siteCacheDir,
|
||||
Database: options.Database,
|
||||
SiteFS: site.FS(),
|
||||
OAuth2Configs: oauthConfigs,
|
||||
@@ -672,6 +666,9 @@ func New(options *Options) *API {
|
||||
Logger: options.Logger.Named("site"),
|
||||
HideAITasks: options.DeploymentValues.HideAITasks.Value(),
|
||||
})
|
||||
if err != nil {
|
||||
options.Logger.Fatal(ctx, "failed to initialize site handler", slog.Error(err))
|
||||
}
|
||||
api.SiteHandler.Experiments.Store(&experiments)
|
||||
|
||||
if options.UpdateCheckOptions != nil {
|
||||
@@ -758,6 +755,7 @@ func New(options *Options) *API {
|
||||
api.agentProvider = stn
|
||||
if options.DeploymentValues.Prometheus.Enable {
|
||||
options.PrometheusRegistry.MustRegister(stn)
|
||||
api.lifecycleMetrics = agentapi.NewLifecycleMetrics(options.PrometheusRegistry)
|
||||
}
|
||||
api.NetworkTelemetryBatcher = tailnet.NewNetworkTelemetryBatcher(
|
||||
quartz.NewReal(),
|
||||
@@ -1080,6 +1078,7 @@ func New(options *Options) *API {
|
||||
r.Patch("/input", api.taskUpdateInput)
|
||||
r.Post("/send", api.taskSend)
|
||||
r.Get("/logs", api.taskLogs)
|
||||
r.Post("/pause", api.pauseTask)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -1892,8 +1891,9 @@ type API struct {
|
||||
healthCheckCache atomic.Pointer[healthsdk.HealthcheckReport]
|
||||
healthCheckProgress healthcheck.Progress
|
||||
|
||||
statsReporter *workspacestats.Reporter
|
||||
metadataBatcher *metadatabatcher.Batcher
|
||||
statsReporter *workspacestats.Reporter
|
||||
metadataBatcher *metadatabatcher.Batcher
|
||||
lifecycleMetrics *agentapi.LifecycleMetrics
|
||||
|
||||
Acquirer *provisionerdserver.Acquirer
|
||||
// dbRolluper rolls up template usage stats from raw agent and app
|
||||
@@ -1974,16 +1974,13 @@ func compressHandler(h http.Handler) http.Handler {
|
||||
"application/*",
|
||||
"image/*",
|
||||
)
|
||||
cmp.SetEncoder("br", func(w io.Writer, level int) io.Writer {
|
||||
return brotli.NewWriterLevel(w, level)
|
||||
})
|
||||
cmp.SetEncoder("zstd", func(w io.Writer, level int) io.Writer {
|
||||
zw, err := zstd.NewWriter(w, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(level)))
|
||||
if err != nil {
|
||||
panic("invalid zstd compressor: " + err.Error())
|
||||
}
|
||||
return zw
|
||||
})
|
||||
for encoding := range site.StandardEncoders {
|
||||
writeCloserFn := site.StandardEncoders[encoding]
|
||||
cmp.SetEncoder(encoding, func(w io.Writer, level int) io.Writer {
|
||||
writeCloser := writeCloserFn(w, level)
|
||||
return writeCloser
|
||||
})
|
||||
}
|
||||
|
||||
return cmp.Handler(h)
|
||||
}
|
||||
|
||||
@@ -1703,13 +1703,6 @@ func (q *querier) DeleteApplicationConnectAPIKeysByUserID(ctx context.Context, u
|
||||
return q.db.DeleteApplicationConnectAPIKeysByUserID(ctx, userID)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteBoundaryUsageStatsByReplicaID(ctx context.Context, replicaID uuid.UUID) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceBoundaryUsage); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.DeleteBoundaryUsageStatsByReplicaID(ctx, replicaID)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteCryptoKey(ctx context.Context, arg database.DeleteCryptoKeyParams) (database.CryptoKey, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceCryptoKey); err != nil {
|
||||
return database.CryptoKey{}, err
|
||||
@@ -1932,14 +1925,14 @@ func (q *querier) DeleteTailnetTunnel(ctx context.Context, arg database.DeleteTa
|
||||
return q.db.DeleteTailnetTunnel(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) DeleteTask(ctx context.Context, arg database.DeleteTaskParams) (database.TaskTable, error) {
|
||||
func (q *querier) DeleteTask(ctx context.Context, arg database.DeleteTaskParams) (uuid.UUID, error) {
|
||||
task, err := q.db.GetTaskByID(ctx, arg.ID)
|
||||
if err != nil {
|
||||
return database.TaskTable{}, err
|
||||
return uuid.UUID{}, err
|
||||
}
|
||||
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, task.RBACObject()); err != nil {
|
||||
return database.TaskTable{}, err
|
||||
return uuid.UUID{}, err
|
||||
}
|
||||
|
||||
return q.db.DeleteTask(ctx, arg)
|
||||
@@ -2223,6 +2216,13 @@ func (q *querier) GetAllTailnetTunnels(ctx context.Context) ([]database.TailnetT
|
||||
return q.db.GetAllTailnetTunnels(ctx)
|
||||
}
|
||||
|
||||
func (q *querier) GetAndResetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (database.GetAndResetBoundaryUsageSummaryRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceBoundaryUsage); err != nil {
|
||||
return database.GetAndResetBoundaryUsageSummaryRow{}, err
|
||||
}
|
||||
return q.db.GetAndResetBoundaryUsageSummary(ctx, maxStalenessMs)
|
||||
}
|
||||
|
||||
func (q *querier) GetAnnouncementBanners(ctx context.Context) (string, error) {
|
||||
// No authz checks
|
||||
return q.db.GetAnnouncementBanners(ctx)
|
||||
@@ -2271,13 +2271,6 @@ func (q *querier) GetAuthorizationUserRoles(ctx context.Context, userID uuid.UUI
|
||||
return q.db.GetAuthorizationUserRoles(ctx, userID)
|
||||
}
|
||||
|
||||
func (q *querier) GetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (database.GetBoundaryUsageSummaryRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceBoundaryUsage); err != nil {
|
||||
return database.GetBoundaryUsageSummaryRow{}, err
|
||||
}
|
||||
return q.db.GetBoundaryUsageSummary(ctx, maxStalenessMs)
|
||||
}
|
||||
|
||||
func (q *querier) GetConnectionLogsOffset(ctx context.Context, arg database.GetConnectionLogsOffsetParams) ([]database.GetConnectionLogsOffsetRow, error) {
|
||||
// Just like with the audit logs query, shortcut if the user is an owner.
|
||||
err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceConnectionLog)
|
||||
@@ -3893,6 +3886,14 @@ func (q *querier) GetWorkspaceBuildByWorkspaceIDAndBuildNumber(ctx context.Conte
|
||||
return q.db.GetWorkspaceBuildByWorkspaceIDAndBuildNumber(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) GetWorkspaceBuildMetricsByResourceID(ctx context.Context, id uuid.UUID) (database.GetWorkspaceBuildMetricsByResourceIDRow, error) {
|
||||
// Verify access to the resource first.
|
||||
if _, err := q.GetWorkspaceResourceByID(ctx, id); err != nil {
|
||||
return database.GetWorkspaceBuildMetricsByResourceIDRow{}, err
|
||||
}
|
||||
return q.db.GetWorkspaceBuildMetricsByResourceID(ctx, id)
|
||||
}
|
||||
|
||||
func (q *querier) GetWorkspaceBuildParameters(ctx context.Context, workspaceBuildID uuid.UUID) ([]database.WorkspaceBuildParameter, error) {
|
||||
// Authorized call to get the workspace build. If we can read the build,
|
||||
// we can read the params.
|
||||
@@ -4891,13 +4892,6 @@ func (q *querier) RemoveUserFromGroups(ctx context.Context, arg database.RemoveU
|
||||
return q.db.RemoveUserFromGroups(ctx, arg)
|
||||
}
|
||||
|
||||
func (q *querier) ResetBoundaryUsageStats(ctx context.Context) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceBoundaryUsage); err != nil {
|
||||
return err
|
||||
}
|
||||
return q.db.ResetBoundaryUsageStats(ctx)
|
||||
}
|
||||
|
||||
func (q *querier) RevokeDBCryptKey(ctx context.Context, activeKeyDigest string) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceSystem); err != nil {
|
||||
return err
|
||||
|
||||
@@ -277,11 +277,6 @@ func (s *MethodTestSuite) TestAPIKey() {
|
||||
dbm.EXPECT().DeleteApplicationConnectAPIKeysByUserID(gomock.Any(), a.UserID).Return(nil).AnyTimes()
|
||||
check.Args(a.UserID).Asserts(rbac.ResourceApiKey.WithOwner(a.UserID.String()), policy.ActionDelete).Returns()
|
||||
}))
|
||||
s.Run("DeleteBoundaryUsageStatsByReplicaID", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
replicaID := uuid.New()
|
||||
dbm.EXPECT().DeleteBoundaryUsageStatsByReplicaID(gomock.Any(), replicaID).Return(nil).AnyTimes()
|
||||
check.Args(replicaID).Asserts(rbac.ResourceBoundaryUsage, policy.ActionDelete)
|
||||
}))
|
||||
s.Run("DeleteExternalAuthLink", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
a := testutil.Fake(s.T(), faker, database.ExternalAuthLink{})
|
||||
dbm.EXPECT().GetExternalAuthLink(gomock.Any(), database.GetExternalAuthLinkParams{ProviderID: a.ProviderID, UserID: a.UserID}).Return(a, nil).AnyTimes()
|
||||
@@ -532,9 +527,9 @@ func (s *MethodTestSuite) TestGroup() {
|
||||
dbm.EXPECT().RemoveUserFromGroups(gomock.Any(), arg).Return(slice.New(g1.ID, g2.ID), nil).AnyTimes()
|
||||
check.Args(arg).Asserts(rbac.ResourceSystem, policy.ActionUpdate).Returns(slice.New(g1.ID, g2.ID))
|
||||
}))
|
||||
s.Run("ResetBoundaryUsageStats", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
dbm.EXPECT().ResetBoundaryUsageStats(gomock.Any()).Return(nil).AnyTimes()
|
||||
check.Args().Asserts(rbac.ResourceBoundaryUsage, policy.ActionDelete)
|
||||
s.Run("GetAndResetBoundaryUsageSummary", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
dbm.EXPECT().GetAndResetBoundaryUsageSummary(gomock.Any(), int64(1000)).Return(database.GetAndResetBoundaryUsageSummaryRow{}, nil).AnyTimes()
|
||||
check.Args(int64(1000)).Asserts(rbac.ResourceBoundaryUsage, policy.ActionDelete)
|
||||
}))
|
||||
|
||||
s.Run("UpdateGroupByID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
@@ -2041,6 +2036,18 @@ func (s *MethodTestSuite) TestWorkspace() {
|
||||
dbm.EXPECT().GetWorkspaceByID(gomock.Any(), build.WorkspaceID).Return(ws, nil).AnyTimes()
|
||||
check.Args(res.ID).Asserts(ws, policy.ActionRead).Returns(res)
|
||||
}))
|
||||
s.Run("GetWorkspaceBuildMetricsByResourceID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
ws := testutil.Fake(s.T(), faker, database.Workspace{})
|
||||
build := testutil.Fake(s.T(), faker, database.WorkspaceBuild{WorkspaceID: ws.ID})
|
||||
job := testutil.Fake(s.T(), faker, database.ProvisionerJob{ID: build.JobID, Type: database.ProvisionerJobTypeWorkspaceBuild})
|
||||
res := testutil.Fake(s.T(), faker, database.WorkspaceResource{JobID: build.JobID})
|
||||
dbm.EXPECT().GetWorkspaceResourceByID(gomock.Any(), res.ID).Return(res, nil).AnyTimes()
|
||||
dbm.EXPECT().GetProvisionerJobByID(gomock.Any(), res.JobID).Return(job, nil).AnyTimes()
|
||||
dbm.EXPECT().GetWorkspaceBuildByJobID(gomock.Any(), res.JobID).Return(build, nil).AnyTimes()
|
||||
dbm.EXPECT().GetWorkspaceByID(gomock.Any(), build.WorkspaceID).Return(ws, nil).AnyTimes()
|
||||
dbm.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), res.ID).Return(database.GetWorkspaceBuildMetricsByResourceIDRow{}, nil).AnyTimes()
|
||||
check.Args(res.ID).Asserts(ws, policy.ActionRead).Returns(database.GetWorkspaceBuildMetricsByResourceIDRow{})
|
||||
}))
|
||||
s.Run("Build/GetWorkspaceResourcesByJobID", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
ws := testutil.Fake(s.T(), faker, database.Workspace{})
|
||||
build := testutil.Fake(s.T(), faker, database.WorkspaceBuild{WorkspaceID: ws.ID})
|
||||
@@ -2517,8 +2524,8 @@ func (s *MethodTestSuite) TestTasks() {
|
||||
DeletedAt: dbtime.Now(),
|
||||
}
|
||||
dbm.EXPECT().GetTaskByID(gomock.Any(), task.ID).Return(task, nil).AnyTimes()
|
||||
dbm.EXPECT().DeleteTask(gomock.Any(), arg).Return(database.TaskTable{}, nil).AnyTimes()
|
||||
check.Args(arg).Asserts(task, policy.ActionDelete).Returns(database.TaskTable{})
|
||||
dbm.EXPECT().DeleteTask(gomock.Any(), arg).Return(task.ID, nil).AnyTimes()
|
||||
check.Args(arg).Asserts(task, policy.ActionDelete).Returns(task.ID)
|
||||
}))
|
||||
s.Run("InsertTask", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
|
||||
tpl := testutil.Fake(s.T(), faker, database.Template{})
|
||||
@@ -2991,10 +2998,6 @@ func (s *MethodTestSuite) TestSystemFunctions() {
|
||||
dbm.EXPECT().GetAuthorizationUserRoles(gomock.Any(), u.ID).Return(database.GetAuthorizationUserRolesRow{}, nil).AnyTimes()
|
||||
check.Args(u.ID).Asserts(rbac.ResourceSystem, policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetBoundaryUsageSummary", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
dbm.EXPECT().GetBoundaryUsageSummary(gomock.Any(), int64(1000)).Return(database.GetBoundaryUsageSummaryRow{}, nil).AnyTimes()
|
||||
check.Args(int64(1000)).Asserts(rbac.ResourceBoundaryUsage, policy.ActionRead)
|
||||
}))
|
||||
s.Run("GetDERPMeshKey", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
|
||||
dbm.EXPECT().GetDERPMeshKey(gomock.Any()).Return("testing", nil).AnyTimes()
|
||||
check.Args().Asserts(rbac.ResourceSystem, policy.ActionRead)
|
||||
|
||||
@@ -58,6 +58,61 @@ type WorkspaceBuildBuilder struct {
|
||||
jobStatus database.ProvisionerJobStatus
|
||||
taskAppID uuid.UUID
|
||||
taskSeed database.TaskTable
|
||||
|
||||
// Individual timestamp fields for job customization.
|
||||
jobCreatedAt time.Time
|
||||
jobStartedAt time.Time
|
||||
jobUpdatedAt time.Time
|
||||
jobCompletedAt time.Time
|
||||
|
||||
jobError string // Error message for failed jobs
|
||||
jobErrorCode string // Error code for failed jobs
|
||||
}
|
||||
|
||||
// BuilderOption is a functional option for customizing job timestamps
|
||||
// on status methods.
|
||||
type BuilderOption func(*WorkspaceBuildBuilder)
|
||||
|
||||
// WithJobCreatedAt sets the CreatedAt timestamp for the provisioner job.
|
||||
func WithJobCreatedAt(t time.Time) BuilderOption {
|
||||
return func(b *WorkspaceBuildBuilder) {
|
||||
b.jobCreatedAt = t
|
||||
}
|
||||
}
|
||||
|
||||
// WithJobStartedAt sets the StartedAt timestamp for the provisioner job.
|
||||
func WithJobStartedAt(t time.Time) BuilderOption {
|
||||
return func(b *WorkspaceBuildBuilder) {
|
||||
b.jobStartedAt = t
|
||||
}
|
||||
}
|
||||
|
||||
// WithJobUpdatedAt sets the UpdatedAt timestamp for the provisioner job.
|
||||
func WithJobUpdatedAt(t time.Time) BuilderOption {
|
||||
return func(b *WorkspaceBuildBuilder) {
|
||||
b.jobUpdatedAt = t
|
||||
}
|
||||
}
|
||||
|
||||
// WithJobCompletedAt sets the CompletedAt timestamp for the provisioner job.
|
||||
func WithJobCompletedAt(t time.Time) BuilderOption {
|
||||
return func(b *WorkspaceBuildBuilder) {
|
||||
b.jobCompletedAt = t
|
||||
}
|
||||
}
|
||||
|
||||
// WithJobError sets the error message for the provisioner job.
|
||||
func WithJobError(msg string) BuilderOption {
|
||||
return func(b *WorkspaceBuildBuilder) {
|
||||
b.jobError = msg
|
||||
}
|
||||
}
|
||||
|
||||
// WithJobErrorCode sets the error code for the provisioner job.
|
||||
func WithJobErrorCode(code string) BuilderOption {
|
||||
return func(b *WorkspaceBuildBuilder) {
|
||||
b.jobErrorCode = code
|
||||
}
|
||||
}
|
||||
|
||||
// WorkspaceBuild generates a workspace build for the provided workspace.
|
||||
@@ -141,18 +196,59 @@ func (b WorkspaceBuildBuilder) WithTask(taskSeed database.TaskTable, appSeed *sd
|
||||
})
|
||||
}
|
||||
|
||||
func (b WorkspaceBuildBuilder) Starting() WorkspaceBuildBuilder {
|
||||
// Starting sets the job to running status.
|
||||
func (b WorkspaceBuildBuilder) Starting(opts ...BuilderOption) WorkspaceBuildBuilder {
|
||||
//nolint: revive // returns modified struct
|
||||
b.jobStatus = database.ProvisionerJobStatusRunning
|
||||
for _, opt := range opts {
|
||||
opt(&b)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (b WorkspaceBuildBuilder) Pending() WorkspaceBuildBuilder {
|
||||
// Pending sets the job to pending status.
|
||||
func (b WorkspaceBuildBuilder) Pending(opts ...BuilderOption) WorkspaceBuildBuilder {
|
||||
//nolint: revive // returns modified struct
|
||||
b.jobStatus = database.ProvisionerJobStatusPending
|
||||
for _, opt := range opts {
|
||||
opt(&b)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (b WorkspaceBuildBuilder) Canceled() WorkspaceBuildBuilder {
|
||||
// Canceled sets the job to canceled status.
|
||||
func (b WorkspaceBuildBuilder) Canceled(opts ...BuilderOption) WorkspaceBuildBuilder {
|
||||
//nolint: revive // returns modified struct
|
||||
b.jobStatus = database.ProvisionerJobStatusCanceled
|
||||
for _, opt := range opts {
|
||||
opt(&b)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Succeeded sets the job to succeeded status.
|
||||
// This is the default status.
|
||||
func (b WorkspaceBuildBuilder) Succeeded(opts ...BuilderOption) WorkspaceBuildBuilder {
|
||||
//nolint: revive // returns modified struct
|
||||
b.jobStatus = database.ProvisionerJobStatusSucceeded
|
||||
for _, opt := range opts {
|
||||
opt(&b)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Failed sets the provisioner job to a failed state. Use WithJobError and
|
||||
// WithJobErrorCode options to set the error message and code. If no error
|
||||
// message is provided, "failed" is used as the default.
|
||||
func (b WorkspaceBuildBuilder) Failed(opts ...BuilderOption) WorkspaceBuildBuilder {
|
||||
//nolint: revive // returns modified struct
|
||||
b.jobStatus = database.ProvisionerJobStatusFailed
|
||||
for _, opt := range opts {
|
||||
opt(&b)
|
||||
}
|
||||
if b.jobError == "" {
|
||||
b.jobError = "failed"
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
@@ -267,8 +363,8 @@ func (b WorkspaceBuildBuilder) doInTX() WorkspaceResponse {
|
||||
|
||||
job, err := b.db.InsertProvisionerJob(ownerCtx, database.InsertProvisionerJobParams{
|
||||
ID: jobID,
|
||||
CreatedAt: dbtime.Now(),
|
||||
UpdatedAt: dbtime.Now(),
|
||||
CreatedAt: takeFirstTime(b.jobCreatedAt, b.ws.CreatedAt, dbtime.Now()),
|
||||
UpdatedAt: takeFirstTime(b.jobCreatedAt, b.ws.CreatedAt, dbtime.Now()),
|
||||
OrganizationID: b.ws.OrganizationID,
|
||||
InitiatorID: b.ws.OwnerID,
|
||||
Provisioner: database.ProvisionerTypeEcho,
|
||||
@@ -291,11 +387,12 @@ func (b WorkspaceBuildBuilder) doInTX() WorkspaceResponse {
|
||||
// might need to do this multiple times if we got a template version
|
||||
// import job as well
|
||||
b.logger.Debug(context.Background(), "looping to acquire provisioner job")
|
||||
startedAt := takeFirstTime(b.jobStartedAt, dbtime.Now())
|
||||
for {
|
||||
j, err := b.db.AcquireProvisionerJob(ownerCtx, database.AcquireProvisionerJobParams{
|
||||
OrganizationID: job.OrganizationID,
|
||||
StartedAt: sql.NullTime{
|
||||
Time: dbtime.Now(),
|
||||
Time: startedAt,
|
||||
Valid: true,
|
||||
},
|
||||
WorkerID: uuid.NullUUID{
|
||||
@@ -311,32 +408,54 @@ func (b WorkspaceBuildBuilder) doInTX() WorkspaceResponse {
|
||||
break
|
||||
}
|
||||
}
|
||||
if !b.jobUpdatedAt.IsZero() {
|
||||
err = b.db.UpdateProvisionerJobByID(ownerCtx, database.UpdateProvisionerJobByIDParams{
|
||||
ID: job.ID,
|
||||
UpdatedAt: b.jobUpdatedAt,
|
||||
})
|
||||
require.NoError(b.t, err, "update job updated_at")
|
||||
}
|
||||
case database.ProvisionerJobStatusCanceled:
|
||||
// Set provisioner job status to 'canceled'
|
||||
b.logger.Debug(context.Background(), "canceling the provisioner job")
|
||||
now := dbtime.Now()
|
||||
completedAt := takeFirstTime(b.jobCompletedAt, dbtime.Now())
|
||||
err = b.db.UpdateProvisionerJobWithCancelByID(ownerCtx, database.UpdateProvisionerJobWithCancelByIDParams{
|
||||
ID: jobID,
|
||||
CanceledAt: sql.NullTime{
|
||||
Time: now,
|
||||
Time: completedAt,
|
||||
Valid: true,
|
||||
},
|
||||
CompletedAt: sql.NullTime{
|
||||
Time: now,
|
||||
Time: completedAt,
|
||||
Valid: true,
|
||||
},
|
||||
})
|
||||
require.NoError(b.t, err, "cancel job")
|
||||
case database.ProvisionerJobStatusFailed:
|
||||
b.logger.Debug(context.Background(), "failing the provisioner job")
|
||||
completedAt := takeFirstTime(b.jobCompletedAt, dbtime.Now())
|
||||
err = b.db.UpdateProvisionerJobWithCompleteByID(ownerCtx, database.UpdateProvisionerJobWithCompleteByIDParams{
|
||||
ID: job.ID,
|
||||
UpdatedAt: completedAt,
|
||||
Error: sql.NullString{String: b.jobError, Valid: b.jobError != ""},
|
||||
ErrorCode: sql.NullString{String: b.jobErrorCode, Valid: b.jobErrorCode != ""},
|
||||
CompletedAt: sql.NullTime{
|
||||
Time: completedAt,
|
||||
Valid: true,
|
||||
},
|
||||
})
|
||||
require.NoError(b.t, err, "fail job")
|
||||
default:
|
||||
// By default, consider jobs in 'succeeded' status
|
||||
b.logger.Debug(context.Background(), "completing the provisioner job")
|
||||
completedAt := takeFirstTime(b.jobCompletedAt, dbtime.Now())
|
||||
err = b.db.UpdateProvisionerJobWithCompleteByID(ownerCtx, database.UpdateProvisionerJobWithCompleteByIDParams{
|
||||
ID: job.ID,
|
||||
UpdatedAt: dbtime.Now(),
|
||||
UpdatedAt: completedAt,
|
||||
Error: sql.NullString{},
|
||||
ErrorCode: sql.NullString{},
|
||||
CompletedAt: sql.NullTime{
|
||||
Time: dbtime.Now(),
|
||||
Time: completedAt,
|
||||
Valid: true,
|
||||
},
|
||||
})
|
||||
@@ -751,6 +870,16 @@ func takeFirst[Value comparable](values ...Value) Value {
|
||||
})
|
||||
}
|
||||
|
||||
// takeFirstTime returns the first non-zero time.Time.
|
||||
func takeFirstTime(values ...time.Time) time.Time {
|
||||
for _, v := range values {
|
||||
if !v.IsZero() {
|
||||
return v
|
||||
}
|
||||
}
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
// mustWorkspaceAppByWorkspaceAndBuildAndAppID finds a workspace app by
|
||||
// workspace ID, build number, and app ID. It returns the workspace app
|
||||
// if found, otherwise fails the test.
|
||||
|
||||
@@ -335,14 +335,6 @@ func (m queryMetricsStore) DeleteApplicationConnectAPIKeysByUserID(ctx context.C
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) DeleteBoundaryUsageStatsByReplicaID(ctx context.Context, replicaID uuid.UUID) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.DeleteBoundaryUsageStatsByReplicaID(ctx, replicaID)
|
||||
m.queryLatencies.WithLabelValues("DeleteBoundaryUsageStatsByReplicaID").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "DeleteBoundaryUsageStatsByReplicaID").Inc()
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) DeleteCryptoKey(ctx context.Context, arg database.DeleteCryptoKeyParams) (database.CryptoKey, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.DeleteCryptoKey(ctx, arg)
|
||||
@@ -575,7 +567,7 @@ func (m queryMetricsStore) DeleteTailnetTunnel(ctx context.Context, arg database
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) DeleteTask(ctx context.Context, arg database.DeleteTaskParams) (database.TaskTable, error) {
|
||||
func (m queryMetricsStore) DeleteTask(ctx context.Context, arg database.DeleteTaskParams) (uuid.UUID, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.DeleteTask(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("DeleteTask").Observe(time.Since(start).Seconds())
|
||||
@@ -854,6 +846,14 @@ func (m queryMetricsStore) GetAllTailnetTunnels(ctx context.Context) ([]database
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetAndResetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (database.GetAndResetBoundaryUsageSummaryRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetAndResetBoundaryUsageSummary(ctx, maxStalenessMs)
|
||||
m.queryLatencies.WithLabelValues("GetAndResetBoundaryUsageSummary").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "GetAndResetBoundaryUsageSummary").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetAnnouncementBanners(ctx context.Context) (string, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetAnnouncementBanners(ctx)
|
||||
@@ -902,14 +902,6 @@ func (m queryMetricsStore) GetAuthorizationUserRoles(ctx context.Context, userID
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (database.GetBoundaryUsageSummaryRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetBoundaryUsageSummary(ctx, maxStalenessMs)
|
||||
m.queryLatencies.WithLabelValues("GetBoundaryUsageSummary").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "GetBoundaryUsageSummary").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetConnectionLogsOffset(ctx context.Context, arg database.GetConnectionLogsOffsetParams) ([]database.GetConnectionLogsOffsetRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetConnectionLogsOffset(ctx, arg)
|
||||
@@ -2414,6 +2406,14 @@ func (m queryMetricsStore) GetWorkspaceBuildByWorkspaceIDAndBuildNumber(ctx cont
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetWorkspaceBuildMetricsByResourceID(ctx context.Context, id uuid.UUID) (database.GetWorkspaceBuildMetricsByResourceIDRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetWorkspaceBuildMetricsByResourceID(ctx, id)
|
||||
m.queryLatencies.WithLabelValues("GetWorkspaceBuildMetricsByResourceID").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "GetWorkspaceBuildMetricsByResourceID").Inc()
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetWorkspaceBuildParameters(ctx context.Context, workspaceBuildID uuid.UUID) ([]database.WorkspaceBuildParameter, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetWorkspaceBuildParameters(ctx, workspaceBuildID)
|
||||
@@ -3334,14 +3334,6 @@ func (m queryMetricsStore) RemoveUserFromGroups(ctx context.Context, arg databas
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) ResetBoundaryUsageStats(ctx context.Context) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.ResetBoundaryUsageStats(ctx)
|
||||
m.queryLatencies.WithLabelValues("ResetBoundaryUsageStats").Observe(time.Since(start).Seconds())
|
||||
m.queryCounts.WithLabelValues(httpmw.ExtractHTTPRoute(ctx), httpmw.ExtractHTTPMethod(ctx), "ResetBoundaryUsageStats").Inc()
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) RevokeDBCryptKey(ctx context.Context, activeKeyDigest string) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.RevokeDBCryptKey(ctx, activeKeyDigest)
|
||||
|
||||
@@ -511,20 +511,6 @@ func (mr *MockStoreMockRecorder) DeleteApplicationConnectAPIKeysByUserID(ctx, us
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteApplicationConnectAPIKeysByUserID", reflect.TypeOf((*MockStore)(nil).DeleteApplicationConnectAPIKeysByUserID), ctx, userID)
|
||||
}
|
||||
|
||||
// DeleteBoundaryUsageStatsByReplicaID mocks base method.
|
||||
func (m *MockStore) DeleteBoundaryUsageStatsByReplicaID(ctx context.Context, replicaID uuid.UUID) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DeleteBoundaryUsageStatsByReplicaID", ctx, replicaID)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// DeleteBoundaryUsageStatsByReplicaID indicates an expected call of DeleteBoundaryUsageStatsByReplicaID.
|
||||
func (mr *MockStoreMockRecorder) DeleteBoundaryUsageStatsByReplicaID(ctx, replicaID any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBoundaryUsageStatsByReplicaID", reflect.TypeOf((*MockStore)(nil).DeleteBoundaryUsageStatsByReplicaID), ctx, replicaID)
|
||||
}
|
||||
|
||||
// DeleteCryptoKey mocks base method.
|
||||
func (m *MockStore) DeleteCryptoKey(ctx context.Context, arg database.DeleteCryptoKeyParams) (database.CryptoKey, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -941,10 +927,10 @@ func (mr *MockStoreMockRecorder) DeleteTailnetTunnel(ctx, arg any) *gomock.Call
|
||||
}
|
||||
|
||||
// DeleteTask mocks base method.
|
||||
func (m *MockStore) DeleteTask(ctx context.Context, arg database.DeleteTaskParams) (database.TaskTable, error) {
|
||||
func (m *MockStore) DeleteTask(ctx context.Context, arg database.DeleteTaskParams) (uuid.UUID, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DeleteTask", ctx, arg)
|
||||
ret0, _ := ret[0].(database.TaskTable)
|
||||
ret0, _ := ret[0].(uuid.UUID)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
@@ -1453,6 +1439,21 @@ func (mr *MockStoreMockRecorder) GetAllTailnetTunnels(ctx any) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllTailnetTunnels", reflect.TypeOf((*MockStore)(nil).GetAllTailnetTunnels), ctx)
|
||||
}
|
||||
|
||||
// GetAndResetBoundaryUsageSummary mocks base method.
|
||||
func (m *MockStore) GetAndResetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (database.GetAndResetBoundaryUsageSummaryRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetAndResetBoundaryUsageSummary", ctx, maxStalenessMs)
|
||||
ret0, _ := ret[0].(database.GetAndResetBoundaryUsageSummaryRow)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetAndResetBoundaryUsageSummary indicates an expected call of GetAndResetBoundaryUsageSummary.
|
||||
func (mr *MockStoreMockRecorder) GetAndResetBoundaryUsageSummary(ctx, maxStalenessMs any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAndResetBoundaryUsageSummary", reflect.TypeOf((*MockStore)(nil).GetAndResetBoundaryUsageSummary), ctx, maxStalenessMs)
|
||||
}
|
||||
|
||||
// GetAnnouncementBanners mocks base method.
|
||||
func (m *MockStore) GetAnnouncementBanners(ctx context.Context) (string, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -1648,21 +1649,6 @@ func (mr *MockStoreMockRecorder) GetAuthorizedWorkspacesAndAgentsByOwnerID(ctx,
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAuthorizedWorkspacesAndAgentsByOwnerID", reflect.TypeOf((*MockStore)(nil).GetAuthorizedWorkspacesAndAgentsByOwnerID), ctx, ownerID, prepared)
|
||||
}
|
||||
|
||||
// GetBoundaryUsageSummary mocks base method.
|
||||
func (m *MockStore) GetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (database.GetBoundaryUsageSummaryRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetBoundaryUsageSummary", ctx, maxStalenessMs)
|
||||
ret0, _ := ret[0].(database.GetBoundaryUsageSummaryRow)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetBoundaryUsageSummary indicates an expected call of GetBoundaryUsageSummary.
|
||||
func (mr *MockStoreMockRecorder) GetBoundaryUsageSummary(ctx, maxStalenessMs any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBoundaryUsageSummary", reflect.TypeOf((*MockStore)(nil).GetBoundaryUsageSummary), ctx, maxStalenessMs)
|
||||
}
|
||||
|
||||
// GetConnectionLogsOffset mocks base method.
|
||||
func (m *MockStore) GetConnectionLogsOffset(ctx context.Context, arg database.GetConnectionLogsOffsetParams) ([]database.GetConnectionLogsOffsetRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -4513,6 +4499,21 @@ func (mr *MockStoreMockRecorder) GetWorkspaceBuildByWorkspaceIDAndBuildNumber(ct
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkspaceBuildByWorkspaceIDAndBuildNumber", reflect.TypeOf((*MockStore)(nil).GetWorkspaceBuildByWorkspaceIDAndBuildNumber), ctx, arg)
|
||||
}
|
||||
|
||||
// GetWorkspaceBuildMetricsByResourceID mocks base method.
|
||||
func (m *MockStore) GetWorkspaceBuildMetricsByResourceID(ctx context.Context, id uuid.UUID) (database.GetWorkspaceBuildMetricsByResourceIDRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetWorkspaceBuildMetricsByResourceID", ctx, id)
|
||||
ret0, _ := ret[0].(database.GetWorkspaceBuildMetricsByResourceIDRow)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetWorkspaceBuildMetricsByResourceID indicates an expected call of GetWorkspaceBuildMetricsByResourceID.
|
||||
func (mr *MockStoreMockRecorder) GetWorkspaceBuildMetricsByResourceID(ctx, id any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkspaceBuildMetricsByResourceID", reflect.TypeOf((*MockStore)(nil).GetWorkspaceBuildMetricsByResourceID), ctx, id)
|
||||
}
|
||||
|
||||
// GetWorkspaceBuildParameters mocks base method.
|
||||
func (m *MockStore) GetWorkspaceBuildParameters(ctx context.Context, workspaceBuildID uuid.UUID) ([]database.WorkspaceBuildParameter, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -6278,20 +6279,6 @@ func (mr *MockStoreMockRecorder) RemoveUserFromGroups(ctx, arg any) *gomock.Call
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveUserFromGroups", reflect.TypeOf((*MockStore)(nil).RemoveUserFromGroups), ctx, arg)
|
||||
}
|
||||
|
||||
// ResetBoundaryUsageStats mocks base method.
|
||||
func (m *MockStore) ResetBoundaryUsageStats(ctx context.Context) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ResetBoundaryUsageStats", ctx)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// ResetBoundaryUsageStats indicates an expected call of ResetBoundaryUsageStats.
|
||||
func (mr *MockStoreMockRecorder) ResetBoundaryUsageStats(ctx any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetBoundaryUsageStats", reflect.TypeOf((*MockStore)(nil).ResetBoundaryUsageStats), ctx)
|
||||
}
|
||||
|
||||
// RevokeDBCryptKey mocks base method.
|
||||
func (m *MockStore) RevokeDBCryptKey(ctx context.Context, activeKeyDigest string) error {
|
||||
m.ctrl.T.Helper()
|
||||
|
||||
Generated
+3
-1
@@ -2290,7 +2290,8 @@ CREATE TABLE templates (
|
||||
activity_bump bigint DEFAULT '3600000000000'::bigint NOT NULL,
|
||||
max_port_sharing_level app_sharing_level DEFAULT 'owner'::app_sharing_level NOT NULL,
|
||||
use_classic_parameter_flow boolean DEFAULT false NOT NULL,
|
||||
cors_behavior cors_behavior DEFAULT 'simple'::cors_behavior NOT NULL
|
||||
cors_behavior cors_behavior DEFAULT 'simple'::cors_behavior NOT NULL,
|
||||
disable_module_cache boolean DEFAULT false NOT NULL
|
||||
);
|
||||
|
||||
COMMENT ON COLUMN templates.default_ttl IS 'The default duration for autostop for workspaces created from this template.';
|
||||
@@ -2344,6 +2345,7 @@ CREATE VIEW template_with_names AS
|
||||
templates.max_port_sharing_level,
|
||||
templates.use_classic_parameter_flow,
|
||||
templates.cors_behavior,
|
||||
templates.disable_module_cache,
|
||||
COALESCE(visible_users.avatar_url, ''::text) AS created_by_avatar_url,
|
||||
COALESCE(visible_users.username, ''::text) AS created_by_username,
|
||||
COALESCE(visible_users.name, ''::text) AS created_by_name,
|
||||
|
||||
@@ -14,6 +14,7 @@ const (
|
||||
LockIDCryptoKeyRotation
|
||||
LockIDReconcilePrebuilds
|
||||
LockIDReconcileSystemRoles
|
||||
LockIDBoundaryUsageStats
|
||||
)
|
||||
|
||||
// GenLockID generates a unique and consistent lock ID from a given string.
|
||||
|
||||
@@ -0,0 +1,16 @@
|
||||
DROP VIEW template_with_names;
|
||||
ALTER TABLE templates DROP COLUMN disable_module_cache;
|
||||
|
||||
CREATE VIEW template_with_names AS
|
||||
SELECT templates.*,
|
||||
COALESCE(visible_users.avatar_url, ''::text) AS created_by_avatar_url,
|
||||
COALESCE(visible_users.username, ''::text) AS created_by_username,
|
||||
COALESCE(visible_users.name, ''::text) AS created_by_name,
|
||||
COALESCE(organizations.name, ''::text) AS organization_name,
|
||||
COALESCE(organizations.display_name, ''::text) AS organization_display_name,
|
||||
COALESCE(organizations.icon, ''::text) AS organization_icon
|
||||
FROM ((templates
|
||||
LEFT JOIN visible_users ON ((templates.created_by = visible_users.id)))
|
||||
LEFT JOIN organizations ON ((templates.organization_id = organizations.id)));
|
||||
|
||||
COMMENT ON VIEW template_with_names IS 'Joins in the display name information such as username, avatar, and organization name.';
|
||||
@@ -0,0 +1,16 @@
|
||||
DROP VIEW template_with_names;
|
||||
ALTER TABLE templates ADD COLUMN disable_module_cache BOOL NOT NULL DEFAULT false;
|
||||
|
||||
CREATE VIEW template_with_names AS
|
||||
SELECT templates.*,
|
||||
COALESCE(visible_users.avatar_url, ''::text) AS created_by_avatar_url,
|
||||
COALESCE(visible_users.username, ''::text) AS created_by_username,
|
||||
COALESCE(visible_users.name, ''::text) AS created_by_name,
|
||||
COALESCE(organizations.name, ''::text) AS organization_name,
|
||||
COALESCE(organizations.display_name, ''::text) AS organization_display_name,
|
||||
COALESCE(organizations.icon, ''::text) AS organization_icon
|
||||
FROM ((templates
|
||||
LEFT JOIN visible_users ON ((templates.created_by = visible_users.id)))
|
||||
LEFT JOIN organizations ON ((templates.organization_id = organizations.id)));
|
||||
|
||||
COMMENT ON VIEW template_with_names IS 'Joins in the display name information such as username, avatar, and organization name.';
|
||||
@@ -127,6 +127,7 @@ func (q *sqlQuerier) GetAuthorizedTemplates(ctx context.Context, arg GetTemplate
|
||||
&i.MaxPortSharingLevel,
|
||||
&i.UseClassicParameterFlow,
|
||||
&i.CorsBehavior,
|
||||
&i.DisableModuleCache,
|
||||
&i.CreatedByAvatarURL,
|
||||
&i.CreatedByUsername,
|
||||
&i.CreatedByName,
|
||||
|
||||
@@ -4338,6 +4338,7 @@ type Template struct {
|
||||
MaxPortSharingLevel AppSharingLevel `db:"max_port_sharing_level" json:"max_port_sharing_level"`
|
||||
UseClassicParameterFlow bool `db:"use_classic_parameter_flow" json:"use_classic_parameter_flow"`
|
||||
CorsBehavior CorsBehavior `db:"cors_behavior" json:"cors_behavior"`
|
||||
DisableModuleCache bool `db:"disable_module_cache" json:"disable_module_cache"`
|
||||
CreatedByAvatarURL string `db:"created_by_avatar_url" json:"created_by_avatar_url"`
|
||||
CreatedByUsername string `db:"created_by_username" json:"created_by_username"`
|
||||
CreatedByName string `db:"created_by_name" json:"created_by_name"`
|
||||
@@ -4387,6 +4388,7 @@ type TemplateTable struct {
|
||||
// Determines whether to default to the dynamic parameter creation flow for this template or continue using the legacy classic parameter creation flow.This is a template wide setting, the template admin can revert to the classic flow if there are any issues. An escape hatch is required, as workspace creation is a core workflow and cannot break. This column will be removed when the dynamic parameter creation flow is stable.
|
||||
UseClassicParameterFlow bool `db:"use_classic_parameter_flow" json:"use_classic_parameter_flow"`
|
||||
CorsBehavior CorsBehavior `db:"cors_behavior" json:"cors_behavior"`
|
||||
DisableModuleCache bool `db:"disable_module_cache" json:"disable_module_cache"`
|
||||
}
|
||||
|
||||
// Records aggregated usage statistics for templates/users. All usage is rounded up to the nearest minute.
|
||||
|
||||
@@ -88,8 +88,6 @@ type sqlcQuerier interface {
|
||||
// be recreated.
|
||||
DeleteAllWebpushSubscriptions(ctx context.Context) error
|
||||
DeleteApplicationConnectAPIKeysByUserID(ctx context.Context, userID uuid.UUID) error
|
||||
// Deletes boundary usage statistics for a specific replica.
|
||||
DeleteBoundaryUsageStatsByReplicaID(ctx context.Context, replicaID uuid.UUID) error
|
||||
DeleteCryptoKey(ctx context.Context, arg DeleteCryptoKeyParams) (CryptoKey, error)
|
||||
DeleteCustomRole(ctx context.Context, arg DeleteCustomRoleParams) error
|
||||
DeleteExpiredAPIKeys(ctx context.Context, arg DeleteExpiredAPIKeysParams) (int64, error)
|
||||
@@ -132,7 +130,7 @@ type sqlcQuerier interface {
|
||||
DeleteRuntimeConfig(ctx context.Context, key string) error
|
||||
DeleteTailnetPeer(ctx context.Context, arg DeleteTailnetPeerParams) (DeleteTailnetPeerRow, error)
|
||||
DeleteTailnetTunnel(ctx context.Context, arg DeleteTailnetTunnelParams) (DeleteTailnetTunnelRow, error)
|
||||
DeleteTask(ctx context.Context, arg DeleteTaskParams) (TaskTable, error)
|
||||
DeleteTask(ctx context.Context, arg DeleteTaskParams) (uuid.UUID, error)
|
||||
DeleteUserSecret(ctx context.Context, id uuid.UUID) error
|
||||
DeleteWebpushSubscriptionByUserIDAndEndpoint(ctx context.Context, arg DeleteWebpushSubscriptionByUserIDAndEndpointParams) error
|
||||
DeleteWebpushSubscriptions(ctx context.Context, ids []uuid.UUID) error
|
||||
@@ -181,6 +179,11 @@ type sqlcQuerier interface {
|
||||
GetAllTailnetCoordinators(ctx context.Context) ([]TailnetCoordinator, error)
|
||||
GetAllTailnetPeers(ctx context.Context) ([]TailnetPeer, error)
|
||||
GetAllTailnetTunnels(ctx context.Context) ([]TailnetTunnel, error)
|
||||
// Atomic read+delete prevents replicas that flush between a separate read and
|
||||
// reset from having their data deleted before the next snapshot. Uses a common
|
||||
// table expression with DELETE...RETURNING so the rows we sum are exactly the
|
||||
// rows we delete. Stale rows are excluded from the sum but still deleted.
|
||||
GetAndResetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (GetAndResetBoundaryUsageSummaryRow, error)
|
||||
GetAnnouncementBanners(ctx context.Context) (string, error)
|
||||
GetAppSecurityKey(ctx context.Context) (string, error)
|
||||
GetApplicationName(ctx context.Context) (string, error)
|
||||
@@ -196,10 +199,6 @@ type sqlcQuerier interface {
|
||||
// This function returns roles for authorization purposes. Implied member roles
|
||||
// are included.
|
||||
GetAuthorizationUserRoles(ctx context.Context, userID uuid.UUID) (GetAuthorizationUserRolesRow, error)
|
||||
// Aggregates boundary usage statistics across all replicas. Filters to only
|
||||
// include data where window_start is within the given interval to exclude
|
||||
// stale data.
|
||||
GetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (GetBoundaryUsageSummaryRow, error)
|
||||
GetConnectionLogsOffset(ctx context.Context, arg GetConnectionLogsOffsetParams) ([]GetConnectionLogsOffsetRow, error)
|
||||
GetCoordinatorResumeTokenSigningKey(ctx context.Context) (string, error)
|
||||
GetCryptoKeyByFeatureAndSequence(ctx context.Context, arg GetCryptoKeyByFeatureAndSequenceParams) (CryptoKey, error)
|
||||
@@ -502,6 +501,9 @@ type sqlcQuerier interface {
|
||||
GetWorkspaceBuildByID(ctx context.Context, id uuid.UUID) (WorkspaceBuild, error)
|
||||
GetWorkspaceBuildByJobID(ctx context.Context, jobID uuid.UUID) (WorkspaceBuild, error)
|
||||
GetWorkspaceBuildByWorkspaceIDAndBuildNumber(ctx context.Context, arg GetWorkspaceBuildByWorkspaceIDAndBuildNumberParams) (WorkspaceBuild, error)
|
||||
// Returns build metadata for e2e workspace build duration metrics.
|
||||
// Also checks if all agents are ready and returns the worst status.
|
||||
GetWorkspaceBuildMetricsByResourceID(ctx context.Context, id uuid.UUID) (GetWorkspaceBuildMetricsByResourceIDRow, error)
|
||||
GetWorkspaceBuildParameters(ctx context.Context, workspaceBuildID uuid.UUID) ([]WorkspaceBuildParameter, error)
|
||||
GetWorkspaceBuildParametersByBuildIDs(ctx context.Context, workspaceBuildIds []uuid.UUID) ([]WorkspaceBuildParameter, error)
|
||||
GetWorkspaceBuildStatsByTemplates(ctx context.Context, since time.Time) ([]GetWorkspaceBuildStatsByTemplatesRow, error)
|
||||
@@ -652,9 +654,6 @@ type sqlcQuerier interface {
|
||||
RegisterWorkspaceProxy(ctx context.Context, arg RegisterWorkspaceProxyParams) (WorkspaceProxy, error)
|
||||
RemoveUserFromAllGroups(ctx context.Context, userID uuid.UUID) error
|
||||
RemoveUserFromGroups(ctx context.Context, arg RemoveUserFromGroupsParams) ([]uuid.UUID, error)
|
||||
// Deletes all boundary usage statistics. Called after telemetry reports the
|
||||
// aggregated stats. Each replica will insert a fresh row on its next flush.
|
||||
ResetBoundaryUsageStats(ctx context.Context) error
|
||||
RevokeDBCryptKey(ctx context.Context, activeKeyDigest string) error
|
||||
// Note that this selects from the CTE, not the original table. The CTE is named
|
||||
// the same as the original table to trick sqlc into reusing the existing struct
|
||||
|
||||
+112
-64
@@ -1980,39 +1980,41 @@ func (q *sqlQuerier) InsertAuditLog(ctx context.Context, arg InsertAuditLogParam
|
||||
return i, err
|
||||
}
|
||||
|
||||
const deleteBoundaryUsageStatsByReplicaID = `-- name: DeleteBoundaryUsageStatsByReplicaID :exec
|
||||
DELETE FROM boundary_usage_stats WHERE replica_id = $1
|
||||
`
|
||||
|
||||
// Deletes boundary usage statistics for a specific replica.
|
||||
func (q *sqlQuerier) DeleteBoundaryUsageStatsByReplicaID(ctx context.Context, replicaID uuid.UUID) error {
|
||||
_, err := q.db.ExecContext(ctx, deleteBoundaryUsageStatsByReplicaID, replicaID)
|
||||
return err
|
||||
}
|
||||
|
||||
const getBoundaryUsageSummary = `-- name: GetBoundaryUsageSummary :one
|
||||
const getAndResetBoundaryUsageSummary = `-- name: GetAndResetBoundaryUsageSummary :one
|
||||
WITH deleted AS (
|
||||
DELETE FROM boundary_usage_stats
|
||||
RETURNING replica_id, unique_workspaces_count, unique_users_count, allowed_requests, denied_requests, window_start, updated_at
|
||||
)
|
||||
SELECT
|
||||
COALESCE(SUM(unique_workspaces_count), 0)::bigint AS unique_workspaces,
|
||||
COALESCE(SUM(unique_users_count), 0)::bigint AS unique_users,
|
||||
COALESCE(SUM(allowed_requests), 0)::bigint AS allowed_requests,
|
||||
COALESCE(SUM(denied_requests), 0)::bigint AS denied_requests
|
||||
FROM boundary_usage_stats
|
||||
WHERE window_start >= NOW() - ($1::bigint || ' ms')::interval
|
||||
COALESCE(SUM(unique_workspaces_count) FILTER (
|
||||
WHERE window_start >= NOW() - ($1::bigint || ' ms')::interval
|
||||
), 0)::bigint AS unique_workspaces,
|
||||
COALESCE(SUM(unique_users_count) FILTER (
|
||||
WHERE window_start >= NOW() - ($1::bigint || ' ms')::interval
|
||||
), 0)::bigint AS unique_users,
|
||||
COALESCE(SUM(allowed_requests) FILTER (
|
||||
WHERE window_start >= NOW() - ($1::bigint || ' ms')::interval
|
||||
), 0)::bigint AS allowed_requests,
|
||||
COALESCE(SUM(denied_requests) FILTER (
|
||||
WHERE window_start >= NOW() - ($1::bigint || ' ms')::interval
|
||||
), 0)::bigint AS denied_requests
|
||||
FROM deleted
|
||||
`
|
||||
|
||||
type GetBoundaryUsageSummaryRow struct {
|
||||
type GetAndResetBoundaryUsageSummaryRow struct {
|
||||
UniqueWorkspaces int64 `db:"unique_workspaces" json:"unique_workspaces"`
|
||||
UniqueUsers int64 `db:"unique_users" json:"unique_users"`
|
||||
AllowedRequests int64 `db:"allowed_requests" json:"allowed_requests"`
|
||||
DeniedRequests int64 `db:"denied_requests" json:"denied_requests"`
|
||||
}
|
||||
|
||||
// Aggregates boundary usage statistics across all replicas. Filters to only
|
||||
// include data where window_start is within the given interval to exclude
|
||||
// stale data.
|
||||
func (q *sqlQuerier) GetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (GetBoundaryUsageSummaryRow, error) {
|
||||
row := q.db.QueryRowContext(ctx, getBoundaryUsageSummary, maxStalenessMs)
|
||||
var i GetBoundaryUsageSummaryRow
|
||||
// Atomic read+delete prevents replicas that flush between a separate read and
|
||||
// reset from having their data deleted before the next snapshot. Uses a common
|
||||
// table expression with DELETE...RETURNING so the rows we sum are exactly the
|
||||
// rows we delete. Stale rows are excluded from the sum but still deleted.
|
||||
func (q *sqlQuerier) GetAndResetBoundaryUsageSummary(ctx context.Context, maxStalenessMs int64) (GetAndResetBoundaryUsageSummaryRow, error) {
|
||||
row := q.db.QueryRowContext(ctx, getAndResetBoundaryUsageSummary, maxStalenessMs)
|
||||
var i GetAndResetBoundaryUsageSummaryRow
|
||||
err := row.Scan(
|
||||
&i.UniqueWorkspaces,
|
||||
&i.UniqueUsers,
|
||||
@@ -2022,17 +2024,6 @@ func (q *sqlQuerier) GetBoundaryUsageSummary(ctx context.Context, maxStalenessMs
|
||||
return i, err
|
||||
}
|
||||
|
||||
const resetBoundaryUsageStats = `-- name: ResetBoundaryUsageStats :exec
|
||||
DELETE FROM boundary_usage_stats
|
||||
`
|
||||
|
||||
// Deletes all boundary usage statistics. Called after telemetry reports the
|
||||
// aggregated stats. Each replica will insert a fresh row on its next flush.
|
||||
func (q *sqlQuerier) ResetBoundaryUsageStats(ctx context.Context) error {
|
||||
_, err := q.db.ExecContext(ctx, resetBoundaryUsageStats)
|
||||
return err
|
||||
}
|
||||
|
||||
const upsertBoundaryUsageStats = `-- name: UpsertBoundaryUsageStats :one
|
||||
INSERT INTO boundary_usage_stats (
|
||||
replica_id,
|
||||
@@ -13151,13 +13142,19 @@ func (q *sqlQuerier) UpsertTailnetTunnel(ctx context.Context, arg UpsertTailnetT
|
||||
}
|
||||
|
||||
const deleteTask = `-- name: DeleteTask :one
|
||||
UPDATE tasks
|
||||
SET
|
||||
deleted_at = $1::timestamptz
|
||||
WHERE
|
||||
id = $2::uuid
|
||||
AND deleted_at IS NULL
|
||||
RETURNING id, organization_id, owner_id, name, workspace_id, template_version_id, template_parameters, prompt, created_at, deleted_at, display_name
|
||||
WITH deleted_task AS (
|
||||
UPDATE tasks
|
||||
SET
|
||||
deleted_at = $1::timestamptz
|
||||
WHERE
|
||||
id = $2::uuid
|
||||
AND deleted_at IS NULL
|
||||
RETURNING id
|
||||
), deleted_snapshot AS (
|
||||
DELETE FROM task_snapshots
|
||||
WHERE task_id = $2::uuid
|
||||
)
|
||||
SELECT id FROM deleted_task
|
||||
`
|
||||
|
||||
type DeleteTaskParams struct {
|
||||
@@ -13165,23 +13162,11 @@ type DeleteTaskParams struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) DeleteTask(ctx context.Context, arg DeleteTaskParams) (TaskTable, error) {
|
||||
func (q *sqlQuerier) DeleteTask(ctx context.Context, arg DeleteTaskParams) (uuid.UUID, error) {
|
||||
row := q.db.QueryRowContext(ctx, deleteTask, arg.DeletedAt, arg.ID)
|
||||
var i TaskTable
|
||||
err := row.Scan(
|
||||
&i.ID,
|
||||
&i.OrganizationID,
|
||||
&i.OwnerID,
|
||||
&i.Name,
|
||||
&i.WorkspaceID,
|
||||
&i.TemplateVersionID,
|
||||
&i.TemplateParameters,
|
||||
&i.Prompt,
|
||||
&i.CreatedAt,
|
||||
&i.DeletedAt,
|
||||
&i.DisplayName,
|
||||
)
|
||||
return i, err
|
||||
var id uuid.UUID
|
||||
err := row.Scan(&id)
|
||||
return id, err
|
||||
}
|
||||
|
||||
const getTaskByID = `-- name: GetTaskByID :one
|
||||
@@ -13729,7 +13714,7 @@ func (q *sqlQuerier) GetTemplateAverageBuildTime(ctx context.Context, templateID
|
||||
|
||||
const getTemplateByID = `-- name: GetTemplateByID :one
|
||||
SELECT
|
||||
id, created_at, updated_at, organization_id, deleted, name, provisioner, active_version_id, description, default_ttl, created_by, icon, user_acl, group_acl, display_name, allow_user_cancel_workspace_jobs, allow_user_autostart, allow_user_autostop, failure_ttl, time_til_dormant, time_til_dormant_autodelete, autostop_requirement_days_of_week, autostop_requirement_weeks, autostart_block_days_of_week, require_active_version, deprecated, activity_bump, max_port_sharing_level, use_classic_parameter_flow, cors_behavior, created_by_avatar_url, created_by_username, created_by_name, organization_name, organization_display_name, organization_icon
|
||||
id, created_at, updated_at, organization_id, deleted, name, provisioner, active_version_id, description, default_ttl, created_by, icon, user_acl, group_acl, display_name, allow_user_cancel_workspace_jobs, allow_user_autostart, allow_user_autostop, failure_ttl, time_til_dormant, time_til_dormant_autodelete, autostop_requirement_days_of_week, autostop_requirement_weeks, autostart_block_days_of_week, require_active_version, deprecated, activity_bump, max_port_sharing_level, use_classic_parameter_flow, cors_behavior, disable_module_cache, created_by_avatar_url, created_by_username, created_by_name, organization_name, organization_display_name, organization_icon
|
||||
FROM
|
||||
template_with_names
|
||||
WHERE
|
||||
@@ -13772,6 +13757,7 @@ func (q *sqlQuerier) GetTemplateByID(ctx context.Context, id uuid.UUID) (Templat
|
||||
&i.MaxPortSharingLevel,
|
||||
&i.UseClassicParameterFlow,
|
||||
&i.CorsBehavior,
|
||||
&i.DisableModuleCache,
|
||||
&i.CreatedByAvatarURL,
|
||||
&i.CreatedByUsername,
|
||||
&i.CreatedByName,
|
||||
@@ -13784,7 +13770,7 @@ func (q *sqlQuerier) GetTemplateByID(ctx context.Context, id uuid.UUID) (Templat
|
||||
|
||||
const getTemplateByOrganizationAndName = `-- name: GetTemplateByOrganizationAndName :one
|
||||
SELECT
|
||||
id, created_at, updated_at, organization_id, deleted, name, provisioner, active_version_id, description, default_ttl, created_by, icon, user_acl, group_acl, display_name, allow_user_cancel_workspace_jobs, allow_user_autostart, allow_user_autostop, failure_ttl, time_til_dormant, time_til_dormant_autodelete, autostop_requirement_days_of_week, autostop_requirement_weeks, autostart_block_days_of_week, require_active_version, deprecated, activity_bump, max_port_sharing_level, use_classic_parameter_flow, cors_behavior, created_by_avatar_url, created_by_username, created_by_name, organization_name, organization_display_name, organization_icon
|
||||
id, created_at, updated_at, organization_id, deleted, name, provisioner, active_version_id, description, default_ttl, created_by, icon, user_acl, group_acl, display_name, allow_user_cancel_workspace_jobs, allow_user_autostart, allow_user_autostop, failure_ttl, time_til_dormant, time_til_dormant_autodelete, autostop_requirement_days_of_week, autostop_requirement_weeks, autostart_block_days_of_week, require_active_version, deprecated, activity_bump, max_port_sharing_level, use_classic_parameter_flow, cors_behavior, disable_module_cache, created_by_avatar_url, created_by_username, created_by_name, organization_name, organization_display_name, organization_icon
|
||||
FROM
|
||||
template_with_names AS templates
|
||||
WHERE
|
||||
@@ -13835,6 +13821,7 @@ func (q *sqlQuerier) GetTemplateByOrganizationAndName(ctx context.Context, arg G
|
||||
&i.MaxPortSharingLevel,
|
||||
&i.UseClassicParameterFlow,
|
||||
&i.CorsBehavior,
|
||||
&i.DisableModuleCache,
|
||||
&i.CreatedByAvatarURL,
|
||||
&i.CreatedByUsername,
|
||||
&i.CreatedByName,
|
||||
@@ -13846,7 +13833,7 @@ func (q *sqlQuerier) GetTemplateByOrganizationAndName(ctx context.Context, arg G
|
||||
}
|
||||
|
||||
const getTemplates = `-- name: GetTemplates :many
|
||||
SELECT id, created_at, updated_at, organization_id, deleted, name, provisioner, active_version_id, description, default_ttl, created_by, icon, user_acl, group_acl, display_name, allow_user_cancel_workspace_jobs, allow_user_autostart, allow_user_autostop, failure_ttl, time_til_dormant, time_til_dormant_autodelete, autostop_requirement_days_of_week, autostop_requirement_weeks, autostart_block_days_of_week, require_active_version, deprecated, activity_bump, max_port_sharing_level, use_classic_parameter_flow, cors_behavior, created_by_avatar_url, created_by_username, created_by_name, organization_name, organization_display_name, organization_icon FROM template_with_names AS templates
|
||||
SELECT id, created_at, updated_at, organization_id, deleted, name, provisioner, active_version_id, description, default_ttl, created_by, icon, user_acl, group_acl, display_name, allow_user_cancel_workspace_jobs, allow_user_autostart, allow_user_autostop, failure_ttl, time_til_dormant, time_til_dormant_autodelete, autostop_requirement_days_of_week, autostop_requirement_weeks, autostart_block_days_of_week, require_active_version, deprecated, activity_bump, max_port_sharing_level, use_classic_parameter_flow, cors_behavior, disable_module_cache, created_by_avatar_url, created_by_username, created_by_name, organization_name, organization_display_name, organization_icon FROM template_with_names AS templates
|
||||
ORDER BY (name, id) ASC
|
||||
`
|
||||
|
||||
@@ -13890,6 +13877,7 @@ func (q *sqlQuerier) GetTemplates(ctx context.Context) ([]Template, error) {
|
||||
&i.MaxPortSharingLevel,
|
||||
&i.UseClassicParameterFlow,
|
||||
&i.CorsBehavior,
|
||||
&i.DisableModuleCache,
|
||||
&i.CreatedByAvatarURL,
|
||||
&i.CreatedByUsername,
|
||||
&i.CreatedByName,
|
||||
@@ -13912,7 +13900,7 @@ func (q *sqlQuerier) GetTemplates(ctx context.Context) ([]Template, error) {
|
||||
|
||||
const getTemplatesWithFilter = `-- name: GetTemplatesWithFilter :many
|
||||
SELECT
|
||||
t.id, t.created_at, t.updated_at, t.organization_id, t.deleted, t.name, t.provisioner, t.active_version_id, t.description, t.default_ttl, t.created_by, t.icon, t.user_acl, t.group_acl, t.display_name, t.allow_user_cancel_workspace_jobs, t.allow_user_autostart, t.allow_user_autostop, t.failure_ttl, t.time_til_dormant, t.time_til_dormant_autodelete, t.autostop_requirement_days_of_week, t.autostop_requirement_weeks, t.autostart_block_days_of_week, t.require_active_version, t.deprecated, t.activity_bump, t.max_port_sharing_level, t.use_classic_parameter_flow, t.cors_behavior, t.created_by_avatar_url, t.created_by_username, t.created_by_name, t.organization_name, t.organization_display_name, t.organization_icon
|
||||
t.id, t.created_at, t.updated_at, t.organization_id, t.deleted, t.name, t.provisioner, t.active_version_id, t.description, t.default_ttl, t.created_by, t.icon, t.user_acl, t.group_acl, t.display_name, t.allow_user_cancel_workspace_jobs, t.allow_user_autostart, t.allow_user_autostop, t.failure_ttl, t.time_til_dormant, t.time_til_dormant_autodelete, t.autostop_requirement_days_of_week, t.autostop_requirement_weeks, t.autostart_block_days_of_week, t.require_active_version, t.deprecated, t.activity_bump, t.max_port_sharing_level, t.use_classic_parameter_flow, t.cors_behavior, t.disable_module_cache, t.created_by_avatar_url, t.created_by_username, t.created_by_name, t.organization_name, t.organization_display_name, t.organization_icon
|
||||
FROM
|
||||
template_with_names AS t
|
||||
LEFT JOIN
|
||||
@@ -14071,6 +14059,7 @@ func (q *sqlQuerier) GetTemplatesWithFilter(ctx context.Context, arg GetTemplate
|
||||
&i.MaxPortSharingLevel,
|
||||
&i.UseClassicParameterFlow,
|
||||
&i.CorsBehavior,
|
||||
&i.DisableModuleCache,
|
||||
&i.CreatedByAvatarURL,
|
||||
&i.CreatedByUsername,
|
||||
&i.CreatedByName,
|
||||
@@ -14256,7 +14245,8 @@ SET
|
||||
group_acl = $8,
|
||||
max_port_sharing_level = $9,
|
||||
use_classic_parameter_flow = $10,
|
||||
cors_behavior = $11
|
||||
cors_behavior = $11,
|
||||
disable_module_cache = $12
|
||||
WHERE
|
||||
id = $1
|
||||
`
|
||||
@@ -14273,6 +14263,7 @@ type UpdateTemplateMetaByIDParams struct {
|
||||
MaxPortSharingLevel AppSharingLevel `db:"max_port_sharing_level" json:"max_port_sharing_level"`
|
||||
UseClassicParameterFlow bool `db:"use_classic_parameter_flow" json:"use_classic_parameter_flow"`
|
||||
CorsBehavior CorsBehavior `db:"cors_behavior" json:"cors_behavior"`
|
||||
DisableModuleCache bool `db:"disable_module_cache" json:"disable_module_cache"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) UpdateTemplateMetaByID(ctx context.Context, arg UpdateTemplateMetaByIDParams) error {
|
||||
@@ -14288,6 +14279,7 @@ func (q *sqlQuerier) UpdateTemplateMetaByID(ctx context.Context, arg UpdateTempl
|
||||
arg.MaxPortSharingLevel,
|
||||
arg.UseClassicParameterFlow,
|
||||
arg.CorsBehavior,
|
||||
arg.DisableModuleCache,
|
||||
)
|
||||
return err
|
||||
}
|
||||
@@ -21352,6 +21344,62 @@ func (q *sqlQuerier) GetWorkspaceBuildByWorkspaceIDAndBuildNumber(ctx context.Co
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getWorkspaceBuildMetricsByResourceID = `-- name: GetWorkspaceBuildMetricsByResourceID :one
|
||||
SELECT
|
||||
wb.created_at,
|
||||
wb.transition,
|
||||
t.name AS template_name,
|
||||
o.name AS organization_name,
|
||||
(w.owner_id = 'c42fdf75-3097-471c-8c33-fb52454d81c0') AS is_prebuild,
|
||||
-- All agents must have ready_at set (terminal startup state)
|
||||
COUNT(*) FILTER (WHERE wa.ready_at IS NULL) = 0 AS all_agents_ready,
|
||||
-- Latest ready_at across all agents (for duration calculation)
|
||||
MAX(wa.ready_at)::timestamptz AS last_agent_ready_at,
|
||||
-- Worst status: error > timeout > ready
|
||||
CASE
|
||||
WHEN bool_or(wa.lifecycle_state = 'start_error') THEN 'error'
|
||||
WHEN bool_or(wa.lifecycle_state = 'start_timeout') THEN 'timeout'
|
||||
ELSE 'success'
|
||||
END AS worst_status
|
||||
FROM workspace_builds wb
|
||||
JOIN workspaces w ON wb.workspace_id = w.id
|
||||
JOIN templates t ON w.template_id = t.id
|
||||
JOIN organizations o ON t.organization_id = o.id
|
||||
JOIN workspace_resources wr ON wr.job_id = wb.job_id
|
||||
JOIN workspace_agents wa ON wa.resource_id = wr.id
|
||||
WHERE wb.job_id = (SELECT job_id FROM workspace_resources WHERE workspace_resources.id = $1)
|
||||
GROUP BY wb.created_at, wb.transition, t.name, o.name, w.owner_id
|
||||
`
|
||||
|
||||
type GetWorkspaceBuildMetricsByResourceIDRow struct {
|
||||
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
||||
Transition WorkspaceTransition `db:"transition" json:"transition"`
|
||||
TemplateName string `db:"template_name" json:"template_name"`
|
||||
OrganizationName string `db:"organization_name" json:"organization_name"`
|
||||
IsPrebuild bool `db:"is_prebuild" json:"is_prebuild"`
|
||||
AllAgentsReady bool `db:"all_agents_ready" json:"all_agents_ready"`
|
||||
LastAgentReadyAt time.Time `db:"last_agent_ready_at" json:"last_agent_ready_at"`
|
||||
WorstStatus string `db:"worst_status" json:"worst_status"`
|
||||
}
|
||||
|
||||
// Returns build metadata for e2e workspace build duration metrics.
|
||||
// Also checks if all agents are ready and returns the worst status.
|
||||
func (q *sqlQuerier) GetWorkspaceBuildMetricsByResourceID(ctx context.Context, id uuid.UUID) (GetWorkspaceBuildMetricsByResourceIDRow, error) {
|
||||
row := q.db.QueryRowContext(ctx, getWorkspaceBuildMetricsByResourceID, id)
|
||||
var i GetWorkspaceBuildMetricsByResourceIDRow
|
||||
err := row.Scan(
|
||||
&i.CreatedAt,
|
||||
&i.Transition,
|
||||
&i.TemplateName,
|
||||
&i.OrganizationName,
|
||||
&i.IsPrebuild,
|
||||
&i.AllAgentsReady,
|
||||
&i.LastAgentReadyAt,
|
||||
&i.WorstStatus,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getWorkspaceBuildStatsByTemplates = `-- name: GetWorkspaceBuildStatsByTemplates :many
|
||||
SELECT
|
||||
w.template_id,
|
||||
@@ -22843,7 +22891,7 @@ LEFT JOIN LATERAL (
|
||||
) latest_build ON TRUE
|
||||
LEFT JOIN LATERAL (
|
||||
SELECT
|
||||
id, created_at, updated_at, organization_id, deleted, name, provisioner, active_version_id, description, default_ttl, created_by, icon, user_acl, group_acl, display_name, allow_user_cancel_workspace_jobs, allow_user_autostart, allow_user_autostop, failure_ttl, time_til_dormant, time_til_dormant_autodelete, autostop_requirement_days_of_week, autostop_requirement_weeks, autostart_block_days_of_week, require_active_version, deprecated, activity_bump, max_port_sharing_level, use_classic_parameter_flow, cors_behavior
|
||||
id, created_at, updated_at, organization_id, deleted, name, provisioner, active_version_id, description, default_ttl, created_by, icon, user_acl, group_acl, display_name, allow_user_cancel_workspace_jobs, allow_user_autostart, allow_user_autostop, failure_ttl, time_til_dormant, time_til_dormant_autodelete, autostop_requirement_days_of_week, autostop_requirement_weeks, autostart_block_days_of_week, require_active_version, deprecated, activity_bump, max_port_sharing_level, use_classic_parameter_flow, cors_behavior, disable_module_cache
|
||||
FROM
|
||||
templates
|
||||
WHERE
|
||||
|
||||
@@ -27,23 +27,26 @@ INSERT INTO boundary_usage_stats (
|
||||
updated_at = NOW()
|
||||
RETURNING (xmax = 0) AS new_period;
|
||||
|
||||
-- name: GetBoundaryUsageSummary :one
|
||||
-- Aggregates boundary usage statistics across all replicas. Filters to only
|
||||
-- include data where window_start is within the given interval to exclude
|
||||
-- stale data.
|
||||
-- name: GetAndResetBoundaryUsageSummary :one
|
||||
-- Atomic read+delete prevents replicas that flush between a separate read and
|
||||
-- reset from having their data deleted before the next snapshot. Uses a common
|
||||
-- table expression with DELETE...RETURNING so the rows we sum are exactly the
|
||||
-- rows we delete. Stale rows are excluded from the sum but still deleted.
|
||||
WITH deleted AS (
|
||||
DELETE FROM boundary_usage_stats
|
||||
RETURNING *
|
||||
)
|
||||
SELECT
|
||||
COALESCE(SUM(unique_workspaces_count), 0)::bigint AS unique_workspaces,
|
||||
COALESCE(SUM(unique_users_count), 0)::bigint AS unique_users,
|
||||
COALESCE(SUM(allowed_requests), 0)::bigint AS allowed_requests,
|
||||
COALESCE(SUM(denied_requests), 0)::bigint AS denied_requests
|
||||
FROM boundary_usage_stats
|
||||
WHERE window_start >= NOW() - (@max_staleness_ms::bigint || ' ms')::interval;
|
||||
|
||||
-- name: ResetBoundaryUsageStats :exec
|
||||
-- Deletes all boundary usage statistics. Called after telemetry reports the
|
||||
-- aggregated stats. Each replica will insert a fresh row on its next flush.
|
||||
DELETE FROM boundary_usage_stats;
|
||||
|
||||
-- name: DeleteBoundaryUsageStatsByReplicaID :exec
|
||||
-- Deletes boundary usage statistics for a specific replica.
|
||||
DELETE FROM boundary_usage_stats WHERE replica_id = @replica_id;
|
||||
COALESCE(SUM(unique_workspaces_count) FILTER (
|
||||
WHERE window_start >= NOW() - (@max_staleness_ms::bigint || ' ms')::interval
|
||||
), 0)::bigint AS unique_workspaces,
|
||||
COALESCE(SUM(unique_users_count) FILTER (
|
||||
WHERE window_start >= NOW() - (@max_staleness_ms::bigint || ' ms')::interval
|
||||
), 0)::bigint AS unique_users,
|
||||
COALESCE(SUM(allowed_requests) FILTER (
|
||||
WHERE window_start >= NOW() - (@max_staleness_ms::bigint || ' ms')::interval
|
||||
), 0)::bigint AS allowed_requests,
|
||||
COALESCE(SUM(denied_requests) FILTER (
|
||||
WHERE window_start >= NOW() - (@max_staleness_ms::bigint || ' ms')::interval
|
||||
), 0)::bigint AS denied_requests
|
||||
FROM deleted;
|
||||
|
||||
@@ -57,13 +57,19 @@ AND CASE WHEN @status::text != '' THEN tws.status = @status::task_status ELSE TR
|
||||
ORDER BY tws.created_at DESC;
|
||||
|
||||
-- name: DeleteTask :one
|
||||
UPDATE tasks
|
||||
SET
|
||||
deleted_at = @deleted_at::timestamptz
|
||||
WHERE
|
||||
id = @id::uuid
|
||||
AND deleted_at IS NULL
|
||||
RETURNING *;
|
||||
WITH deleted_task AS (
|
||||
UPDATE tasks
|
||||
SET
|
||||
deleted_at = @deleted_at::timestamptz
|
||||
WHERE
|
||||
id = @id::uuid
|
||||
AND deleted_at IS NULL
|
||||
RETURNING id
|
||||
), deleted_snapshot AS (
|
||||
DELETE FROM task_snapshots
|
||||
WHERE task_id = @id::uuid
|
||||
)
|
||||
SELECT id FROM deleted_task;
|
||||
|
||||
|
||||
-- name: UpdateTaskPrompt :one
|
||||
|
||||
@@ -173,7 +173,8 @@ SET
|
||||
group_acl = $8,
|
||||
max_port_sharing_level = $9,
|
||||
use_classic_parameter_flow = $10,
|
||||
cors_behavior = $11
|
||||
cors_behavior = $11,
|
||||
disable_module_cache = $12
|
||||
WHERE
|
||||
id = $1
|
||||
;
|
||||
|
||||
@@ -243,3 +243,31 @@ SET
|
||||
has_external_agent = @has_external_agent,
|
||||
updated_at = @updated_at::timestamptz
|
||||
WHERE id = @id::uuid;
|
||||
|
||||
-- name: GetWorkspaceBuildMetricsByResourceID :one
|
||||
-- Returns build metadata for e2e workspace build duration metrics.
|
||||
-- Also checks if all agents are ready and returns the worst status.
|
||||
SELECT
|
||||
wb.created_at,
|
||||
wb.transition,
|
||||
t.name AS template_name,
|
||||
o.name AS organization_name,
|
||||
(w.owner_id = 'c42fdf75-3097-471c-8c33-fb52454d81c0') AS is_prebuild,
|
||||
-- All agents must have ready_at set (terminal startup state)
|
||||
COUNT(*) FILTER (WHERE wa.ready_at IS NULL) = 0 AS all_agents_ready,
|
||||
-- Latest ready_at across all agents (for duration calculation)
|
||||
MAX(wa.ready_at)::timestamptz AS last_agent_ready_at,
|
||||
-- Worst status: error > timeout > ready
|
||||
CASE
|
||||
WHEN bool_or(wa.lifecycle_state = 'start_error') THEN 'error'
|
||||
WHEN bool_or(wa.lifecycle_state = 'start_timeout') THEN 'timeout'
|
||||
ELSE 'success'
|
||||
END AS worst_status
|
||||
FROM workspace_builds wb
|
||||
JOIN workspaces w ON wb.workspace_id = w.id
|
||||
JOIN templates t ON w.template_id = t.id
|
||||
JOIN organizations o ON t.organization_id = o.id
|
||||
JOIN workspace_resources wr ON wr.job_id = wb.job_id
|
||||
JOIN workspace_agents wa ON wa.resource_id = wr.id
|
||||
WHERE wb.job_id = (SELECT job_id FROM workspace_resources WHERE workspace_resources.id = $1)
|
||||
GROUP BY wb.created_at, wb.transition, t.name, o.name, w.owner_id;
|
||||
|
||||
+120
-323
@@ -8,7 +8,6 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -18,6 +17,7 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtestutil"
|
||||
"github.com/coder/coder/v2/coderd/jobreaper"
|
||||
@@ -113,87 +113,33 @@ func TestDetectorHungWorkspaceBuild(t *testing.T) {
|
||||
)
|
||||
|
||||
var (
|
||||
now = time.Now()
|
||||
twentyMinAgo = now.Add(-time.Minute * 20)
|
||||
tenMinAgo = now.Add(-time.Minute * 10)
|
||||
sixMinAgo = now.Add(-time.Minute * 6)
|
||||
org = dbgen.Organization(t, db, database.Organization{})
|
||||
user = dbgen.User(t, db, database.User{})
|
||||
file = dbgen.File(t, db, database.File{})
|
||||
template = dbgen.Template(t, db, database.Template{
|
||||
OrganizationID: org.ID,
|
||||
CreatedBy: user.ID,
|
||||
})
|
||||
templateVersion = dbgen.TemplateVersion(t, db, database.TemplateVersion{
|
||||
OrganizationID: org.ID,
|
||||
TemplateID: uuid.NullUUID{
|
||||
UUID: template.ID,
|
||||
Valid: true,
|
||||
},
|
||||
CreatedBy: user.ID,
|
||||
})
|
||||
workspace = dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: user.ID,
|
||||
OrganizationID: org.ID,
|
||||
TemplateID: template.ID,
|
||||
})
|
||||
|
||||
// Previous build.
|
||||
now = time.Now()
|
||||
twentyMinAgo = now.Add(-time.Minute * 20)
|
||||
tenMinAgo = now.Add(-time.Minute * 10)
|
||||
sixMinAgo = now.Add(-time.Minute * 6)
|
||||
org = dbgen.Organization(t, db, database.Organization{})
|
||||
user = dbgen.User(t, db, database.User{})
|
||||
expectedWorkspaceBuildState = []byte(`{"dean":"cool","colin":"also cool"}`)
|
||||
previousWorkspaceBuildJob = dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
|
||||
CreatedAt: twentyMinAgo,
|
||||
UpdatedAt: twentyMinAgo,
|
||||
StartedAt: sql.NullTime{
|
||||
Time: twentyMinAgo,
|
||||
Valid: true,
|
||||
},
|
||||
CompletedAt: sql.NullTime{
|
||||
Time: twentyMinAgo,
|
||||
Valid: true,
|
||||
},
|
||||
OrganizationID: org.ID,
|
||||
InitiatorID: user.ID,
|
||||
Provisioner: database.ProvisionerTypeEcho,
|
||||
StorageMethod: database.ProvisionerStorageMethodFile,
|
||||
FileID: file.ID,
|
||||
Type: database.ProvisionerJobTypeWorkspaceBuild,
|
||||
Input: []byte("{}"),
|
||||
})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
|
||||
WorkspaceID: workspace.ID,
|
||||
TemplateVersionID: templateVersion.ID,
|
||||
BuildNumber: 1,
|
||||
ProvisionerState: expectedWorkspaceBuildState,
|
||||
JobID: previousWorkspaceBuildJob.ID,
|
||||
})
|
||||
|
||||
// Current build.
|
||||
currentWorkspaceBuildJob = dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
|
||||
CreatedAt: tenMinAgo,
|
||||
UpdatedAt: sixMinAgo,
|
||||
StartedAt: sql.NullTime{
|
||||
Time: tenMinAgo,
|
||||
Valid: true,
|
||||
},
|
||||
OrganizationID: org.ID,
|
||||
InitiatorID: user.ID,
|
||||
Provisioner: database.ProvisionerTypeEcho,
|
||||
StorageMethod: database.ProvisionerStorageMethodFile,
|
||||
FileID: file.ID,
|
||||
Type: database.ProvisionerJobTypeWorkspaceBuild,
|
||||
Input: []byte("{}"),
|
||||
})
|
||||
currentWorkspaceBuild = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
|
||||
WorkspaceID: workspace.ID,
|
||||
TemplateVersionID: templateVersion.ID,
|
||||
BuildNumber: 2,
|
||||
JobID: currentWorkspaceBuildJob.ID,
|
||||
// No provisioner state.
|
||||
})
|
||||
)
|
||||
|
||||
t.Log("previous job ID: ", previousWorkspaceBuildJob.ID)
|
||||
t.Log("current job ID: ", currentWorkspaceBuildJob.ID)
|
||||
// Previous build (completed successfully).
|
||||
previousBuild := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: org.ID,
|
||||
OwnerID: user.ID,
|
||||
}).Pubsub(pubsub).Seed(database.WorkspaceBuild{
|
||||
ProvisionerState: expectedWorkspaceBuildState,
|
||||
}).Succeeded(dbfake.WithJobCompletedAt(twentyMinAgo)).
|
||||
Do()
|
||||
|
||||
// Current build (hung - running job with UpdatedAt > 5 min ago).
|
||||
currentBuild := dbfake.WorkspaceBuild(t, db, previousBuild.Workspace).
|
||||
Pubsub(pubsub).
|
||||
Seed(database.WorkspaceBuild{BuildNumber: 2}).
|
||||
Starting(dbfake.WithJobStartedAt(tenMinAgo), dbfake.WithJobUpdatedAt(sixMinAgo)).
|
||||
Do()
|
||||
|
||||
t.Log("previous job ID: ", previousBuild.Build.JobID)
|
||||
t.Log("current job ID: ", currentBuild.Build.JobID)
|
||||
|
||||
detector := jobreaper.New(ctx, wrapDBAuthz(db, log), pubsub, log, tickCh).WithStatsChannel(statsCh)
|
||||
detector.Start()
|
||||
@@ -202,10 +148,10 @@ func TestDetectorHungWorkspaceBuild(t *testing.T) {
|
||||
stats := <-statsCh
|
||||
require.NoError(t, stats.Error)
|
||||
require.Len(t, stats.TerminatedJobIDs, 1)
|
||||
require.Equal(t, currentWorkspaceBuildJob.ID, stats.TerminatedJobIDs[0])
|
||||
require.Equal(t, currentBuild.Build.JobID, stats.TerminatedJobIDs[0])
|
||||
|
||||
// Check that the current provisioner job was updated.
|
||||
job, err := db.GetProvisionerJobByID(ctx, currentWorkspaceBuildJob.ID)
|
||||
job, err := db.GetProvisionerJobByID(ctx, currentBuild.Build.JobID)
|
||||
require.NoError(t, err)
|
||||
require.WithinDuration(t, now, job.UpdatedAt, 30*time.Second)
|
||||
require.True(t, job.CompletedAt.Valid)
|
||||
@@ -215,7 +161,7 @@ func TestDetectorHungWorkspaceBuild(t *testing.T) {
|
||||
require.False(t, job.ErrorCode.Valid)
|
||||
|
||||
// Check that the provisioner state was copied.
|
||||
build, err := db.GetWorkspaceBuildByID(ctx, currentWorkspaceBuild.ID)
|
||||
build, err := db.GetWorkspaceBuildByID(ctx, currentBuild.Build.ID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedWorkspaceBuildState, build.ProvisionerState)
|
||||
|
||||
@@ -235,88 +181,37 @@ func TestDetectorHungWorkspaceBuildNoOverrideState(t *testing.T) {
|
||||
)
|
||||
|
||||
var (
|
||||
now = time.Now()
|
||||
twentyMinAgo = now.Add(-time.Minute * 20)
|
||||
tenMinAgo = now.Add(-time.Minute * 10)
|
||||
sixMinAgo = now.Add(-time.Minute * 6)
|
||||
org = dbgen.Organization(t, db, database.Organization{})
|
||||
user = dbgen.User(t, db, database.User{})
|
||||
file = dbgen.File(t, db, database.File{})
|
||||
template = dbgen.Template(t, db, database.Template{
|
||||
OrganizationID: org.ID,
|
||||
CreatedBy: user.ID,
|
||||
})
|
||||
templateVersion = dbgen.TemplateVersion(t, db, database.TemplateVersion{
|
||||
OrganizationID: org.ID,
|
||||
TemplateID: uuid.NullUUID{
|
||||
UUID: template.ID,
|
||||
Valid: true,
|
||||
},
|
||||
CreatedBy: user.ID,
|
||||
})
|
||||
workspace = dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: user.ID,
|
||||
OrganizationID: org.ID,
|
||||
TemplateID: template.ID,
|
||||
})
|
||||
|
||||
// Previous build.
|
||||
previousWorkspaceBuildJob = dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
|
||||
CreatedAt: twentyMinAgo,
|
||||
UpdatedAt: twentyMinAgo,
|
||||
StartedAt: sql.NullTime{
|
||||
Time: twentyMinAgo,
|
||||
Valid: true,
|
||||
},
|
||||
CompletedAt: sql.NullTime{
|
||||
Time: twentyMinAgo,
|
||||
Valid: true,
|
||||
},
|
||||
OrganizationID: org.ID,
|
||||
InitiatorID: user.ID,
|
||||
Provisioner: database.ProvisionerTypeEcho,
|
||||
StorageMethod: database.ProvisionerStorageMethodFile,
|
||||
FileID: file.ID,
|
||||
Type: database.ProvisionerJobTypeWorkspaceBuild,
|
||||
Input: []byte("{}"),
|
||||
})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
|
||||
WorkspaceID: workspace.ID,
|
||||
TemplateVersionID: templateVersion.ID,
|
||||
BuildNumber: 1,
|
||||
ProvisionerState: []byte(`{"dean":"NOT cool","colin":"also NOT cool"}`),
|
||||
JobID: previousWorkspaceBuildJob.ID,
|
||||
})
|
||||
|
||||
// Current build.
|
||||
now = time.Now()
|
||||
twentyMinAgo = now.Add(-time.Minute * 20)
|
||||
tenMinAgo = now.Add(-time.Minute * 10)
|
||||
sixMinAgo = now.Add(-time.Minute * 6)
|
||||
org = dbgen.Organization(t, db, database.Organization{})
|
||||
user = dbgen.User(t, db, database.User{})
|
||||
expectedWorkspaceBuildState = []byte(`{"dean":"cool","colin":"also cool"}`)
|
||||
currentWorkspaceBuildJob = dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
|
||||
CreatedAt: tenMinAgo,
|
||||
UpdatedAt: sixMinAgo,
|
||||
StartedAt: sql.NullTime{
|
||||
Time: tenMinAgo,
|
||||
Valid: true,
|
||||
},
|
||||
OrganizationID: org.ID,
|
||||
InitiatorID: user.ID,
|
||||
Provisioner: database.ProvisionerTypeEcho,
|
||||
StorageMethod: database.ProvisionerStorageMethodFile,
|
||||
FileID: file.ID,
|
||||
Type: database.ProvisionerJobTypeWorkspaceBuild,
|
||||
Input: []byte("{}"),
|
||||
})
|
||||
currentWorkspaceBuild = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
|
||||
WorkspaceID: workspace.ID,
|
||||
TemplateVersionID: templateVersion.ID,
|
||||
BuildNumber: 2,
|
||||
JobID: currentWorkspaceBuildJob.ID,
|
||||
// Should not be overridden.
|
||||
ProvisionerState: expectedWorkspaceBuildState,
|
||||
})
|
||||
)
|
||||
|
||||
t.Log("previous job ID: ", previousWorkspaceBuildJob.ID)
|
||||
t.Log("current job ID: ", currentWorkspaceBuildJob.ID)
|
||||
// Previous build (completed successfully).
|
||||
previousBuild := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: org.ID,
|
||||
OwnerID: user.ID,
|
||||
}).Pubsub(pubsub).Seed(database.WorkspaceBuild{
|
||||
ProvisionerState: []byte(`{"dean":"NOT cool","colin":"also NOT cool"}`),
|
||||
}).Succeeded(dbfake.WithJobCompletedAt(twentyMinAgo)).
|
||||
Do()
|
||||
|
||||
// Current build (hung - running job with UpdatedAt > 5 min ago).
|
||||
// This build already has provisioner state, which should NOT be overridden.
|
||||
currentBuild := dbfake.WorkspaceBuild(t, db, previousBuild.Workspace).
|
||||
Pubsub(pubsub).
|
||||
Seed(database.WorkspaceBuild{
|
||||
BuildNumber: 2,
|
||||
ProvisionerState: expectedWorkspaceBuildState,
|
||||
}).
|
||||
Starting(dbfake.WithJobStartedAt(tenMinAgo), dbfake.WithJobUpdatedAt(sixMinAgo)).
|
||||
Do()
|
||||
|
||||
t.Log("previous job ID: ", previousBuild.Build.JobID)
|
||||
t.Log("current job ID: ", currentBuild.Build.JobID)
|
||||
|
||||
detector := jobreaper.New(ctx, wrapDBAuthz(db, log), pubsub, log, tickCh).WithStatsChannel(statsCh)
|
||||
detector.Start()
|
||||
@@ -325,10 +220,10 @@ func TestDetectorHungWorkspaceBuildNoOverrideState(t *testing.T) {
|
||||
stats := <-statsCh
|
||||
require.NoError(t, stats.Error)
|
||||
require.Len(t, stats.TerminatedJobIDs, 1)
|
||||
require.Equal(t, currentWorkspaceBuildJob.ID, stats.TerminatedJobIDs[0])
|
||||
require.Equal(t, currentBuild.Build.JobID, stats.TerminatedJobIDs[0])
|
||||
|
||||
// Check that the current provisioner job was updated.
|
||||
job, err := db.GetProvisionerJobByID(ctx, currentWorkspaceBuildJob.ID)
|
||||
job, err := db.GetProvisionerJobByID(ctx, currentBuild.Build.JobID)
|
||||
require.NoError(t, err)
|
||||
require.WithinDuration(t, now, job.UpdatedAt, 30*time.Second)
|
||||
require.True(t, job.CompletedAt.Valid)
|
||||
@@ -338,7 +233,7 @@ func TestDetectorHungWorkspaceBuildNoOverrideState(t *testing.T) {
|
||||
require.False(t, job.ErrorCode.Valid)
|
||||
|
||||
// Check that the provisioner state was NOT copied.
|
||||
build, err := db.GetWorkspaceBuildByID(ctx, currentWorkspaceBuild.ID)
|
||||
build, err := db.GetWorkspaceBuildByID(ctx, currentBuild.Build.ID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedWorkspaceBuildState, build.ProvisionerState)
|
||||
|
||||
@@ -358,58 +253,25 @@ func TestDetectorHungWorkspaceBuildNoOverrideStateIfNoExistingBuild(t *testing.T
|
||||
)
|
||||
|
||||
var (
|
||||
now = time.Now()
|
||||
tenMinAgo = now.Add(-time.Minute * 10)
|
||||
sixMinAgo = now.Add(-time.Minute * 6)
|
||||
org = dbgen.Organization(t, db, database.Organization{})
|
||||
user = dbgen.User(t, db, database.User{})
|
||||
file = dbgen.File(t, db, database.File{})
|
||||
template = dbgen.Template(t, db, database.Template{
|
||||
OrganizationID: org.ID,
|
||||
CreatedBy: user.ID,
|
||||
})
|
||||
templateVersion = dbgen.TemplateVersion(t, db, database.TemplateVersion{
|
||||
OrganizationID: org.ID,
|
||||
TemplateID: uuid.NullUUID{
|
||||
UUID: template.ID,
|
||||
Valid: true,
|
||||
},
|
||||
CreatedBy: user.ID,
|
||||
})
|
||||
workspace = dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: user.ID,
|
||||
OrganizationID: org.ID,
|
||||
TemplateID: template.ID,
|
||||
})
|
||||
|
||||
// First build.
|
||||
now = time.Now()
|
||||
tenMinAgo = now.Add(-time.Minute * 10)
|
||||
sixMinAgo = now.Add(-time.Minute * 6)
|
||||
org = dbgen.Organization(t, db, database.Organization{})
|
||||
user = dbgen.User(t, db, database.User{})
|
||||
expectedWorkspaceBuildState = []byte(`{"dean":"cool","colin":"also cool"}`)
|
||||
currentWorkspaceBuildJob = dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
|
||||
CreatedAt: tenMinAgo,
|
||||
UpdatedAt: sixMinAgo,
|
||||
StartedAt: sql.NullTime{
|
||||
Time: tenMinAgo,
|
||||
Valid: true,
|
||||
},
|
||||
OrganizationID: org.ID,
|
||||
InitiatorID: user.ID,
|
||||
Provisioner: database.ProvisionerTypeEcho,
|
||||
StorageMethod: database.ProvisionerStorageMethodFile,
|
||||
FileID: file.ID,
|
||||
Type: database.ProvisionerJobTypeWorkspaceBuild,
|
||||
Input: []byte("{}"),
|
||||
})
|
||||
currentWorkspaceBuild = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
|
||||
WorkspaceID: workspace.ID,
|
||||
TemplateVersionID: templateVersion.ID,
|
||||
BuildNumber: 1,
|
||||
JobID: currentWorkspaceBuildJob.ID,
|
||||
// Should not be overridden.
|
||||
ProvisionerState: expectedWorkspaceBuildState,
|
||||
})
|
||||
)
|
||||
|
||||
t.Log("current job ID: ", currentWorkspaceBuildJob.ID)
|
||||
// First build (hung - no previous build exists).
|
||||
// This build has provisioner state, which should NOT be overridden.
|
||||
currentBuild := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: org.ID,
|
||||
OwnerID: user.ID,
|
||||
}).Pubsub(pubsub).Seed(database.WorkspaceBuild{
|
||||
ProvisionerState: expectedWorkspaceBuildState,
|
||||
}).Starting(dbfake.WithJobStartedAt(tenMinAgo), dbfake.WithJobUpdatedAt(sixMinAgo)).
|
||||
Do()
|
||||
|
||||
t.Log("current job ID: ", currentBuild.Build.JobID)
|
||||
|
||||
detector := jobreaper.New(ctx, wrapDBAuthz(db, log), pubsub, log, tickCh).WithStatsChannel(statsCh)
|
||||
detector.Start()
|
||||
@@ -418,10 +280,10 @@ func TestDetectorHungWorkspaceBuildNoOverrideStateIfNoExistingBuild(t *testing.T
|
||||
stats := <-statsCh
|
||||
require.NoError(t, stats.Error)
|
||||
require.Len(t, stats.TerminatedJobIDs, 1)
|
||||
require.Equal(t, currentWorkspaceBuildJob.ID, stats.TerminatedJobIDs[0])
|
||||
require.Equal(t, currentBuild.Build.JobID, stats.TerminatedJobIDs[0])
|
||||
|
||||
// Check that the current provisioner job was updated.
|
||||
job, err := db.GetProvisionerJobByID(ctx, currentWorkspaceBuildJob.ID)
|
||||
job, err := db.GetProvisionerJobByID(ctx, currentBuild.Build.JobID)
|
||||
require.NoError(t, err)
|
||||
require.WithinDuration(t, now, job.UpdatedAt, 30*time.Second)
|
||||
require.True(t, job.CompletedAt.Valid)
|
||||
@@ -431,7 +293,7 @@ func TestDetectorHungWorkspaceBuildNoOverrideStateIfNoExistingBuild(t *testing.T
|
||||
require.False(t, job.ErrorCode.Valid)
|
||||
|
||||
// Check that the provisioner state was NOT updated.
|
||||
build, err := db.GetWorkspaceBuildByID(ctx, currentWorkspaceBuild.ID)
|
||||
build, err := db.GetWorkspaceBuildByID(ctx, currentBuild.Build.ID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedWorkspaceBuildState, build.ProvisionerState)
|
||||
|
||||
@@ -451,57 +313,24 @@ func TestDetectorPendingWorkspaceBuildNoOverrideStateIfNoExistingBuild(t *testin
|
||||
)
|
||||
|
||||
var (
|
||||
now = time.Now()
|
||||
thirtyFiveMinAgo = now.Add(-time.Minute * 35)
|
||||
org = dbgen.Organization(t, db, database.Organization{})
|
||||
user = dbgen.User(t, db, database.User{})
|
||||
file = dbgen.File(t, db, database.File{})
|
||||
template = dbgen.Template(t, db, database.Template{
|
||||
OrganizationID: org.ID,
|
||||
CreatedBy: user.ID,
|
||||
})
|
||||
templateVersion = dbgen.TemplateVersion(t, db, database.TemplateVersion{
|
||||
OrganizationID: org.ID,
|
||||
TemplateID: uuid.NullUUID{
|
||||
UUID: template.ID,
|
||||
Valid: true,
|
||||
},
|
||||
CreatedBy: user.ID,
|
||||
})
|
||||
workspace = dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: user.ID,
|
||||
OrganizationID: org.ID,
|
||||
TemplateID: template.ID,
|
||||
})
|
||||
|
||||
// First build.
|
||||
now = time.Now()
|
||||
thirtyFiveMinAgo = now.Add(-time.Minute * 35)
|
||||
org = dbgen.Organization(t, db, database.Organization{})
|
||||
user = dbgen.User(t, db, database.User{})
|
||||
expectedWorkspaceBuildState = []byte(`{"dean":"cool","colin":"also cool"}`)
|
||||
currentWorkspaceBuildJob = dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
|
||||
CreatedAt: thirtyFiveMinAgo,
|
||||
UpdatedAt: thirtyFiveMinAgo,
|
||||
StartedAt: sql.NullTime{
|
||||
Time: time.Time{},
|
||||
Valid: false,
|
||||
},
|
||||
OrganizationID: org.ID,
|
||||
InitiatorID: user.ID,
|
||||
Provisioner: database.ProvisionerTypeEcho,
|
||||
StorageMethod: database.ProvisionerStorageMethodFile,
|
||||
FileID: file.ID,
|
||||
Type: database.ProvisionerJobTypeWorkspaceBuild,
|
||||
Input: []byte("{}"),
|
||||
})
|
||||
currentWorkspaceBuild = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
|
||||
WorkspaceID: workspace.ID,
|
||||
TemplateVersionID: templateVersion.ID,
|
||||
BuildNumber: 1,
|
||||
JobID: currentWorkspaceBuildJob.ID,
|
||||
// Should not be overridden.
|
||||
ProvisionerState: expectedWorkspaceBuildState,
|
||||
})
|
||||
)
|
||||
|
||||
t.Log("current job ID: ", currentWorkspaceBuildJob.ID)
|
||||
// First build (hung pending - no previous build exists).
|
||||
// This build has provisioner state, which should NOT be overridden.
|
||||
currentBuild := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: org.ID,
|
||||
OwnerID: user.ID,
|
||||
}).Pubsub(pubsub).Seed(database.WorkspaceBuild{
|
||||
ProvisionerState: expectedWorkspaceBuildState,
|
||||
}).Pending(dbfake.WithJobCreatedAt(thirtyFiveMinAgo), dbfake.WithJobUpdatedAt(thirtyFiveMinAgo)).
|
||||
Do()
|
||||
|
||||
t.Log("current job ID: ", currentBuild.Build.JobID)
|
||||
|
||||
detector := jobreaper.New(ctx, wrapDBAuthz(db, log), pubsub, log, tickCh).WithStatsChannel(statsCh)
|
||||
detector.Start()
|
||||
@@ -510,10 +339,10 @@ func TestDetectorPendingWorkspaceBuildNoOverrideStateIfNoExistingBuild(t *testin
|
||||
stats := <-statsCh
|
||||
require.NoError(t, stats.Error)
|
||||
require.Len(t, stats.TerminatedJobIDs, 1)
|
||||
require.Equal(t, currentWorkspaceBuildJob.ID, stats.TerminatedJobIDs[0])
|
||||
require.Equal(t, currentBuild.Build.JobID, stats.TerminatedJobIDs[0])
|
||||
|
||||
// Check that the current provisioner job was updated.
|
||||
job, err := db.GetProvisionerJobByID(ctx, currentWorkspaceBuildJob.ID)
|
||||
job, err := db.GetProvisionerJobByID(ctx, currentBuild.Build.JobID)
|
||||
require.NoError(t, err)
|
||||
require.WithinDuration(t, now, job.UpdatedAt, 30*time.Second)
|
||||
require.True(t, job.CompletedAt.Valid)
|
||||
@@ -525,7 +354,7 @@ func TestDetectorPendingWorkspaceBuildNoOverrideStateIfNoExistingBuild(t *testin
|
||||
require.False(t, job.ErrorCode.Valid)
|
||||
|
||||
// Check that the provisioner state was NOT updated.
|
||||
build, err := db.GetWorkspaceBuildByID(ctx, currentWorkspaceBuild.ID)
|
||||
build, err := db.GetWorkspaceBuildByID(ctx, currentBuild.Build.ID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedWorkspaceBuildState, build.ProvisionerState)
|
||||
|
||||
@@ -551,66 +380,34 @@ func TestDetectorWorkspaceBuildForDormantWorkspace(t *testing.T) {
|
||||
)
|
||||
|
||||
var (
|
||||
now = time.Now()
|
||||
tenMinAgo = now.Add(-time.Minute * 10)
|
||||
sixMinAgo = now.Add(-time.Minute * 6)
|
||||
org = dbgen.Organization(t, db, database.Organization{})
|
||||
user = dbgen.User(t, db, database.User{})
|
||||
file = dbgen.File(t, db, database.File{})
|
||||
template = dbgen.Template(t, db, database.Template{
|
||||
OrganizationID: org.ID,
|
||||
CreatedBy: user.ID,
|
||||
})
|
||||
templateVersion = dbgen.TemplateVersion(t, db, database.TemplateVersion{
|
||||
OrganizationID: org.ID,
|
||||
TemplateID: uuid.NullUUID{
|
||||
UUID: template.ID,
|
||||
Valid: true,
|
||||
},
|
||||
CreatedBy: user.ID,
|
||||
})
|
||||
workspace = dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: user.ID,
|
||||
OrganizationID: org.ID,
|
||||
TemplateID: template.ID,
|
||||
DormantAt: sql.NullTime{
|
||||
Time: now.Add(-time.Hour),
|
||||
Valid: true,
|
||||
},
|
||||
})
|
||||
|
||||
// First build.
|
||||
now = time.Now()
|
||||
tenMinAgo = now.Add(-time.Minute * 10)
|
||||
sixMinAgo = now.Add(-time.Minute * 6)
|
||||
org = dbgen.Organization(t, db, database.Organization{})
|
||||
user = dbgen.User(t, db, database.User{})
|
||||
expectedWorkspaceBuildState = []byte(`{"dean":"cool","colin":"also cool"}`)
|
||||
currentWorkspaceBuildJob = dbgen.ProvisionerJob(t, db, pubsub, database.ProvisionerJob{
|
||||
CreatedAt: tenMinAgo,
|
||||
UpdatedAt: sixMinAgo,
|
||||
StartedAt: sql.NullTime{
|
||||
Time: tenMinAgo,
|
||||
Valid: true,
|
||||
},
|
||||
OrganizationID: org.ID,
|
||||
InitiatorID: user.ID,
|
||||
Provisioner: database.ProvisionerTypeEcho,
|
||||
StorageMethod: database.ProvisionerStorageMethodFile,
|
||||
FileID: file.ID,
|
||||
Type: database.ProvisionerJobTypeWorkspaceBuild,
|
||||
Input: []byte("{}"),
|
||||
})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
|
||||
WorkspaceID: workspace.ID,
|
||||
TemplateVersionID: templateVersion.ID,
|
||||
BuildNumber: 1,
|
||||
JobID: currentWorkspaceBuildJob.ID,
|
||||
// Should not be overridden.
|
||||
ProvisionerState: expectedWorkspaceBuildState,
|
||||
})
|
||||
)
|
||||
|
||||
t.Log("current job ID: ", currentWorkspaceBuildJob.ID)
|
||||
// First build (hung - running job with UpdatedAt > 5 min ago).
|
||||
// This build has provisioner state, which should NOT be overridden.
|
||||
// The workspace is dormant from the start.
|
||||
currentBuild := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: org.ID,
|
||||
OwnerID: user.ID,
|
||||
DormantAt: sql.NullTime{
|
||||
Time: now.Add(-time.Hour),
|
||||
Valid: true,
|
||||
},
|
||||
}).Pubsub(pubsub).Seed(database.WorkspaceBuild{
|
||||
ProvisionerState: expectedWorkspaceBuildState,
|
||||
}).Starting(dbfake.WithJobStartedAt(tenMinAgo), dbfake.WithJobUpdatedAt(sixMinAgo)).
|
||||
Do()
|
||||
|
||||
t.Log("current job ID: ", currentBuild.Build.JobID)
|
||||
|
||||
// Ensure the RBAC is the dormant type to ensure we're testing the right
|
||||
// thing.
|
||||
require.Equal(t, rbac.ResourceWorkspaceDormant.Type, workspace.RBACObject().Type)
|
||||
require.Equal(t, rbac.ResourceWorkspaceDormant.Type, currentBuild.Workspace.RBACObject().Type)
|
||||
|
||||
detector := jobreaper.New(ctx, wrapDBAuthz(db, log), pubsub, log, tickCh).WithStatsChannel(statsCh)
|
||||
detector.Start()
|
||||
@@ -619,10 +416,10 @@ func TestDetectorWorkspaceBuildForDormantWorkspace(t *testing.T) {
|
||||
stats := <-statsCh
|
||||
require.NoError(t, stats.Error)
|
||||
require.Len(t, stats.TerminatedJobIDs, 1)
|
||||
require.Equal(t, currentWorkspaceBuildJob.ID, stats.TerminatedJobIDs[0])
|
||||
require.Equal(t, currentBuild.Build.JobID, stats.TerminatedJobIDs[0])
|
||||
|
||||
// Check that the current provisioner job was updated.
|
||||
job, err := db.GetProvisionerJobByID(ctx, currentWorkspaceBuildJob.ID)
|
||||
job, err := db.GetProvisionerJobByID(ctx, currentBuild.Build.JobID)
|
||||
require.NoError(t, err)
|
||||
require.WithinDuration(t, now, job.UpdatedAt, 30*time.Second)
|
||||
require.True(t, job.CompletedAt.Valid)
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtestutil"
|
||||
"github.com/coder/coder/v2/coderd/database/pubsub"
|
||||
@@ -92,8 +93,11 @@ func TestReportFailedWorkspaceBuilds(t *testing.T) {
|
||||
// Workspaces
|
||||
w1 := dbgen.Workspace(t, db, database.WorkspaceTable{TemplateID: t1.ID, OwnerID: user1.ID, OrganizationID: org.ID})
|
||||
|
||||
w1wb1pj := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{OrganizationID: org.ID, Error: jobError, ErrorCode: jobErrorCode, CompletedAt: sql.NullTime{Time: now.Add(-6 * dayDuration), Valid: true}})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{WorkspaceID: w1.ID, BuildNumber: 1, TemplateVersionID: t1v1.ID, JobID: w1wb1pj.ID, CreatedAt: now.Add(-2 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator})
|
||||
_ = dbfake.WorkspaceBuild(t, db, w1).
|
||||
Pubsub(ps).
|
||||
Seed(database.WorkspaceBuild{BuildNumber: 1, TemplateVersionID: t1v1.ID, CreatedAt: now.Add(-2 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator}).
|
||||
Failed(dbfake.WithJobError(jobError.String), dbfake.WithJobErrorCode(jobErrorCode.String), dbfake.WithJobCompletedAt(now.Add(-6*dayDuration))).
|
||||
Do()
|
||||
|
||||
// When: first run
|
||||
notifEnq.Clear()
|
||||
@@ -178,27 +182,54 @@ func TestReportFailedWorkspaceBuilds(t *testing.T) {
|
||||
now := clk.Now()
|
||||
|
||||
// Workspace builds
|
||||
w1wb1pj := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{OrganizationID: org.ID, Error: jobError, ErrorCode: jobErrorCode, CompletedAt: sql.NullTime{Time: now.Add(-6 * dayDuration), Valid: true}})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{WorkspaceID: w1.ID, BuildNumber: 1, TemplateVersionID: t1v1.ID, JobID: w1wb1pj.ID, CreatedAt: now.Add(-6 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator})
|
||||
w1wb2pj := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{OrganizationID: org.ID, CompletedAt: sql.NullTime{Time: now.Add(-5 * dayDuration), Valid: true}})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{WorkspaceID: w1.ID, BuildNumber: 2, TemplateVersionID: t1v2.ID, JobID: w1wb2pj.ID, CreatedAt: now.Add(-5 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator})
|
||||
w1wb3pj := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{OrganizationID: org.ID, Error: jobError, ErrorCode: jobErrorCode, CompletedAt: sql.NullTime{Time: now.Add(-4 * dayDuration), Valid: true}})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{WorkspaceID: w1.ID, BuildNumber: 3, TemplateVersionID: t1v2.ID, JobID: w1wb3pj.ID, CreatedAt: now.Add(-4 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator})
|
||||
_ = dbfake.WorkspaceBuild(t, db, w1).
|
||||
Pubsub(ps).
|
||||
Seed(database.WorkspaceBuild{BuildNumber: 1, TemplateVersionID: t1v1.ID, CreatedAt: now.Add(-6 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator}).
|
||||
Failed(dbfake.WithJobError(jobError.String), dbfake.WithJobErrorCode(jobErrorCode.String), dbfake.WithJobCompletedAt(now.Add(-6*dayDuration))).
|
||||
Do()
|
||||
_ = dbfake.WorkspaceBuild(t, db, w1).
|
||||
Pubsub(ps).
|
||||
Seed(database.WorkspaceBuild{BuildNumber: 2, TemplateVersionID: t1v2.ID, CreatedAt: now.Add(-5 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator}).
|
||||
Succeeded(dbfake.WithJobCompletedAt(now.Add(-5 * dayDuration))).
|
||||
Do()
|
||||
_ = dbfake.WorkspaceBuild(t, db, w1).
|
||||
Pubsub(ps).
|
||||
Seed(database.WorkspaceBuild{BuildNumber: 3, TemplateVersionID: t1v2.ID, CreatedAt: now.Add(-4 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator}).
|
||||
Failed(dbfake.WithJobError(jobError.String), dbfake.WithJobErrorCode(jobErrorCode.String), dbfake.WithJobCompletedAt(now.Add(-4*dayDuration))).
|
||||
Do()
|
||||
|
||||
w2wb1pj := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{OrganizationID: org.ID, CompletedAt: sql.NullTime{Time: now.Add(-5 * dayDuration), Valid: true}})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{WorkspaceID: w2.ID, BuildNumber: 4, TemplateVersionID: t2v1.ID, JobID: w2wb1pj.ID, CreatedAt: now.Add(-5 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator})
|
||||
w2wb2pj := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{OrganizationID: org.ID, Error: jobError, ErrorCode: jobErrorCode, CompletedAt: sql.NullTime{Time: now.Add(-4 * dayDuration), Valid: true}})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{WorkspaceID: w2.ID, BuildNumber: 5, TemplateVersionID: t2v2.ID, JobID: w2wb2pj.ID, CreatedAt: now.Add(-4 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator})
|
||||
w2wb3pj := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{OrganizationID: org.ID, Error: jobError, ErrorCode: jobErrorCode, CompletedAt: sql.NullTime{Time: now.Add(-3 * dayDuration), Valid: true}})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{WorkspaceID: w2.ID, BuildNumber: 6, TemplateVersionID: t2v2.ID, JobID: w2wb3pj.ID, CreatedAt: now.Add(-3 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator})
|
||||
_ = dbfake.WorkspaceBuild(t, db, w2).
|
||||
Pubsub(ps).
|
||||
Seed(database.WorkspaceBuild{BuildNumber: 4, TemplateVersionID: t2v1.ID, CreatedAt: now.Add(-5 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator}).
|
||||
Succeeded(dbfake.WithJobCompletedAt(now.Add(-5 * dayDuration))).
|
||||
Do()
|
||||
_ = dbfake.WorkspaceBuild(t, db, w2).
|
||||
Pubsub(ps).
|
||||
Seed(database.WorkspaceBuild{BuildNumber: 5, TemplateVersionID: t2v2.ID, CreatedAt: now.Add(-4 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator}).
|
||||
Failed(dbfake.WithJobError(jobError.String), dbfake.WithJobErrorCode(jobErrorCode.String), dbfake.WithJobCompletedAt(now.Add(-4*dayDuration))).
|
||||
Do()
|
||||
_ = dbfake.WorkspaceBuild(t, db, w2).
|
||||
Pubsub(ps).
|
||||
Seed(database.WorkspaceBuild{BuildNumber: 6, TemplateVersionID: t2v2.ID, CreatedAt: now.Add(-3 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator}).
|
||||
Failed(dbfake.WithJobError(jobError.String), dbfake.WithJobErrorCode(jobErrorCode.String), dbfake.WithJobCompletedAt(now.Add(-3*dayDuration))).
|
||||
Do()
|
||||
|
||||
w3wb1pj := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{OrganizationID: org.ID, Error: jobError, ErrorCode: jobErrorCode, CompletedAt: sql.NullTime{Time: now.Add(-3 * dayDuration), Valid: true}})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{WorkspaceID: w3.ID, BuildNumber: 7, TemplateVersionID: t1v1.ID, JobID: w3wb1pj.ID, CreatedAt: now.Add(-3 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator})
|
||||
_ = dbfake.WorkspaceBuild(t, db, w3).
|
||||
Pubsub(ps).
|
||||
Seed(database.WorkspaceBuild{BuildNumber: 7, TemplateVersionID: t1v1.ID, CreatedAt: now.Add(-3 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator}).
|
||||
Failed(dbfake.WithJobError(jobError.String), dbfake.WithJobErrorCode(jobErrorCode.String), dbfake.WithJobCompletedAt(now.Add(-3*dayDuration))).
|
||||
Do()
|
||||
|
||||
w4wb1pj := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{OrganizationID: org.ID, Error: jobError, ErrorCode: jobErrorCode, CompletedAt: sql.NullTime{Time: now.Add(-6 * dayDuration), Valid: true}})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{WorkspaceID: w4.ID, BuildNumber: 8, TemplateVersionID: t2v1.ID, JobID: w4wb1pj.ID, CreatedAt: now.Add(-6 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator})
|
||||
w4wb2pj := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{OrganizationID: org.ID, CompletedAt: sql.NullTime{Time: now.Add(-dayDuration), Valid: true}})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{WorkspaceID: w4.ID, BuildNumber: 9, TemplateVersionID: t2v2.ID, JobID: w4wb2pj.ID, CreatedAt: now.Add(-dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator})
|
||||
_ = dbfake.WorkspaceBuild(t, db, w4).
|
||||
Pubsub(ps).
|
||||
Seed(database.WorkspaceBuild{BuildNumber: 8, TemplateVersionID: t2v1.ID, CreatedAt: now.Add(-6 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator}).
|
||||
Failed(dbfake.WithJobError(jobError.String), dbfake.WithJobErrorCode(jobErrorCode.String), dbfake.WithJobCompletedAt(now.Add(-6*dayDuration))).
|
||||
Do()
|
||||
_ = dbfake.WorkspaceBuild(t, db, w4).
|
||||
Pubsub(ps).
|
||||
Seed(database.WorkspaceBuild{BuildNumber: 9, TemplateVersionID: t2v2.ID, CreatedAt: now.Add(-dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator}).
|
||||
Succeeded(dbfake.WithJobCompletedAt(now.Add(-dayDuration))).
|
||||
Do()
|
||||
|
||||
// When
|
||||
notifEnq.Clear()
|
||||
@@ -275,8 +306,11 @@ func TestReportFailedWorkspaceBuilds(t *testing.T) {
|
||||
clk.Advance(6 * dayDuration).MustWait(context.Background())
|
||||
now = clk.Now()
|
||||
|
||||
w1wb4pj := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{OrganizationID: org.ID, Error: jobError, ErrorCode: jobErrorCode, CompletedAt: sql.NullTime{Time: now.Add(-dayDuration), Valid: true}})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{WorkspaceID: w1.ID, BuildNumber: 77, TemplateVersionID: t1v2.ID, JobID: w1wb4pj.ID, CreatedAt: now.Add(-dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator})
|
||||
_ = dbfake.WorkspaceBuild(t, db, w1).
|
||||
Pubsub(ps).
|
||||
Seed(database.WorkspaceBuild{BuildNumber: 77, TemplateVersionID: t1v2.ID, CreatedAt: now.Add(-dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator}).
|
||||
Failed(dbfake.WithJobError(jobError.String), dbfake.WithJobErrorCode(jobErrorCode.String), dbfake.WithJobCompletedAt(now.Add(-dayDuration))).
|
||||
Do()
|
||||
|
||||
// When
|
||||
notifEnq.Clear()
|
||||
@@ -380,17 +414,26 @@ func TestReportFailedWorkspaceBuilds(t *testing.T) {
|
||||
now := clk.Now()
|
||||
|
||||
// Workspace builds
|
||||
pj0 := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{OrganizationID: org.ID, CompletedAt: sql.NullTime{Time: now.Add(-24 * time.Hour), Valid: true}})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{WorkspaceID: w1.ID, BuildNumber: 777, TemplateVersionID: t1v1.ID, JobID: pj0.ID, CreatedAt: now.Add(-24 * time.Hour), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator})
|
||||
_ = dbfake.WorkspaceBuild(t, db, w1).
|
||||
Pubsub(ps).
|
||||
Seed(database.WorkspaceBuild{BuildNumber: 777, TemplateVersionID: t1v1.ID, CreatedAt: now.Add(-24 * time.Hour), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator}).
|
||||
Succeeded(dbfake.WithJobCompletedAt(now.Add(-24 * time.Hour))).
|
||||
Do()
|
||||
|
||||
for i := 1; i <= 23; i++ {
|
||||
at := now.Add(-time.Duration(i) * time.Hour)
|
||||
|
||||
pj1 := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{OrganizationID: org.ID, Error: jobError, ErrorCode: jobErrorCode, CompletedAt: sql.NullTime{Time: at, Valid: true}})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{WorkspaceID: w1.ID, BuildNumber: int32(i), TemplateVersionID: t1v1.ID, JobID: pj1.ID, CreatedAt: at, Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator}) // nolint:gosec
|
||||
_ = dbfake.WorkspaceBuild(t, db, w1).
|
||||
Pubsub(ps).
|
||||
Seed(database.WorkspaceBuild{BuildNumber: int32(i), TemplateVersionID: t1v1.ID, CreatedAt: at, Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator}). // nolint:gosec
|
||||
Failed(dbfake.WithJobError(jobError.String), dbfake.WithJobErrorCode(jobErrorCode.String), dbfake.WithJobCompletedAt(at)).
|
||||
Do()
|
||||
|
||||
pj2 := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{OrganizationID: org.ID, Error: jobError, ErrorCode: jobErrorCode, CompletedAt: sql.NullTime{Time: at, Valid: true}})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{WorkspaceID: w1.ID, BuildNumber: int32(i) + 100, TemplateVersionID: t1v2.ID, JobID: pj2.ID, CreatedAt: at, Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator}) // nolint:gosec
|
||||
_ = dbfake.WorkspaceBuild(t, db, w1).
|
||||
Pubsub(ps).
|
||||
Seed(database.WorkspaceBuild{BuildNumber: int32(i) + 100, TemplateVersionID: t1v2.ID, CreatedAt: at, Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator}). // nolint:gosec
|
||||
Failed(dbfake.WithJobError(jobError.String), dbfake.WithJobErrorCode(jobErrorCode.String), dbfake.WithJobCompletedAt(at)).
|
||||
Do()
|
||||
}
|
||||
|
||||
// When
|
||||
@@ -486,10 +529,16 @@ func TestReportFailedWorkspaceBuilds(t *testing.T) {
|
||||
now := clk.Now()
|
||||
|
||||
// Workspace builds
|
||||
w1wb1pj := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{OrganizationID: org.ID, CompletedAt: sql.NullTime{Time: now.Add(-6 * dayDuration), Valid: true}})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{WorkspaceID: w1.ID, BuildNumber: 1, TemplateVersionID: t1v1.ID, JobID: w1wb1pj.ID, CreatedAt: now.Add(-2 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator})
|
||||
w1wb2pj := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{OrganizationID: org.ID, CompletedAt: sql.NullTime{Time: now.Add(-5 * dayDuration), Valid: true}})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{WorkspaceID: w1.ID, BuildNumber: 2, TemplateVersionID: t1v1.ID, JobID: w1wb2pj.ID, CreatedAt: now.Add(-1 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator})
|
||||
_ = dbfake.WorkspaceBuild(t, db, w1).
|
||||
Pubsub(ps).
|
||||
Seed(database.WorkspaceBuild{BuildNumber: 1, TemplateVersionID: t1v1.ID, CreatedAt: now.Add(-2 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator}).
|
||||
Succeeded(dbfake.WithJobCompletedAt(now.Add(-6 * dayDuration))).
|
||||
Do()
|
||||
_ = dbfake.WorkspaceBuild(t, db, w1).
|
||||
Pubsub(ps).
|
||||
Seed(database.WorkspaceBuild{BuildNumber: 2, TemplateVersionID: t1v1.ID, CreatedAt: now.Add(-1 * dayDuration), Transition: database.WorkspaceTransitionStart, Reason: database.BuildReasonInitiator}).
|
||||
Succeeded(dbfake.WithJobCompletedAt(now.Add(-5 * dayDuration))).
|
||||
Do()
|
||||
|
||||
// When
|
||||
notifEnq.Clear()
|
||||
|
||||
@@ -50,7 +50,7 @@ func ListApps(db database.Store, accessURL *url.URL) http.HandlerFunc {
|
||||
return
|
||||
}
|
||||
|
||||
var sdkApps []codersdk.OAuth2ProviderApp
|
||||
sdkApps := make([]codersdk.OAuth2ProviderApp, 0, len(userApps))
|
||||
for _, app := range userApps {
|
||||
sdkApps = append(sdkApps, db2sdk.OAuth2ProviderApp(accessURL, app.OAuth2ProviderApp))
|
||||
}
|
||||
|
||||
@@ -512,13 +512,15 @@ func (s *server) acquireProtoJob(ctx context.Context, job database.ProvisionerJo
|
||||
|
||||
// Fetch the file id of the cached module files if it exists.
|
||||
versionModulesFile := ""
|
||||
tfvals, err := s.Database.GetTemplateVersionTerraformValues(ctx, templateVersion.ID)
|
||||
if err != nil && !xerrors.Is(err, sql.ErrNoRows) {
|
||||
// Older templates (before dynamic parameters) will not have cached module files.
|
||||
return nil, failJob(fmt.Sprintf("get template version terraform values: %s", err))
|
||||
}
|
||||
if err == nil && tfvals.CachedModuleFiles.Valid {
|
||||
versionModulesFile = tfvals.CachedModuleFiles.UUID.String()
|
||||
if !template.DisableModuleCache {
|
||||
tfvals, err := s.Database.GetTemplateVersionTerraformValues(ctx, templateVersion.ID)
|
||||
if err != nil && !xerrors.Is(err, sql.ErrNoRows) {
|
||||
// Older templates (before dynamic parameters) will not have cached module files.
|
||||
return nil, failJob(fmt.Sprintf("get template version terraform values: %s", err))
|
||||
}
|
||||
if err == nil && tfvals.CachedModuleFiles.Valid {
|
||||
versionModulesFile = tfvals.CachedModuleFiles.UUID.String()
|
||||
}
|
||||
}
|
||||
|
||||
var ownerSSHPublicKey, ownerSSHPrivateKey string
|
||||
|
||||
@@ -876,17 +876,20 @@ func (r *remoteReporter) collectBoundaryUsageSummary(ctx context.Context) (*Boun
|
||||
return nil, xerrors.Errorf("insert boundary usage telemetry lock (period_ending_at=%q): %w", periodEndingAt, err)
|
||||
}
|
||||
|
||||
summary, err := r.options.Database.GetBoundaryUsageSummary(boundaryCtx, maxStaleness.Milliseconds())
|
||||
var summary database.GetAndResetBoundaryUsageSummaryRow
|
||||
err = r.options.Database.InTx(func(tx database.Store) error {
|
||||
// The advisory lock use here ensures a clean transition to the next snapshot by
|
||||
// preventing replicas from upserting row(s) at the same time as we aggregate and
|
||||
// delete all rows here.
|
||||
var txErr error
|
||||
if txErr = tx.AcquireLock(boundaryCtx, database.LockIDBoundaryUsageStats); txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
summary, txErr = tx.GetAndResetBoundaryUsageSummary(boundaryCtx, maxStaleness.Milliseconds())
|
||||
return txErr
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get boundary usage summary: %w", err)
|
||||
}
|
||||
|
||||
// Reset stats after capturing the summary. This deletes all rows so each
|
||||
// replica will detect a new period on their next flush. Note: there is a
|
||||
// known race condition here that may result in a small telemetry inaccuracy
|
||||
// with multiple replicas (https://github.com/coder/coder/issues/21770).
|
||||
if err := r.options.Database.ResetBoundaryUsageStats(boundaryCtx); err != nil {
|
||||
return nil, xerrors.Errorf("reset boundary usage stats: %w", err)
|
||||
return nil, xerrors.Errorf("get and reset boundary usage summary: %w", err)
|
||||
}
|
||||
|
||||
return &BoundaryUsageSummary{
|
||||
|
||||
@@ -21,7 +21,6 @@ import (
|
||||
"github.com/coder/coder/v2/buildinfo"
|
||||
"github.com/coder/coder/v2/coderd/boundaryusage"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtestutil"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
@@ -942,13 +941,6 @@ func TestTelemetry_BoundaryUsageSummary(t *testing.T) {
|
||||
err = tracker2.FlushToDB(ctx, db, replica2ID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify both replicas' data is in the database.
|
||||
boundaryCtx := dbauthz.AsBoundaryUsageTracker(ctx)
|
||||
const maxStalenessMs = 60000
|
||||
summary, err := db.GetBoundaryUsageSummary(boundaryCtx, maxStalenessMs)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(10+20), summary.AllowedRequests)
|
||||
|
||||
clock := quartz.NewMock(t)
|
||||
clock.Set(dbtime.Now())
|
||||
|
||||
|
||||
@@ -775,6 +775,10 @@ func (api *API) patchTemplateMeta(rw http.ResponseWriter, r *http.Request) {
|
||||
if req.UseClassicParameterFlow != nil {
|
||||
classicTemplateFlow = *req.UseClassicParameterFlow
|
||||
}
|
||||
disableModuleCache := template.DisableModuleCache
|
||||
if req.DisableModuleCache != nil {
|
||||
disableModuleCache = *req.DisableModuleCache
|
||||
}
|
||||
|
||||
displayName := ptr.NilToDefault(req.DisplayName, template.DisplayName)
|
||||
description := ptr.NilToDefault(req.Description, template.Description)
|
||||
@@ -800,6 +804,7 @@ func (api *API) patchTemplateMeta(rw http.ResponseWriter, r *http.Request) {
|
||||
req.RequireActiveVersion == template.RequireActiveVersion &&
|
||||
(deprecationMessage == template.Deprecated) &&
|
||||
(classicTemplateFlow == template.UseClassicParameterFlow) &&
|
||||
(disableModuleCache == template.DisableModuleCache) &&
|
||||
maxPortShareLevel == template.MaxPortSharingLevel &&
|
||||
corsBehavior == template.CorsBehavior {
|
||||
return nil
|
||||
@@ -844,6 +849,7 @@ func (api *API) patchTemplateMeta(rw http.ResponseWriter, r *http.Request) {
|
||||
MaxPortSharingLevel: maxPortShareLevel,
|
||||
UseClassicParameterFlow: classicTemplateFlow,
|
||||
CorsBehavior: corsBehavior,
|
||||
DisableModuleCache: disableModuleCache,
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("update template metadata: %w", err)
|
||||
@@ -1128,6 +1134,7 @@ func (api *API) convertTemplate(
|
||||
MaxPortShareLevel: maxPortShareLevel,
|
||||
UseClassicParameterFlow: template.UseClassicParameterFlow,
|
||||
CORSBehavior: codersdk.CORSBehavior(template.CorsBehavior),
|
||||
DisableModuleCache: template.DisableModuleCache,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1616,6 +1616,39 @@ func TestPatchTemplateMeta(t *testing.T) {
|
||||
assert.False(t, updated.UseClassicParameterFlow, "expected false")
|
||||
})
|
||||
|
||||
t.Run("DisableModuleCache", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client := coderdtest.New(t, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
require.False(t, template.DisableModuleCache, "default is false")
|
||||
|
||||
req := codersdk.UpdateTemplateMeta{
|
||||
DisableModuleCache: ptr.Ref(true),
|
||||
}
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
// set to true
|
||||
updated, err := client.UpdateTemplateMeta(ctx, template.ID, req)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, updated.DisableModuleCache, "expected true")
|
||||
|
||||
// noop - should stay true when not specified
|
||||
req.DisableModuleCache = nil
|
||||
updated, err = client.UpdateTemplateMeta(ctx, template.ID, req)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, updated.DisableModuleCache, "expected true")
|
||||
|
||||
// back to false
|
||||
req.DisableModuleCache = ptr.Ref(false)
|
||||
updated, err = client.UpdateTemplateMeta(ctx, template.ID, req)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, updated.DisableModuleCache, "expected false")
|
||||
})
|
||||
|
||||
t.Run("SupportEmptyOrDefaultFields", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
||||
@@ -158,6 +158,7 @@ func (api *API) workspaceAgentRPC(rw http.ResponseWriter, r *http.Request) {
|
||||
DerpMapUpdateFrequency: api.Options.DERPMapUpdateFrequency,
|
||||
ExternalAuthConfigs: api.ExternalAuthConfigs,
|
||||
Experiments: api.Experiments,
|
||||
LifecycleMetrics: api.lifecycleMetrics,
|
||||
|
||||
// Optional:
|
||||
UpdateAgentMetricsFn: api.UpdateAgentMetrics,
|
||||
|
||||
@@ -384,7 +384,7 @@ func (api *API) postWorkspaceBuildsInternal(
|
||||
Experiments(api.Experiments).
|
||||
TemplateVersionPresetID(createBuild.TemplateVersionPresetID)
|
||||
|
||||
if transition == database.WorkspaceTransitionStart && createBuild.Reason != "" {
|
||||
if (transition == database.WorkspaceTransitionStart || transition == database.WorkspaceTransitionStop) && createBuild.Reason != "" {
|
||||
builder = builder.Reason(database.BuildReason(createBuild.Reason))
|
||||
}
|
||||
|
||||
|
||||
@@ -425,11 +425,20 @@ func DevcontainerFromProto(pdc *proto.WorkspaceAgentDevcontainer) (codersdk.Work
|
||||
if err != nil {
|
||||
return codersdk.WorkspaceAgentDevcontainer{}, xerrors.Errorf("parse id: %w", err)
|
||||
}
|
||||
var subagentID uuid.NullUUID
|
||||
if pdc.SubagentId != nil {
|
||||
subagentID.Valid = true
|
||||
subagentID.UUID, err = uuid.FromBytes(pdc.SubagentId)
|
||||
if err != nil {
|
||||
return codersdk.WorkspaceAgentDevcontainer{}, xerrors.Errorf("parse subagent id: %w", err)
|
||||
}
|
||||
}
|
||||
return codersdk.WorkspaceAgentDevcontainer{
|
||||
ID: id,
|
||||
Name: pdc.Name,
|
||||
WorkspaceFolder: pdc.WorkspaceFolder,
|
||||
ConfigPath: pdc.ConfigPath,
|
||||
SubagentID: subagentID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -442,10 +451,16 @@ func ProtoFromDevcontainers(dcs []codersdk.WorkspaceAgentDevcontainer) []*proto.
|
||||
}
|
||||
|
||||
func ProtoFromDevcontainer(dc codersdk.WorkspaceAgentDevcontainer) *proto.WorkspaceAgentDevcontainer {
|
||||
var subagentID []byte
|
||||
if dc.SubagentID.Valid {
|
||||
subagentID = dc.SubagentID.UUID[:]
|
||||
}
|
||||
|
||||
return &proto.WorkspaceAgentDevcontainer{
|
||||
Id: dc.ID[:],
|
||||
Name: dc.Name,
|
||||
WorkspaceFolder: dc.WorkspaceFolder,
|
||||
ConfigPath: dc.ConfigPath,
|
||||
SubagentId: subagentID,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -136,6 +136,7 @@ func TestManifest(t *testing.T) {
|
||||
ID: uuid.New(),
|
||||
WorkspaceFolder: "/home/coder/coder",
|
||||
ConfigPath: "/home/coder/coder/.devcontainer/devcontainer.json",
|
||||
SubagentID: uuid.NullUUID{Valid: true, UUID: uuid.New()},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -329,6 +329,31 @@ func (c *Client) UpdateTaskInput(ctx context.Context, user string, id uuid.UUID,
|
||||
return nil
|
||||
}
|
||||
|
||||
// PauseTaskResponse represents the response from pausing a task.
|
||||
type PauseTaskResponse struct {
|
||||
WorkspaceBuild *WorkspaceBuild `json:"workspace_build"`
|
||||
}
|
||||
|
||||
// PauseTask pauses a task by stopping its workspace.
|
||||
// Experimental: uses the /api/experimental endpoint.
|
||||
func (c *Client) PauseTask(ctx context.Context, user string, id uuid.UUID) (PauseTaskResponse, error) {
|
||||
res, err := c.Request(ctx, http.MethodPost, fmt.Sprintf("/api/experimental/tasks/%s/%s/pause", user, id.String()), nil)
|
||||
if err != nil {
|
||||
return PauseTaskResponse{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusAccepted {
|
||||
return PauseTaskResponse{}, ReadBodyAsError(res)
|
||||
}
|
||||
|
||||
var resp PauseTaskResponse
|
||||
if err := json.NewDecoder(res.Body).Decode(&resp); err != nil {
|
||||
return PauseTaskResponse{}, err
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// TaskLogType indicates the source of a task log entry.
|
||||
type TaskLogType string
|
||||
|
||||
|
||||
+50
-48
@@ -1431,7 +1431,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
}
|
||||
emailHello := serpent.Option{
|
||||
Name: "Email: Hello",
|
||||
Description: "The hostname identifying the SMTP server.",
|
||||
Description: "The hostname identifying this client to the SMTP server.",
|
||||
Flag: "email-hello",
|
||||
Env: "CODER_EMAIL_HELLO",
|
||||
Default: "localhost",
|
||||
@@ -1523,7 +1523,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
}
|
||||
emailTLSCertFile := serpent.Option{
|
||||
Name: "Email TLS: Certificate File",
|
||||
Description: "Certificate file to use.",
|
||||
Description: "Client certificate file for mutual TLS authentication.",
|
||||
Flag: "email-tls-cert-file",
|
||||
Env: "CODER_EMAIL_TLS_CERTFILE",
|
||||
Value: &c.Notifications.SMTP.TLS.CertFile,
|
||||
@@ -1532,7 +1532,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
}
|
||||
emailTLSCertKeyFile := serpent.Option{
|
||||
Name: "Email TLS: Certificate Key File",
|
||||
Description: "Certificate key file to use.",
|
||||
Description: "Private key file for the client certificate.",
|
||||
Flag: "email-tls-cert-key-file",
|
||||
Env: "CODER_EMAIL_TLS_CERTKEYFILE",
|
||||
Value: &c.Notifications.SMTP.TLS.KeyFile,
|
||||
@@ -1551,7 +1551,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
}
|
||||
workspaceHostnameSuffix := serpent.Option{
|
||||
Name: "Workspace Hostname Suffix",
|
||||
Description: "Workspace hostnames use this suffix in SSH config and Coder Connect on Coder Desktop. By default it is coder, resulting in names like myworkspace.coder.",
|
||||
Description: "Workspace hostnames use this suffix for SSH connections and Coder Connect. By default it is coder, resulting in hostnames like agent.workspace.owner.coder.",
|
||||
Flag: "workspace-hostname-suffix",
|
||||
Env: "CODER_WORKSPACE_HOSTNAME_SUFFIX",
|
||||
YAML: "workspaceHostnameSuffix",
|
||||
@@ -1680,7 +1680,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
},
|
||||
{
|
||||
Name: "TLS Client CA Files",
|
||||
Description: "PEM-encoded Certificate Authority file used for checking the authenticity of client.",
|
||||
Description: "PEM-encoded Certificate Authority file used for checking the authenticity of the client.",
|
||||
Flag: "tls-client-ca-file",
|
||||
Env: "CODER_TLS_CLIENT_CA_FILE",
|
||||
Value: &c.TLS.ClientCAFile,
|
||||
@@ -1742,7 +1742,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
},
|
||||
{
|
||||
Name: "TLS Ciphers",
|
||||
Description: "Specify specific TLS ciphers that allowed to be used. See https://github.com/golang/go/blob/master/src/crypto/tls/cipher_suites.go#L53-L75.",
|
||||
Description: "Specify specific TLS ciphers that are allowed to be used. See https://github.com/golang/go/blob/master/src/crypto/tls/cipher_suites.go#L53-L75.",
|
||||
Flag: "tls-ciphers",
|
||||
Env: "CODER_TLS_CIPHERS",
|
||||
Default: "",
|
||||
@@ -1800,7 +1800,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
},
|
||||
{
|
||||
Name: "DERP Server Region Name",
|
||||
Description: "Region name that for the embedded DERP server.",
|
||||
Description: "Region name to use for the embedded DERP server.",
|
||||
Flag: "derp-server-region-name",
|
||||
Env: "CODER_DERP_SERVER_REGION_NAME",
|
||||
Default: "Coder Embedded Relay",
|
||||
@@ -1811,7 +1811,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
},
|
||||
{
|
||||
Name: "DERP Server STUN Addresses",
|
||||
Description: "Addresses for STUN servers to establish P2P connections. It's recommended to have at least two STUN servers to give users the best chance of connecting P2P to workspaces. Each STUN server will get it's own DERP region, with region IDs starting at `--derp-server-region-id + 1`. Use special value 'disable' to turn off STUN completely.",
|
||||
Description: "Addresses for STUN servers to establish P2P connections. It's recommended to have at least two STUN servers to give users the best chance of connecting P2P to workspaces. Each STUN server will get its own DERP region, with region IDs starting at `--derp-server-region-id + 1`. Use special value 'disable' to turn off STUN completely.",
|
||||
Flag: "derp-server-stun-addresses",
|
||||
Env: "CODER_DERP_SERVER_STUN_ADDRESSES",
|
||||
Default: "stun.l.google.com:19302,stun1.l.google.com:19302,stun2.l.google.com:19302,stun3.l.google.com:19302,stun4.l.google.com:19302",
|
||||
@@ -1833,7 +1833,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
},
|
||||
{
|
||||
Name: "Block Direct Connections",
|
||||
Description: "Block peer-to-peer (aka. direct) workspace connections. All workspace connections from the CLI will be proxied through Coder (or custom configured DERP servers) and will never be peer-to-peer when enabled. Workspaces may still reach out to STUN servers to get their address until they are restarted after this change has been made, but new connections will still be proxied regardless.",
|
||||
Description: "Block peer-to-peer (aka. direct) workspace connections. All workspace connections from the CLI will be proxied through Coder (or custom configured DERP servers) and will never be peer-to-peer when enabled. Workspace agents may still reach out to STUN servers to discover their address until they are restarted, but all new connections will be proxied regardless.",
|
||||
// This cannot be called `disable-direct-connections` because that's
|
||||
// already a global CLI flag for CLI connections. This is a
|
||||
// deployment-wide flag.
|
||||
@@ -1884,7 +1884,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
// Prometheus settings
|
||||
{
|
||||
Name: "Prometheus Enable",
|
||||
Description: "Serve prometheus metrics on the address defined by prometheus address.",
|
||||
Description: "Serve Prometheus metrics on the address defined by prometheus address.",
|
||||
Flag: "prometheus-enable",
|
||||
Env: "CODER_PROMETHEUS_ENABLE",
|
||||
Value: &c.Prometheus.Enable,
|
||||
@@ -1894,7 +1894,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
},
|
||||
{
|
||||
Name: "Prometheus Address",
|
||||
Description: "The bind address to serve prometheus metrics.",
|
||||
Description: "The bind address to serve Prometheus metrics.",
|
||||
Flag: "prometheus-address",
|
||||
Env: "CODER_PROMETHEUS_ADDRESS",
|
||||
Default: "127.0.0.1:2112",
|
||||
@@ -1945,7 +1945,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
// Pprof settings
|
||||
{
|
||||
Name: "pprof Enable",
|
||||
Description: "Serve pprof metrics on the address defined by pprof address.",
|
||||
Description: "Serve pprof profiling endpoints on the address defined by pprof address.",
|
||||
Flag: "pprof-enable",
|
||||
Env: "CODER_PPROF_ENABLE",
|
||||
Value: &c.Pprof.Enable,
|
||||
@@ -2032,7 +2032,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
},
|
||||
{
|
||||
Name: "OAuth2 GitHub Allow Everyone",
|
||||
Description: "Allow all logins, setting this option means allowed orgs and teams must be empty.",
|
||||
Description: "Allow all GitHub users to authenticate. When enabled, allowed orgs and teams must be empty.",
|
||||
Flag: "oauth2-github-allow-everyone",
|
||||
Env: "CODER_OAUTH2_GITHUB_ALLOW_EVERYONE",
|
||||
Value: &c.OAuth2.Github.AllowEveryone,
|
||||
@@ -2079,8 +2079,8 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
},
|
||||
{
|
||||
Name: "OIDC Client Key File",
|
||||
Description: "Pem encoded RSA private key to use for oauth2 PKI/JWT authorization. " +
|
||||
"This can be used instead of oidc-client-secret if your IDP supports it.",
|
||||
Description: "PEM encoded RSA private key to use for OAuth2 PKI/JWT authorization. " +
|
||||
"This can be used instead of oidc-client-secret if your IdP supports it.",
|
||||
Flag: "oidc-client-key-file",
|
||||
Env: "CODER_OIDC_CLIENT_KEY_FILE",
|
||||
YAML: "oidcClientKeyFile",
|
||||
@@ -2089,8 +2089,8 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
},
|
||||
{
|
||||
Name: "OIDC Client Cert File",
|
||||
Description: "Pem encoded certificate file to use for oauth2 PKI/JWT authorization. " +
|
||||
"The public certificate that accompanies oidc-client-key-file. A standard x509 certificate is expected.",
|
||||
Description: "PEM encoded certificate file to use for OAuth2 PKI/JWT authorization. " +
|
||||
"The public certificate that accompanies oidc-client-key-file. A standard X.509 certificate is expected.",
|
||||
Flag: "oidc-client-cert-file",
|
||||
Env: "CODER_OIDC_CLIENT_CERT_FILE",
|
||||
YAML: "oidcClientCertFile",
|
||||
@@ -2242,7 +2242,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
},
|
||||
{
|
||||
Name: "OIDC Group Field",
|
||||
Description: "This field must be set if using the group sync feature and the scope name is not 'groups'. Set to the claim to be used for groups.",
|
||||
Description: "OIDC claim field to use as the user's groups. This field must be set if using the group sync feature and the scope name is not 'groups'.",
|
||||
Flag: "oidc-group-field",
|
||||
Env: "CODER_OIDC_GROUP_FIELD",
|
||||
// This value is intentionally blank. If this is empty, then OIDC group
|
||||
@@ -2257,7 +2257,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
},
|
||||
{
|
||||
Name: "OIDC Group Mapping",
|
||||
Description: "A map of OIDC group IDs and the group in Coder it should map to. This is useful for when OIDC providers only return group IDs.",
|
||||
Description: "A map of OIDC group IDs and the groups in Coder they should map to. This is useful when OIDC providers only return group IDs.",
|
||||
Flag: "oidc-group-mapping",
|
||||
Env: "CODER_OIDC_GROUP_MAPPING",
|
||||
Default: "{}",
|
||||
@@ -2277,7 +2277,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
},
|
||||
{
|
||||
Name: "OIDC Regex Group Filter",
|
||||
Description: "If provided any group name not matching the regex is ignored. This allows for filtering out groups that are not needed. This filter is applied after the group mapping.",
|
||||
Description: "If provided, any group name not matching the regex is ignored. This allows filtering out groups that are not needed. This filter is applied after the OIDC Group Mapping step.",
|
||||
Flag: "oidc-group-regex-filter",
|
||||
Env: "CODER_OIDC_GROUP_REGEX_FILTER",
|
||||
Default: ".*",
|
||||
@@ -2287,7 +2287,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
},
|
||||
{
|
||||
Name: "OIDC Allowed Groups",
|
||||
Description: "If provided any group name not in the list will not be allowed to authenticate. This allows for restricting access to a specific set of groups. This filter is applied after the group mapping and before the regex filter.",
|
||||
Description: "If provided, only users with at least one group in this list will be allowed to authenticate. This restricts access to a specific set of groups. This check is applied before any group mapping or filtering.",
|
||||
Flag: "oidc-allowed-groups",
|
||||
Env: "CODER_OIDC_ALLOWED_GROUPS",
|
||||
Default: "",
|
||||
@@ -2309,7 +2309,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
},
|
||||
{
|
||||
Name: "OIDC User Role Mapping",
|
||||
Description: "A map of the OIDC passed in user roles and the groups in Coder it should map to. This is useful if the group names do not match. If mapped to the empty string, the role will ignored.",
|
||||
Description: "A map of OIDC user role names to Coder role names. This is useful if the role names do not match between systems. If mapped to the empty string, the role will be ignored.",
|
||||
Flag: "oidc-user-role-mapping",
|
||||
Env: "CODER_OIDC_USER_ROLE_MAPPING",
|
||||
Default: "{}",
|
||||
@@ -2319,7 +2319,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
},
|
||||
{
|
||||
Name: "OIDC User Role Default",
|
||||
Description: "If user role sync is enabled, these roles are always included for all authenticated users. The 'member' role is always assigned.",
|
||||
Description: "If user role sync is enabled, these roles are always included for all authenticated users in addition to synced roles. The 'member' role is always assigned regardless of this setting.",
|
||||
Flag: "oidc-user-role-default",
|
||||
Env: "CODER_OIDC_USER_ROLE_DEFAULT",
|
||||
Default: "",
|
||||
@@ -2339,7 +2339,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
},
|
||||
{
|
||||
Name: "OpenID connect icon URL",
|
||||
Description: "URL pointing to the icon to use on the OpenID Connect login button.",
|
||||
Description: "URL of the icon to use on the OpenID Connect login button.",
|
||||
Flag: "oidc-icon-url",
|
||||
Env: "CODER_OIDC_ICON_URL",
|
||||
Value: &c.OIDC.IconURL,
|
||||
@@ -2348,7 +2348,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
},
|
||||
{
|
||||
Name: "Signups disabled text",
|
||||
Description: "The custom text to show on the error page informing about disabled OIDC signups. Markdown format is supported.",
|
||||
Description: "Custom text to show on the error page when OIDC signups are disabled. Markdown format is supported.",
|
||||
Flag: "oidc-signups-disabled-text",
|
||||
Env: "CODER_OIDC_SIGNUPS_DISABLED_TEXT",
|
||||
Value: &c.OIDC.SignupsDisabledText,
|
||||
@@ -2380,6 +2380,8 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
Group: &deploymentGroupTelemetry,
|
||||
UseInstead: []serpent.Option{telemetryEnable},
|
||||
},
|
||||
// For local development testing, see scripts/telemetry-server which
|
||||
// provides a mock server that prints received telemetry as JSON.
|
||||
{
|
||||
Name: "Telemetry URL",
|
||||
Description: "URL to send telemetry.",
|
||||
@@ -2805,7 +2807,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
},
|
||||
{
|
||||
Name: "SameSite Auth Cookie",
|
||||
Description: "Controls the 'SameSite' property is set on browser session cookies.",
|
||||
Description: "Controls if the 'SameSite' property is set on browser session cookies.",
|
||||
Flag: "samesite-auth-cookie",
|
||||
Env: "CODER_SAMESITE_AUTH_COOKIE",
|
||||
// Do not allow "strict" same-site cookies. That would potentially break workspace apps.
|
||||
@@ -2998,7 +3000,7 @@ func (c *DeploymentValues) Options() serpent.OptionSet {
|
||||
{
|
||||
Name: "SSH Config Options",
|
||||
Description: "These SSH config options will override the default SSH config options. " +
|
||||
"Provide options in \"key=value\" or \"key value\" format separated by commas." +
|
||||
"Provide options in \"key=value\" or \"key value\" format separated by commas. " +
|
||||
"Using this incorrectly can break SSH to your deployment, use cautiously.",
|
||||
Flag: "ssh-config-options",
|
||||
Env: "CODER_SSH_CONFIG_OPTIONS",
|
||||
@@ -3039,7 +3041,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
{
|
||||
// Env handling is done in cli.ReadGitAuthFromEnvironment
|
||||
Name: "External Auth Providers",
|
||||
Description: "External Authentication providers.",
|
||||
Description: "Configure external authentication providers for Git and other services.",
|
||||
YAML: "externalAuthProviders",
|
||||
Flag: "external-auth-providers",
|
||||
Value: &c.ExternalAuthConfigs,
|
||||
@@ -3057,7 +3059,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
},
|
||||
{
|
||||
Name: "Proxy Health Check Interval",
|
||||
Description: "The interval in which coderd should be checking the status of workspace proxies.",
|
||||
Description: "The interval at which coderd checks the status of workspace proxies.",
|
||||
Flag: "proxy-health-interval",
|
||||
Env: "CODER_PROXY_HEALTH_INTERVAL",
|
||||
Default: (time.Minute).String(),
|
||||
@@ -3078,7 +3080,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
},
|
||||
{
|
||||
Name: "Allow Custom Quiet Hours",
|
||||
Description: "Allow users to set their own quiet hours schedule for workspaces to stop in (depending on template autostop requirement settings). If false, users can't change their quiet hours schedule and the site default is always used.",
|
||||
Description: "Allow users to set their own quiet hours schedule for when workspaces are stopped (depending on template autostop requirement settings). If false, users can't change their quiet hours schedule and the site default is always used.",
|
||||
Flag: "allow-custom-quiet-hours",
|
||||
Env: "CODER_ALLOW_CUSTOM_QUIET_HOURS",
|
||||
Default: "true",
|
||||
@@ -3190,7 +3192,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
},
|
||||
{
|
||||
Name: "Notifications: Email: Hello",
|
||||
Description: "The hostname identifying the SMTP server.",
|
||||
Description: "The hostname identifying this client to the SMTP server.",
|
||||
Flag: "notifications-email-hello",
|
||||
Env: "CODER_NOTIFICATIONS_EMAIL_HELLO",
|
||||
Value: &c.Notifications.SMTP.Hello,
|
||||
@@ -3353,7 +3355,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Name: "Notifications: Store Sync Interval",
|
||||
Description: "The notifications system buffers message updates in memory to ease pressure on the database. " +
|
||||
"This option controls how often it synchronizes its state with the database. The shorter this value the " +
|
||||
"lower the change of state inconsistency in a non-graceful shutdown - but it also increases load on the " +
|
||||
"lower the chance of state inconsistency in a non-graceful shutdown - but it also increases load on the " +
|
||||
"database. It is recommended to keep this option at its default value.",
|
||||
Flag: "notifications-store-sync-interval",
|
||||
Env: "CODER_NOTIFICATIONS_STORE_SYNC_INTERVAL",
|
||||
@@ -3368,7 +3370,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Name: "Notifications: Store Sync Buffer Size",
|
||||
Description: "The notifications system buffers message updates in memory to ease pressure on the database. " +
|
||||
"This option controls how many updates are kept in memory. The lower this value the " +
|
||||
"lower the change of state inconsistency in a non-graceful shutdown - but it also increases load on the " +
|
||||
"lower the chance of state inconsistency in a non-graceful shutdown - but it also increases load on the " +
|
||||
"database. It is recommended to keep this option at its default value.",
|
||||
Flag: "notifications-store-sync-buffer-size",
|
||||
Env: "CODER_NOTIFICATIONS_STORE_SYNC_BUFFER_SIZE",
|
||||
@@ -3432,7 +3434,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
},
|
||||
{
|
||||
Name: "Reconciliation Backoff Interval",
|
||||
Description: "Interval to increase reconciliation backoff by when prebuilds fail, after which a retry attempt is made.",
|
||||
Description: "Amount of time to add to the reconciliation backoff delay after each prebuild failure, before the next retry attempt is made.",
|
||||
Flag: "workspace-prebuilds-reconciliation-backoff-interval",
|
||||
Env: "CODER_WORKSPACE_PREBUILDS_RECONCILIATION_BACKOFF_INTERVAL",
|
||||
Value: &c.Prebuilds.ReconciliationBackoffInterval,
|
||||
@@ -3444,7 +3446,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
},
|
||||
{
|
||||
Name: "Reconciliation Backoff Lookback Period",
|
||||
Description: "Interval to look back to determine number of failed prebuilds, which influences backoff.",
|
||||
Description: "Time period to look back when counting failed prebuilds to calculate the backoff delay.",
|
||||
Flag: "workspace-prebuilds-reconciliation-backoff-lookback-period",
|
||||
Env: "CODER_WORKSPACE_PREBUILDS_RECONCILIATION_BACKOFF_LOOKBACK_PERIOD",
|
||||
Value: &c.Prebuilds.ReconciliationBackoffLookback,
|
||||
@@ -3456,7 +3458,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
},
|
||||
{
|
||||
Name: "Failure Hard Limit",
|
||||
Description: "Maximum number of consecutive failed prebuilds before a preset hits the hard limit; disabled when set to zero.",
|
||||
Description: "Maximum number of consecutive failed prebuilds before a preset is considered hard-limited and stops automatic prebuild creation. Disabled when set to zero.",
|
||||
Flag: "workspace-prebuilds-failure-hard-limit",
|
||||
Env: "CODER_WORKSPACE_PREBUILDS_FAILURE_HARD_LIMIT",
|
||||
Value: &c.Prebuilds.FailureHardLimit,
|
||||
@@ -3479,7 +3481,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
// AI Bridge Options
|
||||
{
|
||||
Name: "AI Bridge Enabled",
|
||||
Description: "Whether to start an in-memory aibridged instance.",
|
||||
Description: "Enable the embedded AI Bridge service to intercept and record AI provider requests.",
|
||||
Flag: "aibridge-enabled",
|
||||
Env: "CODER_AIBRIDGE_ENABLED",
|
||||
Value: &c.AI.BridgeConfig.Enabled,
|
||||
@@ -3499,7 +3501,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
},
|
||||
{
|
||||
Name: "AI Bridge OpenAI Key",
|
||||
Description: "The key to authenticate against the OpenAI API.",
|
||||
Description: "API key for authenticating with the OpenAI API.",
|
||||
Flag: "aibridge-openai-key",
|
||||
Env: "CODER_AIBRIDGE_OPENAI_KEY",
|
||||
Value: &c.AI.BridgeConfig.OpenAI.Key,
|
||||
@@ -3519,7 +3521,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
},
|
||||
{
|
||||
Name: "AI Bridge Anthropic Key",
|
||||
Description: "The key to authenticate against the Anthropic API.",
|
||||
Description: "API key for authenticating with the Anthropic API.",
|
||||
Flag: "aibridge-anthropic-key",
|
||||
Env: "CODER_AIBRIDGE_ANTHROPIC_KEY",
|
||||
Value: &c.AI.BridgeConfig.Anthropic.Key,
|
||||
@@ -3551,7 +3553,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
},
|
||||
{
|
||||
Name: "AI Bridge Bedrock Access Key",
|
||||
Description: "The access key to authenticate against the AWS Bedrock API.",
|
||||
Description: "AWS access key for authenticating with the AWS Bedrock API.",
|
||||
Flag: "aibridge-bedrock-access-key",
|
||||
Env: "CODER_AIBRIDGE_BEDROCK_ACCESS_KEY",
|
||||
Value: &c.AI.BridgeConfig.Bedrock.AccessKey,
|
||||
@@ -3561,7 +3563,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
},
|
||||
{
|
||||
Name: "AI Bridge Bedrock Access Key Secret",
|
||||
Description: "The access key secret to use with the access key to authenticate against the AWS Bedrock API.",
|
||||
Description: "AWS secret access key for authenticating with the AWS Bedrock API.",
|
||||
Flag: "aibridge-bedrock-access-key-secret",
|
||||
Env: "CODER_AIBRIDGE_BEDROCK_ACCESS_KEY_SECRET",
|
||||
Value: &c.AI.BridgeConfig.Bedrock.AccessKeySecret,
|
||||
@@ -3591,7 +3593,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
},
|
||||
{
|
||||
Name: "AI Bridge Inject Coder MCP tools",
|
||||
Description: "Whether to inject Coder's MCP tools into intercepted AI Bridge requests (requires the \"oauth2\" and \"mcp-server-http\" experiments to be enabled).",
|
||||
Description: "Enable injection of Coder's MCP tools into intercepted AI Bridge requests. Requires the 'oauth2' and 'mcp-server-http' experiments.",
|
||||
Flag: "aibridge-inject-coder-mcp-tools",
|
||||
Env: "CODER_AIBRIDGE_INJECT_CODER_MCP_TOOLS",
|
||||
Value: &c.AI.BridgeConfig.InjectCoderMCPTools,
|
||||
@@ -3601,7 +3603,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
},
|
||||
{
|
||||
Name: "AI Bridge Data Retention Duration",
|
||||
Description: "Length of time to retain data such as interceptions and all related records (token, prompt, tool use).",
|
||||
Description: "How long to retain AI Bridge data including interceptions, tokens, prompts, and tool usage records.",
|
||||
Flag: "aibridge-retention",
|
||||
Env: "CODER_AIBRIDGE_RETENTION",
|
||||
Value: &c.AI.BridgeConfig.Retention,
|
||||
@@ -3654,7 +3656,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
},
|
||||
{
|
||||
Name: "AI Bridge Circuit Breaker Enabled",
|
||||
Description: "Enable the circuit breaker to protect against cascading failures from upstream AI provider rate limits (429, 503, 529 overloaded).",
|
||||
Description: "Enable the circuit breaker to protect against cascading failures from upstream AI provider rate limits and overload errors (HTTP 429, 503, 529).",
|
||||
Flag: "aibridge-circuit-breaker-enabled",
|
||||
Env: "CODER_AIBRIDGE_CIRCUIT_BREAKER_ENABLED",
|
||||
Value: &c.AI.BridgeConfig.CircuitBreakerEnabled,
|
||||
@@ -3664,7 +3666,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
},
|
||||
{
|
||||
Name: "AI Bridge Circuit Breaker Failure Threshold",
|
||||
Description: "Number of consecutive failures that triggers the circuit breaker to open.",
|
||||
Description: "Number of consecutive failures that trigger the circuit breaker to open.",
|
||||
Flag: "aibridge-circuit-breaker-failure-threshold",
|
||||
Env: "CODER_AIBRIDGE_CIRCUIT_BREAKER_FAILURE_THRESHOLD",
|
||||
Value: serpent.Validate(&c.AI.BridgeConfig.CircuitBreakerFailureThreshold, func(value *serpent.Int64) error {
|
||||
@@ -3680,7 +3682,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
},
|
||||
{
|
||||
Name: "AI Bridge Circuit Breaker Interval",
|
||||
Description: "Cyclic period of the closed state for clearing internal failure counts.",
|
||||
Description: "Time window for counting failures before resetting the failure count in the closed state.",
|
||||
Flag: "aibridge-circuit-breaker-interval",
|
||||
Env: "CODER_AIBRIDGE_CIRCUIT_BREAKER_INTERVAL",
|
||||
Value: &c.AI.BridgeConfig.CircuitBreakerInterval,
|
||||
@@ -3828,7 +3830,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
},
|
||||
{
|
||||
Name: "Workspace Agent Logs Retention",
|
||||
Description: "How long workspace agent logs are retained. Logs from non-latest builds are deleted if the agent hasn't connected within this period. Logs from the latest build are always retained. Set to 0 to disable automatic deletion.",
|
||||
Description: "How long workspace agent logs are retained. Logs from non-latest builds are deleted if the agent hasn't connected within this period. Logs from the latest build for each workspace are always retained. Set to 0 to disable automatic deletion.",
|
||||
Flag: "workspace-agent-logs-retention",
|
||||
Env: "CODER_WORKSPACE_AGENT_LOGS_RETENTION",
|
||||
Value: &c.Retention.WorkspaceAgentLogs,
|
||||
@@ -3839,7 +3841,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
},
|
||||
{
|
||||
Name: "Enable Authorization Recordings",
|
||||
Description: "All api requests will have a header including all authorization calls made during the request. " +
|
||||
Description: "All API requests will have a header including all authorization calls made during the request. " +
|
||||
"This is used for debugging purposes and only available for dev builds.",
|
||||
Required: false,
|
||||
Flag: "enable-authz-recordings",
|
||||
|
||||
@@ -64,6 +64,10 @@ type Template struct {
|
||||
CORSBehavior CORSBehavior `json:"cors_behavior"`
|
||||
|
||||
UseClassicParameterFlow bool `json:"use_classic_parameter_flow"`
|
||||
|
||||
// DisableModuleCache disables the use of cached Terraform modules during
|
||||
// provisioning.
|
||||
DisableModuleCache bool `json:"disable_module_cache"`
|
||||
}
|
||||
|
||||
// WeekdaysToBitmap converts a list of weekdays to a bitmap in accordance with
|
||||
@@ -263,6 +267,9 @@ type UpdateTemplateMeta struct {
|
||||
// made the default.
|
||||
// An "opt-out" is present in case the new feature breaks some existing templates.
|
||||
UseClassicParameterFlow *bool `json:"use_classic_parameter_flow,omitempty"`
|
||||
// DisableModuleCache disables the using of cached Terraform modules during
|
||||
// provisioning. It is recommended not to disable this.
|
||||
DisableModuleCache *bool `json:"disable_module_cache,omitempty"`
|
||||
}
|
||||
|
||||
type TemplateExample struct {
|
||||
|
||||
@@ -440,10 +440,11 @@ func (s WorkspaceAgentDevcontainerStatus) Transitioning() bool {
|
||||
// WorkspaceAgentDevcontainer defines the location of a devcontainer
|
||||
// configuration in a workspace that is visible to the workspace agent.
|
||||
type WorkspaceAgentDevcontainer struct {
|
||||
ID uuid.UUID `json:"id" format:"uuid"`
|
||||
Name string `json:"name"`
|
||||
WorkspaceFolder string `json:"workspace_folder"`
|
||||
ConfigPath string `json:"config_path,omitempty"`
|
||||
ID uuid.UUID `json:"id" format:"uuid"`
|
||||
Name string `json:"name"`
|
||||
WorkspaceFolder string `json:"workspace_folder"`
|
||||
ConfigPath string `json:"config_path,omitempty"`
|
||||
SubagentID uuid.NullUUID `json:"subagent_id,omitempty" format:"uuid"`
|
||||
|
||||
// Additional runtime fields.
|
||||
Status WorkspaceAgentDevcontainerStatus `json:"status"`
|
||||
@@ -458,6 +459,7 @@ func (d WorkspaceAgentDevcontainer) Equals(other WorkspaceAgentDevcontainer) boo
|
||||
return d.ID == other.ID &&
|
||||
d.Name == other.Name &&
|
||||
d.WorkspaceFolder == other.WorkspaceFolder &&
|
||||
d.SubagentID == other.SubagentID &&
|
||||
d.Status == other.Status &&
|
||||
d.Dirty == other.Dirty &&
|
||||
(d.Container == nil && other.Container == nil ||
|
||||
@@ -467,6 +469,12 @@ func (d WorkspaceAgentDevcontainer) Equals(other WorkspaceAgentDevcontainer) boo
|
||||
d.Error == other.Error
|
||||
}
|
||||
|
||||
// IsTerraformDefined returns true if this devcontainer has resources defined
|
||||
// in Terraform.
|
||||
func (d WorkspaceAgentDevcontainer) IsTerraformDefined() bool {
|
||||
return d.SubagentID.Valid
|
||||
}
|
||||
|
||||
// WorkspaceAgentDevcontainerAgent represents the sub agent for a
|
||||
// devcontainer.
|
||||
type WorkspaceAgentDevcontainerAgent struct {
|
||||
|
||||
@@ -110,3 +110,142 @@ func TestWorkspaceAgentLogTextSpecialChars(t *testing.T) {
|
||||
result := log.Text("main", "startup_script")
|
||||
require.Equal(t, "2024-01-28T10:30:00Z [debug] [agent.main|startup_script] \033[31mError!\033[0m 🚀 Unicode: 日本語", result)
|
||||
}
|
||||
|
||||
func TestWorkspaceAgentDevcontainerEquals(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
agentID := uuid.New()
|
||||
|
||||
base := codersdk.WorkspaceAgentDevcontainer{
|
||||
ID: uuid.New(),
|
||||
Name: "test-dc",
|
||||
WorkspaceFolder: "/workspace",
|
||||
Status: codersdk.WorkspaceAgentDevcontainerStatusRunning,
|
||||
Dirty: false,
|
||||
Container: &codersdk.WorkspaceAgentContainer{ID: "container-123"},
|
||||
Agent: &codersdk.WorkspaceAgentDevcontainerAgent{ID: agentID, Name: "agent-1"},
|
||||
Error: "",
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
modify func(*codersdk.WorkspaceAgentDevcontainer)
|
||||
wantEqual bool
|
||||
}{
|
||||
{
|
||||
name: "identical",
|
||||
modify: func(d *codersdk.WorkspaceAgentDevcontainer) {},
|
||||
wantEqual: true,
|
||||
},
|
||||
{
|
||||
name: "different ID",
|
||||
modify: func(d *codersdk.WorkspaceAgentDevcontainer) { d.ID = uuid.New() },
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different Name",
|
||||
modify: func(d *codersdk.WorkspaceAgentDevcontainer) { d.Name = "other-dc" },
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different WorkspaceFolder",
|
||||
modify: func(d *codersdk.WorkspaceAgentDevcontainer) { d.WorkspaceFolder = "/other" },
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different SubagentID (one valid, one nil)",
|
||||
modify: func(d *codersdk.WorkspaceAgentDevcontainer) {
|
||||
d.SubagentID = uuid.NullUUID{Valid: true, UUID: uuid.New()}
|
||||
},
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different SubagentID UUIDs",
|
||||
modify: func(d *codersdk.WorkspaceAgentDevcontainer) {
|
||||
d.SubagentID = uuid.NullUUID{Valid: true, UUID: uuid.New()}
|
||||
},
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different Status",
|
||||
modify: func(d *codersdk.WorkspaceAgentDevcontainer) {
|
||||
d.Status = codersdk.WorkspaceAgentDevcontainerStatusStopped
|
||||
},
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different Dirty",
|
||||
modify: func(d *codersdk.WorkspaceAgentDevcontainer) { d.Dirty = true },
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different Container (one nil)",
|
||||
modify: func(d *codersdk.WorkspaceAgentDevcontainer) { d.Container = nil },
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different Container IDs",
|
||||
modify: func(d *codersdk.WorkspaceAgentDevcontainer) {
|
||||
d.Container = &codersdk.WorkspaceAgentContainer{ID: "different-container"}
|
||||
},
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different Agent (one nil)",
|
||||
modify: func(d *codersdk.WorkspaceAgentDevcontainer) { d.Agent = nil },
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different Agent values",
|
||||
modify: func(d *codersdk.WorkspaceAgentDevcontainer) {
|
||||
d.Agent = &codersdk.WorkspaceAgentDevcontainerAgent{ID: agentID, Name: "agent-2"}
|
||||
},
|
||||
wantEqual: false,
|
||||
},
|
||||
{
|
||||
name: "different Error",
|
||||
modify: func(d *codersdk.WorkspaceAgentDevcontainer) { d.Error = "some error" },
|
||||
wantEqual: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
modified := base
|
||||
tt.modify(&modified)
|
||||
require.Equal(t, tt.wantEqual, base.Equals(modified))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestWorkspaceAgentDevcontainerIsTerraformDefined(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("SubagentID Valid", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dc := codersdk.WorkspaceAgentDevcontainer{
|
||||
ID: uuid.New(),
|
||||
Name: "test-dc",
|
||||
WorkspaceFolder: "/workspace",
|
||||
SubagentID: uuid.NullUUID{Valid: true, UUID: uuid.New()},
|
||||
}
|
||||
|
||||
require.True(t, dc.IsTerraformDefined())
|
||||
})
|
||||
|
||||
t.Run("SubagentID Null", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dc := codersdk.WorkspaceAgentDevcontainer{
|
||||
ID: uuid.New(),
|
||||
Name: "test-dc",
|
||||
WorkspaceFolder: "/workspace",
|
||||
SubagentID: uuid.NullUUID{Valid: false},
|
||||
}
|
||||
|
||||
require.False(t, dc.IsTerraformDefined())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -109,6 +109,7 @@ const (
|
||||
CreateWorkspaceBuildReasonSSHConnection CreateWorkspaceBuildReason = "ssh_connection"
|
||||
CreateWorkspaceBuildReasonVSCodeConnection CreateWorkspaceBuildReason = "vscode_connection"
|
||||
CreateWorkspaceBuildReasonJetbrainsConnection CreateWorkspaceBuildReason = "jetbrains_connection"
|
||||
CreateWorkspaceBuildReasonTaskManualPause CreateWorkspaceBuildReason = "task_manual_pause"
|
||||
)
|
||||
|
||||
// CreateWorkspaceBuildRequest provides options to update the latest workspace build.
|
||||
@@ -129,7 +130,7 @@ type CreateWorkspaceBuildRequest struct {
|
||||
// TemplateVersionPresetID is the ID of the template version preset to use for the build.
|
||||
TemplateVersionPresetID uuid.UUID `json:"template_version_preset_id,omitempty" format:"uuid"`
|
||||
// Reason sets the reason for the workspace build.
|
||||
Reason CreateWorkspaceBuildReason `json:"reason,omitempty" validate:"omitempty,oneof=dashboard cli ssh_connection vscode_connection jetbrains_connection"`
|
||||
Reason CreateWorkspaceBuildReason `json:"reason,omitempty" validate:"omitempty,oneof=dashboard cli ssh_connection vscode_connection jetbrains_connection task_manual_pause"`
|
||||
}
|
||||
|
||||
type WorkspaceOptions struct {
|
||||
|
||||
@@ -119,9 +119,7 @@ this:
|
||||
- Run `./scripts/deploy-pr.sh`
|
||||
- Manually trigger the
|
||||
[`pr-deploy.yaml`](https://github.com/coder/coder/actions/workflows/pr-deploy.yaml)
|
||||
GitHub Action workflow:
|
||||
|
||||
<Image src="./images/deploy-pr-manually.png" alt="Deploy PR manually" height="348px" align="center" />
|
||||
GitHub Action workflow.
|
||||
|
||||
#### Available options
|
||||
|
||||
|
||||
@@ -220,16 +220,12 @@ screen-readers; a placeholder text value is not enough for all users.
|
||||
When possible, make sure that all image/graphic elements have accompanying text
|
||||
that describes the image. `<img />` elements should have an `alt` text value. In
|
||||
other situations, it might make sense to place invisible, descriptive text
|
||||
inside the component itself using MUI's `visuallyHidden` utility function.
|
||||
inside the component itself using Tailwind's `sr-only` class.
|
||||
|
||||
```tsx
|
||||
import { visuallyHidden } from "@mui/utils";
|
||||
|
||||
<Button>
|
||||
<GearIcon />
|
||||
<Box component="span" sx={visuallyHidden}>
|
||||
Settings
|
||||
</Box>
|
||||
<span className="sr-only">Settings</span>
|
||||
</Button>;
|
||||
```
|
||||
|
||||
|
||||
@@ -162,6 +162,7 @@ deployment. They will always be available from the agent.
|
||||
| `coderd_provisionerd_jobs_current` | gauge | The number of currently running provisioner jobs. | `provisioner` |
|
||||
| `coderd_provisionerd_num_daemons` | gauge | The number of provisioner daemons. | |
|
||||
| `coderd_provisionerd_workspace_build_timings_seconds` | histogram | The time taken for a workspace to build. | `status` `template_name` `template_version` `workspace_transition` |
|
||||
| `coderd_template_workspace_build_duration_seconds` | histogram | Duration from workspace build creation to agent ready, by template. | `is_prebuild` `organization_name` `status` `template_name` `transition` |
|
||||
| `coderd_workspace_builds_total` | counter | The number of workspaces started, updated, or deleted. | `action` `owner_email` `status` `template_name` `template_version` `workspace_name` |
|
||||
| `coderd_workspace_creation_duration_seconds` | histogram | Time to create a workspace by organization, template, preset, and type (regular or prebuild). | `organization_name` `preset_name` `template_name` `type` |
|
||||
| `coderd_workspace_creation_total` | counter | Total regular (non-prebuilt) workspace creations by organization, template, and preset. | `organization_name` `preset_name` `template_name` |
|
||||
@@ -211,6 +212,7 @@ The following metrics support native histograms:
|
||||
|
||||
* `coderd_workspace_creation_duration_seconds`
|
||||
* `coderd_prebuilt_workspace_claim_duration_seconds`
|
||||
* `coderd_template_coderd_template_workspace_build_duration_seconds`
|
||||
|
||||
Native histograms are an **experimental** Prometheus feature that removes the need to predefine bucket boundaries and allows higher-resolution buckets that adapt to deployment characteristics.
|
||||
Whether a metric is exposed as classic or native depends entirely on the Prometheus server configuration (see [Prometheus docs](https://prometheus.io/docs/specs/native_histograms/) for details):
|
||||
|
||||
@@ -29,6 +29,12 @@ user <-> Coder connections.
|
||||
Coder automatically enters HA mode when multiple instances simultaneously
|
||||
connect to the same Postgres endpoint.
|
||||
|
||||
> [!NOTE]
|
||||
> When upgrading HA deployments, database migrations may require special
|
||||
> handling to avoid lock contention. See
|
||||
> [Upgrading Best Practices](../../install/upgrade-best-practices.md) for
|
||||
> recommended procedures.
|
||||
|
||||
HA brings one configuration variable to set in each Coderd node:
|
||||
`CODER_DERP_SERVER_RELAY_URL`. The HA nodes use these URLs to communicate with
|
||||
each other. Inter-node communication is only required while using the embedded
|
||||
|
||||
@@ -13,32 +13,32 @@ We track the following resources:
|
||||
|
||||
<!-- Code generated by 'make docs/admin/security/audit-logs.md'. DO NOT EDIT -->
|
||||
|
||||
| <b>Resource</b> | | |
|
||||
|----------------------------------------------------------|----------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| APIKey<br><i>login, logout, register, create, delete</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>allow_list</td><td>false</td></tr><tr><td>created_at</td><td>true</td></tr><tr><td>expires_at</td><td>true</td></tr><tr><td>hashed_secret</td><td>false</td></tr><tr><td>id</td><td>false</td></tr><tr><td>ip_address</td><td>false</td></tr><tr><td>last_used</td><td>true</td></tr><tr><td>lifetime_seconds</td><td>false</td></tr><tr><td>login_type</td><td>false</td></tr><tr><td>scopes</td><td>false</td></tr><tr><td>token_name</td><td>false</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>user_id</td><td>true</td></tr></tbody></table> |
|
||||
| AuditOAuthConvertState<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>created_at</td><td>true</td></tr><tr><td>expires_at</td><td>true</td></tr><tr><td>from_login_type</td><td>true</td></tr><tr><td>to_login_type</td><td>true</td></tr><tr><td>user_id</td><td>true</td></tr></tbody></table> |
|
||||
| Group<br><i>create, write, delete</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>avatar_url</td><td>true</td></tr><tr><td>display_name</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>members</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>quota_allowance</td><td>true</td></tr><tr><td>source</td><td>false</td></tr></tbody></table> |
|
||||
| AuditableOrganizationMember<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>created_at</td><td>true</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>roles</td><td>true</td></tr><tr><td>updated_at</td><td>true</td></tr><tr><td>user_id</td><td>true</td></tr><tr><td>username</td><td>true</td></tr></tbody></table> |
|
||||
| CustomRole<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>created_at</td><td>false</td></tr><tr><td>display_name</td><td>true</td></tr><tr><td>id</td><td>false</td></tr><tr><td>is_system</td><td>false</td></tr><tr><td>member_permissions</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>org_permissions</td><td>true</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>site_permissions</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>user_permissions</td><td>true</td></tr></tbody></table> |
|
||||
| GitSSHKey<br><i>create</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>created_at</td><td>false</td></tr><tr><td>private_key</td><td>true</td></tr><tr><td>public_key</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>user_id</td><td>true</td></tr></tbody></table> |
|
||||
| GroupSyncSettings<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>auto_create_missing_groups</td><td>true</td></tr><tr><td>field</td><td>true</td></tr><tr><td>legacy_group_name_mapping</td><td>false</td></tr><tr><td>mapping</td><td>true</td></tr><tr><td>regex_filter</td><td>true</td></tr></tbody></table> |
|
||||
| HealthSettings<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>dismissed_healthchecks</td><td>true</td></tr><tr><td>id</td><td>false</td></tr></tbody></table> |
|
||||
| License<br><i>create, delete</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>exp</td><td>true</td></tr><tr><td>id</td><td>false</td></tr><tr><td>jwt</td><td>false</td></tr><tr><td>uploaded_at</td><td>true</td></tr><tr><td>uuid</td><td>true</td></tr></tbody></table> |
|
||||
| NotificationTemplate<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>actions</td><td>true</td></tr><tr><td>body_template</td><td>true</td></tr><tr><td>enabled_by_default</td><td>true</td></tr><tr><td>group</td><td>true</td></tr><tr><td>id</td><td>false</td></tr><tr><td>kind</td><td>true</td></tr><tr><td>method</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>title_template</td><td>true</td></tr></tbody></table> |
|
||||
| NotificationsSettings<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>id</td><td>false</td></tr><tr><td>notifier_paused</td><td>true</td></tr></tbody></table> |
|
||||
| OAuth2ProviderApp<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>callback_url</td><td>true</td></tr><tr><td>client_id_issued_at</td><td>false</td></tr><tr><td>client_secret_expires_at</td><td>true</td></tr><tr><td>client_type</td><td>true</td></tr><tr><td>client_uri</td><td>true</td></tr><tr><td>contacts</td><td>true</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>dynamically_registered</td><td>true</td></tr><tr><td>grant_types</td><td>true</td></tr><tr><td>icon</td><td>true</td></tr><tr><td>id</td><td>false</td></tr><tr><td>jwks</td><td>true</td></tr><tr><td>jwks_uri</td><td>true</td></tr><tr><td>logo_uri</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>policy_uri</td><td>true</td></tr><tr><td>redirect_uris</td><td>true</td></tr><tr><td>registration_access_token</td><td>true</td></tr><tr><td>registration_client_uri</td><td>true</td></tr><tr><td>response_types</td><td>true</td></tr><tr><td>scope</td><td>true</td></tr><tr><td>software_id</td><td>true</td></tr><tr><td>software_version</td><td>true</td></tr><tr><td>token_endpoint_auth_method</td><td>true</td></tr><tr><td>tos_uri</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr></tbody></table> |
|
||||
| OAuth2ProviderAppSecret<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>app_id</td><td>false</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>display_secret</td><td>false</td></tr><tr><td>hashed_secret</td><td>false</td></tr><tr><td>id</td><td>false</td></tr><tr><td>last_used_at</td><td>false</td></tr><tr><td>secret_prefix</td><td>false</td></tr></tbody></table> |
|
||||
| Organization<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>created_at</td><td>false</td></tr><tr><td>deleted</td><td>true</td></tr><tr><td>description</td><td>true</td></tr><tr><td>display_name</td><td>true</td></tr><tr><td>icon</td><td>true</td></tr><tr><td>id</td><td>false</td></tr><tr><td>is_default</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>updated_at</td><td>true</td></tr><tr><td>workspace_sharing_disabled</td><td>true</td></tr></tbody></table> |
|
||||
| OrganizationSyncSettings<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>assign_default</td><td>true</td></tr><tr><td>field</td><td>true</td></tr><tr><td>mapping</td><td>true</td></tr></tbody></table> |
|
||||
| PrebuildsSettings<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>id</td><td>false</td></tr><tr><td>reconciliation_paused</td><td>true</td></tr></tbody></table> |
|
||||
| RoleSyncSettings<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>field</td><td>true</td></tr><tr><td>mapping</td><td>true</td></tr></tbody></table> |
|
||||
| TaskTable<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>created_at</td><td>false</td></tr><tr><td>deleted_at</td><td>false</td></tr><tr><td>display_name</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>owner_id</td><td>true</td></tr><tr><td>prompt</td><td>true</td></tr><tr><td>template_parameters</td><td>true</td></tr><tr><td>template_version_id</td><td>true</td></tr><tr><td>workspace_id</td><td>true</td></tr></tbody></table> |
|
||||
| Template<br><i>write, delete</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>active_version_id</td><td>true</td></tr><tr><td>activity_bump</td><td>true</td></tr><tr><td>allow_user_autostart</td><td>true</td></tr><tr><td>allow_user_autostop</td><td>true</td></tr><tr><td>allow_user_cancel_workspace_jobs</td><td>true</td></tr><tr><td>autostart_block_days_of_week</td><td>true</td></tr><tr><td>autostop_requirement_days_of_week</td><td>true</td></tr><tr><td>autostop_requirement_weeks</td><td>true</td></tr><tr><td>cors_behavior</td><td>true</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>created_by</td><td>true</td></tr><tr><td>created_by_avatar_url</td><td>false</td></tr><tr><td>created_by_name</td><td>false</td></tr><tr><td>created_by_username</td><td>false</td></tr><tr><td>default_ttl</td><td>true</td></tr><tr><td>deleted</td><td>false</td></tr><tr><td>deprecated</td><td>true</td></tr><tr><td>description</td><td>true</td></tr><tr><td>display_name</td><td>true</td></tr><tr><td>failure_ttl</td><td>true</td></tr><tr><td>group_acl</td><td>true</td></tr><tr><td>icon</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>max_port_sharing_level</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>organization_display_name</td><td>false</td></tr><tr><td>organization_icon</td><td>false</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>organization_name</td><td>false</td></tr><tr><td>provisioner</td><td>true</td></tr><tr><td>require_active_version</td><td>true</td></tr><tr><td>time_til_dormant</td><td>true</td></tr><tr><td>time_til_dormant_autodelete</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>use_classic_parameter_flow</td><td>true</td></tr><tr><td>user_acl</td><td>true</td></tr></tbody></table> |
|
||||
| TemplateVersion<br><i>create, write</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>archived</td><td>true</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>created_by</td><td>true</td></tr><tr><td>created_by_avatar_url</td><td>false</td></tr><tr><td>created_by_name</td><td>false</td></tr><tr><td>created_by_username</td><td>false</td></tr><tr><td>external_auth_providers</td><td>false</td></tr><tr><td>has_ai_task</td><td>false</td></tr><tr><td>has_external_agent</td><td>false</td></tr><tr><td>id</td><td>true</td></tr><tr><td>job_id</td><td>false</td></tr><tr><td>message</td><td>false</td></tr><tr><td>name</td><td>true</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>readme</td><td>true</td></tr><tr><td>source_example_id</td><td>false</td></tr><tr><td>template_id</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr></tbody></table> |
|
||||
| User<br><i>create, write, delete</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>avatar_url</td><td>false</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>deleted</td><td>true</td></tr><tr><td>email</td><td>true</td></tr><tr><td>github_com_user_id</td><td>false</td></tr><tr><td>hashed_one_time_passcode</td><td>false</td></tr><tr><td>hashed_password</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>is_system</td><td>true</td></tr><tr><td>last_seen_at</td><td>false</td></tr><tr><td>login_type</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>one_time_passcode_expires_at</td><td>true</td></tr><tr><td>quiet_hours_schedule</td><td>true</td></tr><tr><td>rbac_roles</td><td>true</td></tr><tr><td>status</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>username</td><td>true</td></tr></tbody></table> |
|
||||
| WorkspaceBuild<br><i>start, stop</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>build_number</td><td>false</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>daily_cost</td><td>false</td></tr><tr><td>deadline</td><td>false</td></tr><tr><td>has_ai_task</td><td>false</td></tr><tr><td>has_external_agent</td><td>false</td></tr><tr><td>id</td><td>false</td></tr><tr><td>initiator_by_avatar_url</td><td>false</td></tr><tr><td>initiator_by_name</td><td>false</td></tr><tr><td>initiator_by_username</td><td>false</td></tr><tr><td>initiator_id</td><td>false</td></tr><tr><td>job_id</td><td>false</td></tr><tr><td>max_deadline</td><td>false</td></tr><tr><td>provisioner_state</td><td>false</td></tr><tr><td>reason</td><td>false</td></tr><tr><td>template_version_id</td><td>true</td></tr><tr><td>template_version_preset_id</td><td>false</td></tr><tr><td>transition</td><td>false</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>workspace_id</td><td>false</td></tr></tbody></table> |
|
||||
| WorkspaceProxy<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>created_at</td><td>true</td></tr><tr><td>deleted</td><td>false</td></tr><tr><td>derp_enabled</td><td>true</td></tr><tr><td>derp_only</td><td>true</td></tr><tr><td>display_name</td><td>true</td></tr><tr><td>icon</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>region_id</td><td>true</td></tr><tr><td>token_hashed_secret</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>url</td><td>true</td></tr><tr><td>version</td><td>true</td></tr><tr><td>wildcard_hostname</td><td>true</td></tr></tbody></table> |
|
||||
| WorkspaceTable<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>automatic_updates</td><td>true</td></tr><tr><td>autostart_schedule</td><td>true</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>deleted</td><td>false</td></tr><tr><td>deleting_at</td><td>true</td></tr><tr><td>dormant_at</td><td>true</td></tr><tr><td>favorite</td><td>true</td></tr><tr><td>group_acl</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>last_used_at</td><td>false</td></tr><tr><td>name</td><td>true</td></tr><tr><td>next_start_at</td><td>true</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>owner_id</td><td>true</td></tr><tr><td>template_id</td><td>true</td></tr><tr><td>ttl</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>user_acl</td><td>true</td></tr></tbody></table> |
|
||||
| <b>Resource</b> | | |
|
||||
|----------------------------------------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| APIKey<br><i>login, logout, register, create, delete</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>allow_list</td><td>false</td></tr><tr><td>created_at</td><td>true</td></tr><tr><td>expires_at</td><td>true</td></tr><tr><td>hashed_secret</td><td>false</td></tr><tr><td>id</td><td>false</td></tr><tr><td>ip_address</td><td>false</td></tr><tr><td>last_used</td><td>true</td></tr><tr><td>lifetime_seconds</td><td>false</td></tr><tr><td>login_type</td><td>false</td></tr><tr><td>scopes</td><td>false</td></tr><tr><td>token_name</td><td>false</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>user_id</td><td>true</td></tr></tbody></table> |
|
||||
| AuditOAuthConvertState<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>created_at</td><td>true</td></tr><tr><td>expires_at</td><td>true</td></tr><tr><td>from_login_type</td><td>true</td></tr><tr><td>to_login_type</td><td>true</td></tr><tr><td>user_id</td><td>true</td></tr></tbody></table> |
|
||||
| Group<br><i>create, write, delete</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>avatar_url</td><td>true</td></tr><tr><td>display_name</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>members</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>quota_allowance</td><td>true</td></tr><tr><td>source</td><td>false</td></tr></tbody></table> |
|
||||
| AuditableOrganizationMember<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>created_at</td><td>true</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>roles</td><td>true</td></tr><tr><td>updated_at</td><td>true</td></tr><tr><td>user_id</td><td>true</td></tr><tr><td>username</td><td>true</td></tr></tbody></table> |
|
||||
| CustomRole<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>created_at</td><td>false</td></tr><tr><td>display_name</td><td>true</td></tr><tr><td>id</td><td>false</td></tr><tr><td>is_system</td><td>false</td></tr><tr><td>member_permissions</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>org_permissions</td><td>true</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>site_permissions</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>user_permissions</td><td>true</td></tr></tbody></table> |
|
||||
| GitSSHKey<br><i>create</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>created_at</td><td>false</td></tr><tr><td>private_key</td><td>true</td></tr><tr><td>public_key</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>user_id</td><td>true</td></tr></tbody></table> |
|
||||
| GroupSyncSettings<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>auto_create_missing_groups</td><td>true</td></tr><tr><td>field</td><td>true</td></tr><tr><td>legacy_group_name_mapping</td><td>false</td></tr><tr><td>mapping</td><td>true</td></tr><tr><td>regex_filter</td><td>true</td></tr></tbody></table> |
|
||||
| HealthSettings<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>dismissed_healthchecks</td><td>true</td></tr><tr><td>id</td><td>false</td></tr></tbody></table> |
|
||||
| License<br><i>create, delete</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>exp</td><td>true</td></tr><tr><td>id</td><td>false</td></tr><tr><td>jwt</td><td>false</td></tr><tr><td>uploaded_at</td><td>true</td></tr><tr><td>uuid</td><td>true</td></tr></tbody></table> |
|
||||
| NotificationTemplate<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>actions</td><td>true</td></tr><tr><td>body_template</td><td>true</td></tr><tr><td>enabled_by_default</td><td>true</td></tr><tr><td>group</td><td>true</td></tr><tr><td>id</td><td>false</td></tr><tr><td>kind</td><td>true</td></tr><tr><td>method</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>title_template</td><td>true</td></tr></tbody></table> |
|
||||
| NotificationsSettings<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>id</td><td>false</td></tr><tr><td>notifier_paused</td><td>true</td></tr></tbody></table> |
|
||||
| OAuth2ProviderApp<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>callback_url</td><td>true</td></tr><tr><td>client_id_issued_at</td><td>false</td></tr><tr><td>client_secret_expires_at</td><td>true</td></tr><tr><td>client_type</td><td>true</td></tr><tr><td>client_uri</td><td>true</td></tr><tr><td>contacts</td><td>true</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>dynamically_registered</td><td>true</td></tr><tr><td>grant_types</td><td>true</td></tr><tr><td>icon</td><td>true</td></tr><tr><td>id</td><td>false</td></tr><tr><td>jwks</td><td>true</td></tr><tr><td>jwks_uri</td><td>true</td></tr><tr><td>logo_uri</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>policy_uri</td><td>true</td></tr><tr><td>redirect_uris</td><td>true</td></tr><tr><td>registration_access_token</td><td>true</td></tr><tr><td>registration_client_uri</td><td>true</td></tr><tr><td>response_types</td><td>true</td></tr><tr><td>scope</td><td>true</td></tr><tr><td>software_id</td><td>true</td></tr><tr><td>software_version</td><td>true</td></tr><tr><td>token_endpoint_auth_method</td><td>true</td></tr><tr><td>tos_uri</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr></tbody></table> |
|
||||
| OAuth2ProviderAppSecret<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>app_id</td><td>false</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>display_secret</td><td>false</td></tr><tr><td>hashed_secret</td><td>false</td></tr><tr><td>id</td><td>false</td></tr><tr><td>last_used_at</td><td>false</td></tr><tr><td>secret_prefix</td><td>false</td></tr></tbody></table> |
|
||||
| Organization<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>created_at</td><td>false</td></tr><tr><td>deleted</td><td>true</td></tr><tr><td>description</td><td>true</td></tr><tr><td>display_name</td><td>true</td></tr><tr><td>icon</td><td>true</td></tr><tr><td>id</td><td>false</td></tr><tr><td>is_default</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>updated_at</td><td>true</td></tr><tr><td>workspace_sharing_disabled</td><td>true</td></tr></tbody></table> |
|
||||
| OrganizationSyncSettings<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>assign_default</td><td>true</td></tr><tr><td>field</td><td>true</td></tr><tr><td>mapping</td><td>true</td></tr></tbody></table> |
|
||||
| PrebuildsSettings<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>id</td><td>false</td></tr><tr><td>reconciliation_paused</td><td>true</td></tr></tbody></table> |
|
||||
| RoleSyncSettings<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>field</td><td>true</td></tr><tr><td>mapping</td><td>true</td></tr></tbody></table> |
|
||||
| TaskTable<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>created_at</td><td>false</td></tr><tr><td>deleted_at</td><td>false</td></tr><tr><td>display_name</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>owner_id</td><td>true</td></tr><tr><td>prompt</td><td>true</td></tr><tr><td>template_parameters</td><td>true</td></tr><tr><td>template_version_id</td><td>true</td></tr><tr><td>workspace_id</td><td>true</td></tr></tbody></table> |
|
||||
| Template<br><i>write, delete</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>active_version_id</td><td>true</td></tr><tr><td>activity_bump</td><td>true</td></tr><tr><td>allow_user_autostart</td><td>true</td></tr><tr><td>allow_user_autostop</td><td>true</td></tr><tr><td>allow_user_cancel_workspace_jobs</td><td>true</td></tr><tr><td>autostart_block_days_of_week</td><td>true</td></tr><tr><td>autostop_requirement_days_of_week</td><td>true</td></tr><tr><td>autostop_requirement_weeks</td><td>true</td></tr><tr><td>cors_behavior</td><td>true</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>created_by</td><td>true</td></tr><tr><td>created_by_avatar_url</td><td>false</td></tr><tr><td>created_by_name</td><td>false</td></tr><tr><td>created_by_username</td><td>false</td></tr><tr><td>default_ttl</td><td>true</td></tr><tr><td>deleted</td><td>false</td></tr><tr><td>deprecated</td><td>true</td></tr><tr><td>description</td><td>true</td></tr><tr><td>disable_module_cache</td><td>true</td></tr><tr><td>display_name</td><td>true</td></tr><tr><td>failure_ttl</td><td>true</td></tr><tr><td>group_acl</td><td>true</td></tr><tr><td>icon</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>max_port_sharing_level</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>organization_display_name</td><td>false</td></tr><tr><td>organization_icon</td><td>false</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>organization_name</td><td>false</td></tr><tr><td>provisioner</td><td>true</td></tr><tr><td>require_active_version</td><td>true</td></tr><tr><td>time_til_dormant</td><td>true</td></tr><tr><td>time_til_dormant_autodelete</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>use_classic_parameter_flow</td><td>true</td></tr><tr><td>user_acl</td><td>true</td></tr></tbody></table> |
|
||||
| TemplateVersion<br><i>create, write</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>archived</td><td>true</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>created_by</td><td>true</td></tr><tr><td>created_by_avatar_url</td><td>false</td></tr><tr><td>created_by_name</td><td>false</td></tr><tr><td>created_by_username</td><td>false</td></tr><tr><td>external_auth_providers</td><td>false</td></tr><tr><td>has_ai_task</td><td>false</td></tr><tr><td>has_external_agent</td><td>false</td></tr><tr><td>id</td><td>true</td></tr><tr><td>job_id</td><td>false</td></tr><tr><td>message</td><td>false</td></tr><tr><td>name</td><td>true</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>readme</td><td>true</td></tr><tr><td>source_example_id</td><td>false</td></tr><tr><td>template_id</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr></tbody></table> |
|
||||
| User<br><i>create, write, delete</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>avatar_url</td><td>false</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>deleted</td><td>true</td></tr><tr><td>email</td><td>true</td></tr><tr><td>github_com_user_id</td><td>false</td></tr><tr><td>hashed_one_time_passcode</td><td>false</td></tr><tr><td>hashed_password</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>is_system</td><td>true</td></tr><tr><td>last_seen_at</td><td>false</td></tr><tr><td>login_type</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>one_time_passcode_expires_at</td><td>true</td></tr><tr><td>quiet_hours_schedule</td><td>true</td></tr><tr><td>rbac_roles</td><td>true</td></tr><tr><td>status</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>username</td><td>true</td></tr></tbody></table> |
|
||||
| WorkspaceBuild<br><i>start, stop</i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>build_number</td><td>false</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>daily_cost</td><td>false</td></tr><tr><td>deadline</td><td>false</td></tr><tr><td>has_ai_task</td><td>false</td></tr><tr><td>has_external_agent</td><td>false</td></tr><tr><td>id</td><td>false</td></tr><tr><td>initiator_by_avatar_url</td><td>false</td></tr><tr><td>initiator_by_name</td><td>false</td></tr><tr><td>initiator_by_username</td><td>false</td></tr><tr><td>initiator_id</td><td>false</td></tr><tr><td>job_id</td><td>false</td></tr><tr><td>max_deadline</td><td>false</td></tr><tr><td>provisioner_state</td><td>false</td></tr><tr><td>reason</td><td>false</td></tr><tr><td>template_version_id</td><td>true</td></tr><tr><td>template_version_preset_id</td><td>false</td></tr><tr><td>transition</td><td>false</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>workspace_id</td><td>false</td></tr></tbody></table> |
|
||||
| WorkspaceProxy<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>created_at</td><td>true</td></tr><tr><td>deleted</td><td>false</td></tr><tr><td>derp_enabled</td><td>true</td></tr><tr><td>derp_only</td><td>true</td></tr><tr><td>display_name</td><td>true</td></tr><tr><td>icon</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>name</td><td>true</td></tr><tr><td>region_id</td><td>true</td></tr><tr><td>token_hashed_secret</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>url</td><td>true</td></tr><tr><td>version</td><td>true</td></tr><tr><td>wildcard_hostname</td><td>true</td></tr></tbody></table> |
|
||||
| WorkspaceTable<br><i></i> | <table><thead><tr><th>Field</th><th>Tracked</th></tr></thead><tbody> | <tr><td>automatic_updates</td><td>true</td></tr><tr><td>autostart_schedule</td><td>true</td></tr><tr><td>created_at</td><td>false</td></tr><tr><td>deleted</td><td>false</td></tr><tr><td>deleting_at</td><td>true</td></tr><tr><td>dormant_at</td><td>true</td></tr><tr><td>favorite</td><td>true</td></tr><tr><td>group_acl</td><td>true</td></tr><tr><td>id</td><td>true</td></tr><tr><td>last_used_at</td><td>false</td></tr><tr><td>name</td><td>true</td></tr><tr><td>next_start_at</td><td>true</td></tr><tr><td>organization_id</td><td>false</td></tr><tr><td>owner_id</td><td>true</td></tr><tr><td>template_id</td><td>true</td></tr><tr><td>ttl</td><td>true</td></tr><tr><td>updated_at</td><td>false</td></tr><tr><td>user_acl</td><td>true</td></tr></tbody></table> |
|
||||
|
||||
<!-- End generated by 'make docs/admin/security/audit-logs.md'. -->
|
||||
|
||||
|
||||
@@ -72,7 +72,7 @@ to resolve modules via [Artifactory](https://jfrog.com/artifactory/).
|
||||
|
||||
```shell
|
||||
git clone https://github.com/coder/registry
|
||||
cd registry/coder/modules
|
||||
cd registry/registry/coder/modules
|
||||
jf tfc
|
||||
jf tf p --namespace="coder" --provider="coder" --tag="1.0.0"
|
||||
```
|
||||
|
||||
@@ -33,4 +33,3 @@ Nevertheless, clients with base URL overrides also work with the proxy, in case
|
||||
## Next steps
|
||||
|
||||
* [Set up AI Bridge Proxy](./setup.md) on your Coder deployment
|
||||
* [Troubleshoot](./setup.md) common issues
|
||||
|
||||
@@ -7,7 +7,7 @@ Once enabled, `coderd` runs the `aibridgeproxyd` in-memory and intercepts traffi
|
||||
|
||||
1. AI Bridge must be enabled and configured (requires a **Premium** license with the [AI Governance Add-On](../../ai-governance.md)). See [AI Bridge Setup](../setup.md) for further information.
1. AI Bridge Proxy must be [enabled](#proxy-configuration) using the server flag.
|
||||
1. A [CA certificate](#ca-certificate) must be configured for MITM interception.
|
||||
1. Clients must be configured to trust the CA certificate and use the proxy.
|
||||
1. [Clients](#client-configuration) must be configured to use the proxy and trust the CA certificate.
|
||||
|
||||
> [!WARNING]
|
||||
> AI Bridge Proxy should only be accessible within a trusted network and **must not** be directly exposed to the public internet.
|
||||
@@ -112,7 +112,7 @@ CODER_AIBRIDGE_PROXY_CERT_FILE=/path/to/ca.crt
|
||||
CODER_AIBRIDGE_PROXY_KEY_FILE=/path/to/ca.key
|
||||
```
|
||||
|
||||
### Organization-signed certificate
|
||||
### Corporate CA certificate
|
||||
|
||||
If your organization has an internal CA that clients already trust, you can have it issue an intermediate CA certificate for AI Bridge Proxy.
|
||||
This simplifies deployment since AI tools that already trust your organization's root CA will automatically trust certificates signed by the intermediate.
|
||||
@@ -145,10 +145,10 @@ For **self-signed certificates**, AI tools must be configured to trust the CA ce
|
||||
https://<coder-url>/api/v2/aibridge/proxy/ca-cert.pem
|
||||
```
|
||||
|
||||
For **organization-signed certificates**, if the systems where AI tools run already trust your organization's root CA, and the intermediate certificate chains correctly to that root, no additional certificate distribution is needed.
|
||||
For **corporate CA certificates**, if the systems where AI tools run already trust your organization's root CA, and the intermediate certificate chains correctly to that root, no additional certificate distribution is needed.
|
||||
Otherwise, AI tools must be configured to trust the intermediate CA certificate from the endpoint above.
|
||||
|
||||
How you configure AI tools to trust the certificate depends on the tool and operating system. See Client Configuration for details.
|
||||
How you configure AI tools to trust the certificate depends on the tool and operating system. See [Client Configuration](#client-configuration) for details.
|
||||
|
||||
## Upstream proxy
|
||||
|
||||
@@ -187,3 +187,89 @@ If the system already trusts the upstream proxy's CA certificate, [`CODER_AIBRID
|
||||
<!-- TODO(ssncferreira): Add Client Configuration section -->
|
||||
|
||||
<!-- TODO(ssncferreira): Add Troubleshooting section -->
|
||||
|
||||
## Client Configuration
|
||||
|
||||
To use AI Bridge Proxy, AI tools must be configured to:
|
||||
|
||||
1. Route traffic through the proxy
|
||||
1. Trust the proxy's CA certificate
|
||||
|
||||
### Configuring the proxy
|
||||
|
||||
The preferred approach is to configure the proxy directly in the AI tool's settings, as this avoids routing unnecessary traffic through the proxy.
|
||||
Consult the tool's documentation for specific instructions.
|
||||
|
||||
Alternatively, most tools support the standard proxy environment variables, though this is not guaranteed for all tools:
|
||||
|
||||
```shell
|
||||
export HTTP_PROXY="http://coder:${CODER_SESSION_TOKEN}@<proxy-host>:8888"
|
||||
export HTTPS_PROXY="http://coder:${CODER_SESSION_TOKEN}@<proxy-host>:8888"
|
||||
```
|
||||
|
||||
* `HTTP_PROXY`: Used for requests to `http://` URLs
|
||||
* `HTTPS_PROXY`: Used for requests to `https://` URLs (this is the one used for AI provider domains)
|
||||
|
||||
In order for AI tools that communicate with AI Bridge Proxy to authenticate with Coder via AI Bridge, the Coder session token needs to be passed in the proxy credentials as the password field.
|
||||
|
||||
### Trusting the CA certificate
|
||||
|
||||
The preferred approach is to configure the CA certificate directly in the AI tool's settings, as this limits the scope of the trusted certificate to that specific application.
|
||||
Consult the tool's documentation for specific instructions.
|
||||
|
||||
> [!NOTE]
|
||||
> If using a [corporate CA certificate](#corporate-ca-certificate) and the system already trusts your organization's root CA, no additional certificate configuration is required.
|
||||
|
||||
Download the certificate:
|
||||
|
||||
```shell
|
||||
curl -o coder-aibridge-proxy-ca.pem \
|
||||
-H "Coder-Session-Token: ${CODER_SESSION_TOKEN}" \
|
||||
https://<coder-url>/api/v2/aibridge/proxy/ca-cert.pem
|
||||
```
|
||||
|
||||
Replace `<coder-url>` with your Coder deployment URL.
|
||||
|
||||
#### Environment variables
|
||||
|
||||
Different AI tools use different runtimes, each with their own environment variable for CA certificates:
|
||||
|
||||
| Environment Variable | Runtime |
|
||||
|-----------------------|---------------------------|
|
||||
| `NODE_EXTRA_CA_CERTS` | Node.js |
|
||||
| `SSL_CERT_FILE` | OpenSSL, Python, curl |
|
||||
| `REQUESTS_CA_BUNDLE` | Python `requests` library |
|
||||
| `CURL_CA_BUNDLE` | curl |
|
||||
|
||||
Set the environment variables associated with the AI tool's runtime.
|
||||
If you're unsure which runtime the tool uses, or if you use multiple AI tools, the simplest approach is to set all of them:
|
||||
|
||||
```shell
|
||||
export NODE_EXTRA_CA_CERTS="/path/to/coder-aibridge-proxy-ca.pem"
|
||||
export SSL_CERT_FILE="/path/to/coder-aibridge-proxy-ca.pem"
|
||||
export REQUESTS_CA_BUNDLE="/path/to/coder-aibridge-proxy-ca.pem"
|
||||
export CURL_CA_BUNDLE="/path/to/coder-aibridge-proxy-ca.pem"
|
||||
```
|
||||
|
||||
#### System trust store
|
||||
|
||||
When tool-specific or environment variable configuration is not possible, you can add the certificate to the system trust store.
|
||||
This makes the certificate trusted by all applications on the system.
|
||||
|
||||
On Linux:
|
||||
|
||||
```shell
|
||||
sudo cp coder-aibridge-proxy-ca.pem /usr/local/share/ca-certificates/
|
||||
sudo update-ca-certificates
|
||||
```
|
||||
|
||||
For other operating systems, refer to the system's documentation for instructions on adding trusted certificates.
|
||||
|
||||
### Coder workspaces
|
||||
|
||||
For AI tools running inside Coder workspaces, template administrators can pre-configure the proxy settings and CA certificate in the workspace template.
|
||||
This provides a seamless experience where users don't need to configure anything manually.
|
||||
|
||||
<!-- TODO(ssncferreira): Add registry link for AI Bridge Proxy module for Coder workspaces: https://github.com/coder/internal/issues/1187 -->
|
||||
|
||||
For tool-specific configuration details, check the [client compatibility table](../clients/index.md#compatibility) for clients that require proxy-based integration.
|
||||
|
||||
@@ -0,0 +1,136 @@
|
||||
# GitHub Copilot
|
||||
|
||||
[GitHub Copilot](https://github.com/features/copilot) is an AI coding assistant that doesn't support custom base URLs but does respect proxy configurations.
|
||||
This makes it compatible with [AI Bridge Proxy](../ai-bridge-proxy/index.md), which integrates with [AI Bridge](../index.md) for full access to auditing and governance features.
|
||||
To use Copilot with AI Bridge, make sure AI Bridge Proxy is properly configured; see [AI Bridge Proxy Setup](../ai-bridge-proxy/setup.md) for instructions.
|
||||
|
||||
Copilot uses **per-user tokens** tied to GitHub accounts rather than a shared API key.
|
||||
Users must still authenticate with GitHub to use Copilot.
|
||||
|
||||
For general information about GitHub Copilot, see the [GitHub Copilot documentation](https://docs.github.com/en/copilot).
|
||||
|
||||
For general client configuration requirements, see [AI Bridge Proxy Client Configuration](../ai-bridge-proxy/setup.md#client-configuration).
|
||||
The sections below cover Copilot-specific setup for each client.
|
||||
|
||||
## Copilot CLI
|
||||
|
||||
For installation instructions, see [GitHub Copilot CLI documentation](https://docs.github.com/en/copilot/how-tos/copilot-cli/install-copilot-cli).
|
||||
|
||||
### Proxy configuration
|
||||
|
||||
Set the `HTTP_PROXY` and `HTTPS_PROXY` environment variables:
|
||||
|
||||
```shell
|
||||
export HTTP_PROXY="http://coder:${CODER_SESSION_TOKEN}@<proxy-host>:8888"
|
||||
export HTTPS_PROXY="http://coder:${CODER_SESSION_TOKEN}@<proxy-host>:8888"
|
||||
```
|
||||
|
||||
Replace `<proxy-host>` with your AI Bridge Proxy hostname.
|
||||
|
||||
### CA certificate trust
|
||||
|
||||
Copilot CLI is built on Node.js and uses the `NODE_EXTRA_CA_CERTS` environment variable for custom certificates:
|
||||
|
||||
```shell
|
||||
export NODE_EXTRA_CA_CERTS="/path/to/coder-aibridge-proxy-ca.pem"
|
||||
```
|
||||
|
||||
See [Client Configuration CA certificate trust](../ai-bridge-proxy/setup.md#trusting-the-ca-certificate) for details on how to obtain the certificate file.
|
||||
|
||||
## VS Code Copilot Extension
|
||||
|
||||
For installation instructions, see [Installing the GitHub Copilot extension in VS Code](https://docs.github.com/en/copilot/how-tos/set-up/install-copilot-extension?tool=vscode).
|
||||
|
||||
### Proxy configuration
|
||||
|
||||
You can configure the proxy using environment variables or VS Code settings.
|
||||
For environment variables, see [AI Bridge Proxy client configuration](../ai-bridge-proxy/setup.md#configuring-the-proxy).
|
||||
|
||||
Alternatively, you can configure the proxy directly in VS Code settings:
|
||||
|
||||
1. Open Settings (`Ctrl+,` for Windows or `Cmd+,` for macOS)
|
||||
1. Search for `HTTP: Proxy`
|
||||
1. Set the proxy URL using the format `http://coder:<CODER_SESSION_TOKEN>@<proxy-host>:8888`
|
||||
|
||||
Or add directly to your `settings.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"http.proxy": "http://coder:<CODER_SESSION_TOKEN>@<proxy-host>:8888"
|
||||
}
|
||||
```
|
||||
|
||||
The `http.proxy` setting is used for both HTTP and HTTPS requests.
|
||||
Replace `<proxy-host>` with your AI Bridge Proxy hostname and `<CODER_SESSION_TOKEN>` with your Coder session token.
|
||||
|
||||
Restart VS Code for changes to take effect.
|
||||
|
||||
For more details, see [Configuring proxy settings for Copilot](https://docs.github.com/en/copilot/how-tos/configure-personal-settings/configure-network-settings?tool=vscode) in the GitHub documentation.
|
||||
|
||||
### CA certificate trust
|
||||
|
||||
Add the AI Bridge Proxy CA certificate to your operating system's trust store.
|
||||
By default, VS Code loads system certificates, controlled by the `http.systemCertificates` setting.
|
||||
|
||||
See [Client Configuration CA certificate trust](../ai-bridge-proxy/setup.md#trusting-the-ca-certificate) for details on how to obtain the certificate file.
|
||||
|
||||
### Using Coder Remote extension
|
||||
|
||||
When connecting to a Coder workspace with the [Coder extension](https://marketplace.visualstudio.com/items?itemName=coder.coder-remote), the Copilot extension runs inside the Coder workspace and not on your local machine.
|
||||
This means proxy and certificate configuration must be done in the Coder workspace environment.
|
||||
|
||||
#### Proxy configuration
|
||||
|
||||
Configure the proxy in VS Code's remote settings:
|
||||
|
||||
1. [Connect to your Coder workspace](../../../user-guides/workspace-access/vscode.md)
|
||||
1. Open Settings (`Ctrl+,` for Windows or `Cmd+,` for macOS)
|
||||
1. Select the **Remote** tab
|
||||
1. Search for `HTTP: Proxy`
|
||||
1. Set the proxy URL using the format `http://coder:<CODER_SESSION_TOKEN>@<proxy-host>:8888`
|
||||
|
||||
Replace `<proxy-host>` with your AI Bridge Proxy hostname and `<CODER_SESSION_TOKEN>` with your Coder session token.
|
||||
|
||||
#### CA certificate trust
|
||||
|
||||
Since the Copilot extension runs inside the Coder workspace, add the [AI Bridge Proxy CA certificate](../ai-bridge-proxy/setup.md#trusting-the-ca-certificate) to the Coder workspace's system trust store.
|
||||
See [System trust store](../ai-bridge-proxy/setup.md#system-trust-store) for instructions on how to do this on Linux.
|
||||
|
||||
Restart VS Code for changes to take effect.
|
||||
|
||||
## JetBrains IDEs
|
||||
|
||||
For installation instructions, see [Installing the GitHub Copilot extension in JetBrains IDE](https://docs.github.com/en/copilot/how-tos/set-up/install-copilot-extension?tool=jetbrains).
|
||||
|
||||
### Proxy configuration
|
||||
|
||||
Configure the proxy directly in JetBrains IDE settings:
|
||||
|
||||
1. Open Settings (`Ctrl+Alt+S` for Windows or `Cmd+,` for macOS)
|
||||
1. Navigate to `Appearance & Behavior` > `System Settings` > `HTTP Proxy`
|
||||
1. Select `Manual proxy configuration` and `HTTP`
|
||||
1. Enter the proxy hostname and port (default: 8888)
|
||||
1. Select `Proxy authentication` and enter:
|
||||
1. Login: `coder` (this value is ignored)
|
||||
1. Password: Your Coder session token
|
||||
1. Check `Remember` to save the password
|
||||
1. Restart the IDE for changes to take effect
|
||||
|
||||
For more details, see [Configuring proxy settings for Copilot](https://docs.github.com/en/copilot/how-tos/configure-personal-settings/configure-network-settings?tool=jetbrains) in the GitHub documentation.
|
||||
|
||||
### CA certificate trust
|
||||
|
||||
Add the AI Bridge Proxy CA certificate to your operating system's trust store.
|
||||
If the certificate is in the system trust store, no additional IDE configuration is needed.
|
||||
|
||||
Alternatively, you can configure the IDE to accept the certificate:
|
||||
|
||||
1. Open Settings (`Ctrl+Alt+S` for Windows or `Cmd+,` for macOS)
|
||||
1. Navigate to `Appearance & Behavior` > `System Settings` > `Server Certificates`
|
||||
1. Under `Accepted certificates`, click `+` and select the CA certificate file
|
||||
1. Check `Accept non-trusted certificates automatically`
|
||||
1. Restart the IDE for changes to take effect
|
||||
|
||||
For more details, see [Trusted root certificates](https://www.jetbrains.com/help/idea/ssl-certificates.html) in the JetBrains documentation.
|
||||
|
||||
See [Client Configuration CA certificate trust](../ai-bridge-proxy/setup.md#trusting-the-ca-certificate) for details on how to obtain the certificate file.
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
Once AI Bridge is set up on your deployment, the AI coding tools used by your users will need to be configured to route requests via AI Bridge.
|
||||
|
||||
There are two ways to connect AI tools to AI Bridge:
|
||||
|
||||
- Base URL configuration (Recommended): Most AI tools allow customizing the base URL for API requests. This is the preferred approach when supported.
|
||||
- AI Bridge Proxy: For tools that don't support base URL configuration, [AI Bridge Proxy](../ai-bridge-proxy/index.md) can intercept traffic and forward it to AI Bridge.
|
||||
|
||||
## Base URLs
|
||||
|
||||
Most AI coding tools allow the "base URL" to be customized. In other words, when a request is made to OpenAI's API from your coding tool, the API endpoint such as [`/v1/chat/completions`](https://platform.openai.com/docs/api-reference/chat) will be appended to the configured base. Therefore, instead of the default base URL of `https://api.openai.com/v1`, you'll need to set it to `https://coder.example.com/api/v2/aibridge/openai/v1`.
|
||||
@@ -55,6 +60,7 @@ The table below shows tested AI clients and their compatibility with AI Bridge.
|
||||
| [VS Code](./vscode.md) | ✅ | ❌ | Only supports Custom Base URL for OpenAI. |
|
||||
| [JetBrains IDEs](./jetbrains.md) | ✅ | ❌ | Works in Chat mode via "Bring Your Own Key". |
|
||||
| [Zed](./zed.md) | ✅ | ✅ | |
|
||||
| [GitHub Copilot](./copilot.md) | ⚙️ | - | Requires [AI Bridge Proxy](../ai-bridge-proxy/index.md). Uses per-user GitHub tokens. |
|
||||
| WindSurf | ❌ | ❌ | No option to override base URL. |
|
||||
| Cursor | ❌ | ❌ | Override for OpenAI broken ([upstream issue](https://forum.cursor.com/t/requests-are-sent-to-incorrect-endpoint-when-using-base-url-override/144894)). |
|
||||
| Sourcegraph Amp | ❌ | ❌ | No option to override base URL. |
|
||||
@@ -63,7 +69,7 @@ The table below shows tested AI clients and their compatibility with AI Bridge.
|
||||
| Antigravity | ❌ | ❌ | No option to override base URL. |
|
||||
|
|
||||
|
||||
*Legend: ✅ supported, ❌ not supported, - not applicable.*
|
||||
*Legend: ✅ supported, ⚙️ requires AI Bridge Proxy, ❌ not supported, - not applicable.*
|
||||
|
||||
## Configuring In-Workspace Tools
|
||||
|
||||
|
||||
@@ -131,9 +131,10 @@ may also consume agent workspace builds.
|
||||
### Agent Workspace Build Limits
|
||||
|
||||
Without proper controls and sandboxing, it is not recommended to open up Coder
|
||||
Tasks to a large audience in the enterprise. Coder Premium deployments include
|
||||
1,000 Agent Workspace Builds, primarily for proof-of-concept use and basic
|
||||
workflows.
|
||||
Tasks to a large audience in the enterprise. Both Community and Premium
|
||||
deployments include 1,000 Agent Workspace Builds, primarily for proof-of-concept
|
||||
use and basic workflows. Community deployments do not have access to
|
||||
[AI Bridge](./ai-bridge/index.md) or [Agent Boundaries](./agent-boundaries/index.md).
|
||||
|
||||
Our [AI Governance Add-On](./ai-governance.md) includes a shared usage pool of
|
||||
Agent Workspace Builds for automated workflows, along with limits that scale
|
||||
|
||||
@@ -75,7 +75,7 @@ pages.
|
||||
| [2.27](https://coder.com/changelog/coder-2-27) | October 02, 2025 | Not Supported | [v2.27.11](https://github.com/coder/coder/releases/tag/v2.27.11) |
|
||||
| [2.28](https://coder.com/changelog/coder-2-28) | November 04, 2025 | Security Support | [v2.28.10](https://github.com/coder/coder/releases/tag/v2.28.10) |
|
||||
| [2.29](https://coder.com/changelog/coder-2-29) | December 02, 2025 | Stable + ESR | [v2.29.5](https://github.com/coder/coder/releases/tag/v2.29.5) |
|
||||
| [2.30](https://coder.com/changelog/2-30) | February 03, 2026 | Mainline | [v2.30.0](https://github.com/coder/coder/releases/tag/v2.30.0) |
|
||||
| [2.30](https://coder.com/changelog/coder-2-30) | February 03, 2026 | Mainline | [v2.30.0](https://github.com/coder/coder/releases/tag/v2.30.0) |
|
||||
| 2.31 | | Not Released | N/A |
|
||||
<!-- RELEASE_CALENDAR_END -->
|
||||
|
||||
|
||||
@@ -0,0 +1,200 @@
|
||||
# Upgrading Best Practices
|
||||
|
||||
This guide provides best practices for upgrading Coder, along with
|
||||
troubleshooting steps for common issues encountered during upgrades,
|
||||
particularly with database migrations in high availability (HA) deployments.
|
||||
|
||||
## Before you upgrade
|
||||
|
||||
> [!TIP]
|
||||
> To check your current Coder version, use `coder version` from the CLI, check
|
||||
> the bottom-right of the Coder dashboard, or query the `/api/v2/buildinfo`
|
||||
> endpoint. See the [version command](../reference/cli/version.md) for details.
|
||||
|
||||
- **Schedule upgrades during off-peak hours.** Upgrades can cause a noticeable
|
||||
disruption to the developer experience. Plan your maintenance window when
|
||||
the fewest developers are actively using their workspaces.
|
||||
- **The larger the version jump, the more migrations will run.** If you are
|
||||
upgrading across multiple minor versions, expect longer migration times.
|
||||
- **Large upgrades should complete in minutes** (typically 4-7 minutes). If your
|
||||
upgrade is taking significantly longer, there may be an issue requiring
|
||||
investigation.
|
||||
- **Check for known issues affecting your upgrade path.** Some version upgrades
|
||||
have known issues that may require a larger maintenance window or additional
|
||||
steps. For example, upgrades from v2.26.0 to v2.27.8 may encounter issues with
|
||||
the `api_keys` table—upgrading to v2.26.6 first can help mitigate this.
|
||||
Contact [Coder support](../support/index.md) for guidance on your specific
|
||||
upgrade path.
|
||||
|
||||
## Pre-upgrade strategy for Kubernetes HA deployments
|
||||
|
||||
Standard Kubernetes rolling updates may fail when exclusive database locks are
|
||||
required because old replicas keep connections open. For production deployments
|
||||
running multiple replicas (HA), active connections from existing pods can
|
||||
prevent the new pod from acquiring necessary locks.
|
||||
|
||||
### Recommended strategy for major upgrades
|
||||
|
||||
1. **Scale down before upgrading:** Before running `helm upgrade`, scale your
|
||||
Coder deployment down to eliminate database connection contention from
|
||||
existing pods.
|
||||
|
||||
- **Scale to zero** for a clean cutover with no active database connections
|
||||
when the upgrade starts. This momentarily ensures no application access to
|
||||
the database, allowing migrations to acquire locks immediately:
|
||||
|
||||
```shell
|
||||
kubectl scale deployment coder --replicas=0
|
||||
```
|
||||
|
||||
- **Scale to one** if you prefer to minimize downtime. This keeps one pod
|
||||
running but eliminates contention from multiple replicas:
|
||||
|
||||
```shell
|
||||
kubectl scale deployment coder --replicas=1
|
||||
```
|
||||
|
||||
1. **Perform upgrade:** Run your standard Helm upgrade command. When scaling to
|
||||
zero, this will bring up a fresh pod that can run migrations without
|
||||
competing for database locks.
|
||||
|
||||
1. **Scale back:** Once the upgrade is healthy, scale back to your desired
|
||||
replica count.
|
||||
|
||||
## Kubernetes liveness probes and long-running migrations
|
||||
|
||||
Liveness probes can cause pods to be killed during long-running database
|
||||
migrations. Starting with Coder v2.30.0, liveness probes are *disabled by
|
||||
default* in the Helm chart.
|
||||
|
||||
This change was made because:
|
||||
|
||||
- Liveness probes can kill pods during legitimate long-running migrations
|
||||
- If a Coder pod becomes unresponsive (due to a deadlock, etc.), it's better to
|
||||
investigate the issue rather than have Kubernetes silently restart the pod
|
||||
|
||||
If you have enabled liveness probes in your deployment and observe pods
|
||||
restarting with `CrashLoopBackOff` during an upgrade, the liveness probe may be
|
||||
killing the pod prematurely.
|
||||
|
||||
### Diagnosing liveness probe issues
|
||||
|
||||
To confirm whether Kubernetes is killing pods due to liveness probe failures,
|
||||
check the Kubernetes events and pod logs:
|
||||
|
||||
```shell
|
||||
# Check events for the Coder deployment
|
||||
kubectl get events --field-selector involvedObject.name=coder -n <namespace>
|
||||
|
||||
# Check pod logs for migration progress
|
||||
kubectl logs -l app.kubernetes.io/name=coder -n <namespace> --previous
|
||||
```
|
||||
|
||||
Look for events indicating `Liveness probe failed` or `Container coder failed
|
||||
liveness probe, will be restarted`.
|
||||
|
||||
### Recommended approach
|
||||
|
||||
If you have liveness probes enabled and experience issues during upgrades,
|
||||
disable them before upgrading:
|
||||
|
||||
```shell
|
||||
kubectl edit deployment coder
|
||||
```
|
||||
|
||||
Remove the `livenessProbe` section entirely, then proceed with the upgrade.
|
||||
|
||||
> [!NOTE]
|
||||
> For versions prior to v2.30.0, liveness probes were enabled by default. You
|
||||
> can disable them by editing the Deployment directly with `kubectl edit
|
||||
> deployment coder` or by using a ConfigMap override. See the
|
||||
> [Helm chart values](https://artifacthub.io/packages/helm/coder-v2/coder?modal=values&path=coder.livenessProbe)
|
||||
> for configuration options available in v2.30.0+.
|
||||
|
||||
### Workaround steps
|
||||
|
||||
1. **Remove or adjust liveness probes:** Temporarily remove the `livenessProbe`
|
||||
from your Deployment configuration to prevent Kubernetes from restarting the
|
||||
pod during migrations.
|
||||
|
||||
1. **Isolate the migration:** Ensure all extra replica sets are shut down. If
|
||||
you have clear evidence of database locks from old pods, scale the deployment
|
||||
to 1 replica to prevent old pods from holding locks on the tables being
|
||||
upgraded.
|
||||
|
||||
1. **Clear database locks:** Monitor database activity. If the migration remains
|
||||
blocked by locks despite scaling down, you may need to manually terminate
|
||||
existing connections. See
|
||||
[Recovering from failed database migrations](#recovering-from-failed-database-migrations)
|
||||
below for instructions.
|
||||
|
||||
## Recovering from failed database migrations
|
||||
|
||||
If an upgrade gets stuck in a restart loop due to database locks:
|
||||
|
||||
1. **Scale to zero:** Scale the Coder deployment to 0 to stop all application
|
||||
activity.
|
||||
|
||||
```shell
|
||||
kubectl scale deployment coder --replicas=0
|
||||
```
|
||||
|
||||
1. **Clear connections:** Terminate existing connections to the Coder database
|
||||
to release any lingering locks. This PostgreSQL command drops all active
|
||||
connections to the database:
|
||||
|
||||
> [!CAUTION]
|
||||
> This command is intrusive and should be used as a last resort. Contact
|
||||
> [Coder support](../support/index.md) before running destructive database
|
||||
> commands in production. SQL commands may vary depending on your PostgreSQL
|
||||
> version and configuration.
|
||||
|
||||
```sql
|
||||
SELECT pg_terminate_backend(pid)
|
||||
FROM pg_stat_activity
|
||||
WHERE datname = 'coder'
|
||||
AND pid <> pg_backend_pid();
|
||||
```
|
||||
|
||||
1. **Check schema migrations:** Verify the level of upgrade and check if `dirty`
|
||||
is true. If this has progressed, this now indicates your current Coder
|
||||
installation state.
|
||||
|
||||
> [!NOTE]
|
||||
> The SQL commands below are for informational purposes. If you are unsure
|
||||
> about querying your database directly, contact
|
||||
> [Coder support](../support/index.md) for assistance.
|
||||
|
||||
```sql
|
||||
SELECT * FROM schema_migrations;
|
||||
```
|
||||
|
||||
1. **Ensure image version:** Confirm the Deployment image is set to the
|
||||
appropriate version (old or new, depending on the database migration state
|
||||
found in step 3). Match your tag in the
|
||||
[migrations directory](https://github.com/coder/coder/tree/main/coderd/database/migrations)
|
||||
to the value in the `schema_migrations` output.
|
||||
|
||||
1. **Resume the upgrade:** Follow the
|
||||
[pre-upgrade strategy](#recommended-strategy-for-major-upgrades) to scale
|
||||
back up and continue the upgrade process.
|
||||
|
||||
## When to contact support
|
||||
|
||||
If you encounter any of the following issues, contact
|
||||
[Coder support](../support/index.md):
|
||||
|
||||
- Locking issues that cannot be mitigated by the steps in this guide
|
||||
- Migrations taking significantly longer than expected (more than 15 minutes)
|
||||
without evidence of lock contention—this may indicate database resource
|
||||
constraints requiring investigation
|
||||
- Resource consumption issues (excessive memory, CPU, or OOM kills) during
|
||||
upgrades
|
||||
- Any other upgrade problems not covered by this documentation
|
||||
|
||||
When contacting support, please collect and provide:
|
||||
|
||||
- `coderd` logs with details on the stages where the upgrade stalled
|
||||
- PostgreSQL logs if available
|
||||
- The Coder versions involved (source and target)
|
||||
- Your deployment configuration (number of replicas, resource limits)
|
||||
@@ -6,6 +6,9 @@ This article describes how to upgrade your Coder server.
|
||||
> Prior to upgrading a production Coder deployment, take a database snapshot since
|
||||
> Coder does not support rollbacks.
|
||||
|
||||
For upgrade recommendations and troubleshooting, see
|
||||
[Upgrading Best Practices](./upgrade-best-practices.md).
|
||||
|
||||
## Reinstall Coder to upgrade
|
||||
|
||||
To upgrade your Coder server, reinstall Coder using your original method
|
||||
|
||||
+18
-1
@@ -169,7 +169,14 @@
|
||||
"title": "Upgrading",
|
||||
"description": "Learn how to upgrade Coder",
|
||||
"path": "./install/upgrade.md",
|
||||
"icon_path": "./images/icons/upgrade.svg"
|
||||
"icon_path": "./images/icons/upgrade.svg",
|
||||
"children": [
|
||||
{
|
||||
"title": "Upgrading Best Practices",
|
||||
"description": "Best practices and troubleshooting for Coder upgrades",
|
||||
"path": "./install/upgrade-best-practices.md"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"title": "Uninstall",
|
||||
@@ -1091,6 +1098,11 @@
|
||||
"title": "Zed",
|
||||
"description": "Configure Zed to use AI Bridge",
|
||||
"path": "./ai-coder/ai-bridge/clients/zed.md"
|
||||
},
|
||||
{
|
||||
"title": "GitHub Copilot",
|
||||
"description": "Configure GitHub Copilot to use AI Bridge via AI Bridge Proxy",
|
||||
"path": "./ai-coder/ai-bridge/clients/copilot.md"
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1632,6 +1644,11 @@
|
||||
"description": "Delete an organization",
|
||||
"path": "reference/cli/organizations_delete.md"
|
||||
},
|
||||
{
|
||||
"title": "organizations list",
|
||||
"description": "List all organizations",
|
||||
"path": "reference/cli/organizations_list.md"
|
||||
},
|
||||
{
|
||||
"title": "organizations members",
|
||||
"description": "Manage organization members",
|
||||
|
||||
Generated
+8
@@ -838,6 +838,10 @@ curl -X GET http://coder-server:8080/api/v2/workspaceagents/{workspaceagent}/con
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"name": "string",
|
||||
"status": "running",
|
||||
"subagent_id": {
|
||||
"uuid": "string",
|
||||
"valid": true
|
||||
},
|
||||
"workspace_folder": "string"
|
||||
}
|
||||
],
|
||||
@@ -1015,6 +1019,10 @@ curl -X GET http://coder-server:8080/api/v2/workspaceagents/{workspaceagent}/con
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"name": "string",
|
||||
"status": "running",
|
||||
"subagent_id": {
|
||||
"uuid": "string",
|
||||
"valid": true
|
||||
},
|
||||
"workspace_folder": "string"
|
||||
}
|
||||
],
|
||||
|
||||
Generated
+240
-8
@@ -2184,9 +2184,9 @@ This is required on creation to enable a user-flow of validating a template work
|
||||
|
||||
#### Enumerated Values
|
||||
|
||||
| Value(s) |
|
||||
|-----------------------------------------------------------------------------------|
|
||||
| `cli`, `dashboard`, `jetbrains_connection`, `ssh_connection`, `vscode_connection` |
|
||||
| Value(s) |
|
||||
|--------------------------------------------------------------------------------------------------------|
|
||||
| `cli`, `dashboard`, `jetbrains_connection`, `ssh_connection`, `task_manual_pause`, `vscode_connection` |
|
||||
|
||||
## codersdk.CreateWorkspaceBuildRequest
|
||||
|
||||
@@ -2227,11 +2227,11 @@ This is required on creation to enable a user-flow of validating a template work
|
||||
|
||||
#### Enumerated Values
|
||||
|
||||
| Property | Value(s) |
|
||||
|--------------|-----------------------------------------------------------------------------------|
|
||||
| `log_level` | `debug` |
|
||||
| `reason` | `cli`, `dashboard`, `jetbrains_connection`, `ssh_connection`, `vscode_connection` |
|
||||
| `transition` | `delete`, `start`, `stop` |
|
||||
| Property | Value(s) |
|
||||
|--------------|--------------------------------------------------------------------------------------------------------|
|
||||
| `log_level` | `debug` |
|
||||
| `reason` | `cli`, `dashboard`, `jetbrains_connection`, `ssh_connection`, `task_manual_pause`, `vscode_connection` |
|
||||
| `transition` | `delete`, `start`, `stop` |
|
||||
|
||||
## codersdk.CreateWorkspaceProxyRequest
|
||||
|
||||
@@ -6178,6 +6178,225 @@ Only certain features set these fields: - FeatureManagedAgentLimit|
|
||||
| `name` | string | true | | |
|
||||
| `regenerate_token` | boolean | false | | |
|
||||
|
||||
## codersdk.PauseTaskResponse
|
||||
|
||||
```json
|
||||
{
|
||||
"workspace_build": {
|
||||
"build_number": 0,
|
||||
"created_at": "2019-08-24T14:15:22Z",
|
||||
"daily_cost": 0,
|
||||
"deadline": "2019-08-24T14:15:22Z",
|
||||
"has_ai_task": true,
|
||||
"has_external_agent": true,
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"initiator_id": "06588898-9a84-4b35-ba8f-f9cbd64946f3",
|
||||
"initiator_name": "string",
|
||||
"job": {
|
||||
"available_workers": [
|
||||
"497f6eca-6276-4993-bfeb-53cbbbba6f08"
|
||||
],
|
||||
"canceled_at": "2019-08-24T14:15:22Z",
|
||||
"completed_at": "2019-08-24T14:15:22Z",
|
||||
"created_at": "2019-08-24T14:15:22Z",
|
||||
"error": "string",
|
||||
"error_code": "REQUIRED_TEMPLATE_VARIABLES",
|
||||
"file_id": "8a0cfb4f-ddc9-436d-91bb-75133c583767",
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"initiator_id": "06588898-9a84-4b35-ba8f-f9cbd64946f3",
|
||||
"input": {
|
||||
"error": "string",
|
||||
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
|
||||
"workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478"
|
||||
},
|
||||
"logs_overflowed": true,
|
||||
"metadata": {
|
||||
"template_display_name": "string",
|
||||
"template_icon": "string",
|
||||
"template_id": "c6d67e98-83ea-49f0-8812-e4abae2b68bc",
|
||||
"template_name": "string",
|
||||
"template_version_name": "string",
|
||||
"workspace_id": "0967198e-ec7b-4c6b-b4d3-f71244cadbe9",
|
||||
"workspace_name": "string"
|
||||
},
|
||||
"organization_id": "7c60d51f-b44e-4682-87d6-449835ea4de6",
|
||||
"queue_position": 0,
|
||||
"queue_size": 0,
|
||||
"started_at": "2019-08-24T14:15:22Z",
|
||||
"status": "pending",
|
||||
"tags": {
|
||||
"property1": "string",
|
||||
"property2": "string"
|
||||
},
|
||||
"type": "template_version_import",
|
||||
"worker_id": "ae5fa6f7-c55b-40c1-b40a-b36ac467652b",
|
||||
"worker_name": "string"
|
||||
},
|
||||
"matched_provisioners": {
|
||||
"available": 0,
|
||||
"count": 0,
|
||||
"most_recently_seen": "2019-08-24T14:15:22Z"
|
||||
},
|
||||
"max_deadline": "2019-08-24T14:15:22Z",
|
||||
"reason": "initiator",
|
||||
"resources": [
|
||||
{
|
||||
"agents": [
|
||||
{
|
||||
"api_version": "string",
|
||||
"apps": [
|
||||
{
|
||||
"command": "string",
|
||||
"display_name": "string",
|
||||
"external": true,
|
||||
"group": "string",
|
||||
"health": "disabled",
|
||||
"healthcheck": {
|
||||
"interval": 0,
|
||||
"threshold": 0,
|
||||
"url": "string"
|
||||
},
|
||||
"hidden": true,
|
||||
"icon": "string",
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"open_in": "slim-window",
|
||||
"sharing_level": "owner",
|
||||
"slug": "string",
|
||||
"statuses": [
|
||||
{
|
||||
"agent_id": "2b1e3b65-2c04-4fa2-a2d7-467901e98978",
|
||||
"app_id": "affd1d10-9538-4fc8-9e0b-4594a28c1335",
|
||||
"created_at": "2019-08-24T14:15:22Z",
|
||||
"icon": "string",
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"message": "string",
|
||||
"needs_user_attention": true,
|
||||
"state": "working",
|
||||
"uri": "string",
|
||||
"workspace_id": "0967198e-ec7b-4c6b-b4d3-f71244cadbe9"
|
||||
}
|
||||
],
|
||||
"subdomain": true,
|
||||
"subdomain_name": "string",
|
||||
"tooltip": "string",
|
||||
"url": "string"
|
||||
}
|
||||
],
|
||||
"architecture": "string",
|
||||
"connection_timeout_seconds": 0,
|
||||
"created_at": "2019-08-24T14:15:22Z",
|
||||
"directory": "string",
|
||||
"disconnected_at": "2019-08-24T14:15:22Z",
|
||||
"display_apps": [
|
||||
"vscode"
|
||||
],
|
||||
"environment_variables": {
|
||||
"property1": "string",
|
||||
"property2": "string"
|
||||
},
|
||||
"expanded_directory": "string",
|
||||
"first_connected_at": "2019-08-24T14:15:22Z",
|
||||
"health": {
|
||||
"healthy": false,
|
||||
"reason": "agent has lost connection"
|
||||
},
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"instance_id": "string",
|
||||
"last_connected_at": "2019-08-24T14:15:22Z",
|
||||
"latency": {
|
||||
"property1": {
|
||||
"latency_ms": 0,
|
||||
"preferred": true
|
||||
},
|
||||
"property2": {
|
||||
"latency_ms": 0,
|
||||
"preferred": true
|
||||
}
|
||||
},
|
||||
"lifecycle_state": "created",
|
||||
"log_sources": [
|
||||
{
|
||||
"created_at": "2019-08-24T14:15:22Z",
|
||||
"display_name": "string",
|
||||
"icon": "string",
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"workspace_agent_id": "7ad2e618-fea7-4c1a-b70a-f501566a72f1"
|
||||
}
|
||||
],
|
||||
"logs_length": 0,
|
||||
"logs_overflowed": true,
|
||||
"name": "string",
|
||||
"operating_system": "string",
|
||||
"parent_id": {
|
||||
"uuid": "string",
|
||||
"valid": true
|
||||
},
|
||||
"ready_at": "2019-08-24T14:15:22Z",
|
||||
"resource_id": "4d5215ed-38bb-48ed-879a-fdb9ca58522f",
|
||||
"scripts": [
|
||||
{
|
||||
"cron": "string",
|
||||
"display_name": "string",
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"log_path": "string",
|
||||
"log_source_id": "4197ab25-95cf-4b91-9c78-f7f2af5d353a",
|
||||
"run_on_start": true,
|
||||
"run_on_stop": true,
|
||||
"script": "string",
|
||||
"start_blocks_login": true,
|
||||
"timeout": 0
|
||||
}
|
||||
],
|
||||
"started_at": "2019-08-24T14:15:22Z",
|
||||
"startup_script_behavior": "blocking",
|
||||
"status": "connecting",
|
||||
"subsystems": [
|
||||
"envbox"
|
||||
],
|
||||
"troubleshooting_url": "string",
|
||||
"updated_at": "2019-08-24T14:15:22Z",
|
||||
"version": "string"
|
||||
}
|
||||
],
|
||||
"created_at": "2019-08-24T14:15:22Z",
|
||||
"daily_cost": 0,
|
||||
"hide": true,
|
||||
"icon": "string",
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"job_id": "453bd7d7-5355-4d6d-a38e-d9e7eb218c3f",
|
||||
"metadata": [
|
||||
{
|
||||
"key": "string",
|
||||
"sensitive": true,
|
||||
"value": "string"
|
||||
}
|
||||
],
|
||||
"name": "string",
|
||||
"type": "string",
|
||||
"workspace_transition": "start"
|
||||
}
|
||||
],
|
||||
"status": "pending",
|
||||
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
|
||||
"template_version_name": "string",
|
||||
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
|
||||
"transition": "start",
|
||||
"updated_at": "2019-08-24T14:15:22Z",
|
||||
"workspace_id": "0967198e-ec7b-4c6b-b4d3-f71244cadbe9",
|
||||
"workspace_name": "string",
|
||||
"workspace_owner_avatar_url": "string",
|
||||
"workspace_owner_id": "e7078695-5279-4c86-8774-3ac2367a2fc7",
|
||||
"workspace_owner_name": "string"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Properties
|
||||
|
||||
| Name | Type | Required | Restrictions | Description |
|
||||
|-------------------|----------------------------------------------------|----------|--------------|-------------|
|
||||
| `workspace_build` | [codersdk.WorkspaceBuild](#codersdkworkspacebuild) | false | | |
|
||||
|
||||
## codersdk.Permission
|
||||
|
||||
```json
|
||||
@@ -8008,6 +8227,7 @@ Only certain features set these fields: - FeatureManagedAgentLimit|
|
||||
"deprecated": true,
|
||||
"deprecation_message": "string",
|
||||
"description": "string",
|
||||
"disable_module_cache": true,
|
||||
"display_name": "string",
|
||||
"failure_ttl_ms": 0,
|
||||
"icon": "string",
|
||||
@@ -8048,6 +8268,7 @@ Only certain features set these fields: - FeatureManagedAgentLimit|
|
||||
| `deprecated` | boolean | false | | |
|
||||
| `deprecation_message` | string | false | | |
|
||||
| `description` | string | false | | |
|
||||
| `disable_module_cache` | boolean | false | | Disable module cache disables the use of cached Terraform modules during provisioning. |
|
||||
| `display_name` | string | false | | |
|
||||
| `failure_ttl_ms` | integer | false | | Failure ttl ms TimeTilDormantMillis, and TimeTilDormantAutoDeleteMillis are enterprise-only. Their values are used if your license is entitled to use the advanced template scheduling feature. |
|
||||
| `icon` | string | false | | |
|
||||
@@ -9086,6 +9307,7 @@ Restarts will only happen on weekdays in this list on weeks which line up with W
|
||||
"deprecation_message": "string",
|
||||
"description": "string",
|
||||
"disable_everyone_group_access": true,
|
||||
"disable_module_cache": true,
|
||||
"display_name": "string",
|
||||
"failure_ttl_ms": 0,
|
||||
"icon": "string",
|
||||
@@ -9115,6 +9337,7 @@ Restarts will only happen on weekdays in this list on weeks which line up with W
|
||||
| `deprecation_message` | string | false | | Deprecation message if set, will mark the template as deprecated and block any new workspaces from using this template. If passed an empty string, will remove the deprecated message, making the template usable for new workspaces again. |
|
||||
| `description` | string | false | | |
|
||||
| `disable_everyone_group_access` | boolean | false | | Disable everyone group access allows optionally disabling the default behavior of granting the 'everyone' group access to use the template. If this is set to true, the template will not be available to all users, and must be explicitly granted to users or groups in the permissions settings of the template. |
|
||||
| `disable_module_cache` | boolean | false | | Disable module cache disables the using of cached Terraform modules during provisioning. It is recommended not to disable this. |
|
||||
| `display_name` | string | false | | |
|
||||
| `failure_ttl_ms` | integer | false | | |
|
||||
| `icon` | string | false | | |
|
||||
@@ -10514,6 +10737,10 @@ If the schedule is empty, the user will be updated to use the default schedule.|
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"name": "string",
|
||||
"status": "running",
|
||||
"subagent_id": {
|
||||
"uuid": "string",
|
||||
"valid": true
|
||||
},
|
||||
"workspace_folder": "string"
|
||||
}
|
||||
```
|
||||
@@ -10530,6 +10757,7 @@ If the schedule is empty, the user will be updated to use the default schedule.|
|
||||
| `id` | string | false | | |
|
||||
| `name` | string | false | | |
|
||||
| `status` | [codersdk.WorkspaceAgentDevcontainerStatus](#codersdkworkspaceagentdevcontainerstatus) | false | | Additional runtime fields. |
|
||||
| `subagent_id` | [uuid.NullUUID](#uuidnulluuid) | false | | |
|
||||
| `workspace_folder` | string | false | | |
|
||||
|
||||
## codersdk.WorkspaceAgentDevcontainerAgent
|
||||
@@ -10661,6 +10889,10 @@ If the schedule is empty, the user will be updated to use the default schedule.|
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"name": "string",
|
||||
"status": "running",
|
||||
"subagent_id": {
|
||||
"uuid": "string",
|
||||
"valid": true
|
||||
},
|
||||
"workspace_folder": "string"
|
||||
}
|
||||
],
|
||||
|
||||
Generated
+32
@@ -365,6 +365,38 @@ curl -X GET http://coder-server:8080/api/v2/tasks/{user}/{task}/logs \
|
||||
|
||||
To perform this operation, you must be authenticated. [Learn more](authentication.md).
|
||||
|
||||
## Pause task
|
||||
|
||||
### Code samples
|
||||
|
||||
```shell
|
||||
# Example request using curl
|
||||
curl -X POST http://coder-server:8080/api/v2/tasks/{user}/{task}/pause \
|
||||
-H 'Accept: */*' \
|
||||
-H 'Coder-Session-Token: API_KEY'
|
||||
```
|
||||
|
||||
`POST /tasks/{user}/{task}/pause`
|
||||
|
||||
### Parameters
|
||||
|
||||
| Name | In | Type | Required | Description |
|
||||
|--------|------|--------------|----------|-------------------------------------------------------|
|
||||
| `user` | path | string | true | Username, user ID, or 'me' for the authenticated user |
|
||||
| `task` | path | string(uuid) | true | Task ID |
|
||||
|
||||
### Example responses
|
||||
|
||||
> 202 Response
|
||||
|
||||
### Responses
|
||||
|
||||
| Status | Meaning | Description | Schema |
|
||||
|--------|---------------------------------------------------------------|-------------|--------------------------------------------------------------------|
|
||||
| 202 | [Accepted](https://tools.ietf.org/html/rfc7231#section-6.3.3) | Accepted | [codersdk.PauseTaskResponse](schemas.md#codersdkpausetaskresponse) |
|
||||
|
||||
To perform this operation, you must be authenticated. [Learn more](authentication.md).
|
||||
|
||||
## Send input to AI task
|
||||
|
||||
### Code samples
|
||||
|
||||
Generated
+9
@@ -65,6 +65,7 @@ To include deprecated templates, specify `deprecated:true` in the search query.
|
||||
"deprecated": true,
|
||||
"deprecation_message": "string",
|
||||
"description": "string",
|
||||
"disable_module_cache": true,
|
||||
"display_name": "string",
|
||||
"failure_ttl_ms": 0,
|
||||
"icon": "string",
|
||||
@@ -122,6 +123,7 @@ Restarts will only happen on weekdays in this list on weeks which line up with W
|
||||
|`» deprecated`|boolean|false|||
|
||||
|`» deprecation_message`|string|false|||
|
||||
|`» description`|string|false|||
|
||||
|`» disable_module_cache`|boolean|false||Disable module cache disables the use of cached Terraform modules during provisioning.|
|
||||
|`» display_name`|string|false|||
|
||||
|`» failure_ttl_ms`|integer|false||Failure ttl ms TimeTilDormantMillis, and TimeTilDormantAutoDeleteMillis are enterprise-only. Their values are used if your license is entitled to use the advanced template scheduling feature.|
|
||||
|`» icon`|string|false|||
|
||||
@@ -247,6 +249,7 @@ curl -X POST http://coder-server:8080/api/v2/organizations/{organization}/templa
|
||||
"deprecated": true,
|
||||
"deprecation_message": "string",
|
||||
"description": "string",
|
||||
"disable_module_cache": true,
|
||||
"display_name": "string",
|
||||
"failure_ttl_ms": 0,
|
||||
"icon": "string",
|
||||
@@ -397,6 +400,7 @@ curl -X GET http://coder-server:8080/api/v2/organizations/{organization}/templat
|
||||
"deprecated": true,
|
||||
"deprecation_message": "string",
|
||||
"description": "string",
|
||||
"disable_module_cache": true,
|
||||
"display_name": "string",
|
||||
"failure_ttl_ms": 0,
|
||||
"icon": "string",
|
||||
@@ -813,6 +817,7 @@ To include deprecated templates, specify `deprecated:true` in the search query.
|
||||
"deprecated": true,
|
||||
"deprecation_message": "string",
|
||||
"description": "string",
|
||||
"disable_module_cache": true,
|
||||
"display_name": "string",
|
||||
"failure_ttl_ms": 0,
|
||||
"icon": "string",
|
||||
@@ -870,6 +875,7 @@ Restarts will only happen on weekdays in this list on weeks which line up with W
|
||||
|`» deprecated`|boolean|false|||
|
||||
|`» deprecation_message`|string|false|||
|
||||
|`» description`|string|false|||
|
||||
|`» disable_module_cache`|boolean|false||Disable module cache disables the use of cached Terraform modules during provisioning.|
|
||||
|`» display_name`|string|false|||
|
||||
|`» failure_ttl_ms`|integer|false||Failure ttl ms TimeTilDormantMillis, and TimeTilDormantAutoDeleteMillis are enterprise-only. Their values are used if your license is entitled to use the advanced template scheduling feature.|
|
||||
|`» icon`|string|false|||
|
||||
@@ -1013,6 +1019,7 @@ curl -X GET http://coder-server:8080/api/v2/templates/{template} \
|
||||
"deprecated": true,
|
||||
"deprecation_message": "string",
|
||||
"description": "string",
|
||||
"disable_module_cache": true,
|
||||
"display_name": "string",
|
||||
"failure_ttl_ms": 0,
|
||||
"icon": "string",
|
||||
@@ -1122,6 +1129,7 @@ curl -X PATCH http://coder-server:8080/api/v2/templates/{template} \
|
||||
"deprecation_message": "string",
|
||||
"description": "string",
|
||||
"disable_everyone_group_access": true,
|
||||
"disable_module_cache": true,
|
||||
"display_name": "string",
|
||||
"failure_ttl_ms": 0,
|
||||
"icon": "string",
|
||||
@@ -1184,6 +1192,7 @@ curl -X PATCH http://coder-server:8080/api/v2/templates/{template} \
|
||||
"deprecated": true,
|
||||
"deprecation_message": "string",
|
||||
"description": "string",
|
||||
"disable_module_cache": true,
|
||||
"display_name": "string",
|
||||
"failure_ttl_ms": 0,
|
||||
"icon": "string",
|
||||
|
||||
Generated
+10
-10
@@ -97,16 +97,6 @@ Enable pprof profiling server.
|
||||
|
||||
Set port for pprof profiling server.
|
||||
|
||||
### --configure-dns-for-local-stub-resolver
|
||||
|
||||
| | |
|
||||
|-------------|--------------------------------------------------------------|
|
||||
| Type | <code>bool</code> |
|
||||
| Environment | <code>$BOUNDARY_CONFIGURE_DNS_FOR_LOCAL_STUB_RESOLVER</code> |
|
||||
| YAML | <code>configure_dns_for_local_stub_resolver</code> |
|
||||
|
||||
Configure DNS for local stub resolver (e.g., systemd-resolved). Only needed when /etc/resolv.conf contains nameserver 127.0.0.53.
|
||||
|
||||
### --jail-type
|
||||
|
||||
| | |
|
||||
@@ -118,6 +108,16 @@ Configure DNS for local stub resolver (e.g., systemd-resolved). Only needed when
|
||||
|
||||
Jail type to use for network isolation. Options: nsjail (default), landjail.
|
||||
|
||||
### --use-real-dns
|
||||
|
||||
| | |
|
||||
|-------------|-------------------------------------|
|
||||
| Type | <code>bool</code> |
|
||||
| Environment | <code>$BOUNDARY_USE_REAL_DNS</code> |
|
||||
| YAML | <code>use_real_dns</code> |
|
||||
|
||||
Use real DNS in the jail instead of the dummy DNS (allows DNS exfiltration). Default: false.
|
||||
|
||||
### --disable-audit-logs
|
||||
|
||||
| | |
|
||||
|
||||
Generated
+1
@@ -20,6 +20,7 @@ coder organizations [flags] [subcommand]
|
||||
| Name | Purpose |
|
||||
|------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| [<code>show</code>](./organizations_show.md) | Show the organization. Using "selected" will show the selected organization from the "--org" flag. Using "me" will show all organizations you are a member of. |
|
||||
| [<code>list</code>](./organizations_list.md) | List all organizations |
|
||||
| [<code>create</code>](./organizations_create.md) | Create a new organization. |
|
||||
| [<code>delete</code>](./organizations_delete.md) | Delete an organization |
|
||||
| [<code>members</code>](./organizations_members.md) | Manage organization members |
|
||||
|
||||
Generated
+40
@@ -0,0 +1,40 @@
|
||||
<!-- DO NOT EDIT | GENERATED CONTENT -->
|
||||
# organizations list
|
||||
|
||||
List all organizations
|
||||
|
||||
Aliases:
|
||||
|
||||
* ls
|
||||
|
||||
## Usage
|
||||
|
||||
```console
|
||||
coder organizations list [flags]
|
||||
```
|
||||
|
||||
## Description
|
||||
|
||||
```console
|
||||
List all organizations. Requires a role which grants ResourceOrganization: read.
|
||||
```
|
||||
|
||||
## Options
|
||||
|
||||
### -c, --column
|
||||
|
||||
| | |
|
||||
|---------|-------------------------------------------------------------------------------------------|
|
||||
| Type | <code>[id\|name\|display name\|icon\|description\|created at\|updated at\|default]</code> |
|
||||
| Default | <code>name,display name,id,default</code> |
|
||||
|
||||
Columns to display in table output.
|
||||
|
||||
### -o, --output
|
||||
|
||||
| | |
|
||||
|---------|--------------------------|
|
||||
| Type | <code>table\|json</code> |
|
||||
| Default | <code>table</code> |
|
||||
|
||||
Output format.
|
||||
@@ -1,5 +1,5 @@
|
||||
# 1.86.0
|
||||
FROM rust:slim@sha256:df6ca8f96d338697ccdbe3ccac57a85d2172e03a2429c2d243e74f3bb83ba2f5 AS rust-utils
|
||||
FROM rust:slim@sha256:760ad1d638d70ebbd0c61e06210e1289cbe45ff6425e3ea6e01241de3e14d08e AS rust-utils
|
||||
# Install rust helper programs
|
||||
ENV CARGO_INSTALL_ROOT=/tmp/
|
||||
# Use more reliable mirrors for Debian packages
|
||||
|
||||
@@ -344,7 +344,7 @@ module "dotfiles" {
|
||||
module "git-config" {
|
||||
count = data.coder_workspace.me.start_count
|
||||
source = "dev.registry.coder.com/coder/git-config/coder"
|
||||
version = "1.0.32"
|
||||
version = "1.0.33"
|
||||
agent_id = coder_agent.dev.id
|
||||
# If you prefer to commit with a different email, this allows you to do so.
|
||||
allow_email_change = true
|
||||
@@ -856,7 +856,7 @@ resource "coder_script" "boundary_config_setup" {
|
||||
module "claude-code" {
|
||||
count = data.coder_task.me.enabled ? data.coder_workspace.me.start_count : 0
|
||||
source = "dev.registry.coder.com/coder/claude-code/coder"
|
||||
version = "4.7.2"
|
||||
version = "4.7.5"
|
||||
enable_boundary = true
|
||||
agent_id = coder_agent.dev.id
|
||||
workdir = local.repo_dir
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user