Compare commits

...

16 Commits

Author SHA1 Message Date
Spike Curtis 097e085fcb fix: use correct slog arguments (#20721) (#20723)
Fixes a bad slog.Error() call that didn't wrap the error in
`slog.Error`

(cherry picked from https://github.com/coder/coder/pull/20721)
2025-11-12 00:30:09 +04:00
Cian Johnston b8ab2d351f chore: update Go to 1.24.10 (#20684) (#20688)
(cherry picked from commit 81c3375670)

Signed-off-by: Danny Kopping <danny@coder.com>
Co-authored-by: Danny Kopping <danny@coder.com>
2025-11-10 10:56:18 -06:00
Dean Sheather 1b1e3cb706 chore: change managed agent limit (#20664)
(cherry picked from commit b3f651d62f)
2025-11-04 12:11:37 -05:00
Mathias Fredriksson ea0aca0f26 fix(coderd): fix template ai task check error message (#20651) (#20659)
Create task was still mentioning magic prompt parameter when checking
template task validity. This change updates it to only mention validity
of `coder_ai_task` resource.

The previous message is incorrect, would lead to confusion and is
unhelpful.

(cherry picked from commit daad93967a)
2025-11-04 15:25:58 +02:00
Cian Johnston 563612eb3b fix: delete related task when deleting workspace (#20567) (#20585)
* Instead of prompting the user to start a deleted workspace (which is
silly), prompt them to create a new task instead.
* Adds a warning dialog when deleting a workspace related to a task
* Updates provisionerdserver to delete the related task if a workspace
is related to a task

(cherry picked from commit 73dedcc765)
2025-11-03 10:04:41 +00:00
Cian Johnston fa43ea8e68 chore: remove brazil fly.io proxy (#20601) (#20645)
(cherry picked from commit 7182c53df7)

Co-authored-by: Dean Sheather <dean@deansheather.com>
2025-11-03 09:41:16 +00:00
Cian Johnston d82ba7e3a4 fix(coderd): disallow POSTing a workspace build on a deleted workspace (#20584) (#20586)
- Adds a check on /api/v2/workspacebuilds to disallow creating a START
or STOP build if the workspace is deleted.
- DELETEs are still allowed.

(cherry picked from commit 38017010ce)
2025-11-03 09:01:57 +00:00
Cian Johnston cb4ea1f397 fix(coderd): fix audit log resource link for tasks (#20545) (#20547)
Existing task audit log links were incorrect. As audit log links are
generated on-the-fly, this does not require backfill.

(cherry picked from commit 566146af72)
2025-11-03 09:00:42 +00:00
Danielle Maywood effbe4e52e refactor: remove TaskAppID from codersdk.WorkspaceBuild (#20583) (#20592)
`TaskAppID` has not yet been shipped. We're dropping this field in favor
of using the same information but from `codersdk.Task`.

---

Cherry picked from d80b5fc8ed
https://github.com/coder/coder/pull/20583
2025-10-30 17:02:25 +00:00
Susana Ferreira 6424093146 feat: add prebuilds reconciliation duration metric (#20535) (#20581)
Related to PR: https://github.com/coder/coder/pull/20535

(cherry picked from commit aad1b401c1)
2025-10-30 12:16:19 +00:00
Susana Ferreira 2cf4b5c5a2 perf: optimize prebuilds membership reconciliation to check orgs not presets (#20493) (#20555)
Related to PR: https://github.com/coder/coder/pull/20493

(cherry picked from commit 7e8fcb4b0f)
2025-10-30 10:50:38 +00:00
Susana Ferreira a7b3efb540 feat: delete pending canceled prebuilds (#20499) (#20554)
Related to PR: https://github.com/coder/coder/pull/20499

(cherry picked from commit c3e3bb58f2)
2025-10-30 10:38:14 +00:00
Cian Johnston 0b5542f933 fix: update task link AppStatus using task_id (#20543) (#20551)
Fixes https://github.com/coder/coder/issues/20515

Alternative to https://github.com/coder/coder/pull/20519

Adds `task_id` to `workspaces_expanded` view and updates the "View Task"
link in `AppStatuses` component.

NOTE: this contains a migration
(cherry picked from commit 1ebc217624)

<!--

If you have used AI to produce some or all of this PR, please ensure you
have read our [AI Contribution
guidelines](https://coder.com/docs/about/contributing/AI_CONTRIBUTING)
before submitting.

-->
2025-10-29 21:45:43 +00:00
Danielle Maywood ba14acf4e8 fix(site): fix disappearing preset selector when switching task template (#20514) (#20564)
Ensure we set `selectedPresetId` to `undefined` when we change
`selectedTemplateId` to ensure we don't end up breaking the `<Select>`
component by giving it an invalid preset id.

---

Cherry picked from 9629d873fb (#20514)
2025-10-29 21:29:54 +00:00
Danielle Maywood d0a2e6d603 fix: ensure lifecycle executor has sufficient task permissions (#20539) (#20560)
We recently made a change to the `wsbuilder` to handle task related
logic. Our test coverage for the lifecycle executor didn't handle this
scenario and so we missed that it had insufficient permissions.

This PR adds `Update` and `Read` permissions for `Task`s in the
lifecycle executor, as well as an autostart/autostop test tailored to
task workspaces to verify the change.

---

This is cherry picked from
https://github.com/coder/coder/commit/06dbadab11760fe5fbf88c5bfcac2c48e11f7862
https://github.com/coder/coder/pull/20539
2025-10-29 21:08:26 +00:00
Danny Kopping 2a22440b0e chore!: patch release v2.28 to remove aibridge experiment (#20544)
Includes stack of PRs from https://github.com/coder/coder/pull/20520

---------

Signed-off-by: Danny Kopping <danny@coder.com>
2025-10-29 15:38:30 -04:00
109 changed files with 2409 additions and 1108 deletions
+1 -1
View File
@@ -4,7 +4,7 @@ description: |
inputs:
version:
description: "The Go version to use."
default: "1.24.6"
default: "1.24.10"
use-preinstalled-go:
description: "Whether to use preinstalled Go."
default: "false"
@@ -1,34 +0,0 @@
app = "sao-paulo-coder"
primary_region = "gru"
[experimental]
entrypoint = ["/bin/sh", "-c", "CODER_DERP_SERVER_RELAY_URL=\"http://[${FLY_PRIVATE_IP}]:3000\" /opt/coder wsproxy server"]
auto_rollback = true
[build]
image = "ghcr.io/coder/coder-preview:main"
[env]
CODER_ACCESS_URL = "https://sao-paulo.fly.dev.coder.com"
CODER_HTTP_ADDRESS = "0.0.0.0:3000"
CODER_PRIMARY_ACCESS_URL = "https://dev.coder.com"
CODER_WILDCARD_ACCESS_URL = "*--apps.sao-paulo.fly.dev.coder.com"
CODER_VERBOSE = "true"
[http_service]
internal_port = 3000
force_https = true
auto_stop_machines = true
auto_start_machines = true
min_machines_running = 0
# Ref: https://fly.io/docs/reference/configuration/#http_service-concurrency
[http_service.concurrency]
type = "requests"
soft_limit = 50
hard_limit = 100
[[vm]]
cpu_kind = "shared"
cpus = 2
memory_mb = 512
-2
View File
@@ -163,12 +163,10 @@ jobs:
run: |
flyctl deploy --image "$IMAGE" --app paris-coder --config ./.github/fly-wsproxies/paris-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_PARIS" --yes
flyctl deploy --image "$IMAGE" --app sydney-coder --config ./.github/fly-wsproxies/sydney-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_SYDNEY" --yes
flyctl deploy --image "$IMAGE" --app sao-paulo-coder --config ./.github/fly-wsproxies/sao-paulo-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_SAO_PAULO" --yes
flyctl deploy --image "$IMAGE" --app jnb-coder --config ./.github/fly-wsproxies/jnb-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_JNB" --yes
env:
FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }}
IMAGE: ${{ inputs.image }}
TOKEN_PARIS: ${{ secrets.FLY_PARIS_CODER_PROXY_SESSION_TOKEN }}
TOKEN_SYDNEY: ${{ secrets.FLY_SYDNEY_CODER_PROXY_SESSION_TOKEN }}
TOKEN_SAO_PAULO: ${{ secrets.FLY_SAO_PAULO_CODER_PROXY_SESSION_TOKEN }}
TOKEN_JNB: ${{ secrets.FLY_JNB_CODER_PROXY_SESSION_TOKEN }}
+1 -1
View File
@@ -40,7 +40,7 @@ jobs:
with:
# Pinning to 2.28 here, as Nix gets a "error: [json.exception.type_error.302] type must be array, but is string"
# on version 2.29 and above.
nix_version: "2.28.4"
nix_version: "2.28.5"
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
with:
+8 -8
View File
@@ -636,8 +636,8 @@ TAILNETTEST_MOCKS := \
tailnet/tailnettest/subscriptionmock.go
AIBRIDGED_MOCKS := \
enterprise/x/aibridged/aibridgedmock/clientmock.go \
enterprise/x/aibridged/aibridgedmock/poolmock.go
enterprise/aibridged/aibridgedmock/clientmock.go \
enterprise/aibridged/aibridgedmock/poolmock.go
GEN_FILES := \
tailnet/proto/tailnet.pb.go \
@@ -645,7 +645,7 @@ GEN_FILES := \
provisionersdk/proto/provisioner.pb.go \
provisionerd/proto/provisionerd.pb.go \
vpn/vpn.pb.go \
enterprise/x/aibridged/proto/aibridged.pb.go \
enterprise/aibridged/proto/aibridged.pb.go \
$(DB_GEN_FILES) \
$(SITE_GEN_FILES) \
coderd/rbac/object_gen.go \
@@ -697,7 +697,7 @@ gen/mark-fresh:
provisionersdk/proto/provisioner.pb.go \
provisionerd/proto/provisionerd.pb.go \
vpn/vpn.pb.go \
enterprise/x/aibridged/proto/aibridged.pb.go \
enterprise/aibridged/proto/aibridged.pb.go \
coderd/database/dump.sql \
$(DB_GEN_FILES) \
site/src/api/typesGenerated.ts \
@@ -768,8 +768,8 @@ codersdk/workspacesdk/agentconnmock/agentconnmock.go: codersdk/workspacesdk/agen
go generate ./codersdk/workspacesdk/agentconnmock/
touch "$@"
$(AIBRIDGED_MOCKS): enterprise/x/aibridged/client.go enterprise/x/aibridged/pool.go
go generate ./enterprise/x/aibridged/aibridgedmock/
$(AIBRIDGED_MOCKS): enterprise/aibridged/client.go enterprise/aibridged/pool.go
go generate ./enterprise/aibridged/aibridgedmock/
touch "$@"
agent/agentcontainers/dcspec/dcspec_gen.go: \
@@ -822,13 +822,13 @@ vpn/vpn.pb.go: vpn/vpn.proto
--go_opt=paths=source_relative \
./vpn/vpn.proto
enterprise/x/aibridged/proto/aibridged.pb.go: enterprise/x/aibridged/proto/aibridged.proto
enterprise/aibridged/proto/aibridged.pb.go: enterprise/aibridged/proto/aibridged.proto
protoc \
--go_out=. \
--go_opt=paths=source_relative \
--go-drpc_out=. \
--go-drpc_opt=paths=source_relative \
./enterprise/x/aibridged/proto/aibridged.proto
./enterprise/aibridged/proto/aibridged.proto
site/src/api/typesGenerated.ts: site/node_modules/.installed $(wildcard scripts/apitypings/*) $(shell find ./codersdk $(FIND_EXCLUSIONS) -type f -name '*.go')
# -C sets the directory for the go run command
+2 -1
View File
@@ -90,6 +90,7 @@
"allow_renames": false,
"favorite": false,
"next_start_at": "====[timestamp]=====",
"is_prebuild": false
"is_prebuild": false,
"task_id": null
}
]
+35
View File
@@ -80,6 +80,41 @@ OPTIONS:
Periodically check for new releases of Coder and inform the owner. The
check is performed once per day.
AIBRIDGE OPTIONS:
--aibridge-anthropic-base-url string, $CODER_AIBRIDGE_ANTHROPIC_BASE_URL (default: https://api.anthropic.com/)
The base URL of the Anthropic API.
--aibridge-anthropic-key string, $CODER_AIBRIDGE_ANTHROPIC_KEY
The key to authenticate against the Anthropic API.
--aibridge-bedrock-access-key string, $CODER_AIBRIDGE_BEDROCK_ACCESS_KEY
The access key to authenticate against the AWS Bedrock API.
--aibridge-bedrock-access-key-secret string, $CODER_AIBRIDGE_BEDROCK_ACCESS_KEY_SECRET
The access key secret to use with the access key to authenticate
against the AWS Bedrock API.
--aibridge-bedrock-model string, $CODER_AIBRIDGE_BEDROCK_MODEL (default: global.anthropic.claude-sonnet-4-5-20250929-v1:0)
The model to use when making requests to the AWS Bedrock API.
--aibridge-bedrock-region string, $CODER_AIBRIDGE_BEDROCK_REGION
The AWS Bedrock API region.
--aibridge-bedrock-small-fastmodel string, $CODER_AIBRIDGE_BEDROCK_SMALL_FAST_MODEL (default: global.anthropic.claude-haiku-4-5-20251001-v1:0)
The small fast model to use when making requests to the AWS Bedrock
API. Claude Code uses Haiku-class models to perform background tasks.
See
https://docs.claude.com/en/docs/claude-code/settings#environment-variables.
--aibridge-enabled bool, $CODER_AIBRIDGE_ENABLED (default: false)
Whether to start an in-memory aibridged instance.
--aibridge-openai-base-url string, $CODER_AIBRIDGE_OPENAI_BASE_URL (default: https://api.openai.com/v1/)
The base URL of the OpenAI API.
--aibridge-openai-key string, $CODER_AIBRIDGE_OPENAI_KEY
The key to authenticate against the OpenAI API.
CLIENT OPTIONS:
These options change the behavior of how clients interact with the Coder.
Clients include the Coder CLI, Coder Desktop, IDE extensions, and the web UI.
+1 -2
View File
@@ -714,8 +714,7 @@ workspace_prebuilds:
# (default: 3, type: int)
failure_hard_limit: 3
aibridge:
# Whether to start an in-memory aibridged instance ("aibridge" experiment must be
# enabled, too).
# Whether to start an in-memory aibridged instance.
# (default: false, type: bool)
enabled: false
# The base URL of the OpenAI API.
+1 -1
View File
@@ -143,7 +143,7 @@ func (api *API) tasksCreate(rw http.ResponseWriter, r *http.Request) {
if !templateVersion.HasAITask.Valid || !templateVersion.HasAITask.Bool {
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
Message: fmt.Sprintf(`Template does not have required parameter %q`, codersdk.AITaskPromptParameterName),
Message: `Template does not have a valid "coder_ai_task" resource.`,
})
return
}
+58 -6
View File
@@ -259,6 +259,9 @@ func TestTasks(t *testing.T) {
// Wait for the workspace to be built.
workspace, err := client.Workspace(ctx, task.WorkspaceID.UUID)
require.NoError(t, err)
if assert.True(t, workspace.TaskID.Valid, "task id should be set on workspace") {
assert.Equal(t, task.ID, workspace.TaskID.UUID, "workspace task id should match")
}
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
// List tasks via experimental API and verify the prompt and status mapping.
@@ -297,6 +300,9 @@ func TestTasks(t *testing.T) {
// Get the workspace and wait for it to be ready.
ws, err := client.Workspace(ctx, task.WorkspaceID.UUID)
require.NoError(t, err)
if assert.True(t, ws.TaskID.Valid, "task id should be set on workspace") {
assert.Equal(t, task.ID, ws.TaskID.UUID, "workspace task id should match")
}
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
ws = coderdtest.MustWorkspace(t, client, task.WorkspaceID.UUID)
// Assert invariant: the workspace has exactly one resource with one agent with one app.
@@ -371,6 +377,9 @@ func TestTasks(t *testing.T) {
require.True(t, task.WorkspaceID.Valid, "task should have a workspace ID")
ws, err := client.Workspace(ctx, task.WorkspaceID.UUID)
require.NoError(t, err)
if assert.True(t, ws.TaskID.Valid, "task id should be set on workspace") {
assert.Equal(t, task.ID, ws.TaskID.UUID, "workspace task id should match")
}
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
err = exp.DeleteTask(ctx, "me", task.ID)
@@ -417,6 +426,9 @@ func TestTasks(t *testing.T) {
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
ws := coderdtest.CreateWorkspace(t, client, template.ID)
if assert.False(t, ws.TaskID.Valid, "task id should not be set on non-task workspace") {
assert.Zero(t, ws.TaskID, "non-task workspace task id should be empty")
}
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
exp := codersdk.NewExperimentalClient(client)
@@ -466,10 +478,10 @@ func TestTasks(t *testing.T) {
}
})
t.Run("NoWorkspace", func(t *testing.T) {
t.Run("DeletedWorkspace", func(t *testing.T) {
t.Parallel()
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
user := coderdtest.CreateFirstUser(t, client)
template := createAITemplate(t, client, user)
ctx := testutil.Context(t, testutil.WaitLong)
@@ -483,14 +495,54 @@ func TestTasks(t *testing.T) {
ws, err := client.Workspace(ctx, task.WorkspaceID.UUID)
require.NoError(t, err)
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
// Delete the task workspace
coderdtest.MustTransitionWorkspace(t, client, ws.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionDelete)
// We should still be able to fetch the task after deleting its workspace
// Mark the workspace as deleted directly in the database, bypassing provisionerd.
require.NoError(t, db.UpdateWorkspaceDeletedByID(dbauthz.AsProvisionerd(ctx), database.UpdateWorkspaceDeletedByIDParams{
ID: ws.ID,
Deleted: true,
}))
// We should still be able to fetch the task if its workspace was deleted.
// Provisionerdserver will attempt delete the related task when deleting a workspace.
// This test ensures that we can still handle the case where, for some reason, the
// task has not been marked as deleted, but the workspace has.
task, err = exp.TaskByID(ctx, task.ID)
require.NoError(t, err, "fetching a task should still work after deleting its related workspace")
require.NoError(t, err, "fetching a task should still work if its related workspace is deleted")
err = exp.DeleteTask(ctx, task.OwnerID.String(), task.ID)
require.NoError(t, err, "should be possible to delete a task with no workspace")
})
t.Run("DeletingTaskWorkspaceDeletesTask", func(t *testing.T) {
t.Parallel()
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
user := coderdtest.CreateFirstUser(t, client)
template := createAITemplate(t, client, user)
ctx := testutil.Context(t, testutil.WaitLong)
exp := codersdk.NewExperimentalClient(client)
task, err := exp.CreateTask(ctx, "me", codersdk.CreateTaskRequest{
TemplateVersionID: template.ActiveVersionID,
Input: "delete me",
})
require.NoError(t, err)
require.True(t, task.WorkspaceID.Valid, "task should have a workspace ID")
ws, err := client.Workspace(ctx, task.WorkspaceID.UUID)
require.NoError(t, err)
if assert.True(t, ws.TaskID.Valid, "task id should be set on workspace") {
assert.Equal(t, task.ID, ws.TaskID.UUID, "workspace task id should match")
}
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, ws.LatestBuild.ID)
// When; the task workspace is deleted
coderdtest.MustTransitionWorkspace(t, client, ws.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionDelete)
// Then: the task associated with the workspace is also deleted
_, err = exp.TaskByID(ctx, task.ID)
require.Error(t, err, "expected an error fetching the task")
var sdkErr *codersdk.Error
require.ErrorAs(t, err, &sdkErr, "expected a codersdk.Error")
require.Equal(t, http.StatusNotFound, sdkErr.StatusCode())
})
})
t.Run("Send", func(t *testing.T) {
+12 -11
View File
@@ -85,7 +85,7 @@ const docTemplate = `{
}
}
},
"/api/experimental/aibridge/interceptions": {
"/aibridge/interceptions": {
"get": {
"security": [
{
@@ -14316,11 +14316,9 @@ const docTemplate = `{
"web-push",
"oauth2",
"mcp-server-http",
"workspace-sharing",
"aibridge"
"workspace-sharing"
],
"x-enum-comments": {
"ExperimentAIBridge": "Enables AI Bridge functionality.",
"ExperimentAutoFillParameters": "This should not be taken out of experiments until we have redesigned the feature.",
"ExperimentExample": "This isn't used for anything.",
"ExperimentMCPServerHTTP": "Enables the MCP HTTP server functionality.",
@@ -14338,8 +14336,7 @@ const docTemplate = `{
"ExperimentWebPush",
"ExperimentOAuth2",
"ExperimentMCPServerHTTP",
"ExperimentWorkspaceSharing",
"ExperimentAIBridge"
"ExperimentWorkspaceSharing"
]
},
"codersdk.ExternalAPIKeyScopes": {
@@ -19715,6 +19712,14 @@ const docTemplate = `{
"description": "OwnerName is the username of the owner of the workspace.",
"type": "string"
},
"task_id": {
"description": "TaskID, if set, indicates that the workspace is relevant to the given codersdk.Task.",
"allOf": [
{
"$ref": "#/definitions/uuid.NullUUID"
}
]
},
"template_active_version_id": {
"type": "string",
"format": "uuid"
@@ -20522,7 +20527,7 @@ const docTemplate = `{
"type": "object",
"properties": {
"ai_task_sidebar_app_id": {
"description": "Deprecated: This field has been replaced with ` + "`" + `TaskAppID` + "`" + `",
"description": "Deprecated: This field has been replaced with ` + "`" + `Task.WorkspaceAppID` + "`" + `",
"type": "string",
"format": "uuid"
},
@@ -20604,10 +20609,6 @@ const docTemplate = `{
}
]
},
"task_app_id": {
"type": "string",
"format": "uuid"
},
"template_version_id": {
"type": "string",
"format": "uuid"
+12 -11
View File
@@ -65,7 +65,7 @@
}
}
},
"/api/experimental/aibridge/interceptions": {
"/aibridge/interceptions": {
"get": {
"security": [
{
@@ -12923,11 +12923,9 @@
"web-push",
"oauth2",
"mcp-server-http",
"workspace-sharing",
"aibridge"
"workspace-sharing"
],
"x-enum-comments": {
"ExperimentAIBridge": "Enables AI Bridge functionality.",
"ExperimentAutoFillParameters": "This should not be taken out of experiments until we have redesigned the feature.",
"ExperimentExample": "This isn't used for anything.",
"ExperimentMCPServerHTTP": "Enables the MCP HTTP server functionality.",
@@ -12945,8 +12943,7 @@
"ExperimentWebPush",
"ExperimentOAuth2",
"ExperimentMCPServerHTTP",
"ExperimentWorkspaceSharing",
"ExperimentAIBridge"
"ExperimentWorkspaceSharing"
]
},
"codersdk.ExternalAPIKeyScopes": {
@@ -18101,6 +18098,14 @@
"description": "OwnerName is the username of the owner of the workspace.",
"type": "string"
},
"task_id": {
"description": "TaskID, if set, indicates that the workspace is relevant to the given codersdk.Task.",
"allOf": [
{
"$ref": "#/definitions/uuid.NullUUID"
}
]
},
"template_active_version_id": {
"type": "string",
"format": "uuid"
@@ -18856,7 +18861,7 @@
"type": "object",
"properties": {
"ai_task_sidebar_app_id": {
"description": "Deprecated: This field has been replaced with `TaskAppID`",
"description": "Deprecated: This field has been replaced with `Task.WorkspaceAppID`",
"type": "string",
"format": "uuid"
},
@@ -18934,10 +18939,6 @@
}
]
},
"task_app_id": {
"type": "string",
"format": "uuid"
},
"template_version_id": {
"type": "string",
"format": "uuid"
+2 -2
View File
@@ -509,11 +509,11 @@ func (api *API) auditLogResourceLink(ctx context.Context, alog database.GetAudit
if err != nil {
return ""
}
workspace, err := api.Database.GetWorkspaceByID(ctx, task.WorkspaceID.UUID)
user, err := api.Database.GetUserByID(ctx, task.OwnerID)
if err != nil {
return ""
}
return fmt.Sprintf("/tasks/%s/%s", workspace.OwnerName, task.Name)
return fmt.Sprintf("/tasks/%s/%s", user.Username, task.ID)
default:
return ""
+172
View File
@@ -1764,3 +1764,175 @@ func TestExecutorAutostartSkipsWhenNoProvisionersAvailable(t *testing.T) {
assert.Len(t, stats.Transitions, 1, "should create builds when provisioners are available")
}
func TestExecutorTaskWorkspace(t *testing.T) {
t.Parallel()
createTaskTemplate := func(t *testing.T, client *codersdk.Client, orgID uuid.UUID, ctx context.Context, defaultTTL time.Duration) codersdk.Template {
t.Helper()
taskAppID := uuid.New()
version := coderdtest.CreateTemplateVersion(t, client, orgID, &echo.Responses{
Parse: echo.ParseComplete,
ProvisionPlan: []*proto.Response{
{
Type: &proto.Response_Plan{
Plan: &proto.PlanComplete{HasAiTasks: true},
},
},
},
ProvisionApply: []*proto.Response{
{
Type: &proto.Response_Apply{
Apply: &proto.ApplyComplete{
Resources: []*proto.Resource{
{
Agents: []*proto.Agent{
{
Id: uuid.NewString(),
Name: "dev",
Auth: &proto.Agent_Token{
Token: uuid.NewString(),
},
Apps: []*proto.App{
{
Id: taskAppID.String(),
Slug: "task-app",
},
},
},
},
},
},
AiTasks: []*proto.AITask{
{
AppId: taskAppID.String(),
},
},
},
},
},
},
})
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
template := coderdtest.CreateTemplate(t, client, orgID, version.ID)
if defaultTTL > 0 {
_, err := client.UpdateTemplateMeta(ctx, template.ID, codersdk.UpdateTemplateMeta{
DefaultTTLMillis: defaultTTL.Milliseconds(),
})
require.NoError(t, err)
}
return template
}
createTaskWorkspace := func(t *testing.T, client *codersdk.Client, template codersdk.Template, ctx context.Context, input string) codersdk.Workspace {
t.Helper()
exp := codersdk.NewExperimentalClient(client)
task, err := exp.CreateTask(ctx, "me", codersdk.CreateTaskRequest{
TemplateVersionID: template.ActiveVersionID,
Input: input,
})
require.NoError(t, err)
require.True(t, task.WorkspaceID.Valid, "task should have a workspace")
workspace, err := client.Workspace(ctx, task.WorkspaceID.UUID)
require.NoError(t, err)
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
return workspace
}
t.Run("Autostart", func(t *testing.T) {
t.Parallel()
var (
ctx = testutil.Context(t, testutil.WaitShort)
sched = mustSchedule(t, "CRON_TZ=UTC 0 * * * *")
tickCh = make(chan time.Time)
statsCh = make(chan autobuild.Stats)
client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
AutobuildTicker: tickCh,
IncludeProvisionerDaemon: true,
AutobuildStats: statsCh,
})
admin = coderdtest.CreateFirstUser(t, client)
)
// Given: A task workspace
template := createTaskTemplate(t, client, admin.OrganizationID, ctx, 0)
workspace := createTaskWorkspace(t, client, template, ctx, "test task for autostart")
// Given: The task workspace has an autostart schedule
err := client.UpdateWorkspaceAutostart(ctx, workspace.ID, codersdk.UpdateWorkspaceAutostartRequest{
Schedule: ptr.Ref(sched.String()),
})
require.NoError(t, err)
// Given: That the workspace is in a stopped state.
workspace = coderdtest.MustTransitionWorkspace(t, client, workspace.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionStop)
p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, map[string]string{})
require.NoError(t, err)
// When: the autobuild executor ticks after the scheduled time
go func() {
tickTime := sched.Next(workspace.LatestBuild.CreatedAt)
coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
tickCh <- tickTime
close(tickCh)
}()
// Then: We expect to see a start transition
stats := <-statsCh
require.Len(t, stats.Transitions, 1, "lifecycle executor should transition the task workspace")
assert.Contains(t, stats.Transitions, workspace.ID, "task workspace should be in transitions")
assert.Equal(t, database.WorkspaceTransitionStart, stats.Transitions[workspace.ID], "should autostart the workspace")
require.Empty(t, stats.Errors, "should have no errors when managing task workspaces")
})
t.Run("Autostop", func(t *testing.T) {
t.Parallel()
var (
ctx = testutil.Context(t, testutil.WaitShort)
tickCh = make(chan time.Time)
statsCh = make(chan autobuild.Stats)
client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
AutobuildTicker: tickCh,
IncludeProvisionerDaemon: true,
AutobuildStats: statsCh,
})
admin = coderdtest.CreateFirstUser(t, client)
)
// Given: A task workspace with an 8 hour deadline
template := createTaskTemplate(t, client, admin.OrganizationID, ctx, 8*time.Hour)
workspace := createTaskWorkspace(t, client, template, ctx, "test task for autostop")
// Given: The workspace is currently running
workspace = coderdtest.MustWorkspace(t, client, workspace.ID)
require.Equal(t, codersdk.WorkspaceTransitionStart, workspace.LatestBuild.Transition)
require.NotZero(t, workspace.LatestBuild.Deadline, "workspace should have a deadline for autostop")
p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, map[string]string{})
require.NoError(t, err)
// When: the autobuild executor ticks after the deadline
go func() {
tickTime := workspace.LatestBuild.Deadline.Time.Add(time.Minute)
coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
tickCh <- tickTime
close(tickCh)
}()
// Then: We expect to see a stop transition
stats := <-statsCh
require.Len(t, stats.Transitions, 1, "lifecycle executor should transition the task workspace")
assert.Contains(t, stats.Transitions, workspace.ID, "task workspace should be in transitions")
assert.Equal(t, database.WorkspaceTransitionStop, stats.Transitions[workspace.ID], "should autostop the workspace")
require.Empty(t, stats.Errors, "should have no errors when managing task workspaces")
})
}
+12 -4
View File
@@ -219,8 +219,8 @@ var (
rbac.ResourceUser.Type: {policy.ActionRead, policy.ActionReadPersonal, policy.ActionUpdatePersonal},
rbac.ResourceWorkspaceDormant.Type: {policy.ActionDelete, policy.ActionRead, policy.ActionUpdate, policy.ActionWorkspaceStop},
rbac.ResourceWorkspace.Type: {policy.ActionDelete, policy.ActionRead, policy.ActionUpdate, policy.ActionWorkspaceStart, policy.ActionWorkspaceStop, policy.ActionCreateAgent},
// Provisionerd needs to read and update tasks associated with workspaces.
rbac.ResourceTask.Type: {policy.ActionRead, policy.ActionUpdate},
// Provisionerd needs to read, update, and delete tasks associated with workspaces.
rbac.ResourceTask.Type: {policy.ActionRead, policy.ActionUpdate, policy.ActionDelete},
rbac.ResourceApiKey.Type: {policy.WildcardSymbol},
// When org scoped provisioner credentials are implemented,
// this can be reduced to read a specific org.
@@ -254,6 +254,7 @@ var (
rbac.ResourceFile.Type: {policy.ActionRead}, // Required to read terraform files
rbac.ResourceNotificationMessage.Type: {policy.ActionCreate, policy.ActionRead},
rbac.ResourceSystem.Type: {policy.WildcardSymbol},
rbac.ResourceTask.Type: {policy.ActionRead, policy.ActionUpdate},
rbac.ResourceTemplate.Type: {policy.ActionRead, policy.ActionUpdate},
rbac.ResourceUser.Type: {policy.ActionRead},
rbac.ResourceWorkspace.Type: {policy.ActionDelete, policy.ActionRead, policy.ActionUpdate, policy.ActionWorkspaceStart, policy.ActionWorkspaceStop},
@@ -2648,6 +2649,13 @@ func (q *querier) GetOrganizationsByUserID(ctx context.Context, userID database.
return fetchWithPostFilter(q.auth, policy.ActionRead, q.db.GetOrganizationsByUserID)(ctx, userID)
}
func (q *querier) GetOrganizationsWithPrebuildStatus(ctx context.Context, arg database.GetOrganizationsWithPrebuildStatusParams) ([]database.GetOrganizationsWithPrebuildStatusRow, error) {
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceOrganization.All()); err != nil {
return nil, err
}
return q.db.GetOrganizationsWithPrebuildStatus(ctx, arg)
}
func (q *querier) GetParameterSchemasByJobID(ctx context.Context, jobID uuid.UUID) ([]database.ParameterSchema, error) {
version, err := q.db.GetTemplateVersionByJobID(ctx, jobID)
if err != nil {
@@ -4933,10 +4941,10 @@ func (q *querier) UpdateOrganizationDeletedByID(ctx context.Context, arg databas
return deleteQ(q.log, q.auth, q.db.GetOrganizationByID, deleteF)(ctx, arg.ID)
}
func (q *querier) UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg database.UpdatePrebuildProvisionerJobWithCancelParams) ([]uuid.UUID, error) {
func (q *querier) UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg database.UpdatePrebuildProvisionerJobWithCancelParams) ([]database.UpdatePrebuildProvisionerJobWithCancelRow, error) {
// Prebuild operation for canceling pending prebuild jobs from non-active template versions
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourcePrebuiltWorkspace); err != nil {
return []uuid.UUID{}, err
return []database.UpdatePrebuildProvisionerJobWithCancelRow{}, err
}
return q.db.UpdatePrebuildProvisionerJobWithCancel(ctx, arg)
}
+14 -3
View File
@@ -646,10 +646,13 @@ func (s *MethodTestSuite) TestProvisionerJob() {
PresetID: uuid.NullUUID{UUID: uuid.New(), Valid: true},
Now: dbtime.Now(),
}
jobIDs := []uuid.UUID{uuid.New(), uuid.New()}
canceledJobs := []database.UpdatePrebuildProvisionerJobWithCancelRow{
{ID: uuid.New(), WorkspaceID: uuid.New(), TemplateID: uuid.New(), TemplateVersionPresetID: uuid.NullUUID{UUID: uuid.New(), Valid: true}},
{ID: uuid.New(), WorkspaceID: uuid.New(), TemplateID: uuid.New(), TemplateVersionPresetID: uuid.NullUUID{UUID: uuid.New(), Valid: true}},
}
dbm.EXPECT().UpdatePrebuildProvisionerJobWithCancel(gomock.Any(), arg).Return(jobIDs, nil).AnyTimes()
check.Args(arg).Asserts(rbac.ResourcePrebuiltWorkspace, policy.ActionUpdate).Returns(jobIDs)
dbm.EXPECT().UpdatePrebuildProvisionerJobWithCancel(gomock.Any(), arg).Return(canceledJobs, nil).AnyTimes()
check.Args(arg).Asserts(rbac.ResourcePrebuiltWorkspace, policy.ActionUpdate).Returns(canceledJobs)
}))
s.Run("GetProvisionerJobsByIDs", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
org := testutil.Fake(s.T(), faker, database.Organization{})
@@ -3756,6 +3759,14 @@ func (s *MethodTestSuite) TestPrebuilds() {
dbm.EXPECT().GetPrebuildMetrics(gomock.Any()).Return([]database.GetPrebuildMetricsRow{}, nil).AnyTimes()
check.Args().Asserts(rbac.ResourceWorkspace.All(), policy.ActionRead)
}))
s.Run("GetOrganizationsWithPrebuildStatus", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
arg := database.GetOrganizationsWithPrebuildStatusParams{
UserID: uuid.New(),
GroupName: "test",
}
dbm.EXPECT().GetOrganizationsWithPrebuildStatus(gomock.Any(), arg).Return([]database.GetOrganizationsWithPrebuildStatusRow{}, nil).AnyTimes()
check.Args(arg).Asserts(rbac.ResourceOrganization.All(), policy.ActionRead)
}))
s.Run("GetPrebuildsSettings", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
dbm.EXPECT().GetPrebuildsSettings(gomock.Any()).Return("{}", nil).AnyTimes()
check.Args().Asserts()
+8 -1
View File
@@ -1243,6 +1243,13 @@ func (m queryMetricsStore) GetOrganizationsByUserID(ctx context.Context, userID
return organizations, err
}
func (m queryMetricsStore) GetOrganizationsWithPrebuildStatus(ctx context.Context, arg database.GetOrganizationsWithPrebuildStatusParams) ([]database.GetOrganizationsWithPrebuildStatusRow, error) {
start := time.Now()
r0, r1 := m.s.GetOrganizationsWithPrebuildStatus(ctx, arg)
m.queryLatencies.WithLabelValues("GetOrganizationsWithPrebuildStatus").Observe(time.Since(start).Seconds())
return r0, r1
}
func (m queryMetricsStore) GetParameterSchemasByJobID(ctx context.Context, jobID uuid.UUID) ([]database.ParameterSchema, error) {
start := time.Now()
schemas, err := m.s.GetParameterSchemasByJobID(ctx, jobID)
@@ -3042,7 +3049,7 @@ func (m queryMetricsStore) UpdateOrganizationDeletedByID(ctx context.Context, ar
return r0
}
func (m queryMetricsStore) UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg database.UpdatePrebuildProvisionerJobWithCancelParams) ([]uuid.UUID, error) {
func (m queryMetricsStore) UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg database.UpdatePrebuildProvisionerJobWithCancelParams) ([]database.UpdatePrebuildProvisionerJobWithCancelRow, error) {
start := time.Now()
r0, r1 := m.s.UpdatePrebuildProvisionerJobWithCancel(ctx, arg)
m.queryLatencies.WithLabelValues("UpdatePrebuildProvisionerJobWithCancel").Observe(time.Since(start).Seconds())
+17 -2
View File
@@ -2622,6 +2622,21 @@ func (mr *MockStoreMockRecorder) GetOrganizationsByUserID(ctx, arg any) *gomock.
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOrganizationsByUserID", reflect.TypeOf((*MockStore)(nil).GetOrganizationsByUserID), ctx, arg)
}
// GetOrganizationsWithPrebuildStatus mocks base method.
func (m *MockStore) GetOrganizationsWithPrebuildStatus(ctx context.Context, arg database.GetOrganizationsWithPrebuildStatusParams) ([]database.GetOrganizationsWithPrebuildStatusRow, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetOrganizationsWithPrebuildStatus", ctx, arg)
ret0, _ := ret[0].([]database.GetOrganizationsWithPrebuildStatusRow)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetOrganizationsWithPrebuildStatus indicates an expected call of GetOrganizationsWithPrebuildStatus.
func (mr *MockStoreMockRecorder) GetOrganizationsWithPrebuildStatus(ctx, arg any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOrganizationsWithPrebuildStatus", reflect.TypeOf((*MockStore)(nil).GetOrganizationsWithPrebuildStatus), ctx, arg)
}
// GetParameterSchemasByJobID mocks base method.
func (m *MockStore) GetParameterSchemasByJobID(ctx context.Context, jobID uuid.UUID) ([]database.ParameterSchema, error) {
m.ctrl.T.Helper()
@@ -6540,10 +6555,10 @@ func (mr *MockStoreMockRecorder) UpdateOrganizationDeletedByID(ctx, arg any) *go
}
// UpdatePrebuildProvisionerJobWithCancel mocks base method.
func (m *MockStore) UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg database.UpdatePrebuildProvisionerJobWithCancelParams) ([]uuid.UUID, error) {
func (m *MockStore) UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg database.UpdatePrebuildProvisionerJobWithCancelParams) ([]database.UpdatePrebuildProvisionerJobWithCancelRow, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdatePrebuildProvisionerJobWithCancel", ctx, arg)
ret0, _ := ret[0].([]uuid.UUID)
ret0, _ := ret[0].([]database.UpdatePrebuildProvisionerJobWithCancelRow)
ret1, _ := ret[1].(error)
return ret0, ret1
}
+5 -3
View File
@@ -2922,11 +2922,13 @@ CREATE VIEW workspaces_expanded AS
templates.name AS template_name,
templates.display_name AS template_display_name,
templates.icon AS template_icon,
templates.description AS template_description
FROM (((workspaces
templates.description AS template_description,
tasks.id AS task_id
FROM ((((workspaces
JOIN visible_users ON ((workspaces.owner_id = visible_users.id)))
JOIN organizations ON ((workspaces.organization_id = organizations.id)))
JOIN templates ON ((workspaces.template_id = templates.id)));
JOIN templates ON ((workspaces.template_id = templates.id)))
LEFT JOIN tasks ON ((workspaces.id = tasks.workspace_id)));
COMMENT ON VIEW workspaces_expanded IS 'Joins in the display name information such as username, avatar, and organization name.';
@@ -0,0 +1,39 @@
DROP VIEW workspaces_expanded;
-- Recreate the view from 000354_workspace_acl.up.sql
CREATE VIEW workspaces_expanded AS
SELECT workspaces.id,
workspaces.created_at,
workspaces.updated_at,
workspaces.owner_id,
workspaces.organization_id,
workspaces.template_id,
workspaces.deleted,
workspaces.name,
workspaces.autostart_schedule,
workspaces.ttl,
workspaces.last_used_at,
workspaces.dormant_at,
workspaces.deleting_at,
workspaces.automatic_updates,
workspaces.favorite,
workspaces.next_start_at,
workspaces.group_acl,
workspaces.user_acl,
visible_users.avatar_url AS owner_avatar_url,
visible_users.username AS owner_username,
visible_users.name AS owner_name,
organizations.name AS organization_name,
organizations.display_name AS organization_display_name,
organizations.icon AS organization_icon,
organizations.description AS organization_description,
templates.name AS template_name,
templates.display_name AS template_display_name,
templates.icon AS template_icon,
templates.description AS template_description
FROM (((workspaces
JOIN visible_users ON ((workspaces.owner_id = visible_users.id)))
JOIN organizations ON ((workspaces.organization_id = organizations.id)))
JOIN templates ON ((workspaces.template_id = templates.id)));
COMMENT ON VIEW workspaces_expanded IS 'Joins in the display name information such as username, avatar, and organization name.';
@@ -0,0 +1,42 @@
DROP VIEW workspaces_expanded;
-- Add nullable task_id to workspaces_expanded view
CREATE VIEW workspaces_expanded AS
SELECT workspaces.id,
workspaces.created_at,
workspaces.updated_at,
workspaces.owner_id,
workspaces.organization_id,
workspaces.template_id,
workspaces.deleted,
workspaces.name,
workspaces.autostart_schedule,
workspaces.ttl,
workspaces.last_used_at,
workspaces.dormant_at,
workspaces.deleting_at,
workspaces.automatic_updates,
workspaces.favorite,
workspaces.next_start_at,
workspaces.group_acl,
workspaces.user_acl,
visible_users.avatar_url AS owner_avatar_url,
visible_users.username AS owner_username,
visible_users.name AS owner_name,
organizations.name AS organization_name,
organizations.display_name AS organization_display_name,
organizations.icon AS organization_icon,
organizations.description AS organization_description,
templates.name AS template_name,
templates.display_name AS template_display_name,
templates.icon AS template_icon,
templates.description AS template_description,
tasks.id AS task_id
FROM ((((workspaces
JOIN visible_users ON ((workspaces.owner_id = visible_users.id)))
JOIN organizations ON ((workspaces.organization_id = organizations.id)))
JOIN templates ON ((workspaces.template_id = templates.id)))
LEFT JOIN tasks ON ((workspaces.id = tasks.workspace_id)));
COMMENT ON VIEW workspaces_expanded IS 'Joins in the display name information such as username, avatar, and organization name.';
+1
View File
@@ -321,6 +321,7 @@ func (q *sqlQuerier) GetAuthorizedWorkspaces(ctx context.Context, arg GetWorkspa
&i.TemplateDisplayName,
&i.TemplateIcon,
&i.TemplateDescription,
&i.TaskID,
&i.TemplateVersionID,
&i.TemplateVersionName,
&i.LatestBuildCompletedAt,
+1
View File
@@ -4663,6 +4663,7 @@ type Workspace struct {
TemplateDisplayName string `db:"template_display_name" json:"template_display_name"`
TemplateIcon string `db:"template_icon" json:"template_icon"`
TemplateDescription string `db:"template_description" json:"template_description"`
TaskID uuid.NullUUID `db:"task_id" json:"task_id"`
}
type WorkspaceAgent struct {
+4 -1
View File
@@ -269,6 +269,9 @@ type sqlcQuerier interface {
GetOrganizationResourceCountByID(ctx context.Context, organizationID uuid.UUID) (GetOrganizationResourceCountByIDRow, error)
GetOrganizations(ctx context.Context, arg GetOrganizationsParams) ([]Organization, error)
GetOrganizationsByUserID(ctx context.Context, arg GetOrganizationsByUserIDParams) ([]Organization, error)
// GetOrganizationsWithPrebuildStatus returns organizations with prebuilds configured and their
// membership status for the prebuilds system user (org membership, group existence, group membership).
GetOrganizationsWithPrebuildStatus(ctx context.Context, arg GetOrganizationsWithPrebuildStatusParams) ([]GetOrganizationsWithPrebuildStatusRow, error)
GetParameterSchemasByJobID(ctx context.Context, jobID uuid.UUID) ([]ParameterSchema, error)
GetPrebuildMetrics(ctx context.Context) ([]GetPrebuildMetricsRow, error)
GetPrebuildsSettings(ctx context.Context) (string, error)
@@ -667,7 +670,7 @@ type sqlcQuerier interface {
// Cancels all pending provisioner jobs for prebuilt workspaces on a specific preset from an
// inactive template version.
// This is an optimization to clean up stale pending jobs.
UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg UpdatePrebuildProvisionerJobWithCancelParams) ([]uuid.UUID, error)
UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg UpdatePrebuildProvisionerJobWithCancelParams) ([]UpdatePrebuildProvisionerJobWithCancelRow, error)
UpdatePresetPrebuildStatus(ctx context.Context, arg UpdatePresetPrebuildStatusParams) error
UpdateProvisionerDaemonLastSeenAt(ctx context.Context, arg UpdateProvisionerDaemonLastSeenAtParams) error
UpdateProvisionerJobByID(ctx context.Context, arg UpdateProvisionerJobByIDParams) error
+130 -21
View File
@@ -8285,6 +8285,93 @@ func (q *sqlQuerier) FindMatchingPresetID(ctx context.Context, arg FindMatchingP
return template_version_preset_id, err
}
const getOrganizationsWithPrebuildStatus = `-- name: GetOrganizationsWithPrebuildStatus :many
WITH orgs_with_prebuilds AS (
-- Get unique organizations that have presets with prebuilds configured
SELECT DISTINCT o.id, o.name
FROM organizations o
INNER JOIN templates t ON t.organization_id = o.id
INNER JOIN template_versions tv ON tv.template_id = t.id
INNER JOIN template_version_presets tvp ON tvp.template_version_id = tv.id
WHERE tvp.desired_instances IS NOT NULL
),
prebuild_user_membership AS (
-- Check if the user is a member of the organizations
SELECT om.organization_id
FROM organization_members om
INNER JOIN orgs_with_prebuilds owp ON owp.id = om.organization_id
WHERE om.user_id = $1::uuid
),
prebuild_groups AS (
-- Check if the organizations have the prebuilds group
SELECT g.organization_id, g.id as group_id
FROM groups g
INNER JOIN orgs_with_prebuilds owp ON owp.id = g.organization_id
WHERE g.name = $2::text
),
prebuild_group_membership AS (
-- Check if the user is in the prebuilds group
SELECT pg.organization_id
FROM prebuild_groups pg
INNER JOIN group_members gm ON gm.group_id = pg.group_id
WHERE gm.user_id = $1::uuid
)
SELECT
owp.id AS organization_id,
owp.name AS organization_name,
(pum.organization_id IS NOT NULL)::boolean AS has_prebuild_user,
pg.group_id AS prebuilds_group_id,
(pgm.organization_id IS NOT NULL)::boolean AS has_prebuild_user_in_group
FROM orgs_with_prebuilds owp
LEFT JOIN prebuild_groups pg ON pg.organization_id = owp.id
LEFT JOIN prebuild_user_membership pum ON pum.organization_id = owp.id
LEFT JOIN prebuild_group_membership pgm ON pgm.organization_id = owp.id
`
type GetOrganizationsWithPrebuildStatusParams struct {
UserID uuid.UUID `db:"user_id" json:"user_id"`
GroupName string `db:"group_name" json:"group_name"`
}
type GetOrganizationsWithPrebuildStatusRow struct {
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
OrganizationName string `db:"organization_name" json:"organization_name"`
HasPrebuildUser bool `db:"has_prebuild_user" json:"has_prebuild_user"`
PrebuildsGroupID uuid.NullUUID `db:"prebuilds_group_id" json:"prebuilds_group_id"`
HasPrebuildUserInGroup bool `db:"has_prebuild_user_in_group" json:"has_prebuild_user_in_group"`
}
// GetOrganizationsWithPrebuildStatus returns organizations with prebuilds configured and their
// membership status for the prebuilds system user (org membership, group existence, group membership).
func (q *sqlQuerier) GetOrganizationsWithPrebuildStatus(ctx context.Context, arg GetOrganizationsWithPrebuildStatusParams) ([]GetOrganizationsWithPrebuildStatusRow, error) {
rows, err := q.db.QueryContext(ctx, getOrganizationsWithPrebuildStatus, arg.UserID, arg.GroupName)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetOrganizationsWithPrebuildStatusRow
for rows.Next() {
var i GetOrganizationsWithPrebuildStatusRow
if err := rows.Scan(
&i.OrganizationID,
&i.OrganizationName,
&i.HasPrebuildUser,
&i.PrebuildsGroupID,
&i.HasPrebuildUserInGroup,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Close(); err != nil {
return nil, err
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getPrebuildMetrics = `-- name: GetPrebuildMetrics :many
SELECT
t.name as template_name,
@@ -8687,12 +8774,8 @@ func (q *sqlQuerier) GetTemplatePresetsWithPrebuilds(ctx context.Context, templa
}
const updatePrebuildProvisionerJobWithCancel = `-- name: UpdatePrebuildProvisionerJobWithCancel :many
UPDATE provisioner_jobs
SET
canceled_at = $1::timestamptz,
completed_at = $1::timestamptz
WHERE id IN (
SELECT pj.id
WITH jobs_to_cancel AS (
SELECT pj.id, w.id AS workspace_id, w.template_id, wpb.template_version_preset_id
FROM provisioner_jobs pj
INNER JOIN workspace_prebuild_builds wpb ON wpb.job_id = pj.id
INNER JOIN workspaces w ON w.id = wpb.workspace_id
@@ -8711,7 +8794,13 @@ WHERE id IN (
AND pj.canceled_at IS NULL
AND pj.completed_at IS NULL
)
RETURNING id
UPDATE provisioner_jobs
SET
canceled_at = $1::timestamptz,
completed_at = $1::timestamptz
FROM jobs_to_cancel
WHERE provisioner_jobs.id = jobs_to_cancel.id
RETURNING jobs_to_cancel.id, jobs_to_cancel.workspace_id, jobs_to_cancel.template_id, jobs_to_cancel.template_version_preset_id
`
type UpdatePrebuildProvisionerJobWithCancelParams struct {
@@ -8719,22 +8808,34 @@ type UpdatePrebuildProvisionerJobWithCancelParams struct {
PresetID uuid.NullUUID `db:"preset_id" json:"preset_id"`
}
type UpdatePrebuildProvisionerJobWithCancelRow struct {
ID uuid.UUID `db:"id" json:"id"`
WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"`
TemplateID uuid.UUID `db:"template_id" json:"template_id"`
TemplateVersionPresetID uuid.NullUUID `db:"template_version_preset_id" json:"template_version_preset_id"`
}
// Cancels all pending provisioner jobs for prebuilt workspaces on a specific preset from an
// inactive template version.
// This is an optimization to clean up stale pending jobs.
func (q *sqlQuerier) UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg UpdatePrebuildProvisionerJobWithCancelParams) ([]uuid.UUID, error) {
func (q *sqlQuerier) UpdatePrebuildProvisionerJobWithCancel(ctx context.Context, arg UpdatePrebuildProvisionerJobWithCancelParams) ([]UpdatePrebuildProvisionerJobWithCancelRow, error) {
rows, err := q.db.QueryContext(ctx, updatePrebuildProvisionerJobWithCancel, arg.Now, arg.PresetID)
if err != nil {
return nil, err
}
defer rows.Close()
var items []uuid.UUID
var items []UpdatePrebuildProvisionerJobWithCancelRow
for rows.Next() {
var id uuid.UUID
if err := rows.Scan(&id); err != nil {
var i UpdatePrebuildProvisionerJobWithCancelRow
if err := rows.Scan(
&i.ID,
&i.WorkspaceID,
&i.TemplateID,
&i.TemplateVersionPresetID,
); err != nil {
return nil, err
}
items = append(items, id)
items = append(items, i)
}
if err := rows.Close(); err != nil {
return nil, err
@@ -21826,7 +21927,7 @@ func (q *sqlQuerier) GetWorkspaceACLByID(ctx context.Context, id uuid.UUID) (Get
const getWorkspaceByAgentID = `-- name: GetWorkspaceByAgentID :one
SELECT
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description, task_id
FROM
workspaces_expanded as workspaces
WHERE
@@ -21887,13 +21988,14 @@ func (q *sqlQuerier) GetWorkspaceByAgentID(ctx context.Context, agentID uuid.UUI
&i.TemplateDisplayName,
&i.TemplateIcon,
&i.TemplateDescription,
&i.TaskID,
)
return i, err
}
const getWorkspaceByID = `-- name: GetWorkspaceByID :one
SELECT
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description, task_id
FROM
workspaces_expanded
WHERE
@@ -21935,13 +22037,14 @@ func (q *sqlQuerier) GetWorkspaceByID(ctx context.Context, id uuid.UUID) (Worksp
&i.TemplateDisplayName,
&i.TemplateIcon,
&i.TemplateDescription,
&i.TaskID,
)
return i, err
}
const getWorkspaceByOwnerIDAndName = `-- name: GetWorkspaceByOwnerIDAndName :one
SELECT
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description, task_id
FROM
workspaces_expanded as workspaces
WHERE
@@ -21990,13 +22093,14 @@ func (q *sqlQuerier) GetWorkspaceByOwnerIDAndName(ctx context.Context, arg GetWo
&i.TemplateDisplayName,
&i.TemplateIcon,
&i.TemplateDescription,
&i.TaskID,
)
return i, err
}
const getWorkspaceByResourceID = `-- name: GetWorkspaceByResourceID :one
SELECT
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description, task_id
FROM
workspaces_expanded as workspaces
WHERE
@@ -22052,13 +22156,14 @@ func (q *sqlQuerier) GetWorkspaceByResourceID(ctx context.Context, resourceID uu
&i.TemplateDisplayName,
&i.TemplateIcon,
&i.TemplateDescription,
&i.TaskID,
)
return i, err
}
const getWorkspaceByWorkspaceAppID = `-- name: GetWorkspaceByWorkspaceAppID :one
SELECT
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, group_acl, user_acl, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description, task_id
FROM
workspaces_expanded as workspaces
WHERE
@@ -22126,6 +22231,7 @@ func (q *sqlQuerier) GetWorkspaceByWorkspaceAppID(ctx context.Context, workspace
&i.TemplateDisplayName,
&i.TemplateIcon,
&i.TemplateDescription,
&i.TaskID,
)
return i, err
}
@@ -22175,7 +22281,7 @@ SELECT
),
filtered_workspaces AS (
SELECT
workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite, workspaces.next_start_at, workspaces.group_acl, workspaces.user_acl, workspaces.owner_avatar_url, workspaces.owner_username, workspaces.owner_name, workspaces.organization_name, workspaces.organization_display_name, workspaces.organization_icon, workspaces.organization_description, workspaces.template_name, workspaces.template_display_name, workspaces.template_icon, workspaces.template_description,
workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite, workspaces.next_start_at, workspaces.group_acl, workspaces.user_acl, workspaces.owner_avatar_url, workspaces.owner_username, workspaces.owner_name, workspaces.organization_name, workspaces.organization_display_name, workspaces.organization_icon, workspaces.organization_description, workspaces.template_name, workspaces.template_display_name, workspaces.template_icon, workspaces.template_description, workspaces.task_id,
latest_build.template_version_id,
latest_build.template_version_name,
latest_build.completed_at as latest_build_completed_at,
@@ -22466,7 +22572,7 @@ WHERE
-- @authorize_filter
), filtered_workspaces_order AS (
SELECT
fw.id, fw.created_at, fw.updated_at, fw.owner_id, fw.organization_id, fw.template_id, fw.deleted, fw.name, fw.autostart_schedule, fw.ttl, fw.last_used_at, fw.dormant_at, fw.deleting_at, fw.automatic_updates, fw.favorite, fw.next_start_at, fw.group_acl, fw.user_acl, fw.owner_avatar_url, fw.owner_username, fw.owner_name, fw.organization_name, fw.organization_display_name, fw.organization_icon, fw.organization_description, fw.template_name, fw.template_display_name, fw.template_icon, fw.template_description, fw.template_version_id, fw.template_version_name, fw.latest_build_completed_at, fw.latest_build_canceled_at, fw.latest_build_error, fw.latest_build_transition, fw.latest_build_status, fw.latest_build_has_ai_task, fw.latest_build_has_external_agent
fw.id, fw.created_at, fw.updated_at, fw.owner_id, fw.organization_id, fw.template_id, fw.deleted, fw.name, fw.autostart_schedule, fw.ttl, fw.last_used_at, fw.dormant_at, fw.deleting_at, fw.automatic_updates, fw.favorite, fw.next_start_at, fw.group_acl, fw.user_acl, fw.owner_avatar_url, fw.owner_username, fw.owner_name, fw.organization_name, fw.organization_display_name, fw.organization_icon, fw.organization_description, fw.template_name, fw.template_display_name, fw.template_icon, fw.template_description, fw.task_id, fw.template_version_id, fw.template_version_name, fw.latest_build_completed_at, fw.latest_build_canceled_at, fw.latest_build_error, fw.latest_build_transition, fw.latest_build_status, fw.latest_build_has_ai_task, fw.latest_build_has_external_agent
FROM
filtered_workspaces fw
ORDER BY
@@ -22487,7 +22593,7 @@ WHERE
$25
), filtered_workspaces_order_with_summary AS (
SELECT
fwo.id, fwo.created_at, fwo.updated_at, fwo.owner_id, fwo.organization_id, fwo.template_id, fwo.deleted, fwo.name, fwo.autostart_schedule, fwo.ttl, fwo.last_used_at, fwo.dormant_at, fwo.deleting_at, fwo.automatic_updates, fwo.favorite, fwo.next_start_at, fwo.group_acl, fwo.user_acl, fwo.owner_avatar_url, fwo.owner_username, fwo.owner_name, fwo.organization_name, fwo.organization_display_name, fwo.organization_icon, fwo.organization_description, fwo.template_name, fwo.template_display_name, fwo.template_icon, fwo.template_description, fwo.template_version_id, fwo.template_version_name, fwo.latest_build_completed_at, fwo.latest_build_canceled_at, fwo.latest_build_error, fwo.latest_build_transition, fwo.latest_build_status, fwo.latest_build_has_ai_task, fwo.latest_build_has_external_agent
fwo.id, fwo.created_at, fwo.updated_at, fwo.owner_id, fwo.organization_id, fwo.template_id, fwo.deleted, fwo.name, fwo.autostart_schedule, fwo.ttl, fwo.last_used_at, fwo.dormant_at, fwo.deleting_at, fwo.automatic_updates, fwo.favorite, fwo.next_start_at, fwo.group_acl, fwo.user_acl, fwo.owner_avatar_url, fwo.owner_username, fwo.owner_name, fwo.organization_name, fwo.organization_display_name, fwo.organization_icon, fwo.organization_description, fwo.template_name, fwo.template_display_name, fwo.template_icon, fwo.template_description, fwo.task_id, fwo.template_version_id, fwo.template_version_name, fwo.latest_build_completed_at, fwo.latest_build_canceled_at, fwo.latest_build_error, fwo.latest_build_transition, fwo.latest_build_status, fwo.latest_build_has_ai_task, fwo.latest_build_has_external_agent
FROM
filtered_workspaces_order fwo
-- Return a technical summary row with total count of workspaces.
@@ -22523,6 +22629,7 @@ WHERE
'', -- template_display_name
'', -- template_icon
'', -- template_description
'00000000-0000-0000-0000-000000000000'::uuid, -- task_id
-- Extra columns added to ` + "`" + `filtered_workspaces` + "`" + `
'00000000-0000-0000-0000-000000000000'::uuid, -- template_version_id
'', -- template_version_name
@@ -22542,7 +22649,7 @@ WHERE
filtered_workspaces
)
SELECT
fwos.id, fwos.created_at, fwos.updated_at, fwos.owner_id, fwos.organization_id, fwos.template_id, fwos.deleted, fwos.name, fwos.autostart_schedule, fwos.ttl, fwos.last_used_at, fwos.dormant_at, fwos.deleting_at, fwos.automatic_updates, fwos.favorite, fwos.next_start_at, fwos.group_acl, fwos.user_acl, fwos.owner_avatar_url, fwos.owner_username, fwos.owner_name, fwos.organization_name, fwos.organization_display_name, fwos.organization_icon, fwos.organization_description, fwos.template_name, fwos.template_display_name, fwos.template_icon, fwos.template_description, fwos.template_version_id, fwos.template_version_name, fwos.latest_build_completed_at, fwos.latest_build_canceled_at, fwos.latest_build_error, fwos.latest_build_transition, fwos.latest_build_status, fwos.latest_build_has_ai_task, fwos.latest_build_has_external_agent,
fwos.id, fwos.created_at, fwos.updated_at, fwos.owner_id, fwos.organization_id, fwos.template_id, fwos.deleted, fwos.name, fwos.autostart_schedule, fwos.ttl, fwos.last_used_at, fwos.dormant_at, fwos.deleting_at, fwos.automatic_updates, fwos.favorite, fwos.next_start_at, fwos.group_acl, fwos.user_acl, fwos.owner_avatar_url, fwos.owner_username, fwos.owner_name, fwos.organization_name, fwos.organization_display_name, fwos.organization_icon, fwos.organization_description, fwos.template_name, fwos.template_display_name, fwos.template_icon, fwos.template_description, fwos.task_id, fwos.template_version_id, fwos.template_version_name, fwos.latest_build_completed_at, fwos.latest_build_canceled_at, fwos.latest_build_error, fwos.latest_build_transition, fwos.latest_build_status, fwos.latest_build_has_ai_task, fwos.latest_build_has_external_agent,
tc.count
FROM
filtered_workspaces_order_with_summary fwos
@@ -22610,6 +22717,7 @@ type GetWorkspacesRow struct {
TemplateDisplayName string `db:"template_display_name" json:"template_display_name"`
TemplateIcon string `db:"template_icon" json:"template_icon"`
TemplateDescription string `db:"template_description" json:"template_description"`
TaskID uuid.NullUUID `db:"task_id" json:"task_id"`
TemplateVersionID uuid.UUID `db:"template_version_id" json:"template_version_id"`
TemplateVersionName sql.NullString `db:"template_version_name" json:"template_version_name"`
LatestBuildCompletedAt sql.NullTime `db:"latest_build_completed_at" json:"latest_build_completed_at"`
@@ -22692,6 +22800,7 @@ func (q *sqlQuerier) GetWorkspaces(ctx context.Context, arg GetWorkspacesParams)
&i.TemplateDisplayName,
&i.TemplateIcon,
&i.TemplateDescription,
&i.TaskID,
&i.TemplateVersionID,
&i.TemplateVersionName,
&i.LatestBuildCompletedAt,
+53 -7
View File
@@ -300,12 +300,8 @@ GROUP BY wpb.template_version_preset_id;
-- Cancels all pending provisioner jobs for prebuilt workspaces on a specific preset from an
-- inactive template version.
-- This is an optimization to clean up stale pending jobs.
UPDATE provisioner_jobs
SET
canceled_at = @now::timestamptz,
completed_at = @now::timestamptz
WHERE id IN (
SELECT pj.id
WITH jobs_to_cancel AS (
SELECT pj.id, w.id AS workspace_id, w.template_id, wpb.template_version_preset_id
FROM provisioner_jobs pj
INNER JOIN workspace_prebuild_builds wpb ON wpb.job_id = pj.id
INNER JOIN workspaces w ON w.id = wpb.workspace_id
@@ -324,4 +320,54 @@ WHERE id IN (
AND pj.canceled_at IS NULL
AND pj.completed_at IS NULL
)
RETURNING id;
UPDATE provisioner_jobs
SET
canceled_at = @now::timestamptz,
completed_at = @now::timestamptz
FROM jobs_to_cancel
WHERE provisioner_jobs.id = jobs_to_cancel.id
RETURNING jobs_to_cancel.id, jobs_to_cancel.workspace_id, jobs_to_cancel.template_id, jobs_to_cancel.template_version_preset_id;
-- name: GetOrganizationsWithPrebuildStatus :many
-- GetOrganizationsWithPrebuildStatus returns organizations with prebuilds configured and their
-- membership status for the prebuilds system user (org membership, group existence, group membership).
WITH orgs_with_prebuilds AS (
-- Get unique organizations that have presets with prebuilds configured
SELECT DISTINCT o.id, o.name
FROM organizations o
INNER JOIN templates t ON t.organization_id = o.id
INNER JOIN template_versions tv ON tv.template_id = t.id
INNER JOIN template_version_presets tvp ON tvp.template_version_id = tv.id
WHERE tvp.desired_instances IS NOT NULL
),
prebuild_user_membership AS (
-- Check if the user is a member of the organizations
SELECT om.organization_id
FROM organization_members om
INNER JOIN orgs_with_prebuilds owp ON owp.id = om.organization_id
WHERE om.user_id = @user_id::uuid
),
prebuild_groups AS (
-- Check if the organizations have the prebuilds group
SELECT g.organization_id, g.id as group_id
FROM groups g
INNER JOIN orgs_with_prebuilds owp ON owp.id = g.organization_id
WHERE g.name = @group_name::text
),
prebuild_group_membership AS (
-- Check if the user is in the prebuilds group
SELECT pg.organization_id
FROM prebuild_groups pg
INNER JOIN group_members gm ON gm.group_id = pg.group_id
WHERE gm.user_id = @user_id::uuid
)
SELECT
owp.id AS organization_id,
owp.name AS organization_name,
(pum.organization_id IS NOT NULL)::boolean AS has_prebuild_user,
pg.group_id AS prebuilds_group_id,
(pgm.organization_id IS NOT NULL)::boolean AS has_prebuild_user_in_group
FROM orgs_with_prebuilds owp
LEFT JOIN prebuild_groups pg ON pg.organization_id = owp.id
LEFT JOIN prebuild_user_membership pum ON pum.organization_id = owp.id
LEFT JOIN prebuild_group_membership pgm ON pgm.organization_id = owp.id;
+1
View File
@@ -457,6 +457,7 @@ WHERE
'', -- template_display_name
'', -- template_icon
'', -- template_description
'00000000-0000-0000-0000-000000000000'::uuid, -- task_id
-- Extra columns added to `filtered_workspaces`
'00000000-0000-0000-0000-000000000000'::uuid, -- template_version_id
'', -- template_version_name
+6 -1
View File
@@ -37,13 +37,18 @@ type ReconciliationOrchestrator interface {
TrackResourceReplacement(ctx context.Context, workspaceID, buildID uuid.UUID, replacements []*sdkproto.ResourceReplacement)
}
// ReconcileStats contains statistics about a reconciliation cycle.
type ReconcileStats struct {
Elapsed time.Duration
}
type Reconciler interface {
StateSnapshotter
// ReconcileAll orchestrates the reconciliation of all prebuilds across all templates.
// It takes a global snapshot of the system state and then reconciles each preset
// in parallel, creating or deleting prebuilds as needed to reach their desired states.
ReconcileAll(ctx context.Context) error
ReconcileAll(ctx context.Context) (ReconcileStats, error)
}
// StateSnapshotter defines the operations necessary to capture workspace prebuilds state.
+5 -1
View File
@@ -17,7 +17,11 @@ func (NoopReconciler) Run(context.Context) {}
func (NoopReconciler) Stop(context.Context, error) {}
func (NoopReconciler) TrackResourceReplacement(context.Context, uuid.UUID, uuid.UUID, []*sdkproto.ResourceReplacement) {
}
func (NoopReconciler) ReconcileAll(context.Context) error { return nil }
func (NoopReconciler) ReconcileAll(context.Context) (ReconcileStats, error) {
return ReconcileStats{}, nil
}
func (NoopReconciler) SnapshotState(context.Context, database.Store) (*GlobalSnapshot, error) {
return &GlobalSnapshot{}, nil
}
@@ -2278,6 +2278,14 @@ func (s *server) completeWorkspaceBuildJob(ctx context.Context, job database.Pro
if err != nil {
return xerrors.Errorf("update workspace deleted: %w", err)
}
if workspace.TaskID.Valid {
if _, err := db.DeleteTask(ctx, database.DeleteTaskParams{
ID: workspace.TaskID.UUID,
DeletedAt: dbtime.Now(),
}); err != nil && !errors.Is(err, sql.ErrNoRows) {
return xerrors.Errorf("delete task related to workspace: %w", err)
}
}
return nil
}, nil)
+9 -1
View File
@@ -335,6 +335,15 @@ func (api *API) postWorkspaceBuilds(rw http.ResponseWriter, r *http.Request) {
return
}
// We want to allow a delete build for a deleted workspace, but not a start or stop build.
if workspace.Deleted && createBuild.Transition != codersdk.WorkspaceTransitionDelete {
httpapi.Write(ctx, rw, http.StatusConflict, codersdk.Response{
Message: fmt.Sprintf("Cannot %s a deleted workspace!", createBuild.Transition),
Detail: "This workspace has been deleted and cannot be modified.",
})
return
}
apiBuild, err := api.postWorkspaceBuildsInternal(
ctx,
apiKey,
@@ -1219,7 +1228,6 @@ func (api *API) convertWorkspaceBuild(
TemplateVersionPresetID: presetID,
HasAITask: hasAITask,
AITaskSidebarAppID: taskAppID,
TaskAppID: taskAppID,
HasExternalAgent: hasExternalAgent,
}, nil
}
+62
View File
@@ -1840,6 +1840,68 @@ func TestPostWorkspaceBuild(t *testing.T) {
require.NoError(t, err)
require.Equal(t, codersdk.BuildReasonDashboard, build.Reason)
})
t.Run("DeletedWorkspace", func(t *testing.T) {
t.Parallel()
// Given: a workspace that has already been deleted
var (
ctx = testutil.Context(t, testutil.WaitShort)
logger = slogtest.Make(t, &slogtest.Options{}).Leveled(slog.LevelError)
adminClient, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
Logger: &logger,
})
admin = coderdtest.CreateFirstUser(t, adminClient)
workspaceOwnerClient, member1 = coderdtest.CreateAnotherUser(t, adminClient, admin.OrganizationID)
otherMemberClient, _ = coderdtest.CreateAnotherUser(t, adminClient, admin.OrganizationID)
ws = dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{OwnerID: member1.ID, OrganizationID: admin.OrganizationID}).
Seed(database.WorkspaceBuild{Transition: database.WorkspaceTransitionDelete}).
Do()
)
// This needs to be done separately as provisionerd handles marking the workspace as deleted
// and we're skipping provisionerd here for speed.
require.NoError(t, db.UpdateWorkspaceDeletedByID(dbauthz.AsProvisionerd(ctx), database.UpdateWorkspaceDeletedByIDParams{
ID: ws.Workspace.ID,
Deleted: true,
}))
// Assert test invariant: Workspace should be deleted
dbWs, err := db.GetWorkspaceByID(dbauthz.AsProvisionerd(ctx), ws.Workspace.ID)
require.NoError(t, err)
require.True(t, dbWs.Deleted, "workspace should be deleted")
for _, tc := range []struct {
user *codersdk.Client
tr codersdk.WorkspaceTransition
expectStatus int
}{
// You should not be allowed to mess with a workspace you don't own, regardless of its deleted state.
{otherMemberClient, codersdk.WorkspaceTransitionStart, http.StatusNotFound},
{otherMemberClient, codersdk.WorkspaceTransitionStop, http.StatusNotFound},
{otherMemberClient, codersdk.WorkspaceTransitionDelete, http.StatusNotFound},
// Starting or stopping a workspace is not allowed when it is deleted.
{workspaceOwnerClient, codersdk.WorkspaceTransitionStart, http.StatusConflict},
{workspaceOwnerClient, codersdk.WorkspaceTransitionStop, http.StatusConflict},
// We allow a delete just in case a retry is required. In most cases, this will be a no-op.
// Note: this is the last test case because it will change the state of the workspace.
{workspaceOwnerClient, codersdk.WorkspaceTransitionDelete, http.StatusOK},
} {
// When: we create a workspace build with the given transition
_, err = tc.user.CreateWorkspaceBuild(ctx, ws.Workspace.ID, codersdk.CreateWorkspaceBuildRequest{
Transition: tc.tr,
})
// Then: we allow ONLY a delete build for a deleted workspace.
if tc.expectStatus < http.StatusBadRequest {
require.NoError(t, err, "creating a %s build for a deleted workspace should not error", tc.tr)
} else {
var apiError *codersdk.Error
require.Error(t, err, "creating a %s build for a deleted workspace should return an error", tc.tr)
require.ErrorAs(t, err, &apiError)
require.Equal(t, tc.expectStatus, apiError.StatusCode())
}
}
})
}
func TestWorkspaceBuildTimings(t *testing.T) {
+1
View File
@@ -2654,6 +2654,7 @@ func convertWorkspace(
Favorite: requesterFavorite,
NextStartAt: nextStartAt,
IsPrebuild: workspace.IsPrebuild(),
TaskID: workspace.TaskID,
}, nil
}
+1 -1
View File
@@ -391,7 +391,7 @@ func (i *InstanceIdentitySessionTokenProvider) GetSessionToken() string {
defer cancel()
resp, err := i.TokenExchanger.exchange(ctx)
if err != nil {
i.logger.Error(ctx, "failed to exchange session token: %v", err)
i.logger.Error(ctx, "failed to exchange session token", slog.Error(err))
return ""
}
i.sessionToken = resp.SessionToken
+2 -2
View File
@@ -113,8 +113,8 @@ func (f AIBridgeListInterceptionsFilter) asRequestOption() RequestOption {
// AIBridgeListInterceptions returns AIBridge interceptions with the given
// filter.
func (c *ExperimentalClient) AIBridgeListInterceptions(ctx context.Context, filter AIBridgeListInterceptionsFilter) (AIBridgeListInterceptionsResponse, error) {
res, err := c.Request(ctx, http.MethodGet, "/api/experimental/aibridge/interceptions", nil, filter.asRequestOption(), filter.Pagination.asRequestOption(), filter.Pagination.asRequestOption())
func (c *Client) AIBridgeListInterceptions(ctx context.Context, filter AIBridgeListInterceptionsFilter) (AIBridgeListInterceptionsResponse, error) {
res, err := c.Request(ctx, http.MethodGet, "/api/v2/aibridge/interceptions", nil, filter.asRequestOption(), filter.Pagination.asRequestOption(), filter.Pagination.asRequestOption())
if err != nil {
return AIBridgeListInterceptionsResponse{}, err
}
+1 -15
View File
@@ -3241,14 +3241,13 @@ Write out the current server config as YAML to stdout.`,
// AIBridge Options
{
Name: "AIBridge Enabled",
Description: fmt.Sprintf("Whether to start an in-memory aibridged instance (%q experiment must be enabled, too).", ExperimentAIBridge),
Description: "Whether to start an in-memory aibridged instance.",
Flag: "aibridge-enabled",
Env: "CODER_AIBRIDGE_ENABLED",
Value: &c.AI.BridgeConfig.Enabled,
Default: "false",
Group: &deploymentGroupAIBridge,
YAML: "enabled",
Hidden: true,
},
{
Name: "AIBridge OpenAI Base URL",
@@ -3259,7 +3258,6 @@ Write out the current server config as YAML to stdout.`,
Default: "https://api.openai.com/v1/",
Group: &deploymentGroupAIBridge,
YAML: "openai_base_url",
Hidden: true,
},
{
Name: "AIBridge OpenAI Key",
@@ -3270,7 +3268,6 @@ Write out the current server config as YAML to stdout.`,
Default: "",
Group: &deploymentGroupAIBridge,
YAML: "openai_key",
Hidden: true,
},
{
Name: "AIBridge Anthropic Base URL",
@@ -3281,7 +3278,6 @@ Write out the current server config as YAML to stdout.`,
Default: "https://api.anthropic.com/",
Group: &deploymentGroupAIBridge,
YAML: "anthropic_base_url",
Hidden: true,
},
{
Name: "AIBridge Anthropic Key",
@@ -3292,7 +3288,6 @@ Write out the current server config as YAML to stdout.`,
Default: "",
Group: &deploymentGroupAIBridge,
YAML: "anthropic_key",
Hidden: true,
},
{
Name: "AIBridge Bedrock Region",
@@ -3303,7 +3298,6 @@ Write out the current server config as YAML to stdout.`,
Default: "",
Group: &deploymentGroupAIBridge,
YAML: "bedrock_region",
Hidden: true,
},
{
Name: "AIBridge Bedrock Access Key",
@@ -3314,7 +3308,6 @@ Write out the current server config as YAML to stdout.`,
Default: "",
Group: &deploymentGroupAIBridge,
YAML: "bedrock_access_key",
Hidden: true,
},
{
Name: "AIBridge Bedrock Access Key Secret",
@@ -3325,7 +3318,6 @@ Write out the current server config as YAML to stdout.`,
Default: "",
Group: &deploymentGroupAIBridge,
YAML: "bedrock_access_key_secret",
Hidden: true,
},
{
Name: "AIBridge Bedrock Model",
@@ -3336,7 +3328,6 @@ Write out the current server config as YAML to stdout.`,
Default: "global.anthropic.claude-sonnet-4-5-20250929-v1:0", // See https://docs.claude.com/en/api/claude-on-amazon-bedrock#accessing-bedrock.
Group: &deploymentGroupAIBridge,
YAML: "bedrock_model",
Hidden: true,
},
{
Name: "AIBridge Bedrock Small Fast Model",
@@ -3347,7 +3338,6 @@ Write out the current server config as YAML to stdout.`,
Default: "global.anthropic.claude-haiku-4-5-20251001-v1:0", // See https://docs.claude.com/en/api/claude-on-amazon-bedrock#accessing-bedrock.
Group: &deploymentGroupAIBridge,
YAML: "bedrock_small_fast_model",
Hidden: true,
},
{
Name: "Enable Authorization Recordings",
@@ -3645,7 +3635,6 @@ const (
ExperimentOAuth2 Experiment = "oauth2" // Enables OAuth2 provider functionality.
ExperimentMCPServerHTTP Experiment = "mcp-server-http" // Enables the MCP HTTP server functionality.
ExperimentWorkspaceSharing Experiment = "workspace-sharing" // Enables updating workspace ACLs for sharing with users and groups.
ExperimentAIBridge Experiment = "aibridge" // Enables AI Bridge functionality.
)
func (e Experiment) DisplayName() string {
@@ -3666,8 +3655,6 @@ func (e Experiment) DisplayName() string {
return "MCP HTTP Server Functionality"
case ExperimentWorkspaceSharing:
return "Workspace Sharing"
case ExperimentAIBridge:
return "AI Bridge"
default:
// Split on hyphen and convert to title case
// e.g. "web-push" -> "Web Push", "mcp-server-http" -> "Mcp Server Http"
@@ -3686,7 +3673,6 @@ var ExperimentsKnown = Experiments{
ExperimentOAuth2,
ExperimentMCPServerHTTP,
ExperimentWorkspaceSharing,
ExperimentAIBridge,
}
// ExperimentsSafe should include all experiments that are safe for
+2 -2
View File
@@ -851,7 +851,7 @@ func TestTools(t *testing.T) {
TemplateVersionID: r.TemplateVersion.ID.String(),
Input: "do yet another barrel roll",
},
error: "Template does not have required parameter \"AI Prompt\"",
error: "Template does not have a valid \"coder_ai_task\" resource.",
},
{
name: "WithPreset",
@@ -860,7 +860,7 @@ func TestTools(t *testing.T) {
TemplateVersionPresetID: presetID.String(),
Input: "not enough barrel rolls",
},
error: "Template does not have required parameter \"AI Prompt\"",
error: "Template does not have a valid \"coder_ai_task\" resource.",
},
}
+1 -2
View File
@@ -89,9 +89,8 @@ type WorkspaceBuild struct {
MatchedProvisioners *MatchedProvisioners `json:"matched_provisioners,omitempty"`
TemplateVersionPresetID *uuid.UUID `json:"template_version_preset_id" format:"uuid"`
HasAITask *bool `json:"has_ai_task,omitempty"`
// Deprecated: This field has been replaced with `TaskAppID`
// Deprecated: This field has been replaced with `Task.WorkspaceAppID`
AITaskSidebarAppID *uuid.UUID `json:"ai_task_sidebar_app_id,omitempty" format:"uuid"`
TaskAppID *uuid.UUID `json:"task_app_id,omitempty" format:"uuid"`
HasExternalAgent *bool `json:"has_external_agent,omitempty"`
}
+2
View File
@@ -72,6 +72,8 @@ type Workspace struct {
// Once a prebuilt workspace is claimed by a user, it transitions to a regular workspace,
// and IsPrebuild returns false.
IsPrebuild bool `json:"is_prebuild"`
// TaskID, if set, indicates that the workspace is relevant to the given codersdk.Task.
TaskID uuid.NullUUID `json:"task_id,omitempty"`
}
func (w Workspace) FullName() string {
+6 -12
View File
@@ -1,8 +1,5 @@
# AI Bridge
> [!NOTE]
> AI Bridge is currently an _experimental_ feature.
![AI bridge diagram](../images/aibridge/aibridge_diagram.png)
Bridge is a smart proxy for AI. It acts as a man-in-the-middle between your users' coding agents / IDEs
@@ -45,17 +42,14 @@ Bridge runs inside the Coder control plane, requiring no separate compute to dep
### Activation
To enable this feature, activate the `aibridge` experiment using an environment variable or a CLI flag.
Additionally, you will need to enable Bridge explicitly:
You will need to enable AI Bridge explicitly:
```sh
CODER_EXPERIMENTS="aibridge" CODER_AIBRIDGE_ENABLED=true coder server
CODER_AIBRIDGE_ENABLED=true coder server
# or
coder server --experiments=aibridge --aibridge-enabled=true
coder server --aibridge-enabled=true
```
_If you have other experiments enabled, separate them by commas._
### Providers
Bridge currently supports OpenAI and Anthropic APIs.
@@ -89,8 +83,8 @@ Once AI Bridge is enabled on the server, your users need to configure their AI c
The exact configuration method varies by client — some use environment variables, others use configuration files or UI settings:
- **OpenAI-compatible clients**: Set the base URL (commonly via the `OPENAI_BASE_URL` environment variable) to `https://coder.example.com/api/experimental/aibridge/openai/v1`
- **Anthropic-compatible clients**: Set the base URL (commonly via the `ANTHROPIC_BASE_URL` environment variable) to `https://coder.example.com/api/experimental/aibridge/anthropic`
- **OpenAI-compatible clients**: Set the base URL (commonly via the `OPENAI_BASE_URL` environment variable) to `https://coder.example.com/api/v2/aibridge/openai/v1`
- **Anthropic-compatible clients**: Set the base URL (commonly via the `ANTHROPIC_BASE_URL` environment variable) to `https://coder.example.com/api/v2/aibridge/anthropic`
Replace `coder.example.com` with your actual Coder deployment URL.
@@ -133,7 +127,7 @@ All of these records are associated to an "interception" record, which maps 1:1
These logs can be used to determine usage patterns, track costs, and evaluate tooling adoption.
This data is currently accessible through the API and CLI (experimental), which we advise administrators export to their observability platform of choice. We've configured a Grafana dashboard to display Claude Code usage internally which can be imported as a starting point for your tooling adoption metrics.
This data is currently accessible through the API and CLI, which we advise administrators export to their observability platform of choice. We've configured a Grafana dashboard to display Claude Code usage internally which can be imported as a starting point for your tooling adoption metrics.
![User Leaderboard](../images/aibridge/grafana_user_leaderboard.png)
+15
View File
@@ -1180,6 +1180,21 @@
"path": "./reference/cli/index.md",
"icon_path": "./images/icons/terminal.svg",
"children": [
{
"title": "aibridge",
"description": "Manage AIBridge.",
"path": "reference/cli/aibridge.md"
},
{
"title": "aibridge interceptions",
"description": "Manage AIBridge interceptions.",
"path": "reference/cli/aibridge_interceptions.md"
},
{
"title": "aibridge interceptions list",
"description": "List AIBridge interceptions as JSON.",
"path": "reference/cli/aibridge_interceptions_list.md"
},
{
"title": "autoupdate",
"description": "Toggle auto-update policy for a workspace",
+2 -2
View File
@@ -6,12 +6,12 @@
```shell
# Example request using curl
curl -X GET http://coder-server:8080/api/v2/api/experimental/aibridge/interceptions \
curl -X GET http://coder-server:8080/api/v2/aibridge/interceptions \
-H 'Accept: application/json' \
-H 'Coder-Session-Token: API_KEY'
```
`GET /api/experimental/aibridge/interceptions`
`GET /aibridge/interceptions`
### Parameters
+1 -7
View File
@@ -222,7 +222,6 @@ curl -X GET http://coder-server:8080/api/v2/users/{user}/workspace/{workspacenam
}
],
"status": "pending",
"task_app_id": "ca438251-3e16-4fae-b9ab-dd3c237c3735",
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
"template_version_name": "string",
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
@@ -464,7 +463,6 @@ curl -X GET http://coder-server:8080/api/v2/workspacebuilds/{workspacebuild} \
}
],
"status": "pending",
"task_app_id": "ca438251-3e16-4fae-b9ab-dd3c237c3735",
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
"template_version_name": "string",
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
@@ -1197,7 +1195,6 @@ curl -X GET http://coder-server:8080/api/v2/workspacebuilds/{workspacebuild}/sta
}
],
"status": "pending",
"task_app_id": "ca438251-3e16-4fae-b9ab-dd3c237c3735",
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
"template_version_name": "string",
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
@@ -1512,7 +1509,6 @@ curl -X GET http://coder-server:8080/api/v2/workspaces/{workspace}/builds \
}
],
"status": "pending",
"task_app_id": "ca438251-3e16-4fae-b9ab-dd3c237c3735",
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
"template_version_name": "string",
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
@@ -1540,7 +1536,7 @@ Status Code **200**
| Name | Type | Required | Restrictions | Description |
|----------------------------------|--------------------------------------------------------------------------------------------------------|----------|--------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `[array item]` | array | false | | |
| `» ai_task_sidebar_app_id` | string(uuid) | false | | Deprecated: This field has been replaced with `TaskAppID` |
| `» ai_task_sidebar_app_id` | string(uuid) | false | | Deprecated: This field has been replaced with `Task.WorkspaceAppID` |
| `» build_number` | integer | false | | |
| `» created_at` | string(date-time) | false | | |
| `» daily_cost` | integer | false | | |
@@ -1691,7 +1687,6 @@ Status Code **200**
| `»» type` | string | false | | |
| `»» workspace_transition` | [codersdk.WorkspaceTransition](schemas.md#codersdkworkspacetransition) | false | | |
| `» status` | [codersdk.WorkspaceStatus](schemas.md#codersdkworkspacestatus) | false | | |
| `» task_app_id` | string(uuid) | false | | |
| `» template_version_id` | string(uuid) | false | | |
| `» template_version_name` | string | false | | |
| `» template_version_preset_id` | string(uuid) | false | | |
@@ -2013,7 +2008,6 @@ curl -X POST http://coder-server:8080/api/v2/workspaces/{workspace}/builds \
}
],
"status": "pending",
"task_app_id": "ca438251-3e16-4fae-b9ab-dd3c237c3735",
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
"template_version_name": "string",
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
+10 -6
View File
@@ -4059,7 +4059,6 @@ CreateWorkspaceRequest provides options for creating a new workspace. Only one o
| `oauth2` |
| `mcp-server-http` |
| `workspace-sharing` |
| `aibridge` |
## codersdk.ExternalAPIKeyScopes
@@ -10165,7 +10164,6 @@ If the schedule is empty, the user will be updated to use the default schedule.|
}
],
"status": "pending",
"task_app_id": "ca438251-3e16-4fae-b9ab-dd3c237c3735",
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
"template_version_name": "string",
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
@@ -10185,6 +10183,10 @@ If the schedule is empty, the user will be updated to use the default schedule.|
"owner_avatar_url": "string",
"owner_id": "8826ee2e-7933-4665-aef2-2393f84a0d05",
"owner_name": "string",
"task_id": {
"uuid": "string",
"valid": true
},
"template_active_version_id": "b0da9c29-67d8-4c87-888c-bafe356f7f3c",
"template_allow_user_cancel_workspace_jobs": true,
"template_display_name": "string",
@@ -10223,6 +10225,7 @@ If the schedule is empty, the user will be updated to use the default schedule.|
| `owner_avatar_url` | string | false | | |
| `owner_id` | string | false | | |
| `owner_name` | string | false | | Owner name is the username of the owner of the workspace. |
| `task_id` | [uuid.NullUUID](#uuidnulluuid) | false | | Task ID if set, indicates that the workspace is relevant to the given codersdk.Task. |
| `template_active_version_id` | string | false | | |
| `template_allow_user_cancel_workspace_jobs` | boolean | false | | |
| `template_display_name` | string | false | | |
@@ -11335,7 +11338,6 @@ If the schedule is empty, the user will be updated to use the default schedule.|
}
],
"status": "pending",
"task_app_id": "ca438251-3e16-4fae-b9ab-dd3c237c3735",
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
"template_version_name": "string",
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
@@ -11353,7 +11355,7 @@ If the schedule is empty, the user will be updated to use the default schedule.|
| Name | Type | Required | Restrictions | Description |
|------------------------------|-------------------------------------------------------------------|----------|--------------|---------------------------------------------------------------------|
| `ai_task_sidebar_app_id` | string | false | | Deprecated: This field has been replaced with `TaskAppID` |
| `ai_task_sidebar_app_id` | string | false | | Deprecated: This field has been replaced with `Task.WorkspaceAppID` |
| `build_number` | integer | false | | |
| `created_at` | string | false | | |
| `daily_cost` | integer | false | | |
@@ -11369,7 +11371,6 @@ If the schedule is empty, the user will be updated to use the default schedule.|
| `reason` | [codersdk.BuildReason](#codersdkbuildreason) | false | | |
| `resources` | array of [codersdk.WorkspaceResource](#codersdkworkspaceresource) | false | | |
| `status` | [codersdk.WorkspaceStatus](#codersdkworkspacestatus) | false | | |
| `task_app_id` | string | false | | |
| `template_version_id` | string | false | | |
| `template_version_name` | string | false | | |
| `template_version_preset_id` | string | false | | |
@@ -12159,7 +12160,6 @@ If the schedule is empty, the user will be updated to use the default schedule.|
}
],
"status": "pending",
"task_app_id": "ca438251-3e16-4fae-b9ab-dd3c237c3735",
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
"template_version_name": "string",
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
@@ -12179,6 +12179,10 @@ If the schedule is empty, the user will be updated to use the default schedule.|
"owner_avatar_url": "string",
"owner_id": "8826ee2e-7933-4665-aef2-2393f84a0d05",
"owner_name": "string",
"task_id": {
"uuid": "string",
"valid": true
},
"template_active_version_id": "b0da9c29-67d8-4c87-888c-bafe356f7f3c",
"template_allow_user_cancel_workspace_jobs": true,
"template_display_name": "string",
+24 -6
View File
@@ -277,7 +277,6 @@ of the template will be used.
}
],
"status": "pending",
"task_app_id": "ca438251-3e16-4fae-b9ab-dd3c237c3735",
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
"template_version_name": "string",
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
@@ -297,6 +296,10 @@ of the template will be used.
"owner_avatar_url": "string",
"owner_id": "8826ee2e-7933-4665-aef2-2393f84a0d05",
"owner_name": "string",
"task_id": {
"uuid": "string",
"valid": true
},
"template_active_version_id": "b0da9c29-67d8-4c87-888c-bafe356f7f3c",
"template_allow_user_cancel_workspace_jobs": true,
"template_display_name": "string",
@@ -569,7 +572,6 @@ curl -X GET http://coder-server:8080/api/v2/users/{user}/workspace/{workspacenam
}
],
"status": "pending",
"task_app_id": "ca438251-3e16-4fae-b9ab-dd3c237c3735",
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
"template_version_name": "string",
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
@@ -589,6 +591,10 @@ curl -X GET http://coder-server:8080/api/v2/users/{user}/workspace/{workspacenam
"owner_avatar_url": "string",
"owner_id": "8826ee2e-7933-4665-aef2-2393f84a0d05",
"owner_name": "string",
"task_id": {
"uuid": "string",
"valid": true
},
"template_active_version_id": "b0da9c29-67d8-4c87-888c-bafe356f7f3c",
"template_allow_user_cancel_workspace_jobs": true,
"template_display_name": "string",
@@ -886,7 +892,6 @@ of the template will be used.
}
],
"status": "pending",
"task_app_id": "ca438251-3e16-4fae-b9ab-dd3c237c3735",
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
"template_version_name": "string",
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
@@ -906,6 +911,10 @@ of the template will be used.
"owner_avatar_url": "string",
"owner_id": "8826ee2e-7933-4665-aef2-2393f84a0d05",
"owner_name": "string",
"task_id": {
"uuid": "string",
"valid": true
},
"template_active_version_id": "b0da9c29-67d8-4c87-888c-bafe356f7f3c",
"template_allow_user_cancel_workspace_jobs": true,
"template_display_name": "string",
@@ -1164,7 +1173,6 @@ curl -X GET http://coder-server:8080/api/v2/workspaces \
}
],
"status": "pending",
"task_app_id": "ca438251-3e16-4fae-b9ab-dd3c237c3735",
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
"template_version_name": "string",
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
@@ -1184,6 +1192,10 @@ curl -X GET http://coder-server:8080/api/v2/workspaces \
"owner_avatar_url": "string",
"owner_id": "8826ee2e-7933-4665-aef2-2393f84a0d05",
"owner_name": "string",
"task_id": {
"uuid": "string",
"valid": true
},
"template_active_version_id": "b0da9c29-67d8-4c87-888c-bafe356f7f3c",
"template_allow_user_cancel_workspace_jobs": true,
"template_display_name": "string",
@@ -1457,7 +1469,6 @@ curl -X GET http://coder-server:8080/api/v2/workspaces/{workspace} \
}
],
"status": "pending",
"task_app_id": "ca438251-3e16-4fae-b9ab-dd3c237c3735",
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
"template_version_name": "string",
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
@@ -1477,6 +1488,10 @@ curl -X GET http://coder-server:8080/api/v2/workspaces/{workspace} \
"owner_avatar_url": "string",
"owner_id": "8826ee2e-7933-4665-aef2-2393f84a0d05",
"owner_name": "string",
"task_id": {
"uuid": "string",
"valid": true
},
"template_active_version_id": "b0da9c29-67d8-4c87-888c-bafe356f7f3c",
"template_allow_user_cancel_workspace_jobs": true,
"template_display_name": "string",
@@ -2009,7 +2024,6 @@ curl -X PUT http://coder-server:8080/api/v2/workspaces/{workspace}/dormant \
}
],
"status": "pending",
"task_app_id": "ca438251-3e16-4fae-b9ab-dd3c237c3735",
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
"template_version_name": "string",
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
@@ -2029,6 +2043,10 @@ curl -X PUT http://coder-server:8080/api/v2/workspaces/{workspace}/dormant \
"owner_avatar_url": "string",
"owner_id": "8826ee2e-7933-4665-aef2-2393f84a0d05",
"owner_name": "string",
"task_id": {
"uuid": "string",
"valid": true
},
"template_active_version_id": "b0da9c29-67d8-4c87-888c-bafe356f7f3c",
"template_allow_user_cancel_workspace_jobs": true,
"template_display_name": "string",
+16
View File
@@ -0,0 +1,16 @@
<!-- DO NOT EDIT | GENERATED CONTENT -->
# aibridge
Manage AIBridge.
## Usage
```console
coder aibridge
```
## Subcommands
| Name | Purpose |
|-----------------------------------------------------------|--------------------------------|
| [<code>interceptions</code>](./aibridge_interceptions.md) | Manage AIBridge interceptions. |
+16
View File
@@ -0,0 +1,16 @@
<!-- DO NOT EDIT | GENERATED CONTENT -->
# aibridge interceptions
Manage AIBridge interceptions.
## Usage
```console
coder aibridge interceptions
```
## Subcommands
| Name | Purpose |
|-------------------------------------------------------|--------------------------------------|
| [<code>list</code>](./aibridge_interceptions_list.md) | List AIBridge interceptions as JSON. |
+69
View File
@@ -0,0 +1,69 @@
<!-- DO NOT EDIT | GENERATED CONTENT -->
# aibridge interceptions list
List AIBridge interceptions as JSON.
## Usage
```console
coder aibridge interceptions list [flags]
```
## Options
### --initiator
| | |
|------|---------------------|
| Type | <code>string</code> |
Only return interceptions initiated by this user. Accepts a user ID, username, or "me".
### --started-before
| | |
|------|---------------------|
| Type | <code>string</code> |
Only return interceptions started before this time. Must be after 'started-after' if set. Accepts a time in the RFC 3339 format, e.g. "2006-01-02T15:04:05Z07:00".
### --started-after
| | |
|------|---------------------|
| Type | <code>string</code> |
Only return interceptions started after this time. Must be before 'started-before' if set. Accepts a time in the RFC 3339 format, e.g. "2006-01-02T15:04:05Z07:00".
### --provider
| | |
|------|---------------------|
| Type | <code>string</code> |
Only return interceptions from this provider.
### --model
| | |
|------|---------------------|
| Type | <code>string</code> |
Only return interceptions from this model.
### --after-id
| | |
|------|---------------------|
| Type | <code>string</code> |
The ID of the last result on the previous page to use as a pagination cursor.
### --limit
| | |
|---------|------------------|
| Type | <code>int</code> |
| Default | <code>100</code> |
The limit of results to return. Must be between 1 and 1000.
+1
View File
@@ -68,6 +68,7 @@ Coder — A tool for provisioning self-hosted development environments with Terr
| [<code>groups</code>](./groups.md) | Manage groups |
| [<code>prebuilds</code>](./prebuilds.md) | Manage Coder prebuilds |
| [<code>external-workspaces</code>](./external-workspaces.md) | Create or manage external workspaces |
| [<code>aibridge</code>](./aibridge.md) | Manage AIBridge. |
## Options
+105
View File
@@ -1647,3 +1647,108 @@ How often to reconcile workspace prebuilds state.
| Default | <code>false</code> |
Hide AI tasks from the dashboard.
### --aibridge-enabled
| | |
|-------------|--------------------------------------|
| Type | <code>bool</code> |
| Environment | <code>$CODER_AIBRIDGE_ENABLED</code> |
| YAML | <code>aibridge.enabled</code> |
| Default | <code>false</code> |
Whether to start an in-memory aibridged instance.
### --aibridge-openai-base-url
| | |
|-------------|----------------------------------------------|
| Type | <code>string</code> |
| Environment | <code>$CODER_AIBRIDGE_OPENAI_BASE_URL</code> |
| YAML | <code>aibridge.openai_base_url</code> |
| Default | <code>https://api.openai.com/v1/</code> |
The base URL of the OpenAI API.
### --aibridge-openai-key
| | |
|-------------|-----------------------------------------|
| Type | <code>string</code> |
| Environment | <code>$CODER_AIBRIDGE_OPENAI_KEY</code> |
| YAML | <code>aibridge.openai_key</code> |
The key to authenticate against the OpenAI API.
### --aibridge-anthropic-base-url
| | |
|-------------|-------------------------------------------------|
| Type | <code>string</code> |
| Environment | <code>$CODER_AIBRIDGE_ANTHROPIC_BASE_URL</code> |
| YAML | <code>aibridge.anthropic_base_url</code> |
| Default | <code>https://api.anthropic.com/</code> |
The base URL of the Anthropic API.
### --aibridge-anthropic-key
| | |
|-------------|--------------------------------------------|
| Type | <code>string</code> |
| Environment | <code>$CODER_AIBRIDGE_ANTHROPIC_KEY</code> |
| YAML | <code>aibridge.anthropic_key</code> |
The key to authenticate against the Anthropic API.
### --aibridge-bedrock-region
| | |
|-------------|---------------------------------------------|
| Type | <code>string</code> |
| Environment | <code>$CODER_AIBRIDGE_BEDROCK_REGION</code> |
| YAML | <code>aibridge.bedrock_region</code> |
The AWS Bedrock API region.
### --aibridge-bedrock-access-key
| | |
|-------------|-------------------------------------------------|
| Type | <code>string</code> |
| Environment | <code>$CODER_AIBRIDGE_BEDROCK_ACCESS_KEY</code> |
| YAML | <code>aibridge.bedrock_access_key</code> |
The access key to authenticate against the AWS Bedrock API.
### --aibridge-bedrock-access-key-secret
| | |
|-------------|--------------------------------------------------------|
| Type | <code>string</code> |
| Environment | <code>$CODER_AIBRIDGE_BEDROCK_ACCESS_KEY_SECRET</code> |
| YAML | <code>aibridge.bedrock_access_key_secret</code> |
The access key secret to use with the access key to authenticate against the AWS Bedrock API.
### --aibridge-bedrock-model
| | |
|-------------|---------------------------------------------------------------|
| Type | <code>string</code> |
| Environment | <code>$CODER_AIBRIDGE_BEDROCK_MODEL</code> |
| YAML | <code>aibridge.bedrock_model</code> |
| Default | <code>global.anthropic.claude-sonnet-4-5-20250929-v1:0</code> |
The model to use when making requests to the AWS Bedrock API.
### --aibridge-bedrock-small-fastmodel
| | |
|-------------|--------------------------------------------------------------|
| Type | <code>string</code> |
| Environment | <code>$CODER_AIBRIDGE_BEDROCK_SMALL_FAST_MODEL</code> |
| YAML | <code>aibridge.bedrock_small_fast_model</code> |
| Default | <code>global.anthropic.claude-haiku-4-5-20251001-v1:0</code> |
The small fast model to use when making requests to the AWS Bedrock API. Claude Code uses Haiku-class models to perform background tasks. See https://docs.claude.com/en/docs/claude-code/settings#environment-variables.
+2 -2
View File
@@ -11,8 +11,8 @@ RUN cargo install jj-cli typos-cli watchexec-cli
FROM ubuntu:jammy@sha256:4e0171b9275e12d375863f2b3ae9ce00a4c53ddda176bd55868df97ac6f21a6e AS go
# Install Go manually, so that we can control the version
ARG GO_VERSION=1.24.6
ARG GO_CHECKSUM="bbca37cc395c974ffa4893ee35819ad23ebb27426df87af92e93a9ec66ef8712"
ARG GO_VERSION=1.24.10
ARG GO_CHECKSUM="dd52b974e3d9c5a7bbfb222c685806def6be5d6f7efd10f9caa9ca1fa2f47955"
# Boring Go is needed to build FIPS-compliant binaries.
RUN apt-get update && \
+1 -1
View File
@@ -479,7 +479,7 @@ resource "coder_agent" "dev" {
dir = local.repo_dir
env = {
OIDC_TOKEN : data.coder_workspace_owner.me.oidc_access_token,
ANTHROPIC_BASE_URL : "https://dev.coder.com/api/experimental/aibridge/anthropic",
ANTHROPIC_BASE_URL : "https://dev.coder.com/api/v2/aibridge/anthropic",
ANTHROPIC_AUTH_TOKEN : data.coder_workspace_owner.me.session_token
}
startup_script_behavior = "blocking"
@@ -19,7 +19,7 @@ var _ io.Closer = &Server{}
// Server provides the AI Bridge functionality.
// It is responsible for:
// - receiving requests on /api/experimental/aibridged/* // TODO: update endpoint once out of experimental
// - receiving requests on /api/v2/aibridged/*
// - manipulating the requests
// - relaying requests to upstream AI services and relaying responses to caller
//
@@ -19,8 +19,8 @@ import (
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/externalauth"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/enterprise/aibridged"
"github.com/coder/coder/v2/enterprise/coderd/coderdenttest"
"github.com/coder/coder/v2/enterprise/x/aibridged"
"github.com/coder/coder/v2/testutil"
)
@@ -18,9 +18,9 @@ import (
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/aibridge"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/enterprise/x/aibridged"
mock "github.com/coder/coder/v2/enterprise/x/aibridged/aibridgedmock"
"github.com/coder/coder/v2/enterprise/x/aibridged/proto"
"github.com/coder/coder/v2/enterprise/aibridged"
mock "github.com/coder/coder/v2/enterprise/aibridged/aibridgedmock"
"github.com/coder/coder/v2/enterprise/aibridged/proto"
"github.com/coder/coder/v2/testutil"
)
@@ -1,9 +1,9 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/coder/coder/v2/enterprise/x/aibridged (interfaces: DRPCClient)
// Source: github.com/coder/coder/v2/enterprise/aibridged (interfaces: DRPCClient)
//
// Generated by this command:
//
// mockgen -destination ./clientmock.go -package aibridgedmock github.com/coder/coder/v2/enterprise/x/aibridged DRPCClient
// mockgen -destination ./clientmock.go -package aibridgedmock github.com/coder/coder/v2/enterprise/aibridged DRPCClient
//
// Package aibridgedmock is a generated GoMock package.
@@ -13,7 +13,7 @@ import (
context "context"
reflect "reflect"
proto "github.com/coder/coder/v2/enterprise/x/aibridged/proto"
proto "github.com/coder/coder/v2/enterprise/aibridged/proto"
gomock "go.uber.org/mock/gomock"
drpc "storj.io/drpc"
)
@@ -1,4 +1,4 @@
package aibridgedmock
//go:generate mockgen -destination ./clientmock.go -package aibridgedmock github.com/coder/coder/v2/enterprise/x/aibridged DRPCClient
//go:generate mockgen -destination ./poolmock.go -package aibridgedmock github.com/coder/coder/v2/enterprise/x/aibridged Pooler
//go:generate mockgen -destination ./clientmock.go -package aibridgedmock github.com/coder/coder/v2/enterprise/aibridged DRPCClient
//go:generate mockgen -destination ./poolmock.go -package aibridgedmock github.com/coder/coder/v2/enterprise/aibridged Pooler
@@ -1,9 +1,9 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/coder/coder/v2/enterprise/x/aibridged (interfaces: Pooler)
// Source: github.com/coder/coder/v2/enterprise/aibridged (interfaces: Pooler)
//
// Generated by this command:
//
// mockgen -destination ./poolmock.go -package aibridgedmock github.com/coder/coder/v2/enterprise/x/aibridged Pooler
// mockgen -destination ./poolmock.go -package aibridgedmock github.com/coder/coder/v2/enterprise/aibridged Pooler
//
// Package aibridgedmock is a generated GoMock package.
@@ -14,7 +14,7 @@ import (
http "net/http"
reflect "reflect"
aibridged "github.com/coder/coder/v2/enterprise/x/aibridged"
aibridged "github.com/coder/coder/v2/enterprise/aibridged"
gomock "go.uber.org/mock/gomock"
)
@@ -5,7 +5,7 @@ import (
"storj.io/drpc"
"github.com/coder/coder/v2/enterprise/x/aibridged/proto"
"github.com/coder/coder/v2/enterprise/aibridged/proto"
)
type Dialer func(ctx context.Context) (DRPCClient, error)
@@ -9,7 +9,7 @@ import (
"cdr.dev/slog"
"github.com/coder/aibridge"
"github.com/coder/coder/v2/enterprise/x/aibridged/proto"
"github.com/coder/coder/v2/enterprise/aibridged/proto"
)
var _ http.Handler = &Server{}
@@ -10,7 +10,7 @@ import (
"cdr.dev/slog"
"github.com/coder/aibridge/mcp"
"github.com/coder/coder/v2/enterprise/x/aibridged/proto"
"github.com/coder/coder/v2/enterprise/aibridged/proto"
)
var (
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/coder/coder/v2/enterprise/x/aibridged/proto"
"github.com/coder/coder/v2/enterprise/aibridged/proto"
"github.com/coder/coder/v2/testutil"
)
@@ -13,8 +13,8 @@ import (
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/aibridge/mcp"
"github.com/coder/aibridge/mcpmock"
"github.com/coder/coder/v2/enterprise/x/aibridged"
mock "github.com/coder/coder/v2/enterprise/x/aibridged/aibridgedmock"
"github.com/coder/coder/v2/enterprise/aibridged"
mock "github.com/coder/coder/v2/enterprise/aibridged/aibridgedmock"
)
// TestPool validates the published behavior of [aibridged.CachedBridgePool].
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go-drpc. DO NOT EDIT.
// protoc-gen-go-drpc version: v0.0.34
// source: enterprise/x/aibridged/proto/aibridged.proto
// source: enterprise/aibridged/proto/aibridged.proto
package proto
@@ -13,25 +13,25 @@ import (
drpcerr "storj.io/drpc/drpcerr"
)
type drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto struct{}
type drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto struct{}
func (drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto) Marshal(msg drpc.Message) ([]byte, error) {
func (drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto) Marshal(msg drpc.Message) ([]byte, error) {
return proto.Marshal(msg.(proto.Message))
}
func (drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto) MarshalAppend(buf []byte, msg drpc.Message) ([]byte, error) {
func (drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto) MarshalAppend(buf []byte, msg drpc.Message) ([]byte, error) {
return proto.MarshalOptions{}.MarshalAppend(buf, msg.(proto.Message))
}
func (drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto) Unmarshal(buf []byte, msg drpc.Message) error {
func (drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto) Unmarshal(buf []byte, msg drpc.Message) error {
return proto.Unmarshal(buf, msg.(proto.Message))
}
func (drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto) JSONMarshal(msg drpc.Message) ([]byte, error) {
func (drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto) JSONMarshal(msg drpc.Message) ([]byte, error) {
return protojson.Marshal(msg.(proto.Message))
}
func (drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto) JSONUnmarshal(buf []byte, msg drpc.Message) error {
func (drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto) JSONUnmarshal(buf []byte, msg drpc.Message) error {
return protojson.Unmarshal(buf, msg.(proto.Message))
}
@@ -57,7 +57,7 @@ func (c *drpcRecorderClient) DRPCConn() drpc.Conn { return c.cc }
func (c *drpcRecorderClient) RecordInterception(ctx context.Context, in *RecordInterceptionRequest) (*RecordInterceptionResponse, error) {
out := new(RecordInterceptionResponse)
err := c.cc.Invoke(ctx, "/proto.Recorder/RecordInterception", drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{}, in, out)
err := c.cc.Invoke(ctx, "/proto.Recorder/RecordInterception", drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{}, in, out)
if err != nil {
return nil, err
}
@@ -66,7 +66,7 @@ func (c *drpcRecorderClient) RecordInterception(ctx context.Context, in *RecordI
func (c *drpcRecorderClient) RecordInterceptionEnded(ctx context.Context, in *RecordInterceptionEndedRequest) (*RecordInterceptionEndedResponse, error) {
out := new(RecordInterceptionEndedResponse)
err := c.cc.Invoke(ctx, "/proto.Recorder/RecordInterceptionEnded", drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{}, in, out)
err := c.cc.Invoke(ctx, "/proto.Recorder/RecordInterceptionEnded", drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{}, in, out)
if err != nil {
return nil, err
}
@@ -75,7 +75,7 @@ func (c *drpcRecorderClient) RecordInterceptionEnded(ctx context.Context, in *Re
func (c *drpcRecorderClient) RecordTokenUsage(ctx context.Context, in *RecordTokenUsageRequest) (*RecordTokenUsageResponse, error) {
out := new(RecordTokenUsageResponse)
err := c.cc.Invoke(ctx, "/proto.Recorder/RecordTokenUsage", drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{}, in, out)
err := c.cc.Invoke(ctx, "/proto.Recorder/RecordTokenUsage", drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{}, in, out)
if err != nil {
return nil, err
}
@@ -84,7 +84,7 @@ func (c *drpcRecorderClient) RecordTokenUsage(ctx context.Context, in *RecordTok
func (c *drpcRecorderClient) RecordPromptUsage(ctx context.Context, in *RecordPromptUsageRequest) (*RecordPromptUsageResponse, error) {
out := new(RecordPromptUsageResponse)
err := c.cc.Invoke(ctx, "/proto.Recorder/RecordPromptUsage", drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{}, in, out)
err := c.cc.Invoke(ctx, "/proto.Recorder/RecordPromptUsage", drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{}, in, out)
if err != nil {
return nil, err
}
@@ -93,7 +93,7 @@ func (c *drpcRecorderClient) RecordPromptUsage(ctx context.Context, in *RecordPr
func (c *drpcRecorderClient) RecordToolUsage(ctx context.Context, in *RecordToolUsageRequest) (*RecordToolUsageResponse, error) {
out := new(RecordToolUsageResponse)
err := c.cc.Invoke(ctx, "/proto.Recorder/RecordToolUsage", drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{}, in, out)
err := c.cc.Invoke(ctx, "/proto.Recorder/RecordToolUsage", drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{}, in, out)
if err != nil {
return nil, err
}
@@ -137,7 +137,7 @@ func (DRPCRecorderDescription) NumMethods() int { return 5 }
func (DRPCRecorderDescription) Method(n int) (string, drpc.Encoding, drpc.Receiver, interface{}, bool) {
switch n {
case 0:
return "/proto.Recorder/RecordInterception", drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{},
return "/proto.Recorder/RecordInterception", drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCRecorderServer).
RecordInterception(
@@ -146,7 +146,7 @@ func (DRPCRecorderDescription) Method(n int) (string, drpc.Encoding, drpc.Receiv
)
}, DRPCRecorderServer.RecordInterception, true
case 1:
return "/proto.Recorder/RecordInterceptionEnded", drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{},
return "/proto.Recorder/RecordInterceptionEnded", drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCRecorderServer).
RecordInterceptionEnded(
@@ -155,7 +155,7 @@ func (DRPCRecorderDescription) Method(n int) (string, drpc.Encoding, drpc.Receiv
)
}, DRPCRecorderServer.RecordInterceptionEnded, true
case 2:
return "/proto.Recorder/RecordTokenUsage", drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{},
return "/proto.Recorder/RecordTokenUsage", drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCRecorderServer).
RecordTokenUsage(
@@ -164,7 +164,7 @@ func (DRPCRecorderDescription) Method(n int) (string, drpc.Encoding, drpc.Receiv
)
}, DRPCRecorderServer.RecordTokenUsage, true
case 3:
return "/proto.Recorder/RecordPromptUsage", drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{},
return "/proto.Recorder/RecordPromptUsage", drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCRecorderServer).
RecordPromptUsage(
@@ -173,7 +173,7 @@ func (DRPCRecorderDescription) Method(n int) (string, drpc.Encoding, drpc.Receiv
)
}, DRPCRecorderServer.RecordPromptUsage, true
case 4:
return "/proto.Recorder/RecordToolUsage", drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{},
return "/proto.Recorder/RecordToolUsage", drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCRecorderServer).
RecordToolUsage(
@@ -200,7 +200,7 @@ type drpcRecorder_RecordInterceptionStream struct {
}
func (x *drpcRecorder_RecordInterceptionStream) SendAndClose(m *RecordInterceptionResponse) error {
if err := x.MsgSend(m, drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{}); err != nil {
if err := x.MsgSend(m, drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{}); err != nil {
return err
}
return x.CloseSend()
@@ -216,7 +216,7 @@ type drpcRecorder_RecordInterceptionEndedStream struct {
}
func (x *drpcRecorder_RecordInterceptionEndedStream) SendAndClose(m *RecordInterceptionEndedResponse) error {
if err := x.MsgSend(m, drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{}); err != nil {
if err := x.MsgSend(m, drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{}); err != nil {
return err
}
return x.CloseSend()
@@ -232,7 +232,7 @@ type drpcRecorder_RecordTokenUsageStream struct {
}
func (x *drpcRecorder_RecordTokenUsageStream) SendAndClose(m *RecordTokenUsageResponse) error {
if err := x.MsgSend(m, drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{}); err != nil {
if err := x.MsgSend(m, drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{}); err != nil {
return err
}
return x.CloseSend()
@@ -248,7 +248,7 @@ type drpcRecorder_RecordPromptUsageStream struct {
}
func (x *drpcRecorder_RecordPromptUsageStream) SendAndClose(m *RecordPromptUsageResponse) error {
if err := x.MsgSend(m, drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{}); err != nil {
if err := x.MsgSend(m, drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{}); err != nil {
return err
}
return x.CloseSend()
@@ -264,7 +264,7 @@ type drpcRecorder_RecordToolUsageStream struct {
}
func (x *drpcRecorder_RecordToolUsageStream) SendAndClose(m *RecordToolUsageResponse) error {
if err := x.MsgSend(m, drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{}); err != nil {
if err := x.MsgSend(m, drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{}); err != nil {
return err
}
return x.CloseSend()
@@ -289,7 +289,7 @@ func (c *drpcMCPConfiguratorClient) DRPCConn() drpc.Conn { return c.cc }
func (c *drpcMCPConfiguratorClient) GetMCPServerConfigs(ctx context.Context, in *GetMCPServerConfigsRequest) (*GetMCPServerConfigsResponse, error) {
out := new(GetMCPServerConfigsResponse)
err := c.cc.Invoke(ctx, "/proto.MCPConfigurator/GetMCPServerConfigs", drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{}, in, out)
err := c.cc.Invoke(ctx, "/proto.MCPConfigurator/GetMCPServerConfigs", drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{}, in, out)
if err != nil {
return nil, err
}
@@ -298,7 +298,7 @@ func (c *drpcMCPConfiguratorClient) GetMCPServerConfigs(ctx context.Context, in
func (c *drpcMCPConfiguratorClient) GetMCPServerAccessTokensBatch(ctx context.Context, in *GetMCPServerAccessTokensBatchRequest) (*GetMCPServerAccessTokensBatchResponse, error) {
out := new(GetMCPServerAccessTokensBatchResponse)
err := c.cc.Invoke(ctx, "/proto.MCPConfigurator/GetMCPServerAccessTokensBatch", drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{}, in, out)
err := c.cc.Invoke(ctx, "/proto.MCPConfigurator/GetMCPServerAccessTokensBatch", drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{}, in, out)
if err != nil {
return nil, err
}
@@ -327,7 +327,7 @@ func (DRPCMCPConfiguratorDescription) NumMethods() int { return 2 }
func (DRPCMCPConfiguratorDescription) Method(n int) (string, drpc.Encoding, drpc.Receiver, interface{}, bool) {
switch n {
case 0:
return "/proto.MCPConfigurator/GetMCPServerConfigs", drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{},
return "/proto.MCPConfigurator/GetMCPServerConfigs", drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCMCPConfiguratorServer).
GetMCPServerConfigs(
@@ -336,7 +336,7 @@ func (DRPCMCPConfiguratorDescription) Method(n int) (string, drpc.Encoding, drpc
)
}, DRPCMCPConfiguratorServer.GetMCPServerConfigs, true
case 1:
return "/proto.MCPConfigurator/GetMCPServerAccessTokensBatch", drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{},
return "/proto.MCPConfigurator/GetMCPServerAccessTokensBatch", drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCMCPConfiguratorServer).
GetMCPServerAccessTokensBatch(
@@ -363,7 +363,7 @@ type drpcMCPConfigurator_GetMCPServerConfigsStream struct {
}
func (x *drpcMCPConfigurator_GetMCPServerConfigsStream) SendAndClose(m *GetMCPServerConfigsResponse) error {
if err := x.MsgSend(m, drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{}); err != nil {
if err := x.MsgSend(m, drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{}); err != nil {
return err
}
return x.CloseSend()
@@ -379,7 +379,7 @@ type drpcMCPConfigurator_GetMCPServerAccessTokensBatchStream struct {
}
func (x *drpcMCPConfigurator_GetMCPServerAccessTokensBatchStream) SendAndClose(m *GetMCPServerAccessTokensBatchResponse) error {
if err := x.MsgSend(m, drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{}); err != nil {
if err := x.MsgSend(m, drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{}); err != nil {
return err
}
return x.CloseSend()
@@ -403,7 +403,7 @@ func (c *drpcAuthorizerClient) DRPCConn() drpc.Conn { return c.cc }
func (c *drpcAuthorizerClient) IsAuthorized(ctx context.Context, in *IsAuthorizedRequest) (*IsAuthorizedResponse, error) {
out := new(IsAuthorizedResponse)
err := c.cc.Invoke(ctx, "/proto.Authorizer/IsAuthorized", drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{}, in, out)
err := c.cc.Invoke(ctx, "/proto.Authorizer/IsAuthorized", drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{}, in, out)
if err != nil {
return nil, err
}
@@ -427,7 +427,7 @@ func (DRPCAuthorizerDescription) NumMethods() int { return 1 }
func (DRPCAuthorizerDescription) Method(n int) (string, drpc.Encoding, drpc.Receiver, interface{}, bool) {
switch n {
case 0:
return "/proto.Authorizer/IsAuthorized", drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{},
return "/proto.Authorizer/IsAuthorized", drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{},
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCAuthorizerServer).
IsAuthorized(
@@ -454,7 +454,7 @@ type drpcAuthorizer_IsAuthorizedStream struct {
}
func (x *drpcAuthorizer_IsAuthorizedStream) SendAndClose(m *IsAuthorizedResponse) error {
if err := x.MsgSend(m, drpcEncoding_File_enterprise_x_aibridged_proto_aibridged_proto{}); err != nil {
if err := x.MsgSend(m, drpcEncoding_File_enterprise_aibridged_proto_aibridged_proto{}); err != nil {
return err
}
return x.CloseSend()
@@ -1,6 +1,6 @@
package aibridged
import "github.com/coder/coder/v2/enterprise/x/aibridged/proto"
import "github.com/coder/coder/v2/enterprise/aibridged/proto"
type DRPCServer interface {
proto.DRPCRecorderServer
@@ -11,7 +11,7 @@ import (
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/coder/coder/v2/coderd/util/ptr"
"github.com/coder/coder/v2/enterprise/x/aibridged/proto"
"github.com/coder/coder/v2/enterprise/aibridged/proto"
"github.com/coder/aibridge"
)
@@ -24,8 +24,8 @@ import (
"github.com/coder/coder/v2/coderd/httpmw"
codermcp "github.com/coder/coder/v2/coderd/mcp"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/enterprise/x/aibridged"
"github.com/coder/coder/v2/enterprise/x/aibridged/proto"
"github.com/coder/coder/v2/enterprise/aibridged"
"github.com/coder/coder/v2/enterprise/aibridged/proto"
)
var (
@@ -28,9 +28,9 @@ import (
codermcp "github.com/coder/coder/v2/coderd/mcp"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/cryptorand"
"github.com/coder/coder/v2/enterprise/x/aibridged"
"github.com/coder/coder/v2/enterprise/x/aibridged/proto"
"github.com/coder/coder/v2/enterprise/x/aibridgedserver"
"github.com/coder/coder/v2/enterprise/aibridged"
"github.com/coder/coder/v2/enterprise/aibridged/proto"
"github.com/coder/coder/v2/enterprise/aibridgedserver"
"github.com/coder/coder/v2/testutil"
)
@@ -134,8 +134,7 @@ func (r *RootCmd) aibridgeInterceptionsList() *serpent.Command {
return xerrors.Errorf("limit value must be between 1 and %d", maxInterceptionsLimit)
}
expCli := codersdk.NewExperimentalClient(client)
resp, err := expCli.AIBridgeListInterceptions(inv.Context(), codersdk.AIBridgeListInterceptionsFilter{
resp, err := client.AIBridgeListInterceptions(inv.Context(), codersdk.AIBridgeListInterceptionsFilter{
Pagination: codersdk.Pagination{
AfterID: afterID,
// #nosec G115 - Checked above.
@@ -27,7 +27,6 @@ func TestAIBridgeListInterceptions(t *testing.T) {
t.Parallel()
dv := coderdtest.DeploymentValues(t)
dv.Experiments = []string{string(codersdk.ExperimentAIBridge)}
client, db, owner := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{
Options: &coderdtest.Options{
DeploymentValues: dv,
@@ -55,7 +54,6 @@ func TestAIBridgeListInterceptions(t *testing.T) {
}, nil)
args := []string{
"exp",
"aibridge",
"interceptions",
"list",
@@ -78,7 +76,6 @@ func TestAIBridgeListInterceptions(t *testing.T) {
t.Parallel()
dv := coderdtest.DeploymentValues(t)
dv.Experiments = []string{string(codersdk.ExperimentAIBridge)}
client, db, owner := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{
Options: &coderdtest.Options{
DeploymentValues: dv,
@@ -137,7 +134,6 @@ func TestAIBridgeListInterceptions(t *testing.T) {
}, nil)
args := []string{
"exp",
"aibridge",
"interceptions",
"list",
@@ -164,7 +160,6 @@ func TestAIBridgeListInterceptions(t *testing.T) {
t.Parallel()
dv := coderdtest.DeploymentValues(t)
dv.Experiments = []string{string(codersdk.ExperimentAIBridge)}
client, db, owner := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{
Options: &coderdtest.Options{
DeploymentValues: dv,
@@ -192,7 +187,6 @@ func TestAIBridgeListInterceptions(t *testing.T) {
}, nil)
args := []string{
"exp",
"aibridge",
"interceptions",
"list",
+1 -1
View File
@@ -9,8 +9,8 @@ import (
"github.com/coder/aibridge"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/enterprise/aibridged"
"github.com/coder/coder/v2/enterprise/coderd"
"github.com/coder/coder/v2/enterprise/x/aibridged"
)
func newAIBridgeDaemon(coderAPI *coderd.API) (*aibridged.Server, error) {
+3 -4
View File
@@ -25,13 +25,12 @@ func (r *RootCmd) enterpriseOnly() []*serpent.Command {
r.prebuilds(),
r.provisionerd(),
r.externalWorkspaces(),
r.aibridge(),
}
}
func (r *RootCmd) enterpriseExperimental() []*serpent.Command {
return []*serpent.Command{
r.aibridge(),
}
func (*RootCmd) enterpriseExperimental() []*serpent.Command {
return []*serpent.Command{}
}
func (r *RootCmd) EnterpriseSubcommands() []*serpent.Command {
+12 -24
View File
@@ -7,7 +7,6 @@ import (
"database/sql"
"encoding/base64"
"errors"
"fmt"
"io"
"net/url"
@@ -16,8 +15,8 @@ import (
"tailscale.com/types/key"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/cryptorand"
"github.com/coder/coder/v2/enterprise/aibridged"
"github.com/coder/coder/v2/enterprise/audit"
"github.com/coder/coder/v2/enterprise/audit/backends"
"github.com/coder/coder/v2/enterprise/coderd"
@@ -25,7 +24,6 @@ import (
"github.com/coder/coder/v2/enterprise/coderd/usage"
"github.com/coder/coder/v2/enterprise/dbcrypt"
"github.com/coder/coder/v2/enterprise/trialer"
"github.com/coder/coder/v2/enterprise/x/aibridged"
"github.com/coder/coder/v2/tailnet"
"github.com/coder/quartz"
"github.com/coder/serpent"
@@ -146,8 +144,6 @@ func (r *RootCmd) Server(_ func()) *serpent.Command {
}
closers.Add(publisher)
experiments := agplcoderd.ReadExperiments(options.Logger, options.DeploymentValues.Experiments.Value())
// In-memory aibridge daemon.
// TODO(@deansheather): the lifecycle of the aibridged server is
// probably better managed by the enterprise API type itself. Managing
@@ -155,26 +151,18 @@ func (r *RootCmd) Server(_ func()) *serpent.Command {
// is not entitled to the feature.
var aibridgeDaemon *aibridged.Server
if options.DeploymentValues.AI.BridgeConfig.Enabled {
if experiments.Enabled(codersdk.ExperimentAIBridge) {
aibridgeDaemon, err = newAIBridgeDaemon(api)
if err != nil {
return nil, nil, xerrors.Errorf("create aibridged: %w", err)
}
api.RegisterInMemoryAIBridgedHTTPHandler(aibridgeDaemon)
// When running as an in-memory daemon, the HTTP handler is wired into the
// coderd API and therefore is subject to its context. Calling Close() on
// aibridged will NOT affect in-flight requests but those will be closed once
// the API server is itself shutdown.
closers.Add(aibridgeDaemon)
} else {
api.Logger.Warn(ctx, fmt.Sprintf("CODER_AIBRIDGE_ENABLED=true but experiment %q not enabled", codersdk.ExperimentAIBridge))
}
} else {
if experiments.Enabled(codersdk.ExperimentAIBridge) {
api.Logger.Warn(ctx, "aibridge experiment enabled but CODER_AIBRIDGE_ENABLED=false")
aibridgeDaemon, err = newAIBridgeDaemon(api)
if err != nil {
return nil, nil, xerrors.Errorf("create aibridged: %w", err)
}
api.RegisterInMemoryAIBridgedHTTPHandler(aibridgeDaemon)
// When running as an in-memory daemon, the HTTP handler is wired into the
// coderd API and therefore is subject to its context. Calling Close() on
// aibridged will NOT affect in-flight requests but those will be closed once
// the API server is itself shutdown.
closers.Add(aibridgeDaemon)
}
return api.AGPL, closers, nil
+1
View File
@@ -14,6 +14,7 @@ USAGE:
$ coder templates init
SUBCOMMANDS:
aibridge Manage AIBridge.
external-workspaces Create or manage external workspaces
features List Enterprise features
groups Manage groups
+12
View File
@@ -0,0 +1,12 @@
coder v0.0.0-devel
USAGE:
coder aibridge
Manage AIBridge.
SUBCOMMANDS:
interceptions Manage AIBridge interceptions.
———
Run `coder --help` for a list of global options.
@@ -0,0 +1,12 @@
coder v0.0.0-devel
USAGE:
coder aibridge interceptions
Manage AIBridge interceptions.
SUBCOMMANDS:
list List AIBridge interceptions as JSON.
———
Run `coder --help` for a list of global options.
@@ -0,0 +1,37 @@
coder v0.0.0-devel
USAGE:
coder aibridge interceptions list [flags]
List AIBridge interceptions as JSON.
OPTIONS:
--after-id string
The ID of the last result on the previous page to use as a pagination
cursor.
--initiator string
Only return interceptions initiated by this user. Accepts a user ID,
username, or "me".
--limit int (default: 100)
The limit of results to return. Must be between 1 and 1000.
--model string
Only return interceptions from this model.
--provider string
Only return interceptions from this provider.
--started-after string
Only return interceptions started after this time. Must be before
'started-before' if set. Accepts a time in the RFC 3339 format, e.g.
"====[timestamp]=====07:00".
--started-before string
Only return interceptions started before this time. Must be after
'started-after' if set. Accepts a time in the RFC 3339 format, e.g.
"====[timestamp]=====07:00".
———
Run `coder --help` for a list of global options.
+35
View File
@@ -81,6 +81,41 @@ OPTIONS:
Periodically check for new releases of Coder and inform the owner. The
check is performed once per day.
AIBRIDGE OPTIONS:
--aibridge-anthropic-base-url string, $CODER_AIBRIDGE_ANTHROPIC_BASE_URL (default: https://api.anthropic.com/)
The base URL of the Anthropic API.
--aibridge-anthropic-key string, $CODER_AIBRIDGE_ANTHROPIC_KEY
The key to authenticate against the Anthropic API.
--aibridge-bedrock-access-key string, $CODER_AIBRIDGE_BEDROCK_ACCESS_KEY
The access key to authenticate against the AWS Bedrock API.
--aibridge-bedrock-access-key-secret string, $CODER_AIBRIDGE_BEDROCK_ACCESS_KEY_SECRET
The access key secret to use with the access key to authenticate
against the AWS Bedrock API.
--aibridge-bedrock-model string, $CODER_AIBRIDGE_BEDROCK_MODEL (default: global.anthropic.claude-sonnet-4-5-20250929-v1:0)
The model to use when making requests to the AWS Bedrock API.
--aibridge-bedrock-region string, $CODER_AIBRIDGE_BEDROCK_REGION
The AWS Bedrock API region.
--aibridge-bedrock-small-fastmodel string, $CODER_AIBRIDGE_BEDROCK_SMALL_FAST_MODEL (default: global.anthropic.claude-haiku-4-5-20251001-v1:0)
The small fast model to use when making requests to the AWS Bedrock
API. Claude Code uses Haiku-class models to perform background tasks.
See
https://docs.claude.com/en/docs/claude-code/settings#environment-variables.
--aibridge-enabled bool, $CODER_AIBRIDGE_ENABLED (default: false)
Whether to start an in-memory aibridged instance.
--aibridge-openai-base-url string, $CODER_AIBRIDGE_OPENAI_BASE_URL (default: https://api.openai.com/v1/)
The base URL of the OpenAI API.
--aibridge-openai-key string, $CODER_AIBRIDGE_OPENAI_KEY
The key to authenticate against the OpenAI API.
CLIENT OPTIONS:
These options change the behavior of how clients interact with the Coder.
Clients include the Coder CLI, Coder Desktop, IDE extensions, and the web UI.
+1 -1
View File
@@ -36,7 +36,7 @@ const (
// @Param after_id query string false "Cursor pagination after ID (cannot be used with offset)"
// @Param offset query int false "Offset pagination (cannot be used with after_id)"
// @Success 200 {object} codersdk.AIBridgeListInterceptionsResponse
// @Router /api/experimental/aibridge/interceptions [get]
// @Router /aibridge/interceptions [get]
func (api *API) aiBridgeListInterceptions(rw http.ResponseWriter, r *http.Request) {
ctx := r.Context()
apiKey := httpmw.APIKey(r)
+12 -25
View File
@@ -27,7 +27,6 @@ func TestAIBridgeListInterceptions(t *testing.T) {
t.Parallel()
dv := coderdtest.DeploymentValues(t)
dv.Experiments = []string{string(codersdk.ExperimentAIBridge)}
client, _ := coderdenttest.New(t, &coderdenttest.Options{
Options: &coderdtest.Options{
DeploymentValues: dv,
@@ -37,10 +36,10 @@ func TestAIBridgeListInterceptions(t *testing.T) {
Features: license.Features{},
},
})
experimentalClient := codersdk.NewExperimentalClient(client)
ctx := testutil.Context(t, testutil.WaitLong)
_, err := experimentalClient.AIBridgeListInterceptions(ctx, codersdk.AIBridgeListInterceptionsFilter{})
//nolint:gocritic // Owner role is irrelevant here.
_, err := client.AIBridgeListInterceptions(ctx, codersdk.AIBridgeListInterceptionsFilter{})
var sdkErr *codersdk.Error
require.ErrorAs(t, err, &sdkErr)
require.Equal(t, http.StatusForbidden, sdkErr.StatusCode())
@@ -50,7 +49,6 @@ func TestAIBridgeListInterceptions(t *testing.T) {
t.Run("EmptyDB", func(t *testing.T) {
t.Parallel()
dv := coderdtest.DeploymentValues(t)
dv.Experiments = []string{string(codersdk.ExperimentAIBridge)}
client, _ := coderdenttest.New(t, &coderdenttest.Options{
Options: &coderdtest.Options{
DeploymentValues: dv,
@@ -61,9 +59,9 @@ func TestAIBridgeListInterceptions(t *testing.T) {
},
},
})
experimentalClient := codersdk.NewExperimentalClient(client)
ctx := testutil.Context(t, testutil.WaitLong)
res, err := experimentalClient.AIBridgeListInterceptions(ctx, codersdk.AIBridgeListInterceptionsFilter{})
//nolint:gocritic // Owner role is irrelevant here.
res, err := client.AIBridgeListInterceptions(ctx, codersdk.AIBridgeListInterceptionsFilter{})
require.NoError(t, err)
require.Empty(t, res.Results)
})
@@ -71,7 +69,6 @@ func TestAIBridgeListInterceptions(t *testing.T) {
t.Run("OK", func(t *testing.T) {
t.Parallel()
dv := coderdtest.DeploymentValues(t)
dv.Experiments = []string{string(codersdk.ExperimentAIBridge)}
client, db, firstUser := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{
Options: &coderdtest.Options{
DeploymentValues: dv,
@@ -82,7 +79,6 @@ func TestAIBridgeListInterceptions(t *testing.T) {
},
},
})
experimentalClient := codersdk.NewExperimentalClient(client)
ctx := testutil.Context(t, testutil.WaitLong)
user1, err := client.User(ctx, codersdk.Me)
@@ -143,7 +139,7 @@ func TestAIBridgeListInterceptions(t *testing.T) {
i1SDK := db2sdk.AIBridgeInterception(i1, user1Visible, []database.AIBridgeTokenUsage{i1tok2, i1tok1}, []database.AIBridgeUserPrompt{i1up2, i1up1}, []database.AIBridgeToolUsage{i1tool2, i1tool1})
i2SDK := db2sdk.AIBridgeInterception(i2, user2Visible, nil, nil, nil)
res, err := experimentalClient.AIBridgeListInterceptions(ctx, codersdk.AIBridgeListInterceptionsFilter{})
res, err := client.AIBridgeListInterceptions(ctx, codersdk.AIBridgeListInterceptionsFilter{})
require.NoError(t, err)
require.Len(t, res.Results, 2)
require.Equal(t, i2SDK.ID, res.Results[0].ID)
@@ -183,7 +179,6 @@ func TestAIBridgeListInterceptions(t *testing.T) {
t.Parallel()
dv := coderdtest.DeploymentValues(t)
dv.Experiments = []string{string(codersdk.ExperimentAIBridge)}
client, db, firstUser := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{
Options: &coderdtest.Options{
DeploymentValues: dv,
@@ -194,7 +189,6 @@ func TestAIBridgeListInterceptions(t *testing.T) {
},
},
})
experimentalClient := codersdk.NewExperimentalClient(client)
ctx := testutil.Context(t, testutil.WaitLong)
allInterceptionIDs := make([]uuid.UUID, 0, 20)
@@ -225,7 +219,7 @@ func TestAIBridgeListInterceptions(t *testing.T) {
}
// Try to fetch with an invalid limit.
res, err := experimentalClient.AIBridgeListInterceptions(ctx, codersdk.AIBridgeListInterceptionsFilter{
res, err := client.AIBridgeListInterceptions(ctx, codersdk.AIBridgeListInterceptionsFilter{
Pagination: codersdk.Pagination{
Limit: 1001,
},
@@ -236,7 +230,7 @@ func TestAIBridgeListInterceptions(t *testing.T) {
require.Empty(t, res.Results)
// Try to fetch with both after_id and offset pagination.
res, err = experimentalClient.AIBridgeListInterceptions(ctx, codersdk.AIBridgeListInterceptionsFilter{
res, err = client.AIBridgeListInterceptions(ctx, codersdk.AIBridgeListInterceptionsFilter{
Pagination: codersdk.Pagination{
AfterID: allInterceptionIDs[0],
Offset: 1,
@@ -269,7 +263,7 @@ func TestAIBridgeListInterceptions(t *testing.T) {
} else {
pagination.Offset = len(interceptionIDs)
}
res, err := experimentalClient.AIBridgeListInterceptions(ctx, codersdk.AIBridgeListInterceptionsFilter{
res, err := client.AIBridgeListInterceptions(ctx, codersdk.AIBridgeListInterceptionsFilter{
Pagination: pagination,
})
require.NoError(t, err)
@@ -299,7 +293,6 @@ func TestAIBridgeListInterceptions(t *testing.T) {
t.Run("Authorized", func(t *testing.T) {
t.Parallel()
dv := coderdtest.DeploymentValues(t)
dv.Experiments = []string{string(codersdk.ExperimentAIBridge)}
adminClient, db, firstUser := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{
Options: &coderdtest.Options{
DeploymentValues: dv,
@@ -310,11 +303,9 @@ func TestAIBridgeListInterceptions(t *testing.T) {
},
},
})
adminExperimentalClient := codersdk.NewExperimentalClient(adminClient)
ctx := testutil.Context(t, testutil.WaitLong)
secondUserClient, secondUser := coderdtest.CreateAnotherUser(t, adminClient, firstUser.OrganizationID)
secondUserExperimentalClient := codersdk.NewExperimentalClient(secondUserClient)
now := dbtime.Now()
i1 := dbgen.AIBridgeInterception(t, db, database.InsertAIBridgeInterceptionParams{
@@ -327,7 +318,7 @@ func TestAIBridgeListInterceptions(t *testing.T) {
}, &now)
// Admin can see all interceptions.
res, err := adminExperimentalClient.AIBridgeListInterceptions(ctx, codersdk.AIBridgeListInterceptionsFilter{})
res, err := adminClient.AIBridgeListInterceptions(ctx, codersdk.AIBridgeListInterceptionsFilter{})
require.NoError(t, err)
require.EqualValues(t, 2, res.Count)
require.Len(t, res.Results, 2)
@@ -335,7 +326,7 @@ func TestAIBridgeListInterceptions(t *testing.T) {
require.Equal(t, i2.ID, res.Results[1].ID)
// Second user can only see their own interceptions.
res, err = secondUserExperimentalClient.AIBridgeListInterceptions(ctx, codersdk.AIBridgeListInterceptionsFilter{})
res, err = secondUserClient.AIBridgeListInterceptions(ctx, codersdk.AIBridgeListInterceptionsFilter{})
require.NoError(t, err)
require.EqualValues(t, 1, res.Count)
require.Len(t, res.Results, 1)
@@ -345,7 +336,6 @@ func TestAIBridgeListInterceptions(t *testing.T) {
t.Run("Filter", func(t *testing.T) {
t.Parallel()
dv := coderdtest.DeploymentValues(t)
dv.Experiments = []string{string(codersdk.ExperimentAIBridge)}
client, db, firstUser := coderdenttest.NewWithDatabase(t, &coderdenttest.Options{
Options: &coderdtest.Options{
DeploymentValues: dv,
@@ -356,7 +346,6 @@ func TestAIBridgeListInterceptions(t *testing.T) {
},
},
})
experimentalClient := codersdk.NewExperimentalClient(client)
ctx := testutil.Context(t, testutil.WaitLong)
user1, err := client.User(ctx, codersdk.Me)
@@ -506,7 +495,7 @@ func TestAIBridgeListInterceptions(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitLong)
res, err := experimentalClient.AIBridgeListInterceptions(ctx, tc.filter)
res, err := client.AIBridgeListInterceptions(ctx, tc.filter)
require.NoError(t, err)
require.EqualValues(t, len(tc.want), res.Count)
// We just compare UUID strings for the sake of this test.
@@ -526,7 +515,6 @@ func TestAIBridgeListInterceptions(t *testing.T) {
t.Run("FilterErrors", func(t *testing.T) {
t.Parallel()
dv := coderdtest.DeploymentValues(t)
dv.Experiments = []string{string(codersdk.ExperimentAIBridge)}
client, _ := coderdenttest.New(t, &coderdenttest.Options{
Options: &coderdtest.Options{
DeploymentValues: dv,
@@ -537,7 +525,6 @@ func TestAIBridgeListInterceptions(t *testing.T) {
},
},
})
experimentalClient := codersdk.NewExperimentalClient(client)
// No need to insert any test data, we're just testing the filter
// errors.
@@ -594,7 +581,7 @@ func TestAIBridgeListInterceptions(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitLong)
res, err := experimentalClient.AIBridgeListInterceptions(ctx, codersdk.AIBridgeListInterceptionsFilter{
res, err := client.AIBridgeListInterceptions(ctx, codersdk.AIBridgeListInterceptionsFilter{
FilterQuery: tc.q,
})
var sdkErr *codersdk.Error
+3 -3
View File
@@ -14,9 +14,9 @@ import (
"github.com/coder/coder/v2/coderd/tracing"
"github.com/coder/coder/v2/codersdk/drpcsdk"
"github.com/coder/coder/v2/enterprise/x/aibridged"
aibridgedproto "github.com/coder/coder/v2/enterprise/x/aibridged/proto"
"github.com/coder/coder/v2/enterprise/x/aibridgedserver"
"github.com/coder/coder/v2/enterprise/aibridged"
aibridgedproto "github.com/coder/coder/v2/enterprise/aibridged/proto"
"github.com/coder/coder/v2/enterprise/aibridgedserver"
)
// RegisterInMemoryAIBridgedHTTPHandler mounts [aibridged.Server]'s HTTP router onto
+3 -6
View File
@@ -226,12 +226,9 @@ func New(ctx context.Context, options *Options) (_ *API, err error) {
return api.refreshEntitlements(ctx)
}
api.AGPL.ExperimentalHandler.Group(func(r chi.Router) {
api.AGPL.APIHandler.Group(func(r chi.Router) {
r.Route("/aibridge", func(r chi.Router) {
r.Use(
api.RequireFeatureMW(codersdk.FeatureAIBridge),
httpmw.RequireExperimentWithDevBypass(api.AGPL.Experiments, codersdk.ExperimentAIBridge),
)
r.Use(api.RequireFeatureMW(codersdk.FeatureAIBridge))
r.Group(func(r chi.Router) {
r.Use(apiKeyMiddleware)
r.Get("/interceptions", api.aiBridgeListInterceptions)
@@ -246,7 +243,7 @@ func New(ctx context.Context, options *Options) (_ *API, err error) {
})
return
}
http.StripPrefix("/api/experimental/aibridge", api.aibridgedHandler).ServeHTTP(rw, r)
http.StripPrefix("/api/v2/aibridge", api.aibridgedHandler).ServeHTTP(rw, r)
})
})
})
+30 -27
View File
@@ -262,6 +262,36 @@ func LicensesEntitlements(
claims.FeatureSet = codersdk.FeatureSetEnterprise
}
// Temporary: If the license doesn't have a managed agent limit, we add
// a default of 1000 managed agents per deployment for a 100
// year license term.
// This only applies to "Premium" licenses.
if claims.FeatureSet == codersdk.FeatureSetPremium {
var (
// We intentionally use a fixed issue time here, before the
// entitlement was added to any new licenses, so any
// licenses with the corresponding features actually set
// trump this default entitlement, even if they are set to a
// smaller value.
defaultManagedAgentsIsuedAt = time.Date(2025, 7, 1, 0, 0, 0, 0, time.UTC)
defaultManagedAgentsStart = defaultManagedAgentsIsuedAt
defaultManagedAgentsEnd = defaultManagedAgentsStart.AddDate(100, 0, 0)
defaultManagedAgentsSoftLimit int64 = 1000
defaultManagedAgentsHardLimit int64 = 1000
)
entitlements.AddFeature(codersdk.FeatureManagedAgentLimit, codersdk.Feature{
Enabled: true,
Entitlement: entitlement,
SoftLimit: &defaultManagedAgentsSoftLimit,
Limit: &defaultManagedAgentsHardLimit,
UsagePeriod: &codersdk.UsagePeriod{
IssuedAt: defaultManagedAgentsIsuedAt,
Start: defaultManagedAgentsStart,
End: defaultManagedAgentsEnd,
},
})
}
// Add all features from the feature set defined.
for _, featureName := range claims.FeatureSet.Features() {
if _, ok := licenseForbiddenFeatures[featureName]; ok {
@@ -338,33 +368,6 @@ func LicensesEntitlements(
Limit: &featureValue,
Actual: &featureArguments.ActiveUserCount,
})
// Temporary: If the license doesn't have a managed agent limit,
// we add a default of 800 managed agents per user.
// This only applies to "Premium" licenses.
if claims.FeatureSet == codersdk.FeatureSetPremium {
var (
// We intentionally use a fixed issue time here, before the
// entitlement was added to any new licenses, so any
// licenses with the corresponding features actually set
// trump this default entitlement, even if they are set to a
// smaller value.
issueTime = time.Date(2025, 7, 1, 0, 0, 0, 0, time.UTC)
defaultSoftAgentLimit = 800 * featureValue
defaultHardAgentLimit = 1000 * featureValue
)
entitlements.AddFeature(codersdk.FeatureManagedAgentLimit, codersdk.Feature{
Enabled: true,
Entitlement: entitlement,
SoftLimit: &defaultSoftAgentLimit,
Limit: &defaultHardAgentLimit,
UsagePeriod: &codersdk.UsagePeriod{
IssuedAt: issueTime,
Start: usagePeriodStart,
End: usagePeriodEnd,
},
})
}
default:
if featureValue <= 0 {
// The feature is disabled.
+15 -13
View File
@@ -520,8 +520,8 @@ func TestEntitlements(t *testing.T) {
t.Run("Premium", func(t *testing.T) {
t.Parallel()
const userLimit = 1
const expectedAgentSoftLimit = 800 * userLimit
const expectedAgentHardLimit = 1000 * userLimit
const expectedAgentSoftLimit = 1000
const expectedAgentHardLimit = 1000
db, _ := dbtestutil.NewDB(t)
licenseOptions := coderdenttest.LicenseOptions{
@@ -530,9 +530,7 @@ func TestEntitlements(t *testing.T) {
ExpiresAt: dbtime.Now().Add(time.Hour * 24 * 2),
FeatureSet: codersdk.FeatureSetPremium,
Features: license.Features{
// Temporary: allows the default value for the
// managed_agent_limit feature to be used.
codersdk.FeatureUserLimit: 1,
codersdk.FeatureUserLimit: userLimit,
},
}
_, err := db.InsertLicense(context.Background(), database.InsertLicenseParams{
@@ -557,11 +555,15 @@ func TestEntitlements(t *testing.T) {
require.Equal(t, codersdk.EntitlementEntitled, agentEntitlement.Entitlement)
require.EqualValues(t, expectedAgentSoftLimit, *agentEntitlement.SoftLimit)
require.EqualValues(t, expectedAgentHardLimit, *agentEntitlement.Limit)
// This might be shocking, but there's a sound reason for this.
// See license.go for more details.
require.Equal(t, time.Date(2025, 7, 1, 0, 0, 0, 0, time.UTC), agentEntitlement.UsagePeriod.IssuedAt)
require.WithinDuration(t, licenseOptions.NotBefore, agentEntitlement.UsagePeriod.Start, time.Second)
require.WithinDuration(t, licenseOptions.ExpiresAt, agentEntitlement.UsagePeriod.End, time.Second)
agentUsagePeriodIssuedAt := time.Date(2025, 7, 1, 0, 0, 0, 0, time.UTC)
agentUsagePeriodStart := agentUsagePeriodIssuedAt
agentUsagePeriodEnd := agentUsagePeriodStart.AddDate(100, 0, 0)
require.Equal(t, agentUsagePeriodIssuedAt, agentEntitlement.UsagePeriod.IssuedAt)
require.WithinDuration(t, agentUsagePeriodStart, agentEntitlement.UsagePeriod.Start, time.Second)
require.WithinDuration(t, agentUsagePeriodEnd, agentEntitlement.UsagePeriod.End, time.Second)
continue
}
@@ -1496,14 +1498,14 @@ func TestManagedAgentLimitDefault(t *testing.T) {
})
// "Premium" licenses should receive a default managed agent limit of:
// soft = 800 * user_limit
// hard = 1000 * user_limit
// soft = 1000
// hard = 1000
t.Run("Premium", func(t *testing.T) {
t.Parallel()
const userLimit = 100
const softLimit = 800 * userLimit
const hardLimit = 1000 * userLimit
const userLimit = 33
const softLimit = 1000
const hardLimit = 1000
lic := database.License{
ID: 1,
UploadedAt: time.Now(),
+77 -73
View File
@@ -2,12 +2,13 @@ package prebuilds
import (
"context"
"database/sql"
"errors"
"github.com/google/uuid"
"golang.org/x/xerrors"
"cdr.dev/slog"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/quartz"
)
@@ -21,114 +22,117 @@ const (
// organizations for which prebuilt workspaces are requested. This is necessary because our data model requires that such
// prebuilt workspaces belong to a member of the organization of their eventual claimant.
type StoreMembershipReconciler struct {
store database.Store
clock quartz.Clock
store database.Store
clock quartz.Clock
logger slog.Logger
}
func NewStoreMembershipReconciler(store database.Store, clock quartz.Clock) StoreMembershipReconciler {
func NewStoreMembershipReconciler(store database.Store, clock quartz.Clock, logger slog.Logger) StoreMembershipReconciler {
return StoreMembershipReconciler{
store: store,
clock: clock,
store: store,
clock: clock,
logger: logger,
}
}
// ReconcileAll compares the current organization and group memberships of a user to the memberships required
// in order to create prebuilt workspaces. If the user in question is not yet a member of an organization that
// needs prebuilt workspaces, ReconcileAll will create the membership required.
// ReconcileAll ensures the prebuilds system user has the necessary memberships to create prebuilt workspaces.
// For each organization with prebuilds configured, it ensures:
// * The user is a member of the organization
// * A group exists with quota 0
// * The user is a member of that group
//
// To facilitate quota management, ReconcileAll will ensure:
// * the existence of a group (defined by PrebuiltWorkspacesGroupName) in each organization that needs prebuilt workspaces
// * that the prebuilds system user belongs to the group in each organization that needs prebuilt workspaces
// * that the group has a quota of 0 by default, which users can adjust based on their needs.
// Unique constraint violations are safely ignored (concurrent creation).
//
// ReconcileAll does not have an opinion on transaction or lock management. These responsibilities are left to the caller.
func (s StoreMembershipReconciler) ReconcileAll(ctx context.Context, userID uuid.UUID, presets []database.GetTemplatePresetsWithPrebuildsRow) error {
organizationMemberships, err := s.store.GetOrganizationsByUserID(ctx, database.GetOrganizationsByUserIDParams{
UserID: userID,
Deleted: sql.NullBool{
Bool: false,
Valid: true,
},
func (s StoreMembershipReconciler) ReconcileAll(ctx context.Context, userID uuid.UUID, groupName string) error {
orgStatuses, err := s.store.GetOrganizationsWithPrebuildStatus(ctx, database.GetOrganizationsWithPrebuildStatusParams{
UserID: userID,
GroupName: groupName,
})
if err != nil {
return xerrors.Errorf("determine prebuild organization membership: %w", err)
}
orgMemberships := make(map[uuid.UUID]struct{}, 0)
defaultOrg, err := s.store.GetDefaultOrganization(ctx)
if err != nil {
return xerrors.Errorf("get default organization: %w", err)
}
orgMemberships[defaultOrg.ID] = struct{}{}
for _, o := range organizationMemberships {
orgMemberships[o.ID] = struct{}{}
return xerrors.Errorf("get organizations with prebuild status: %w", err)
}
var membershipInsertionErrors error
for _, preset := range presets {
_, alreadyOrgMember := orgMemberships[preset.OrganizationID]
if !alreadyOrgMember {
// Add the organization to our list of memberships regardless of potential failure below
// to avoid a retry that will probably be doomed anyway.
orgMemberships[preset.OrganizationID] = struct{}{}
for _, orgStatus := range orgStatuses {
s.logger.Debug(ctx, "organization prebuild status",
slog.F("organization_id", orgStatus.OrganizationID),
slog.F("organization_name", orgStatus.OrganizationName),
slog.F("has_prebuild_user", orgStatus.HasPrebuildUser),
slog.F("has_prebuild_group", orgStatus.PrebuildsGroupID.Valid),
slog.F("has_prebuild_user_in_group", orgStatus.HasPrebuildUserInGroup))
// Insert the missing membership
// Add user to org if needed
if !orgStatus.HasPrebuildUser {
_, err = s.store.InsertOrganizationMember(ctx, database.InsertOrganizationMemberParams{
OrganizationID: preset.OrganizationID,
OrganizationID: orgStatus.OrganizationID,
UserID: userID,
CreatedAt: s.clock.Now(),
UpdatedAt: s.clock.Now(),
Roles: []string{},
})
if err != nil {
membershipInsertionErrors = errors.Join(membershipInsertionErrors, xerrors.Errorf("insert membership for prebuilt workspaces: %w", err))
// Unique violation means organization membership was created after status check, safe to ignore.
if err != nil && !database.IsUniqueViolation(err) {
membershipInsertionErrors = errors.Join(membershipInsertionErrors, err)
continue
}
if err == nil {
s.logger.Info(ctx, "added prebuilds user to organization",
slog.F("organization_id", orgStatus.OrganizationID),
slog.F("organization_name", orgStatus.OrganizationName),
slog.F("prebuilds_user", userID.String()))
}
}
// determine whether the org already has a prebuilds group
prebuildsGroupExists := true
prebuildsGroup, err := s.store.GetGroupByOrgAndName(ctx, database.GetGroupByOrgAndNameParams{
OrganizationID: preset.OrganizationID,
Name: PrebuiltWorkspacesGroupName,
})
if err != nil {
if !xerrors.Is(err, sql.ErrNoRows) {
membershipInsertionErrors = errors.Join(membershipInsertionErrors, xerrors.Errorf("get prebuilds group: %w", err))
continue
}
prebuildsGroupExists = false
}
// if the prebuilds group does not exist, create it
if !prebuildsGroupExists {
// create a "prebuilds" group in the organization and add the system user to it
// this group will have a quota of 0 by default, which users can adjust based on their needs
prebuildsGroup, err = s.store.InsertGroup(ctx, database.InsertGroupParams{
// Create group if it doesn't exist
var groupID uuid.UUID
if !orgStatus.PrebuildsGroupID.Valid {
// Group doesn't exist, create it
group, err := s.store.InsertGroup(ctx, database.InsertGroupParams{
ID: uuid.New(),
Name: PrebuiltWorkspacesGroupName,
DisplayName: PrebuiltWorkspacesGroupDisplayName,
OrganizationID: preset.OrganizationID,
OrganizationID: orgStatus.OrganizationID,
AvatarURL: "",
QuotaAllowance: 0, // Default quota of 0, users should set this based on their needs
QuotaAllowance: 0,
})
if err != nil {
membershipInsertionErrors = errors.Join(membershipInsertionErrors, xerrors.Errorf("create prebuilds group: %w", err))
// Unique violation means group was created after status check, safe to ignore.
if err != nil && !database.IsUniqueViolation(err) {
membershipInsertionErrors = errors.Join(membershipInsertionErrors, err)
continue
}
if err == nil {
s.logger.Info(ctx, "created prebuilds group in organization",
slog.F("organization_id", orgStatus.OrganizationID),
slog.F("organization_name", orgStatus.OrganizationName),
slog.F("prebuilds_group", group.ID.String()))
}
groupID = group.ID
} else {
// Group exists
groupID = orgStatus.PrebuildsGroupID.UUID
}
// add the system user to the prebuilds group
err = s.store.InsertGroupMember(ctx, database.InsertGroupMemberParams{
GroupID: prebuildsGroup.ID,
UserID: userID,
})
if err != nil {
// ignore unique violation errors as the user might already be in the group
if !database.IsUniqueViolation(err) {
membershipInsertionErrors = errors.Join(membershipInsertionErrors, xerrors.Errorf("add system user to prebuilds group: %w", err))
// Add user to group if needed
if !orgStatus.HasPrebuildUserInGroup {
err = s.store.InsertGroupMember(ctx, database.InsertGroupMemberParams{
GroupID: groupID,
UserID: userID,
})
// Unique violation means group membership was created after status check, safe to ignore.
if err != nil && !database.IsUniqueViolation(err) {
membershipInsertionErrors = errors.Join(membershipInsertionErrors, err)
continue
}
if err == nil {
s.logger.Info(ctx, "added prebuilds user to prebuilds group",
slog.F("organization_id", orgStatus.OrganizationID),
slog.F("organization_name", orgStatus.OrganizationName),
slog.F("prebuilds_user", userID.String()),
slog.F("prebuilds_group", groupID.String()))
}
}
}
return membershipInsertionErrors
}
+140 -130
View File
@@ -7,16 +7,17 @@ import (
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"tailscale.com/types/ptr"
"github.com/coder/quartz"
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/coder/v2/coderd/coderdtest"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/dbfake"
"github.com/coder/coder/v2/coderd/database/dbgen"
"github.com/coder/coder/v2/enterprise/coderd/prebuilds"
"github.com/coder/coder/v2/testutil"
"github.com/coder/quartz"
)
// TestReconcileAll verifies that StoreMembershipReconciler correctly updates membership
@@ -26,169 +27,178 @@ func TestReconcileAll(t *testing.T) {
clock := quartz.NewMock(t)
// Helper to build a minimal Preset row belonging to a given org.
newPresetRow := func(orgID uuid.UUID) database.GetTemplatePresetsWithPrebuildsRow {
return database.GetTemplatePresetsWithPrebuildsRow{
ID: uuid.New(),
OrganizationID: orgID,
}
}
tests := []struct {
name string
includePreset []bool
includePreset bool
preExistingOrgMembership []bool
preExistingGroup []bool
preExistingGroupMembership []bool
// Expected outcomes
expectOrgMembershipExists *bool
expectGroupExists *bool
expectUserInGroup *bool
expectOrgMembershipExists bool
expectGroupExists bool
expectUserInGroup bool
}{
{
name: "if there are no presets, membership reconciliation is a no-op",
includePreset: []bool{false},
includePreset: false,
preExistingOrgMembership: []bool{true, false},
preExistingGroup: []bool{true, false},
preExistingGroupMembership: []bool{true, false},
expectOrgMembershipExists: ptr.To(false),
expectGroupExists: ptr.To(false),
expectOrgMembershipExists: false,
expectGroupExists: false,
expectUserInGroup: false,
},
{
name: "if there is a preset, then we should enforce org and group membership in all cases",
includePreset: []bool{true},
includePreset: true,
preExistingOrgMembership: []bool{true, false},
preExistingGroup: []bool{true, false},
preExistingGroupMembership: []bool{true, false},
expectOrgMembershipExists: ptr.To(true),
expectGroupExists: ptr.To(true),
expectUserInGroup: ptr.To(true),
expectOrgMembershipExists: true,
expectGroupExists: true,
expectUserInGroup: true,
},
}
for _, tc := range tests {
tc := tc
for _, includePreset := range tc.includePreset {
includePreset := includePreset
for _, preExistingOrgMembership := range tc.preExistingOrgMembership {
preExistingOrgMembership := preExistingOrgMembership
for _, preExistingGroup := range tc.preExistingGroup {
preExistingGroup := preExistingGroup
for _, preExistingGroupMembership := range tc.preExistingGroupMembership {
preExistingGroupMembership := preExistingGroupMembership
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
includePreset := tc.includePreset
for _, preExistingOrgMembership := range tc.preExistingOrgMembership {
preExistingOrgMembership := preExistingOrgMembership
for _, preExistingGroup := range tc.preExistingGroup {
preExistingGroup := preExistingGroup
for _, preExistingGroupMembership := range tc.preExistingGroupMembership {
preExistingGroupMembership := preExistingGroupMembership
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
// nolint:gocritic // Reconciliation happens as prebuilds system user, not a human user.
ctx := dbauthz.AsPrebuildsOrchestrator(testutil.Context(t, testutil.WaitLong))
_, db := coderdtest.NewWithDatabase(t, nil)
// nolint:gocritic // Reconciliation happens as prebuilds system user, not a human user.
ctx := dbauthz.AsPrebuildsOrchestrator(testutil.Context(t, testutil.WaitLong))
client, db := coderdtest.NewWithDatabase(t, nil)
owner := coderdtest.CreateFirstUser(t, client)
defaultOrg, err := db.GetDefaultOrganization(ctx)
require.NoError(t, err)
defaultOrg, err := db.GetDefaultOrganization(ctx)
require.NoError(t, err)
// introduce an unrelated organization to ensure that the membership reconciler doesn't interfere with it.
unrelatedOrg := dbgen.Organization(t, db, database.Organization{})
targetOrg := dbgen.Organization(t, db, database.Organization{})
// Introduce an unrelated organization to ensure that the membership reconciler doesn't interfere with it.
unrelatedOrg := dbgen.Organization(t, db, database.Organization{})
dbgen.OrganizationMember(t, db, database.OrganizationMember{OrganizationID: unrelatedOrg.ID, UserID: database.PrebuildsSystemUserID})
// Ensure membership to unrelated org.
dbgen.OrganizationMember(t, db, database.OrganizationMember{OrganizationID: unrelatedOrg.ID, UserID: database.PrebuildsSystemUserID})
// Organization to test
targetOrg := dbgen.Organization(t, db, database.Organization{})
if preExistingOrgMembership {
// System user already a member of both orgs.
dbgen.OrganizationMember(t, db, database.OrganizationMember{OrganizationID: targetOrg.ID, UserID: database.PrebuildsSystemUserID})
}
// Prebuilds system user is a member of the organization
if preExistingOrgMembership {
dbgen.OrganizationMember(t, db, database.OrganizationMember{OrganizationID: targetOrg.ID, UserID: database.PrebuildsSystemUserID})
}
// Create pre-existing prebuilds group if required by test case
var prebuildsGroup database.Group
if preExistingGroup {
prebuildsGroup = dbgen.Group(t, db, database.Group{
Name: prebuilds.PrebuiltWorkspacesGroupName,
DisplayName: prebuilds.PrebuiltWorkspacesGroupDisplayName,
OrganizationID: targetOrg.ID,
QuotaAllowance: 0,
})
// Add the system user to the group if preExistingGroupMembership is true
if preExistingGroupMembership {
dbgen.GroupMember(t, db, database.GroupMemberTable{
GroupID: prebuildsGroup.ID,
UserID: database.PrebuildsSystemUserID,
})
}
}
presets := []database.GetTemplatePresetsWithPrebuildsRow{newPresetRow(unrelatedOrg.ID)}
if includePreset {
presets = append(presets, newPresetRow(targetOrg.ID))
}
// Verify memberships before reconciliation.
preReconcileMemberships, err := db.GetOrganizationsByUserID(ctx, database.GetOrganizationsByUserIDParams{
UserID: database.PrebuildsSystemUserID,
})
require.NoError(t, err)
expectedMembershipsBefore := []uuid.UUID{defaultOrg.ID, unrelatedOrg.ID}
if preExistingOrgMembership {
expectedMembershipsBefore = append(expectedMembershipsBefore, targetOrg.ID)
}
require.ElementsMatch(t, expectedMembershipsBefore, extractOrgIDs(preReconcileMemberships))
// Reconcile
reconciler := prebuilds.NewStoreMembershipReconciler(db, clock)
require.NoError(t, reconciler.ReconcileAll(ctx, database.PrebuildsSystemUserID, presets))
// Verify memberships after reconciliation.
postReconcileMemberships, err := db.GetOrganizationsByUserID(ctx, database.GetOrganizationsByUserIDParams{
UserID: database.PrebuildsSystemUserID,
})
require.NoError(t, err)
expectedMembershipsAfter := expectedMembershipsBefore
if !preExistingOrgMembership && tc.expectOrgMembershipExists != nil && *tc.expectOrgMembershipExists {
expectedMembershipsAfter = append(expectedMembershipsAfter, targetOrg.ID)
}
require.ElementsMatch(t, expectedMembershipsAfter, extractOrgIDs(postReconcileMemberships))
// Verify prebuilds group behavior based on expected outcomes
prebuildsGroup, err = db.GetGroupByOrgAndName(ctx, database.GetGroupByOrgAndNameParams{
OrganizationID: targetOrg.ID,
// Organization has the prebuilds group
var prebuildsGroup database.Group
if preExistingGroup {
prebuildsGroup = dbgen.Group(t, db, database.Group{
Name: prebuilds.PrebuiltWorkspacesGroupName,
DisplayName: prebuilds.PrebuiltWorkspacesGroupDisplayName,
OrganizationID: targetOrg.ID,
QuotaAllowance: 0,
})
if tc.expectGroupExists != nil && *tc.expectGroupExists {
require.NoError(t, err)
require.Equal(t, prebuilds.PrebuiltWorkspacesGroupName, prebuildsGroup.Name)
require.Equal(t, prebuilds.PrebuiltWorkspacesGroupDisplayName, prebuildsGroup.DisplayName)
require.Equal(t, int32(0), prebuildsGroup.QuotaAllowance) // Default quota should be 0
if tc.expectUserInGroup != nil && *tc.expectUserInGroup {
// Check that the system user is a member of the prebuilds group
groupMembers, err := db.GetGroupMembersByGroupID(ctx, database.GetGroupMembersByGroupIDParams{
GroupID: prebuildsGroup.ID,
IncludeSystem: true,
})
require.NoError(t, err)
require.Len(t, groupMembers, 1)
require.Equal(t, database.PrebuildsSystemUserID, groupMembers[0].UserID)
}
// If no preset exists, then we do not enforce group membership:
if tc.expectUserInGroup != nil && !*tc.expectUserInGroup {
// Check that the system user is NOT a member of the prebuilds group
groupMembers, err := db.GetGroupMembersByGroupID(ctx, database.GetGroupMembersByGroupIDParams{
GroupID: prebuildsGroup.ID,
IncludeSystem: true,
})
require.NoError(t, err)
require.Len(t, groupMembers, 0)
}
// Add the system user to the group if required by test case
if preExistingGroupMembership {
dbgen.GroupMember(t, db, database.GroupMemberTable{
GroupID: prebuildsGroup.ID,
UserID: database.PrebuildsSystemUserID,
})
}
}
if !preExistingGroup && tc.expectGroupExists != nil && !*tc.expectGroupExists {
// Verify that no prebuilds group exists
require.Error(t, err)
require.True(t, errors.Is(err, sql.ErrNoRows))
}
// Setup unrelated org preset
dbfake.TemplateVersion(t, db).Seed(database.TemplateVersion{
OrganizationID: unrelatedOrg.ID,
CreatedBy: owner.UserID,
}).Preset(database.TemplateVersionPreset{
DesiredInstances: sql.NullInt32{
Int32: 1,
Valid: true,
},
}).Do()
// Setup target org preset
dbfake.TemplateVersion(t, db).Seed(database.TemplateVersion{
OrganizationID: targetOrg.ID,
CreatedBy: owner.UserID,
}).Preset(database.TemplateVersionPreset{
DesiredInstances: sql.NullInt32{
Int32: 0,
Valid: includePreset,
},
}).Do()
// Verify memberships before reconciliation.
preReconcileMemberships, err := db.GetOrganizationsByUserID(ctx, database.GetOrganizationsByUserIDParams{
UserID: database.PrebuildsSystemUserID,
})
}
require.NoError(t, err)
expectedMembershipsBefore := []uuid.UUID{defaultOrg.ID, unrelatedOrg.ID}
if preExistingOrgMembership {
expectedMembershipsBefore = append(expectedMembershipsBefore, targetOrg.ID)
}
require.ElementsMatch(t, expectedMembershipsBefore, extractOrgIDs(preReconcileMemberships))
// Reconcile
reconciler := prebuilds.NewStoreMembershipReconciler(db, clock, slogtest.Make(t, nil))
require.NoError(t, reconciler.ReconcileAll(ctx, database.PrebuildsSystemUserID, prebuilds.PrebuiltWorkspacesGroupName))
// Verify memberships after reconciliation.
postReconcileMemberships, err := db.GetOrganizationsByUserID(ctx, database.GetOrganizationsByUserIDParams{
UserID: database.PrebuildsSystemUserID,
})
require.NoError(t, err)
expectedMembershipsAfter := expectedMembershipsBefore
if !preExistingOrgMembership && tc.expectOrgMembershipExists {
expectedMembershipsAfter = append(expectedMembershipsAfter, targetOrg.ID)
}
require.ElementsMatch(t, expectedMembershipsAfter, extractOrgIDs(postReconcileMemberships))
// Verify prebuilds group behavior based on expected outcomes
prebuildsGroup, err = db.GetGroupByOrgAndName(ctx, database.GetGroupByOrgAndNameParams{
OrganizationID: targetOrg.ID,
Name: prebuilds.PrebuiltWorkspacesGroupName,
})
if tc.expectGroupExists {
require.NoError(t, err)
require.Equal(t, prebuilds.PrebuiltWorkspacesGroupName, prebuildsGroup.Name)
require.Equal(t, prebuilds.PrebuiltWorkspacesGroupDisplayName, prebuildsGroup.DisplayName)
require.Equal(t, int32(0), prebuildsGroup.QuotaAllowance) // Default quota should be 0
if tc.expectUserInGroup {
// Check that the system user is a member of the prebuilds group
groupMembers, err := db.GetGroupMembersByGroupID(ctx, database.GetGroupMembersByGroupIDParams{
GroupID: prebuildsGroup.ID,
IncludeSystem: true,
})
require.NoError(t, err)
require.Len(t, groupMembers, 1)
require.Equal(t, database.PrebuildsSystemUserID, groupMembers[0].UserID)
}
// If no preset exists, then we do not enforce group membership:
if !tc.expectUserInGroup {
// Check that the system user is NOT a member of the prebuilds group
groupMembers, err := db.GetGroupMembersByGroupID(ctx, database.GetGroupMembersByGroupIDParams{
GroupID: prebuildsGroup.ID,
IncludeSystem: true,
})
require.NoError(t, err)
require.Len(t, groupMembers, 0)
}
}
if !preExistingGroup && !tc.expectGroupExists {
// Verify that no prebuilds group exists
require.Error(t, err)
require.True(t, errors.Is(err, sql.ErrNoRows))
}
})
}
}
}
@@ -485,7 +485,7 @@ func TestMetricsCollector_ReconciliationPausedMetric(t *testing.T) {
require.NoError(t, err)
// Run reconciliation to update the metric
err = reconciler.ReconcileAll(ctx)
_, err = reconciler.ReconcileAll(ctx)
require.NoError(t, err)
// Check that the metric shows reconciliation is not paused
@@ -514,7 +514,7 @@ func TestMetricsCollector_ReconciliationPausedMetric(t *testing.T) {
require.NoError(t, err)
// Run reconciliation to update the metric
err = reconciler.ReconcileAll(ctx)
_, err = reconciler.ReconcileAll(ctx)
require.NoError(t, err)
// Check that the metric shows reconciliation is paused
@@ -543,7 +543,7 @@ func TestMetricsCollector_ReconciliationPausedMetric(t *testing.T) {
require.NoError(t, err)
// Run reconciliation to update the metric
err = reconciler.ReconcileAll(ctx)
_, err = reconciler.ReconcileAll(ctx)
require.NoError(t, err)
// Check that the metric shows reconciliation is not paused
+147 -59
View File
@@ -15,6 +15,7 @@ import (
"github.com/google/uuid"
"github.com/hashicorp/go-multierror"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
@@ -44,7 +45,6 @@ type StoreReconciler struct {
logger slog.Logger
clock quartz.Clock
registerer prometheus.Registerer
metrics *MetricsCollector
notifEnq notifications.Enqueuer
buildUsageChecker *atomic.Pointer[wsbuilder.UsageChecker]
@@ -53,10 +53,33 @@ type StoreReconciler struct {
stopped atomic.Bool
done chan struct{}
provisionNotifyCh chan database.ProvisionerJob
// Prebuild state metrics
metrics *MetricsCollector
// Operational metrics
reconciliationDuration prometheus.Histogram
}
var _ prebuilds.ReconciliationOrchestrator = &StoreReconciler{}
// DeprovisionMode selects how a prebuilt workspace is deprovisioned.
type DeprovisionMode int

const (
	// DeprovisionModeNormal runs a regular delete build through the provisioner.
	DeprovisionModeNormal DeprovisionMode = iota
	// DeprovisionModeOrphan deletes the workspace without sending Terraform
	// state to the provisioner (see provisionDelete for when this is safe).
	DeprovisionModeOrphan
)

// String returns a human-readable label for the mode; unrecognized values
// yield "unknown".
func (d DeprovisionMode) String() string {
	if d == DeprovisionModeOrphan {
		return "orphan"
	}
	if d == DeprovisionModeNormal {
		return "normal"
	}
	return "unknown"
}
func NewStoreReconciler(store database.Store,
ps pubsub.Pubsub,
fileCache *files.Cache,
@@ -87,6 +110,15 @@ func NewStoreReconciler(store database.Store,
// If the registerer fails to register the metrics collector, it's not fatal.
logger.Error(context.Background(), "failed to register prometheus metrics", slog.Error(err))
}
factory := promauto.With(registerer)
reconciler.reconciliationDuration = factory.NewHistogram(prometheus.HistogramOpts{
Namespace: "coderd",
Subsystem: "prebuilds",
Name: "reconciliation_duration_seconds",
Help: "Duration of each prebuilds reconciliation cycle.",
Buckets: prometheus.DefBuckets,
})
}
return reconciler
@@ -158,10 +190,15 @@ func (c *StoreReconciler) Run(ctx context.Context) {
// instead of waiting for the next reconciliation interval
case <-ticker.C:
// Trigger a new iteration on each tick.
err := c.ReconcileAll(ctx)
stats, err := c.ReconcileAll(ctx)
if err != nil {
c.logger.Error(context.Background(), "reconciliation failed", slog.Error(err))
}
if c.reconciliationDuration != nil {
c.reconciliationDuration.Observe(stats.Elapsed.Seconds())
}
c.logger.Debug(ctx, "reconciliation stats", slog.F("elapsed", stats.Elapsed))
case <-ctx.Done():
// nolint:gocritic // it's okay to use slog.F() for an error in this case
// because we want to differentiate two different types of errors: ctx.Err() and context.Cause()
@@ -245,19 +282,24 @@ func (c *StoreReconciler) Stop(ctx context.Context, cause error) {
// be reconciled again, leading to another workspace being provisioned. Two workspace builds will be occurring
// simultaneously for the same preset, but once both jobs have completed the reconciliation loop will notice the
// extraneous instance and delete it.
func (c *StoreReconciler) ReconcileAll(ctx context.Context) error {
func (c *StoreReconciler) ReconcileAll(ctx context.Context) (stats prebuilds.ReconcileStats, err error) {
start := c.clock.Now()
defer func() {
stats.Elapsed = c.clock.Since(start)
}()
logger := c.logger.With(slog.F("reconcile_context", "all"))
select {
case <-ctx.Done():
logger.Warn(context.Background(), "reconcile exiting prematurely; context done", slog.Error(ctx.Err()))
return nil
return stats, nil
default:
}
logger.Debug(ctx, "starting reconciliation")
err := c.WithReconciliationLock(ctx, logger, func(ctx context.Context, _ database.Store) error {
err = c.WithReconciliationLock(ctx, logger, func(ctx context.Context, _ database.Store) error {
// Check if prebuilds reconciliation is paused
settingsJSON, err := c.store.GetPrebuildsSettings(ctx)
if err != nil {
@@ -280,6 +322,12 @@ func (c *StoreReconciler) ReconcileAll(ctx context.Context) error {
return nil
}
membershipReconciler := NewStoreMembershipReconciler(c.store, c.clock, logger)
err = membershipReconciler.ReconcileAll(ctx, database.PrebuildsSystemUserID, PrebuiltWorkspacesGroupName)
if err != nil {
return xerrors.Errorf("reconcile prebuild membership: %w", err)
}
snapshot, err := c.SnapshotState(ctx, c.store)
if err != nil {
return xerrors.Errorf("determine current snapshot: %w", err)
@@ -292,12 +340,6 @@ func (c *StoreReconciler) ReconcileAll(ctx context.Context) error {
return nil
}
membershipReconciler := NewStoreMembershipReconciler(c.store, c.clock)
err = membershipReconciler.ReconcileAll(ctx, database.PrebuildsSystemUserID, snapshot.Presets)
if err != nil {
return xerrors.Errorf("reconcile prebuild membership: %w", err)
}
var eg errgroup.Group
// Reconcile presets in parallel. Each preset in its own goroutine.
for _, preset := range snapshot.Presets {
@@ -330,7 +372,7 @@ func (c *StoreReconciler) ReconcileAll(ctx context.Context) error {
logger.Error(ctx, "failed to reconcile", slog.Error(err))
}
return err
return stats, err
}
func (c *StoreReconciler) reportHardLimitedPresets(snapshot *prebuilds.GlobalSnapshot) {
@@ -642,34 +684,7 @@ func (c *StoreReconciler) executeReconciliationAction(ctx context.Context, logge
return multiErr.ErrorOrNil()
case prebuilds.ActionTypeCancelPending:
// Cancel pending prebuild jobs from non-active template versions to avoid
// provisioning obsolete workspaces that would immediately be deprovisioned.
// This uses a criteria-based update to ensure only jobs that are still pending
// at execution time are canceled, avoiding race conditions where jobs may have
// transitioned to running status between query and update.
canceledJobs, err := c.store.UpdatePrebuildProvisionerJobWithCancel(
ctx,
database.UpdatePrebuildProvisionerJobWithCancelParams{
Now: c.clock.Now(),
PresetID: uuid.NullUUID{
UUID: ps.Preset.ID,
Valid: true,
},
})
if err != nil {
logger.Error(ctx, "failed to cancel pending prebuild jobs",
slog.F("template_version_id", ps.Preset.TemplateVersionID.String()),
slog.F("preset_id", ps.Preset.ID),
slog.Error(err))
return err
}
if len(canceledJobs) > 0 {
logger.Info(ctx, "canceled pending prebuild jobs for inactive version",
slog.F("template_version_id", ps.Preset.TemplateVersionID.String()),
slog.F("preset_id", ps.Preset.ID),
slog.F("count", len(canceledJobs)))
}
return nil
return c.cancelAndOrphanDeletePendingPrebuilds(ctx, ps.Preset.TemplateID, ps.Preset.TemplateVersionID, ps.Preset.ID)
default:
return xerrors.Errorf("unknown action type: %v", action.ActionType)
@@ -717,7 +732,91 @@ func (c *StoreReconciler) createPrebuiltWorkspace(ctx context.Context, prebuiltW
c.logger.Info(ctx, "attempting to create prebuild", slog.F("name", name),
slog.F("workspace_id", prebuiltWorkspaceID.String()), slog.F("preset_id", presetID.String()))
return c.provision(ctx, db, prebuiltWorkspaceID, template, presetID, database.WorkspaceTransitionStart, workspace)
return c.provision(ctx, db, prebuiltWorkspaceID, template, presetID, database.WorkspaceTransitionStart, workspace, DeprovisionModeNormal)
}, &database.TxOptions{
Isolation: sql.LevelRepeatableRead,
ReadOnly: false,
})
}
// provisionDelete provisions a delete transition for a prebuilt workspace.
//
// If mode is DeprovisionModeOrphan, the builder will not send Terraform state to the provisioner.
// This allows the workspace to be deleted even when no provisioners are available, and is safe
// when no Terraform resources were actually created (e.g., for pending prebuilds that were canceled
// before provisioning started).
//
// IMPORTANT: This function must be called within a database transaction. It does not create its own transaction.
// The caller is responsible for managing the transaction boundary via db.InTx().
func (c *StoreReconciler) provisionDelete(ctx context.Context, db database.Store, workspaceID uuid.UUID, templateID uuid.UUID, presetID uuid.UUID, mode DeprovisionMode) error {
// Re-fetch the workspace inside the caller's transaction so the ownership
// check below operates on current data.
workspace, err := db.GetWorkspaceByID(ctx, workspaceID)
if err != nil {
return xerrors.Errorf("get workspace by ID: %w", err)
}
template, err := db.GetTemplateByID(ctx, templateID)
if err != nil {
return xerrors.Errorf("failed to get template: %w", err)
}
// Guard: once a prebuild is claimed its owner changes from the prebuilds
// system user; never delete a workspace a real user now owns.
if workspace.OwnerID != database.PrebuildsSystemUserID {
return xerrors.Errorf("prebuilt workspace is not owned by prebuild user anymore, probably it was claimed")
}
// NOTE(review): the log field is keyed "orphan" but carries the full mode
// string ("normal"/"orphan"/"unknown") — consider renaming the key to "mode".
c.logger.Info(ctx, "attempting to delete prebuild", slog.F("orphan", mode.String()),
slog.F("name", workspace.Name), slog.F("workspace_id", workspaceID.String()), slog.F("preset_id", presetID.String()))
// Delegate to the shared provision path; mode only takes effect for delete
// transitions (see provision's orphan handling).
return c.provision(ctx, db, workspaceID, template, presetID,
database.WorkspaceTransitionDelete, workspace, mode)
}
// cancelAndOrphanDeletePendingPrebuilds cancels pending prebuild jobs from inactive template versions
// and orphan-deletes their associated workspaces.
//
// The cancel operation uses a criteria-based update to ensure only jobs that are still pending at
// execution time are canceled, avoiding race conditions where jobs may have transitioned to running.
//
// Since these jobs were never processed by a provisioner, no Terraform resources were created,
// making it safe to orphan-delete the workspaces (skipping Terraform destroy).
func (c *StoreReconciler) cancelAndOrphanDeletePendingPrebuilds(ctx context.Context, templateID uuid.UUID, templateVersionID uuid.UUID, presetID uuid.UUID) error {
return c.store.InTx(func(db database.Store) error {
canceledJobs, err := db.UpdatePrebuildProvisionerJobWithCancel(
ctx,
database.UpdatePrebuildProvisionerJobWithCancelParams{
Now: c.clock.Now(),
PresetID: uuid.NullUUID{
UUID: presetID,
Valid: true,
},
})
if err != nil {
c.logger.Error(ctx, "failed to cancel pending prebuild jobs",
slog.F("template_id", templateID.String()),
slog.F("template_version_id", templateVersionID.String()),
slog.F("preset_id", presetID.String()),
slog.Error(err))
return err
}
if len(canceledJobs) > 0 {
c.logger.Info(ctx, "canceled pending prebuild jobs for inactive version",
slog.F("template_id", templateID.String()),
slog.F("template_version_id", templateVersionID.String()),
slog.F("preset_id", presetID.String()),
slog.F("count", len(canceledJobs)))
}
var multiErr multierror.Error
for _, job := range canceledJobs {
err = c.provisionDelete(ctx, db, job.WorkspaceID, job.TemplateID, presetID, DeprovisionModeOrphan)
if err != nil {
c.logger.Error(ctx, "failed to orphan delete canceled prebuild",
slog.F("workspace_id", job.WorkspaceID.String()), slog.Error(err))
multiErr.Errors = append(multiErr.Errors, err)
}
}
return multiErr.ErrorOrNil()
}, &database.TxOptions{
Isolation: sql.LevelRepeatableRead,
ReadOnly: false,
@@ -726,24 +825,7 @@ func (c *StoreReconciler) createPrebuiltWorkspace(ctx context.Context, prebuiltW
func (c *StoreReconciler) deletePrebuiltWorkspace(ctx context.Context, prebuiltWorkspaceID uuid.UUID, templateID uuid.UUID, presetID uuid.UUID) error {
return c.store.InTx(func(db database.Store) error {
workspace, err := db.GetWorkspaceByID(ctx, prebuiltWorkspaceID)
if err != nil {
return xerrors.Errorf("get workspace by ID: %w", err)
}
template, err := db.GetTemplateByID(ctx, templateID)
if err != nil {
return xerrors.Errorf("failed to get template: %w", err)
}
if workspace.OwnerID != database.PrebuildsSystemUserID {
return xerrors.Errorf("prebuilt workspace is not owned by prebuild user anymore, probably it was claimed")
}
c.logger.Info(ctx, "attempting to delete prebuild",
slog.F("workspace_id", prebuiltWorkspaceID.String()), slog.F("preset_id", presetID.String()))
return c.provision(ctx, db, prebuiltWorkspaceID, template, presetID, database.WorkspaceTransitionDelete, workspace)
return c.provisionDelete(ctx, db, prebuiltWorkspaceID, templateID, presetID, DeprovisionModeNormal)
}, &database.TxOptions{
Isolation: sql.LevelRepeatableRead,
ReadOnly: false,
@@ -758,6 +840,7 @@ func (c *StoreReconciler) provision(
presetID uuid.UUID,
transition database.WorkspaceTransition,
workspace database.Workspace,
mode DeprovisionMode,
) error {
tvp, err := db.GetPresetParametersByTemplateVersionID(ctx, template.ActiveVersionID)
if err != nil {
@@ -795,6 +878,11 @@ func (c *StoreReconciler) provision(
builder = builder.RichParameterValues(params)
}
// Use orphan mode for deletes when no Terraform resources exist
if transition == database.WorkspaceTransitionDelete && mode == DeprovisionModeOrphan {
builder = builder.Orphan()
}
_, provisionerJob, _, err := builder.Build(
ctx,
db,
+149 -47
View File
@@ -72,7 +72,8 @@ func TestNoReconciliationActionsIfNoPresets(t *testing.T) {
require.Equal(t, templateVersion, gotTemplateVersion)
// when we trigger the reconciliation loop for all templates
require.NoError(t, controller.ReconcileAll(ctx))
_, err = controller.ReconcileAll(ctx)
require.NoError(t, err)
// then no reconciliation actions are taken
// because without presets, there are no prebuilds
@@ -126,7 +127,8 @@ func TestNoReconciliationActionsIfNoPrebuilds(t *testing.T) {
require.NotEmpty(t, presetParameters)
// when we trigger the reconciliation loop for all templates
require.NoError(t, controller.ReconcileAll(ctx))
_, err = controller.ReconcileAll(ctx)
require.NoError(t, err)
// then no reconciliation actions are taken
// because without prebuilds, there is nothing to reconcile
@@ -204,7 +206,10 @@ func TestPrebuildReconciliation(t *testing.T) {
templateDeleted: []bool{false},
},
{
name: "never attempt to interfere with active builds",
// TODO(ssncferreira): Investigate why the GetRunningPrebuiltWorkspaces query is returning 0 rows.
// When a template version is inactive (templateVersionActive = false), any prebuilds in the
// database.ProvisionerJobStatusRunning state should be deleted.
name: "never attempt to interfere with prebuilds from an active template version",
// The workspace builder does not allow scheduling a new build if there is already a build
// pending, running, or canceling. As such, we should never attempt to start, stop or delete
// such prebuilds. Rather, we should wait for the existing build to complete and reconcile
@@ -215,7 +220,7 @@ func TestPrebuildReconciliation(t *testing.T) {
database.ProvisionerJobStatusRunning,
database.ProvisionerJobStatusCanceling,
},
templateVersionActive: []bool{true, false},
templateVersionActive: []bool{true},
shouldDeleteOldPrebuild: ptr.To(false),
templateDeleted: []bool{false},
},
@@ -425,7 +430,8 @@ func (tc testCase) run(t *testing.T) {
// Run the reconciliation multiple times to ensure idempotency
// 8 was arbitrary, but large enough to reasonably trust the result
for i := 1; i <= 8; i++ {
require.NoErrorf(t, controller.ReconcileAll(ctx), "failed on iteration %d", i)
_, err := controller.ReconcileAll(ctx)
require.NoErrorf(t, err, "failed on iteration %d", i)
if tc.shouldCreateNewPrebuild != nil {
newPrebuildCount := 0
@@ -539,7 +545,8 @@ func TestMultiplePresetsPerTemplateVersion(t *testing.T) {
// Run the reconciliation multiple times to ensure idempotency
// 8 was arbitrary, but large enough to reasonably trust the result
for i := 1; i <= 8; i++ {
require.NoErrorf(t, controller.ReconcileAll(ctx), "failed on iteration %d", i)
_, err := controller.ReconcileAll(ctx)
require.NoErrorf(t, err, "failed on iteration %d", i)
newPrebuildCount := 0
workspaces, err := db.GetWorkspacesByTemplateID(ctx, template.ID)
@@ -665,7 +672,7 @@ func TestPrebuildScheduling(t *testing.T) {
DesiredInstances: 5,
})
err := controller.ReconcileAll(ctx)
_, err := controller.ReconcileAll(ctx)
require.NoError(t, err)
// get workspace builds
@@ -748,7 +755,8 @@ func TestInvalidPreset(t *testing.T) {
// Run the reconciliation multiple times to ensure idempotency
// 8 was arbitrary, but large enough to reasonably trust the result
for i := 1; i <= 8; i++ {
require.NoErrorf(t, controller.ReconcileAll(ctx), "failed on iteration %d", i)
_, err := controller.ReconcileAll(ctx)
require.NoErrorf(t, err, "failed on iteration %d", i)
workspaces, err := db.GetWorkspacesByTemplateID(ctx, template.ID)
require.NoError(t, err)
@@ -814,7 +822,8 @@ func TestDeletionOfPrebuiltWorkspaceWithInvalidPreset(t *testing.T) {
})
// Old prebuilt workspace should be deleted.
require.NoError(t, controller.ReconcileAll(ctx))
_, err = controller.ReconcileAll(ctx)
require.NoError(t, err)
builds, err := db.GetWorkspaceBuildsByWorkspaceID(ctx, database.GetWorkspaceBuildsByWorkspaceIDParams{
WorkspaceID: prebuiltWorkspace.ID,
@@ -913,12 +922,15 @@ func TestSkippingHardLimitedPresets(t *testing.T) {
// Trigger reconciliation to attempt creating a new prebuild.
// The outcome depends on whether the hard limit has been reached.
require.NoError(t, controller.ReconcileAll(ctx))
_, err = controller.ReconcileAll(ctx)
require.NoError(t, err)
// These two additional calls to ReconcileAll should not trigger any notifications.
// A notification is only sent once.
require.NoError(t, controller.ReconcileAll(ctx))
require.NoError(t, controller.ReconcileAll(ctx))
_, err = controller.ReconcileAll(ctx)
require.NoError(t, err)
_, err = controller.ReconcileAll(ctx)
require.NoError(t, err)
// Verify the final state after reconciliation.
workspaces, err = db.GetWorkspacesByTemplateID(ctx, template.ID)
@@ -1090,12 +1102,15 @@ func TestHardLimitedPresetShouldNotBlockDeletion(t *testing.T) {
// Trigger reconciliation to attempt creating a new prebuild.
// The outcome depends on whether the hard limit has been reached.
require.NoError(t, controller.ReconcileAll(ctx))
_, err = controller.ReconcileAll(ctx)
require.NoError(t, err)
// These two additional calls to ReconcileAll should not trigger any notifications.
// A notification is only sent once.
require.NoError(t, controller.ReconcileAll(ctx))
require.NoError(t, controller.ReconcileAll(ctx))
_, err = controller.ReconcileAll(ctx)
require.NoError(t, err)
_, err = controller.ReconcileAll(ctx)
require.NoError(t, err)
// Verify the final state after reconciliation.
// When hard limit is reached, no new workspace should be created.
@@ -1138,7 +1153,8 @@ func TestHardLimitedPresetShouldNotBlockDeletion(t *testing.T) {
}
// Trigger reconciliation to make sure that successful, but outdated prebuilt workspace will be deleted.
require.NoError(t, controller.ReconcileAll(ctx))
_, err = controller.ReconcileAll(ctx)
require.NoError(t, err)
workspaces, err = db.GetWorkspacesByTemplateID(ctx, template.ID)
require.NoError(t, err)
@@ -1737,7 +1753,8 @@ func TestExpiredPrebuildsMultipleActions(t *testing.T) {
}
// Trigger reconciliation to process expired prebuilds and enforce desired state.
require.NoError(t, controller.ReconcileAll(ctx))
_, err = controller.ReconcileAll(ctx)
require.NoError(t, err)
// Sort non-expired workspaces by CreatedAt in ascending order (oldest first)
sort.Slice(nonExpiredWorkspaces, func(i, j int) bool {
@@ -2121,16 +2138,16 @@ func TestCancelPendingPrebuilds(t *testing.T) {
},
}).SkipCreateTemplate().Do()
var workspace dbfake.WorkspaceResponse
var pendingWorkspace dbfake.WorkspaceResponse
if tt.activeTemplateVersion {
// Given: a prebuilt workspace, workspace build and respective provisioner job from an
// active template version
workspace = tt.setupBuild(t, db, client,
pendingWorkspace = tt.setupBuild(t, db, client,
owner.OrganizationID, templateID, activeTemplateVersion.TemplateVersion.ID, activePresetID)
} else {
// Given: a prebuilt workspace, workspace build and respective provisioner job from a
// non-active template version
workspace = tt.setupBuild(t, db, client,
pendingWorkspace = tt.setupBuild(t, db, client,
owner.OrganizationID, templateID, nonActiveTemplateVersion.TemplateVersion.ID, nonActivePresetID)
}
@@ -2142,18 +2159,32 @@ func TestCancelPendingPrebuilds(t *testing.T) {
require.NoError(t, err)
// When: the reconciliation loop is triggered
require.NoError(t, reconciler.ReconcileAll(ctx))
_, err = reconciler.ReconcileAll(ctx)
require.NoError(t, err)
if tt.shouldCancel {
// Then: the prebuild related jobs from non-active version should be canceled
cancelledJob, err := db.GetProvisionerJobByID(ctx, workspace.Build.JobID)
// Then: the pending prebuild job from non-active version should be canceled
cancelledJob, err := db.GetProvisionerJobByID(ctx, pendingWorkspace.Build.JobID)
require.NoError(t, err)
require.Equal(t, clock.Now().UTC(), cancelledJob.CanceledAt.Time.UTC())
require.Equal(t, clock.Now().UTC(), cancelledJob.CompletedAt.Time.UTC())
require.Equal(t, database.ProvisionerJobStatusCanceled, cancelledJob.JobStatus)
// Then: the workspace should be deleted
deletedWorkspace, err := db.GetWorkspaceByID(ctx, pendingWorkspace.Workspace.ID)
require.NoError(t, err)
require.True(t, deletedWorkspace.Deleted)
latestBuild, err := db.GetLatestWorkspaceBuildByWorkspaceID(ctx, deletedWorkspace.ID)
require.NoError(t, err)
require.Equal(t, database.WorkspaceTransitionDelete, latestBuild.Transition)
deleteJob, err := db.GetProvisionerJobByID(ctx, latestBuild.JobID)
require.NoError(t, err)
require.True(t, deleteJob.CompletedAt.Valid)
require.False(t, deleteJob.WorkerID.Valid)
require.Equal(t, database.ProvisionerJobStatusSucceeded, deleteJob.JobStatus)
} else {
// Then: the provisioner job should not be canceled
job, err := db.GetProvisionerJobByID(ctx, workspace.Build.JobID)
// Then: the pending prebuild job should not be canceled
job, err := db.GetProvisionerJobByID(ctx, pendingWorkspace.Build.JobID)
require.NoError(t, err)
if !tt.previouslyCanceled {
require.Zero(t, job.CanceledAt.Time.UTC())
@@ -2162,6 +2193,11 @@ func TestCancelPendingPrebuilds(t *testing.T) {
if !tt.previouslyCompleted {
require.Zero(t, job.CompletedAt.Time.UTC())
}
// Then: the workspace should not be deleted
workspace, err := db.GetWorkspaceByID(ctx, pendingWorkspace.Workspace.ID)
require.NoError(t, err)
require.False(t, workspace.Deleted)
}
})
}
@@ -2235,25 +2271,45 @@ func TestCancelPendingPrebuilds(t *testing.T) {
return prebuilds
}
checkIfJobCanceled := func(
checkIfJobCanceledAndDeleted := func(
t *testing.T,
clock *quartz.Mock,
ctx context.Context,
db database.Store,
shouldBeCanceled bool,
shouldBeCanceledAndDeleted bool,
prebuilds []dbfake.WorkspaceResponse,
) {
for _, prebuild := range prebuilds {
job, err := db.GetProvisionerJobByID(ctx, prebuild.Build.JobID)
pendingJob, err := db.GetProvisionerJobByID(ctx, prebuild.Build.JobID)
require.NoError(t, err)
if shouldBeCanceled {
require.Equal(t, database.ProvisionerJobStatusCanceled, job.JobStatus)
require.Equal(t, clock.Now().UTC(), job.CanceledAt.Time.UTC())
require.Equal(t, clock.Now().UTC(), job.CompletedAt.Time.UTC())
if shouldBeCanceledAndDeleted {
// Pending job should be canceled
require.Equal(t, database.ProvisionerJobStatusCanceled, pendingJob.JobStatus)
require.Equal(t, clock.Now().UTC(), pendingJob.CanceledAt.Time.UTC())
require.Equal(t, clock.Now().UTC(), pendingJob.CompletedAt.Time.UTC())
// Workspace should be deleted
deletedWorkspace, err := db.GetWorkspaceByID(ctx, prebuild.Workspace.ID)
require.NoError(t, err)
require.True(t, deletedWorkspace.Deleted)
latestBuild, err := db.GetLatestWorkspaceBuildByWorkspaceID(ctx, deletedWorkspace.ID)
require.NoError(t, err)
require.Equal(t, database.WorkspaceTransitionDelete, latestBuild.Transition)
deleteJob, err := db.GetProvisionerJobByID(ctx, latestBuild.JobID)
require.NoError(t, err)
require.True(t, deleteJob.CompletedAt.Valid)
require.False(t, deleteJob.WorkerID.Valid)
require.Equal(t, database.ProvisionerJobStatusSucceeded, deleteJob.JobStatus)
} else {
require.NotEqual(t, database.ProvisionerJobStatusCanceled, job.JobStatus)
require.Zero(t, job.CanceledAt.Time.UTC())
// Pending job should not be canceled
require.NotEqual(t, database.ProvisionerJobStatusCanceled, pendingJob.JobStatus)
require.Zero(t, pendingJob.CanceledAt.Time.UTC())
// Workspace should not be deleted
workspace, err := db.GetWorkspaceByID(ctx, prebuild.Workspace.ID)
require.NoError(t, err)
require.False(t, workspace.Deleted)
}
}
}
@@ -2306,28 +2362,74 @@ func TestCancelPendingPrebuilds(t *testing.T) {
templateBVersion3Pending := setupPrebuilds(t, db, owner.OrganizationID, templateBID, templateBVersion3ID, templateBVersion3PresetID, 1, true)
// When: the reconciliation loop is executed
require.NoError(t, reconciler.ReconcileAll(ctx))
_, err := reconciler.ReconcileAll(ctx)
require.NoError(t, err)
// Then: template A version 1 running workspaces should not be canceled
checkIfJobCanceled(t, clock, ctx, db, false, templateAVersion1Running)
checkIfJobCanceledAndDeleted(t, clock, ctx, db, false, templateAVersion1Running)
// Then: template A version 1 pending workspaces should be canceled
checkIfJobCanceled(t, clock, ctx, db, true, templateAVersion1Pending)
checkIfJobCanceledAndDeleted(t, clock, ctx, db, true, templateAVersion1Pending)
// Then: template A version 2 running and pending workspaces should not be canceled
checkIfJobCanceled(t, clock, ctx, db, false, templateAVersion2Running)
checkIfJobCanceled(t, clock, ctx, db, false, templateAVersion2Pending)
checkIfJobCanceledAndDeleted(t, clock, ctx, db, false, templateAVersion2Running)
checkIfJobCanceledAndDeleted(t, clock, ctx, db, false, templateAVersion2Pending)
// Then: template B version 1 running workspaces should not be canceled
checkIfJobCanceled(t, clock, ctx, db, false, templateBVersion1Running)
checkIfJobCanceledAndDeleted(t, clock, ctx, db, false, templateBVersion1Running)
// Then: template B version 1 pending workspaces should be canceled
checkIfJobCanceled(t, clock, ctx, db, true, templateBVersion1Pending)
checkIfJobCanceledAndDeleted(t, clock, ctx, db, true, templateBVersion1Pending)
// Then: template B version 2 pending workspaces should be canceled
checkIfJobCanceled(t, clock, ctx, db, true, templateBVersion2Pending)
checkIfJobCanceledAndDeleted(t, clock, ctx, db, true, templateBVersion2Pending)
// Then: template B version 3 running and pending workspaces should not be canceled
checkIfJobCanceled(t, clock, ctx, db, false, templateBVersion3Running)
checkIfJobCanceled(t, clock, ctx, db, false, templateBVersion3Pending)
checkIfJobCanceledAndDeleted(t, clock, ctx, db, false, templateBVersion3Running)
checkIfJobCanceledAndDeleted(t, clock, ctx, db, false, templateBVersion3Pending)
})
}
// TestReconciliationStats verifies that ReconcileAll reports wall-clock
// elapsed time in its returned stats, that the reported duration roughly
// matches the observed duration, and that a single reconciliation pass over
// one preset completes quickly.
func TestReconciliationStats(t *testing.T) {
t.Parallel()
// Setup
// Use a real clock (not quartz.NewMock) because the test compares the
// reconciler's self-measured elapsed time against time.Since.
clock := quartz.NewReal()
db, ps := dbtestutil.NewDB(t)
client, _, _ := coderdtest.NewWithAPI(t, &coderdtest.Options{
Database: db,
Pubsub: ps,
Clock: clock,
})
fakeEnqueuer := newFakeEnqueuer()
registry := prometheus.NewRegistry()
cache := files.New(registry, &coderdtest.FakeAuthorizer{})
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: false}).Leveled(slog.LevelDebug)
reconciler := prebuilds.NewStoreReconciler(db, ps, cache, codersdk.PrebuildsConfig{}, logger, clock, registry, fakeEnqueuer, newNoopUsageCheckerPtr())
owner := coderdtest.CreateFirstUser(t, client)
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
defer cancel()
// Create a template version with a preset
// DesiredInstances=1 gives the reconciler real work to do during the pass.
dbfake.TemplateVersion(t, db).Seed(database.TemplateVersion{
OrganizationID: owner.OrganizationID,
CreatedBy: owner.UserID,
}).Preset(database.TemplateVersionPreset{
DesiredInstances: sql.NullInt32{
Int32: 1,
Valid: true,
},
}).Do()
// Verify that ReconcileAll tracks and returns elapsed time
start := time.Now()
stats, err := reconciler.ReconcileAll(ctx)
actualElapsed := time.Since(start)
require.NoError(t, err)
require.Greater(t, stats.Elapsed, time.Duration(0))
// Verify stats.Elapsed matches actual execution time
// 100ms tolerance absorbs scheduling jitter between the two clock reads.
require.InDelta(t, actualElapsed.Milliseconds(), stats.Elapsed.Milliseconds(), 100)
// Verify reconciliation loop is not unexpectedly slow
require.Less(t, stats.Elapsed, 5*time.Second)
}
// newNoopEnqueuer returns a notification enqueuer that silently discards
// everything, for tests that do not assert on notifications.
func newNoopEnqueuer() *notifications.NoopEnqueuer {
	enqueuer := notifications.NewNoopEnqueuer()
	return enqueuer
}
@@ -2822,7 +2924,7 @@ func TestReconciliationRespectsPauseSetting(t *testing.T) {
_ = setupTestDBPreset(t, db, templateVersionID, 2, "test")
// Initially, reconciliation should create prebuilds
err := reconciler.ReconcileAll(ctx)
_, err := reconciler.ReconcileAll(ctx)
require.NoError(t, err)
// Verify that prebuilds were created
@@ -2849,7 +2951,7 @@ func TestReconciliationRespectsPauseSetting(t *testing.T) {
require.Len(t, workspaces, 0, "prebuilds should be deleted")
// Run reconciliation again - it should be paused and not recreate prebuilds
err = reconciler.ReconcileAll(ctx)
_, err = reconciler.ReconcileAll(ctx)
require.NoError(t, err)
// Verify that no new prebuilds were created because reconciliation is paused
@@ -2862,7 +2964,7 @@ func TestReconciliationRespectsPauseSetting(t *testing.T) {
require.NoError(t, err)
// Run reconciliation again - it should now recreate the prebuilds
err = reconciler.ReconcileAll(ctx)
_, err = reconciler.ReconcileAll(ctx)
require.NoError(t, err)
// Verify that prebuilds were recreated
+1 -1
View File
@@ -1,6 +1,6 @@
module github.com/coder/coder/v2
go 1.24.6
go 1.24.10
// Required until a v3 of chroma is created to lazily initialize all XML files.
// None of our dependencies seem to use the registries anyways, so this
+5 -4
View File
@@ -1894,7 +1894,6 @@ export const EntitlementsWarningHeader = "X-Coder-Entitlements-Warning";
// From codersdk/deployment.go
export type Experiment =
| "aibridge"
| "auto-fill-parameters"
| "example"
| "mcp-server-http"
@@ -1905,7 +1904,6 @@ export type Experiment =
| "workspace-usage";
export const Experiments: Experiment[] = [
"aibridge",
"auto-fill-parameters",
"example",
"mcp-server-http",
@@ -5877,6 +5875,10 @@ export interface Workspace {
* and IsPrebuild returns false.
*/
readonly is_prebuild: boolean;
/**
* TaskID, if set, indicates that the workspace is relevant to the given codersdk.Task.
*/
readonly task_id?: string;
}
// From codersdk/workspaces.go
@@ -6397,10 +6399,9 @@ export interface WorkspaceBuild {
readonly template_version_preset_id: string | null;
readonly has_ai_task?: boolean;
/**
* Deprecated: This field has been replaced with `TaskAppID`
* Deprecated: This field has been replaced with `Task.WorkspaceAppID`
*/
readonly ai_task_sidebar_app_id?: string;
readonly task_app_id?: string;
readonly has_external_agent?: boolean;
}
@@ -480,3 +480,123 @@ export const CheckExternalAuthOnChangingVersions: Story = {
});
},
};
/**
 * Verifies that the preset dropdown is refreshed whenever the user switches
 * templates: each template version resolves to a single, distinctly-named
 * preset, so the assertions can tell exactly which template's presets are
 * being shown at every step.
 */
export const CheckPresetsWhenChangingTemplate: Story = {
	args: {
		templates: [
			{
				...MockTemplate,
				id: "claude-code",
				name: "claude-code",
				display_name: "Claude Code",
				active_version_id: "claude-code-version",
			},
			{
				...MockTemplate,
				id: "codex",
				name: "codex",
				display_name: "Codex",
				active_version_id: "codex-version",
			},
		],
	},
	beforeEach: () => {
		// One unique preset per template version; unknown versions get none.
		spyOn(API, "getTemplateVersionPresets").mockImplementation((versionId) => {
			if (versionId === "claude-code-version") {
				return Promise.resolve([
					{
						...MockPresets[0],
						ID: "claude-code-preset-1",
						Name: "Claude Code Dev",
					},
				]);
			}
			if (versionId === "codex-version") {
				return Promise.resolve([
					{
						...MockPresets[0],
						ID: "codex-preset-1",
						Name: "Codex Dev",
					},
				]);
			}
			return Promise.resolve([]);
		});
		// Each template resolves to its single active version.
		spyOn(API, "getTemplateVersions").mockImplementation((templateId) => {
			if (templateId === "claude-code") {
				return Promise.resolve([
					{
						...MockTemplateVersion,
						id: "claude-code-version",
						name: "claude-code-version",
					},
				]);
			}
			if (templateId === "codex") {
				return Promise.resolve([
					{
						...MockTemplateVersion,
						id: "codex-version",
						name: "codex-version",
					},
				]);
			}
			return Promise.resolve([]);
		});
	},
	play: async ({ canvasElement, step }) => {
		const canvas = within(canvasElement);
		// The select popovers render in a portal, so query the document body.
		const body = within(canvasElement.ownerDocument.body);
		await step("Presets are initially present", async () => {
			const presetSelect = await canvas.findByLabelText(/preset/i);
			await userEvent.click(presetSelect);
			const options = await body.findAllByRole("option");
			expect(options).toHaveLength(1);
			expect(options[0]).toContainHTML("Claude Code Dev");
			await userEvent.click(options[0]);
		});
		await step("Switch template", async () => {
			const templateSelect = await canvas.findByLabelText(/select template/i);
			await userEvent.click(templateSelect);
			const codexTemplateOption = await body.findByRole("option", {
				name: /codex/i,
			});
			await userEvent.click(codexTemplateOption);
		});
		await step("Presets are present in new template", async () => {
			const presetSelect = await canvas.findByLabelText(/preset/i);
			await userEvent.click(presetSelect);
			const options = await body.findAllByRole("option");
			expect(options).toHaveLength(1);
			expect(options[0]).toContainHTML("Codex Dev");
			await userEvent.click(options[0]);
		});
		await step("Switch template back", async () => {
			const templateSelect = await canvas.findByLabelText(/select template/i);
			await userEvent.click(templateSelect);
			// Renamed from `codexTemplateOption`: this option is the Claude
			// Code template, not Codex — the old name was a copy-paste leftover.
			const claudeCodeTemplateOption = await body.findByRole("option", {
				name: /claude code/i,
			});
			await userEvent.click(claudeCodeTemplateOption);
		});
		await step("Presets are present in original template", async () => {
			const presetSelect = await canvas.findByLabelText(/preset/i);
			await userEvent.click(presetSelect);
			const options = await body.findAllByRole("option");
			expect(options).toHaveLength(1);
			expect(options[0]).toContainHTML("Claude Code Dev");
		});
	},
};
@@ -270,7 +270,12 @@ const CreateTaskForm: FC<CreateTaskFormProps> = ({ templates, onSuccess }) => {
</label>
<Select
name="templateID"
onValueChange={(value) => setSelectedTemplateId(value)}
onValueChange={(value) => {
setSelectedTemplateId(value);
if (value !== selectedTemplateId) {
setSelectedPresetId(undefined);
}
}}
defaultValue={templates[0].id}
required
>
@@ -1,4 +1,8 @@
import { MockFailedWorkspace, MockWorkspace } from "testHelpers/entities";
import {
MockFailedWorkspace,
MockTaskWorkspace,
MockWorkspace,
} from "testHelpers/entities";
import type { Meta, StoryObj } from "@storybook/react-vite";
import { daysAgo } from "utils/time";
import { WorkspaceDeleteDialog } from "./WorkspaceDeleteDialog";
@@ -45,3 +49,9 @@ export const UnhealthyAdminView: Story = {
canDeleteFailedWorkspace: true,
},
};
// Delete dialog for a workspace associated with a task (MockTaskWorkspace);
// presumably this surfaces the extra task-deletion warning — confirm against
// WorkspaceDeleteDialog's task handling.
export const WithTask: Story = {
	args: {
		workspace: MockTaskWorkspace,
	},
};

Some files were not shown because too many files have changed in this diff Show More