Compare commits

...

22 Commits

Author SHA1 Message Date
blinkagent[bot] a7e9dfa7dc feat: add user:read scope (#23348) (#23841)
Co-authored-by: Kerem Kacel <keremkacel@gmail.com>
2026-04-01 16:29:11 +05:00
Jakub Domeracki 8885d180fe chore: remove trivy GHA job (backport v2.31) (#23858) 2026-04-01 12:32:24 +05:00
Paweł Banaszewski 58446cc865 fix: update aibridge library to include AWS Bedrock fixes (#23801)
Updates aibridge library to include Bedrock fixes.
Contains fixes to https://github.com/coder/aibridge/issues/219 and
https://github.com/coder/aibridge/issues/221
2026-03-31 15:53:46 +02:00
Rowan Smith f7650296ce chore: switch agent gone response from 502 to 404 (backport #23090) (#23635)
Backport of #23090 to `release/2.31`.

When a user creates a workspace, opens the web terminal, then the
workspace stops but the web terminal remains open, the web terminal will
retry the connection. Coder would issue a HTTP 502 Bad Gateway response
when this occurred because coderd could not connect to the workspace
agent, however this is problematic as any load balancer sitting in front
of Coder sees a 502 and thinks Coder is unhealthy.

This PR changes the response to a HTTP 404 after internal discussion.

Cherry-picked from merge commit
c33812a430.
2026-03-25 16:49:28 -04:00
Charlie Voiselle e419eb3101 fix: open coder_app links in new tab when open_in is tab (cherry-pick #23000) (#23620)
Cherry-pick of #23000 onto release/2.31.

Co-authored-by: Kayla はな <kayla@tree.camp>
2026-03-25 15:32:22 -04:00
Ethan 1a774ab7ce fix(tailnet): retry after transport dial timeouts (#22977) (cherry-pick/v2.31) (#22992)
Backport of #22977 to 2.31
2026-03-13 14:26:48 -04:00
Rowan Smith 581e956b49 fix: prevent ui error when last org member is removed (#23019)
Backport of #22975 to release/2.31.
2026-03-13 14:22:40 -04:00
Jon Ayers 2cd4e03f11 fix: prevent emitting build duration metric for devcontainer subagents (#22930) 2026-03-10 20:31:05 -05:00
Susana Ferreira 61b513e586 fix: bump aibridge to v1.0.9 to forward Anthropic-Beta header (#22842)
Bumps aibridge to v1.0.9, which forwards the `Anthropic-Beta` header
from client requests to the upstream Anthropic API:
https://github.com/coder/aibridge/pull/205

This fixes the `context_management: Extra inputs are not permitted`
error when using Claude Code with AI Bridge.

Note: v1.0.8 was retracted due to a conflict marker cached by the Go
module proxy https://github.com/coder/aibridge/pull/208. v1.0.9 contains
the same fix.
2026-03-10 15:52:04 -04:00
Jon Ayers 757634c720 fix: filter sub-agents from build duration metric (#22732) (#22919) 2026-03-10 14:11:01 -05:00
Jon Ayers a3792153de feat: add Prometheus collector for DERP server expvar metrics (#22583) (#22917)
backports the derp prometheus metrics
2026-03-10 12:29:15 -05:00
Steven Masley deaacff843 fix: early oidc refresh with fake idp tests (#22712) (cherry 2.31) (#22716)
Confirmed manually using this branch with 5min tokens (always refreshed)
and 15min tokens (refreshed after 5min elapsed)
2026-03-06 14:33:33 -05:00
Steven Masley 2828d28e0c chore: prematurely refresh oidc token near expiry during workspace (cherry 2.31) (#22606)
(cherry picked from commit f49dea683c)
2026-03-04 10:55:40 -06:00
Garrett Delfosse 4b95b8b4f9 fix(coderd): add organization_name label to insights Prometheus metrics (cherry-pick #22296) (#22566)
Backport of #22296 to release/2.31.
2026-03-03 14:15:20 -05:00
Ehab Younes 3a061ccb21 refactor(site): use dedicated task pause/resume API endpoints (#22303) (cherry-pick/v2.31) (#22326)
Switch from workspace stop/start operations to the dedicated tasks pause
and resume endpoints for cleaner semantics.

(cherry picked from commit bf639d0016)

<!--

If you have used AI to produce some or all of this PR, please ensure you
have read our [AI Contribution
guidelines](https://coder.com/docs/about/contributing/AI_CONTRIBUTING)
before submitting.

-->
2026-03-03 13:14:19 -06:00
Ehab Younes 22c2da53e9 fix: register task pause/resume routes under /api/v2 (#22544) (#22550)
The pause/resume endpoints were only registered under /api/experimental
but the frontend and Go SDK were calling /api/v2, resulting in 404s.
Register the routes in the v2 group, update the SDK client paths, and
fix swagger annotations (Accept → Produce) since these POST endpoints
have no request body.

(cherry picked from commit 9d2aed88c4)
2026-03-03 13:13:48 -06:00
Kayla はな ccb529e98a fix: disable sharing ui when sharing is unavailable (#22390) (#22561) 2026-03-03 11:07:10 -07:00
Rowan Smith 107fd97a61 fix: avoid derp-related panic during wsproxy registration (backport release/2.31) (#22526)
Backport of #22322.

- Cherry-picked 7f03bd7.

Co-authored-by: Dean Sheather <dean@deansheather.com>
2026-03-03 13:46:42 +05:00
Jakub Domeracki 955637a79d fix(codersdk): use header auth for non-browser websocket dials (#22461) (cherry-pick/v2.31) (#22508)
Cherry-pick of #22461 to `release/2.31`.

Applies the non-browser websocket auth principle from #22226 to
remaining
`codersdk` websocket callsites, replacing cookie-jar session auth with
header-token auth. Fixes `401` failures on deployments with
`--host-prefix-cookie` enabled.

Closes #22461 (cherry-pick)

---------

Co-authored-by: ethan <ethanndickson@gmail.com>
2026-03-02 20:40:43 +01:00
Cian Johnston 85f1d70c4f ci: add temporary deploy override (#22378) (#22475)
Temporary override for deploying `main` to `dev.coder.com`.

(cherry picked from commit 67da4e8b56)
2026-03-02 13:58:06 +00:00
Cian Johnston e9e438b06e fix(stringutil): operate on runes instead of bytes in Truncate (#22388) (#22469)
Fixes https://github.com/coder/coder/issues/22375

Updates `stringutil.Truncate` to properly handle multi-byte UTF-8
characters.
Adds tests for multi-byte truncation with word boundary.

Created by Mux using Opus 4.6

(cherry picked from commit 0cfa03718e)
2026-03-02 11:19:16 +00:00
Steven Masley c339aa99ee chore: use header auth over cookies for agents (#22226) (cherry-pick/v2.31) (#22313)
All non-browser connections should not use cookies

(cherry picked from commit 3353e687e7)
2026-02-26 11:11:00 -06:00
82 changed files with 2328 additions and 503 deletions
-113
View File
@@ -63,116 +63,3 @@ jobs:
--data "{\"content\": \"$msg\"}" \
"${{ secrets.SLACK_SECURITY_FAILURE_WEBHOOK_URL }}"
trivy:
permissions:
security-events: write
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2
with:
egress-policy: audit
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
persist-credentials: false
- name: Setup Go
uses: ./.github/actions/setup-go
- name: Setup Node
uses: ./.github/actions/setup-node
- name: Setup sqlc
uses: ./.github/actions/setup-sqlc
- name: Install cosign
uses: ./.github/actions/install-cosign
- name: Install syft
uses: ./.github/actions/install-syft
- name: Install yq
run: go run github.com/mikefarah/yq/v4@v4.44.3
- name: Install mockgen
run: ./.github/scripts/retry.sh -- go install go.uber.org/mock/mockgen@v0.6.0
- name: Install protoc-gen-go
run: ./.github/scripts/retry.sh -- go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.30
- name: Install protoc-gen-go-drpc
run: ./.github/scripts/retry.sh -- go install storj.io/drpc/cmd/protoc-gen-go-drpc@v0.0.34
- name: Install Protoc
run: |
# protoc must be in lockstep with our dogfood Dockerfile or the
# version in the comments will differ. This is also defined in
# ci.yaml.
set -euxo pipefail
cd dogfood/coder
mkdir -p /usr/local/bin
mkdir -p /usr/local/include
DOCKER_BUILDKIT=1 docker build . --target proto -t protoc
protoc_path=/usr/local/bin/protoc
docker run --rm --entrypoint cat protoc /tmp/bin/protoc > $protoc_path
chmod +x $protoc_path
protoc --version
# Copy the generated files to the include directory.
docker run --rm -v /usr/local/include:/target protoc cp -r /tmp/include/google /target/
ls -la /usr/local/include/google/protobuf/
stat /usr/local/include/google/protobuf/timestamp.proto
- name: Build Coder linux amd64 Docker image
id: build
run: |
set -euo pipefail
version="$(./scripts/version.sh)"
image_job="build/coder_${version}_linux_amd64.tag"
# This environment variable force make to not build packages and
# archives (which the Docker image depends on due to technical reasons
# related to concurrent FS writes).
export DOCKER_IMAGE_NO_PREREQUISITES=true
# This environment variables forces scripts/build_docker.sh to build
# the base image tag locally instead of using the cached version from
# the registry.
CODER_IMAGE_BUILD_BASE_TAG="$(CODER_IMAGE_BASE=coder-base ./scripts/image_tag.sh --version "$version")"
export CODER_IMAGE_BUILD_BASE_TAG
# We would like to use make -j here, but it doesn't work with some recent additions
# to our code generation.
make "$image_job"
echo "image=$(cat "$image_job")" >> "$GITHUB_OUTPUT"
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@c1824fd6edce30d7ab345a9989de00bbd46ef284 # v0.34.0
with:
image-ref: ${{ steps.build.outputs.image }}
format: sarif
output: trivy-results.sarif
severity: "CRITICAL,HIGH"
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v3.29.5
with:
sarif_file: trivy-results.sarif
category: "Trivy"
- name: Upload Trivy scan results as an artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: trivy
path: trivy-results.sarif
retention-days: 7
- name: Send Slack notification on failure
if: ${{ failure() }}
run: |
msg="❌ Trivy Failed\n\nhttps://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
curl \
-qfsSL \
-X POST \
-H "Content-Type: application/json" \
--data "{\"content\": \"$msg\"}" \
"${{ secrets.SLACK_SECURITY_FAILURE_WEBHOOK_URL }}"
+56
View File
@@ -3040,6 +3040,62 @@ func TestAgent_Reconnect(t *testing.T) {
closer.Close()
}
// TestAgent_ReconnectNoLifecycleReemit verifies that after the agent loses its
// coordinator connection and reconnects, it does not report its lifecycle
// states a second time: the states recorded before the disconnect must equal
// the states recorded after a post-reconnect stats report arrives.
func TestAgent_ReconnectNoLifecycleReemit(t *testing.T) {
	t.Parallel()
	ctx := testutil.Context(t, testutil.WaitLong)
	logger := testutil.Logger(t)
	// Fake coordinator lets the test observe (and sever) coordinate calls.
	fCoordinator := tailnettest.NewFakeCoordinator()
	agentID := uuid.New()
	statsCh := make(chan *proto.Stats, 50)
	derpMap, _ := tailnettest.RunDERPAndSTUN(t)
	// A RunOnStart script is included so the agent walks through the start
	// lifecycle (starting -> ready) before we trigger the disconnect.
	client := agenttest.NewClient(t,
		logger,
		agentID,
		agentsdk.Manifest{
			DERPMap: derpMap,
			Scripts: []codersdk.WorkspaceAgentScript{{
				Script:     "echo hello",
				Timeout:    30 * time.Second,
				RunOnStart: true,
			}},
		},
		statsCh,
		fCoordinator,
	)
	defer client.Close()
	closer := agent.New(agent.Options{
		Client: client,
		Logger: logger.Named("agent"),
	})
	defer closer.Close()
	// Wait for the agent to reach Ready state.
	require.Eventually(t, func() bool {
		return slices.Contains(client.GetLifecycleStates(), codersdk.WorkspaceAgentLifecycleReady)
	}, testutil.WaitShort, testutil.IntervalFast)
	// Snapshot the lifecycle states seen so far; this is the baseline the
	// post-reconnect states are compared against.
	statesBefore := slices.Clone(client.GetLifecycleStates())
	// Disconnect by closing the coordinator response channel.
	call1 := testutil.RequireReceive(ctx, t, fCoordinator.CoordinateCalls)
	close(call1.Resps)
	// Wait for reconnect.
	testutil.RequireReceive(ctx, t, fCoordinator.CoordinateCalls)
	// Wait for a stats report as a deterministic steady-state proof.
	testutil.RequireReceive(ctx, t, statsCh)
	statesAfter := client.GetLifecycleStates()
	require.Equal(t, statesBefore, statesAfter,
		"lifecycle states should not be re-reported after reconnect")
	closer.Close()
}
func TestAgent_WriteVSCodeConfigs(t *testing.T) {
t.Parallel()
logger := testutil.Logger(t)
+6 -3
View File
@@ -134,9 +134,12 @@ func (a *LifecycleAPI) UpdateLifecycle(ctx context.Context, req *agentproto.Upda
case database.WorkspaceAgentLifecycleStateReady,
database.WorkspaceAgentLifecycleStateStartTimeout,
database.WorkspaceAgentLifecycleStateStartError:
a.emitMetricsOnce.Do(func() {
a.emitBuildDurationMetric(ctx, workspaceAgent.ResourceID)
})
// Only emit metrics for the parent agent, this metric is not intended to measure devcontainer durations.
if !workspaceAgent.ParentID.Valid {
a.emitMetricsOnce.Do(func() {
a.emitBuildDurationMetric(ctx, workspaceAgent.ResourceID)
})
}
}
return req.Lifecycle, nil
+58
View File
@@ -582,6 +582,64 @@ func TestUpdateLifecycle(t *testing.T) {
require.Equal(t, uint64(1), got.GetSampleCount())
require.Equal(t, expectedDuration, got.GetSampleSum())
})
t.Run("SubAgentDoesNotEmitMetric", func(t *testing.T) {
	t.Parallel()
	parentID := uuid.New()
	// A valid ParentID marks this as a sub-agent (e.g. a devcontainer
	// agent); build duration metrics must only be emitted for parents.
	subAgent := database.WorkspaceAgent{
		ID:             uuid.New(),
		ParentID:       uuid.NullUUID{UUID: parentID, Valid: true},
		LifecycleState: database.WorkspaceAgentLifecycleStateStarting,
		StartedAt:      sql.NullTime{Valid: true, Time: someTime},
		ReadyAt:        sql.NullTime{Valid: false},
	}
	lifecycle := &agentproto.Lifecycle{
		State:     agentproto.Lifecycle_READY,
		ChangedAt: timestamppb.New(now),
	}
	dbM := dbmock.NewMockStore(gomock.NewController(t))
	dbM.EXPECT().UpdateWorkspaceAgentLifecycleStateByID(gomock.Any(), database.UpdateWorkspaceAgentLifecycleStateByIDParams{
		ID:             subAgent.ID,
		LifecycleState: database.WorkspaceAgentLifecycleStateReady,
		StartedAt:      subAgent.StartedAt,
		ReadyAt: sql.NullTime{
			Time:  now,
			Valid: true,
		},
	}).Return(nil)
	// GetWorkspaceBuildMetricsByResourceID must NOT be called because
	// sub-agents are skipped before the metrics query. The expectation is
	// registered BEFORE invoking UpdateLifecycle so gomock can enforce it;
	// registering it after the call (as a mock with no matching EXPECT
	// would fail anyway) could not constrain the already-executed call.
	dbM.EXPECT().GetWorkspaceBuildMetricsByResourceID(gomock.Any(), gomock.Any()).Times(0)
	reg := prometheus.NewRegistry()
	metrics := agentapi.NewLifecycleMetrics(reg)
	api := &agentapi.LifecycleAPI{
		AgentFn: func(ctx context.Context) (database.WorkspaceAgent, error) {
			return subAgent, nil
		},
		WorkspaceID:              workspaceID,
		Database:                 dbM,
		Log:                      testutil.Logger(t),
		Metrics:                  metrics,
		PublishWorkspaceUpdateFn: nil,
	}
	resp, err := api.UpdateLifecycle(context.Background(), &agentproto.UpdateLifecycleRequest{
		Lifecycle: lifecycle,
	})
	require.NoError(t, err)
	require.Equal(t, lifecycle, resp)
	// Belt-and-braces: also verify via the registry that no build duration
	// sample was emitted for the sub-agent.
	pm, err := reg.Gather()
	require.NoError(t, err)
	for _, m := range pm {
		if m.GetName() == fullMetricName {
			t.Fatal("metric should not be emitted for sub-agent")
		}
	}
})
}
func TestUpdateStartup(t *testing.T) {
+2 -2
View File
@@ -1248,7 +1248,7 @@ func (api *API) postWorkspaceAgentTaskLogSnapshot(rw http.ResponseWriter, r *htt
// @Summary Pause task
// @ID pause-task
// @Security CoderSessionToken
// @Accept json
// @Produce json
// @Tags Tasks
// @Param user path string true "Username, user ID, or 'me' for the authenticated user"
// @Param task path string true "Task ID" format(uuid)
@@ -1325,7 +1325,7 @@ func (api *API) pauseTask(rw http.ResponseWriter, r *http.Request) {
// @Summary Resume task
// @ID resume-task
// @Security CoderSessionToken
// @Accept json
// @Produce json
// @Tags Tasks
// @Param user path string true "Username, user ID, or 'me' for the authenticated user"
// @Param task path string true "Task ID" format(uuid)
+2 -2
View File
@@ -5894,7 +5894,7 @@ const docTemplate = `{
"CoderSessionToken": []
}
],
"consumes": [
"produces": [
"application/json"
],
"tags": [
@@ -5936,7 +5936,7 @@ const docTemplate = `{
"CoderSessionToken": []
}
],
"consumes": [
"produces": [
"application/json"
],
"tags": [
+2 -2
View File
@@ -5213,7 +5213,7 @@
"CoderSessionToken": []
}
],
"consumes": ["application/json"],
"produces": ["application/json"],
"tags": ["Tasks"],
"summary": "Pause task",
"operationId": "pause-task",
@@ -5251,7 +5251,7 @@
"CoderSessionToken": []
}
],
"consumes": ["application/json"],
"produces": ["application/json"],
"tags": ["Tasks"],
"summary": "Resume task",
"operationId": "resume-task",
+10 -5
View File
@@ -98,6 +98,7 @@ import (
"github.com/coder/coder/v2/provisionersdk"
"github.com/coder/coder/v2/site"
"github.com/coder/coder/v2/tailnet"
"github.com/coder/coder/v2/tailnet/derpmetrics"
"github.com/coder/quartz"
"github.com/coder/serpent"
)
@@ -329,9 +330,10 @@ func New(options *Options) *API {
panic("developer error: options.PrometheusRegistry is nil and not running a unit test")
}
if options.DeploymentValues.DisableOwnerWorkspaceExec {
if options.DeploymentValues.DisableOwnerWorkspaceExec || options.DeploymentValues.DisableWorkspaceSharing {
rbac.ReloadBuiltinRoles(&rbac.RoleOptions{
NoOwnerWorkspaceExec: true,
NoOwnerWorkspaceExec: bool(options.DeploymentValues.DisableOwnerWorkspaceExec),
NoWorkspaceSharing: bool(options.DeploymentValues.DisableWorkspaceSharing),
})
}
@@ -882,17 +884,18 @@ func New(options *Options) *API {
apiRateLimiter := httpmw.RateLimit(options.APIRateLimit, time.Minute)
// Register DERP on expvar HTTP handler, which we serve below in the router, c.f. expvar.Handler()
// These are the metrics the DERP server exposes.
// TODO: export via prometheus
expDERPOnce.Do(func() {
// We need to do this via a global Once because expvar registry is global and panics if we
// register multiple times. In production there is only one Coderd and one DERP server per
// process, but in testing, we create multiple of both, so the Once protects us from
// panicking.
if options.DERPServer != nil {
if options.DERPServer != nil && expvar.Get("derp") == nil {
expvar.Publish("derp", api.DERPServer.ExpVar())
}
})
if options.PrometheusRegistry != nil && options.DERPServer != nil {
options.PrometheusRegistry.MustRegister(derpmetrics.NewDERPExpvarCollector(options.DERPServer))
}
cors := httpmw.Cors(options.DeploymentValues.Dangerous.AllowAllCors.Value())
prometheusMW := httpmw.Prometheus(options.PrometheusRegistry)
@@ -1734,6 +1737,8 @@ func New(options *Options) *API {
r.Patch("/input", api.taskUpdateInput)
r.Post("/send", api.taskSend)
r.Get("/logs", api.taskLogs)
r.Post("/pause", api.pauseTask)
r.Post("/resume", api.resumeTask)
})
})
})
+28 -2
View File
@@ -384,9 +384,35 @@ func TestCSRFExempt(t *testing.T) {
data, _ := io.ReadAll(resp.Body)
_ = resp.Body.Close()
// A StatusBadGateway means Coderd tried to proxy to the agent and failed because the agent
// A StatusNotFound means Coderd tried to proxy to the agent and failed because the agent
// was not there. This means CSRF did not block the app request, which is what we want.
require.Equal(t, http.StatusBadGateway, resp.StatusCode, "status code 500 is CSRF failure")
require.Equal(t, http.StatusNotFound, resp.StatusCode, "status code 500 is CSRF failure")
require.NotContains(t, string(data), "CSRF")
})
}
// TestDERPMetrics checks that coderd registers the DERP server expvar
// collector on its Prometheus registry during startup.
func TestDERPMetrics(t *testing.T) {
	t.Parallel()
	_, _, api := coderdtest.NewWithAPI(t, nil)
	require.NotNil(t, api.Options.DERPServer, "DERP server should be configured")
	require.NotNil(t, api.Options.PrometheusRegistry, "Prometheus registry should be configured")
	// The registry is created internally by coderd; gathering from it proves
	// the DERP metrics were registered at startup.
	families, err := api.Options.PrometheusRegistry.Gather()
	require.NoError(t, err)
	registered := make(map[string]struct{}, len(families))
	for _, fam := range families {
		registered[fam.GetName()] = struct{}{}
	}
	assert.Contains(t, registered, "coder_derp_server_connections",
		"expected coder_derp_server_connections to be registered")
	assert.Contains(t, registered, "coder_derp_server_bytes_received_total",
		"expected coder_derp_server_bytes_received_total to be registered")
	assert.Contains(t, registered, "coder_derp_server_packets_dropped_reason_total",
		"expected coder_derp_server_packets_dropped_reason_total to be registered")
}
+14 -2
View File
@@ -106,6 +106,8 @@ import (
"github.com/coder/quartz"
)
const DefaultDERPMeshKey = "test-key"
const defaultTestDaemonName = "test-daemon"
type Options struct {
@@ -512,8 +514,18 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can
stunAddresses = options.DeploymentValues.DERP.Server.STUNAddresses.Value()
}
derpServer := derp.NewServer(key.NewNode(), tailnet.Logger(options.Logger.Named("derp").Leveled(slog.LevelDebug)))
derpServer.SetMeshKey("test-key")
const derpMeshKey = "test-key"
// Technically AGPL coderd servers don't set this value, but it doesn't
// change any behavior. It's useful for enterprise tests.
err = options.Database.InsertDERPMeshKey(dbauthz.AsSystemRestricted(ctx), derpMeshKey) //nolint:gocritic // test
if !database.IsUniqueViolation(err, database.UniqueSiteConfigsKeyKey) {
require.NoError(t, err, "insert DERP mesh key")
}
var derpServer *derp.Server
if options.DeploymentValues.DERP.Server.Enable.Value() {
derpServer = derp.NewServer(key.NewNode(), tailnet.Logger(options.Logger.Named("derp").Leveled(slog.LevelDebug)))
derpServer.SetMeshKey(derpMeshKey)
}
// match default with cli default
if options.SSHKeygenAlgorithm == "" {
+120
View File
@@ -8742,3 +8742,123 @@ func TestInsertWorkspaceAgentDevcontainers(t *testing.T) {
})
}
}
// TestGetWorkspaceBuildMetricsByResourceID exercises the build-metrics query:
// the happy path with a single ready parent agent, and the exclusion of
// devcontainer sub-agents (agents with a parent_id) from the aggregation.
func TestGetWorkspaceBuildMetricsByResourceID(t *testing.T) {
	t.Parallel()
	t.Run("OK", func(t *testing.T) {
		t.Parallel()
		db, resource, _, parentReadyAt := setupBuildMetricsFixture(t)
		row, err := db.GetWorkspaceBuildMetricsByResourceID(context.Background(), resource.ID)
		require.NoError(t, err)
		require.True(t, row.AllAgentsReady)
		require.True(t, parentReadyAt.Equal(row.LastAgentReadyAt))
		require.Equal(t, "success", row.WorstStatus)
	})
	t.Run("SubAgentExcluded", func(t *testing.T) {
		t.Parallel()
		db, resource, parentAgent, parentReadyAt := setupBuildMetricsFixture(t)
		// Sub-agent with ready_at 1 hour later should be excluded.
		subAgentReadyAt := parentReadyAt.Add(time.Hour)
		subAgentStartedAt := subAgentReadyAt.Add(-time.Second)
		_ = dbgen.WorkspaceSubAgent(t, db, parentAgent, database.WorkspaceAgent{
			StartedAt:      sql.NullTime{Time: subAgentStartedAt, Valid: true},
			ReadyAt:        sql.NullTime{Time: subAgentReadyAt, Valid: true},
			LifecycleState: database.WorkspaceAgentLifecycleStateReady,
		})
		row, err := db.GetWorkspaceBuildMetricsByResourceID(context.Background(), resource.ID)
		require.NoError(t, err)
		require.True(t, row.AllAgentsReady)
		// LastAgentReadyAt should be the parent's, not the sub-agent's.
		require.True(t, parentReadyAt.Equal(row.LastAgentReadyAt))
		require.Equal(t, "success", row.WorstStatus)
	})
}

// setupBuildMetricsFixture provisions the minimal org/user/template/workspace/
// build/resource graph plus a single Ready parent agent, returning the handles
// each subtest needs. Shared by both TestGetWorkspaceBuildMetricsByResourceID
// subtests to avoid duplicating the ~30-line setup.
func setupBuildMetricsFixture(t *testing.T) (database.Store, database.WorkspaceResource, database.WorkspaceAgent, time.Time) {
	t.Helper()
	db, _ := dbtestutil.NewDB(t)
	org := dbgen.Organization(t, db, database.Organization{})
	user := dbgen.User(t, db, database.User{})
	tmpl := dbgen.Template(t, db, database.Template{
		OrganizationID: org.ID,
		CreatedBy:      user.ID,
	})
	tv := dbgen.TemplateVersion(t, db, database.TemplateVersion{
		OrganizationID: org.ID,
		TemplateID:     uuid.NullUUID{UUID: tmpl.ID, Valid: true},
		CreatedBy:      user.ID,
	})
	ws := dbgen.Workspace(t, db, database.WorkspaceTable{
		OrganizationID:   org.ID,
		TemplateID:       tmpl.ID,
		OwnerID:          user.ID,
		AutomaticUpdates: database.AutomaticUpdatesNever,
	})
	job := dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
		OrganizationID: org.ID,
		Type:           database.ProvisionerJobTypeWorkspaceBuild,
	})
	_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
		WorkspaceID:       ws.ID,
		TemplateVersionID: tv.ID,
		JobID:             job.ID,
		InitiatorID:       user.ID,
	})
	resource := dbgen.WorkspaceResource(t, db, database.WorkspaceResource{
		JobID: job.ID,
	})
	parentReadyAt := dbtime.Now()
	parentStartedAt := parentReadyAt.Add(-time.Second)
	parentAgent := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{
		ResourceID:     resource.ID,
		StartedAt:      sql.NullTime{Time: parentStartedAt, Valid: true},
		ReadyAt:        sql.NullTime{Time: parentReadyAt, Valid: true},
		LifecycleState: database.WorkspaceAgentLifecycleStateReady,
	})
	return db, resource, parentAgent, parentReadyAt
}
+1 -1
View File
@@ -21599,7 +21599,7 @@ JOIN workspaces w ON wb.workspace_id = w.id
JOIN templates t ON w.template_id = t.id
JOIN organizations o ON t.organization_id = o.id
JOIN workspace_resources wr ON wr.job_id = wb.job_id
JOIN workspace_agents wa ON wa.resource_id = wr.id
JOIN workspace_agents wa ON wa.resource_id = wr.id AND wa.parent_id IS NULL
WHERE wb.job_id = (SELECT job_id FROM workspace_resources WHERE workspace_resources.id = $1)
GROUP BY wb.created_at, wb.transition, t.name, o.name, w.owner_id
`
+1 -1
View File
@@ -268,7 +268,7 @@ JOIN workspaces w ON wb.workspace_id = w.id
JOIN templates t ON w.template_id = t.id
JOIN organizations o ON t.organization_id = o.id
JOIN workspace_resources wr ON wr.job_id = wb.job_id
JOIN workspace_agents wa ON wa.resource_id = wr.id
JOIN workspace_agents wa ON wa.resource_id = wr.id AND wa.parent_id IS NULL
WHERE wb.job_id = (SELECT job_id FROM workspace_resources WHERE workspace_resources.id = $1)
GROUP BY wb.created_at, wb.transition, t.name, o.name, w.owner_id;
+9
View File
@@ -287,9 +287,18 @@ func (api *API) paginatedMembers(rw http.ResponseWriter, r *http.Request) {
memberRows = append(memberRows, row)
}
if len(paginatedMemberRows) == 0 {
httpapi.Write(ctx, rw, http.StatusOK, codersdk.PaginatedMembersResponse{
Members: []codersdk.OrganizationMemberWithUserData{},
Count: 0,
})
return
}
members, err := convertOrganizationMembersWithUserData(ctx, api.Database, memberRows)
if err != nil {
httpapi.InternalServerError(rw, err)
return
}
resp := codersdk.PaginatedMembersResponse{
@@ -19,9 +19,9 @@ import (
)
var (
templatesActiveUsersDesc = prometheus.NewDesc("coderd_insights_templates_active_users", "The number of active users of the template.", []string{"template_name"}, nil)
applicationsUsageSecondsDesc = prometheus.NewDesc("coderd_insights_applications_usage_seconds", "The application usage per template.", []string{"template_name", "application_name", "slug"}, nil)
parametersDesc = prometheus.NewDesc("coderd_insights_parameters", "The parameter usage per template.", []string{"template_name", "parameter_name", "parameter_type", "parameter_value"}, nil)
templatesActiveUsersDesc = prometheus.NewDesc("coderd_insights_templates_active_users", "The number of active users of the template.", []string{"template_name", "organization_name"}, nil)
applicationsUsageSecondsDesc = prometheus.NewDesc("coderd_insights_applications_usage_seconds", "The application usage per template.", []string{"template_name", "application_name", "slug", "organization_name"}, nil)
parametersDesc = prometheus.NewDesc("coderd_insights_parameters", "The parameter usage per template.", []string{"template_name", "parameter_name", "parameter_type", "parameter_value", "organization_name"}, nil)
)
type MetricsCollector struct {
@@ -38,7 +38,8 @@ type insightsData struct {
apps []database.GetTemplateAppInsightsByTemplateRow
params []parameterRow
templateNames map[uuid.UUID]string
templateNames map[uuid.UUID]string
organizationNames map[uuid.UUID]string // template ID → org name
}
type parameterRow struct {
@@ -137,6 +138,7 @@ func (mc *MetricsCollector) Run(ctx context.Context) (func(), error) {
templateIDs := uniqueTemplateIDs(templateInsights, appInsights, paramInsights)
templateNames := make(map[uuid.UUID]string, len(templateIDs))
organizationNames := make(map[uuid.UUID]string, len(templateIDs))
if len(templateIDs) > 0 {
templates, err := mc.database.GetTemplatesWithFilter(ctx, database.GetTemplatesWithFilterParams{
IDs: templateIDs,
@@ -146,6 +148,31 @@ func (mc *MetricsCollector) Run(ctx context.Context) (func(), error) {
return
}
templateNames = onlyTemplateNames(templates)
// Build org name lookup so that metrics can
// distinguish templates with the same name across
// different organizations.
orgIDs := make([]uuid.UUID, 0, len(templates))
for _, t := range templates {
orgIDs = append(orgIDs, t.OrganizationID)
}
orgIDs = slice.Unique(orgIDs)
orgs, err := mc.database.GetOrganizations(ctx, database.GetOrganizationsParams{
IDs: orgIDs,
})
if err != nil {
mc.logger.Error(ctx, "unable to fetch organizations from database", slog.Error(err))
return
}
orgNameByID := make(map[uuid.UUID]string, len(orgs))
for _, o := range orgs {
orgNameByID[o.ID] = o.Name
}
organizationNames = make(map[uuid.UUID]string, len(templates))
for _, t := range templates {
organizationNames[t.ID] = orgNameByID[t.OrganizationID]
}
}
// Refresh the collector state
@@ -154,7 +181,8 @@ func (mc *MetricsCollector) Run(ctx context.Context) (func(), error) {
apps: appInsights,
params: paramInsights,
templateNames: templateNames,
templateNames: templateNames,
organizationNames: organizationNames,
})
}
@@ -194,44 +222,46 @@ func (mc *MetricsCollector) Collect(metricsCh chan<- prometheus.Metric) {
// Custom apps
for _, appRow := range data.apps {
metricsCh <- prometheus.MustNewConstMetric(applicationsUsageSecondsDesc, prometheus.GaugeValue, float64(appRow.UsageSeconds), data.templateNames[appRow.TemplateID],
appRow.DisplayName, appRow.SlugOrPort)
appRow.DisplayName, appRow.SlugOrPort, data.organizationNames[appRow.TemplateID])
}
// Built-in apps
for _, templateRow := range data.templates {
orgName := data.organizationNames[templateRow.TemplateID]
metricsCh <- prometheus.MustNewConstMetric(applicationsUsageSecondsDesc, prometheus.GaugeValue,
float64(templateRow.UsageVscodeSeconds),
data.templateNames[templateRow.TemplateID],
codersdk.TemplateBuiltinAppDisplayNameVSCode,
"")
"", orgName)
metricsCh <- prometheus.MustNewConstMetric(applicationsUsageSecondsDesc, prometheus.GaugeValue,
float64(templateRow.UsageJetbrainsSeconds),
data.templateNames[templateRow.TemplateID],
codersdk.TemplateBuiltinAppDisplayNameJetBrains,
"")
"", orgName)
metricsCh <- prometheus.MustNewConstMetric(applicationsUsageSecondsDesc, prometheus.GaugeValue,
float64(templateRow.UsageReconnectingPtySeconds),
data.templateNames[templateRow.TemplateID],
codersdk.TemplateBuiltinAppDisplayNameWebTerminal,
"")
"", orgName)
metricsCh <- prometheus.MustNewConstMetric(applicationsUsageSecondsDesc, prometheus.GaugeValue,
float64(templateRow.UsageSshSeconds),
data.templateNames[templateRow.TemplateID],
codersdk.TemplateBuiltinAppDisplayNameSSH,
"")
"", orgName)
}
// Templates
for _, templateRow := range data.templates {
metricsCh <- prometheus.MustNewConstMetric(templatesActiveUsersDesc, prometheus.GaugeValue, float64(templateRow.ActiveUsers), data.templateNames[templateRow.TemplateID])
metricsCh <- prometheus.MustNewConstMetric(templatesActiveUsersDesc, prometheus.GaugeValue, float64(templateRow.ActiveUsers), data.templateNames[templateRow.TemplateID], data.organizationNames[templateRow.TemplateID])
}
// Parameters
for _, parameterRow := range data.params {
metricsCh <- prometheus.MustNewConstMetric(parametersDesc, prometheus.GaugeValue, float64(parameterRow.count), data.templateNames[parameterRow.templateID], parameterRow.name, parameterRow.aType, parameterRow.value)
metricsCh <- prometheus.MustNewConstMetric(parametersDesc, prometheus.GaugeValue, float64(parameterRow.count), data.templateNames[parameterRow.templateID], parameterRow.name, parameterRow.aType, parameterRow.value, data.organizationNames[parameterRow.templateID])
}
}
@@ -1,13 +1,13 @@
{
"coderd_insights_applications_usage_seconds[application_name=JetBrains,slug=,template_name=golden-template]": 60,
"coderd_insights_applications_usage_seconds[application_name=Visual Studio Code,slug=,template_name=golden-template]": 60,
"coderd_insights_applications_usage_seconds[application_name=Web Terminal,slug=,template_name=golden-template]": 0,
"coderd_insights_applications_usage_seconds[application_name=SSH,slug=,template_name=golden-template]": 60,
"coderd_insights_applications_usage_seconds[application_name=Golden Slug,slug=golden-slug,template_name=golden-template]": 180,
"coderd_insights_parameters[parameter_name=first_parameter,parameter_type=string,parameter_value=Foobar,template_name=golden-template]": 1,
"coderd_insights_parameters[parameter_name=first_parameter,parameter_type=string,parameter_value=Baz,template_name=golden-template]": 1,
"coderd_insights_parameters[parameter_name=second_parameter,parameter_type=bool,parameter_value=true,template_name=golden-template]": 2,
"coderd_insights_parameters[parameter_name=third_parameter,parameter_type=number,parameter_value=789,template_name=golden-template]": 1,
"coderd_insights_parameters[parameter_name=third_parameter,parameter_type=number,parameter_value=999,template_name=golden-template]": 1,
"coderd_insights_templates_active_users[template_name=golden-template]": 1
"coderd_insights_applications_usage_seconds[application_name=JetBrains,organization_name=coder,slug=,template_name=golden-template]": 60,
"coderd_insights_applications_usage_seconds[application_name=Visual Studio Code,organization_name=coder,slug=,template_name=golden-template]": 60,
"coderd_insights_applications_usage_seconds[application_name=Web Terminal,organization_name=coder,slug=,template_name=golden-template]": 0,
"coderd_insights_applications_usage_seconds[application_name=SSH,organization_name=coder,slug=,template_name=golden-template]": 60,
"coderd_insights_applications_usage_seconds[application_name=Golden Slug,organization_name=coder,slug=golden-slug,template_name=golden-template]": 180,
"coderd_insights_parameters[organization_name=coder,parameter_name=first_parameter,parameter_type=string,parameter_value=Foobar,template_name=golden-template]": 1,
"coderd_insights_parameters[organization_name=coder,parameter_name=first_parameter,parameter_type=string,parameter_value=Baz,template_name=golden-template]": 1,
"coderd_insights_parameters[organization_name=coder,parameter_name=second_parameter,parameter_type=bool,parameter_value=true,template_name=golden-template]": 2,
"coderd_insights_parameters[organization_name=coder,parameter_name=third_parameter,parameter_type=number,parameter_value=789,template_name=golden-template]": 1,
"coderd_insights_parameters[organization_name=coder,parameter_name=third_parameter,parameter_type=number,parameter_value=999,template_name=golden-template]": 1,
"coderd_insights_templates_active_users[organization_name=coder,template_name=golden-template]": 1
}
@@ -564,7 +564,7 @@ func (s *server) acquireProtoJob(ctx context.Context, job database.ProvisionerJo
// The check `s.OIDCConfig != nil` is not as strict, since it can be an interface
// pointing to a typed nil.
if !reflect.ValueOf(s.OIDCConfig).IsNil() {
workspaceOwnerOIDCAccessToken, err = obtainOIDCAccessToken(ctx, s.Database, s.OIDCConfig, owner.ID)
workspaceOwnerOIDCAccessToken, err = ObtainOIDCAccessToken(ctx, s.Logger, s.Database, s.OIDCConfig, owner.ID)
if err != nil {
return nil, failJob(fmt.Sprintf("obtain OIDC access token: %s", err))
}
@@ -3075,9 +3075,37 @@ func deleteSessionTokenForUserAndWorkspace(ctx context.Context, db database.Stor
return nil
}
// obtainOIDCAccessToken returns a valid OpenID Connect access token
func shouldRefreshOIDCToken(link database.UserLink) (bool, time.Time) {
if link.OAuthRefreshToken == "" {
// We cannot refresh even if we wanted to
return false, link.OAuthExpiry
}
if link.OAuthExpiry.IsZero() {
// 0 expire means the token never expires, so we shouldn't refresh
return false, link.OAuthExpiry
}
// This handles an edge case where the token is about to expire. A workspace
// build takes a non-trivial amount of time. If the token is to expire during the
// build, then the build risks failure. To mitigate this, refresh the token
// prematurely.
//
// If an OIDC provider issues short-lived tokens less than our defined period,
// the token will always be refreshed on every workspace build.
//
// By setting the expiration backwards, we are effectively shortening the
// time a token can be alive for by 10 minutes.
// Note: This is how it is done in the oauth2 package's own token refreshing logic.
expiresAt := link.OAuthExpiry.Add(-time.Minute * 10)
// Return if the token is assumed to be expired.
return expiresAt.Before(dbtime.Now()), expiresAt
}
// ObtainOIDCAccessToken returns a valid OpenID Connect access token
// for the user if it's able to obtain one, otherwise it returns an empty string.
func obtainOIDCAccessToken(ctx context.Context, db database.Store, oidcConfig promoauth.OAuth2Config, userID uuid.UUID) (string, error) {
func ObtainOIDCAccessToken(ctx context.Context, logger slog.Logger, db database.Store, oidcConfig promoauth.OAuth2Config, userID uuid.UUID) (string, error) {
link, err := db.GetUserLinkByUserIDLoginType(ctx, database.GetUserLinkByUserIDLoginTypeParams{
UserID: userID,
LoginType: database.LoginTypeOIDC,
@@ -3089,11 +3117,13 @@ func obtainOIDCAccessToken(ctx context.Context, db database.Store, oidcConfig pr
return "", xerrors.Errorf("get owner oidc link: %w", err)
}
if link.OAuthExpiry.Before(dbtime.Now()) && !link.OAuthExpiry.IsZero() && link.OAuthRefreshToken != "" {
if shouldRefresh, expiresAt := shouldRefreshOIDCToken(link); shouldRefresh {
token, err := oidcConfig.TokenSource(ctx, &oauth2.Token{
AccessToken: link.OAuthAccessToken,
RefreshToken: link.OAuthRefreshToken,
Expiry: link.OAuthExpiry,
// Use the expiresAt returned by shouldRefreshOIDCToken.
// It will force a refresh with an expired time.
Expiry: expiresAt,
}).Token()
if err != nil {
// If OIDC fails to refresh, we return an empty string and don't fail.
@@ -3118,6 +3148,7 @@ func obtainOIDCAccessToken(ctx context.Context, db database.Store, oidcConfig pr
if err != nil {
return "", xerrors.Errorf("update user link: %w", err)
}
logger.Info(ctx, "refreshed expired OIDC token for user during workspace build", slog.F("user_id", userID))
}
return link.OAuthAccessToken, nil
@@ -16,13 +16,109 @@ import (
"github.com/coder/coder/v2/testutil"
)
func TestShouldRefreshOIDCToken(t *testing.T) {
t.Parallel()
now := dbtime.Now()
testCases := []struct {
name string
link database.UserLink
want bool
}{
{
name: "NoRefreshToken",
link: database.UserLink{OAuthExpiry: now.Add(-time.Hour)},
want: false,
},
{
name: "ZeroExpiry",
link: database.UserLink{OAuthRefreshToken: "refresh"},
want: false,
},
{
name: "LongExpired",
link: database.UserLink{
OAuthRefreshToken: "refresh",
OAuthExpiry: now.Add(-1 * time.Hour),
},
want: true,
},
{
// Edge being "+/- 10 minutes"
name: "EdgeExpired",
link: database.UserLink{
OAuthRefreshToken: "refresh",
OAuthExpiry: now.Add(-1 * time.Minute * 10),
},
want: true,
},
{
name: "Expired",
link: database.UserLink{
OAuthRefreshToken: "refresh",
OAuthExpiry: now.Add(-1 * time.Minute),
},
want: true,
},
{
name: "SoonToBeExpired",
link: database.UserLink{
OAuthRefreshToken: "refresh",
OAuthExpiry: now.Add(5 * time.Minute),
},
want: true,
},
{
name: "SoonToBeExpiredEdge",
link: database.UserLink{
OAuthRefreshToken: "refresh",
OAuthExpiry: now.Add(9 * time.Minute),
},
want: true,
},
{
name: "AfterEdge",
link: database.UserLink{
OAuthRefreshToken: "refresh",
OAuthExpiry: now.Add(11 * time.Minute),
},
want: false,
},
{
name: "NotExpired",
link: database.UserLink{
OAuthRefreshToken: "refresh",
OAuthExpiry: now.Add(time.Hour),
},
want: false,
},
{
name: "NotEvenCloseExpired",
link: database.UserLink{
OAuthRefreshToken: "refresh",
OAuthExpiry: now.Add(time.Hour * 24),
},
want: false,
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
shouldRefresh, _ := shouldRefreshOIDCToken(tc.link)
require.Equal(t, tc.want, shouldRefresh)
})
}
}
func TestObtainOIDCAccessToken(t *testing.T) {
t.Parallel()
ctx := context.Background()
t.Run("NoToken", func(t *testing.T) {
t.Parallel()
db, _ := dbtestutil.NewDB(t)
_, err := obtainOIDCAccessToken(ctx, db, nil, uuid.Nil)
_, err := ObtainOIDCAccessToken(ctx, testutil.Logger(t), db, nil, uuid.Nil)
require.NoError(t, err)
})
t.Run("InvalidConfig", func(t *testing.T) {
@@ -35,7 +131,7 @@ func TestObtainOIDCAccessToken(t *testing.T) {
LoginType: database.LoginTypeOIDC,
OAuthExpiry: dbtime.Now().Add(-time.Hour),
})
_, err := obtainOIDCAccessToken(ctx, db, &oauth2.Config{}, user.ID)
_, err := ObtainOIDCAccessToken(ctx, testutil.Logger(t), db, &oauth2.Config{}, user.ID)
require.NoError(t, err)
})
t.Run("MissingLink", func(t *testing.T) {
@@ -44,7 +140,7 @@ func TestObtainOIDCAccessToken(t *testing.T) {
user := dbgen.User(t, db, database.User{
LoginType: database.LoginTypeOIDC,
})
tok, err := obtainOIDCAccessToken(ctx, db, &oauth2.Config{}, user.ID)
tok, err := ObtainOIDCAccessToken(ctx, testutil.Logger(t), db, &oauth2.Config{}, user.ID)
require.Empty(t, tok)
require.NoError(t, err)
})
@@ -57,7 +153,7 @@ func TestObtainOIDCAccessToken(t *testing.T) {
LoginType: database.LoginTypeOIDC,
OAuthExpiry: dbtime.Now().Add(-time.Hour),
})
_, err := obtainOIDCAccessToken(ctx, db, &testutil.OAuth2Config{
_, err := ObtainOIDCAccessToken(ctx, testutil.Logger(t), db, &testutil.OAuth2Config{
Token: &oauth2.Token{
AccessToken: "token",
},
@@ -15,6 +15,7 @@ import (
"testing"
"time"
"github.com/golang-jwt/jwt/v4"
"github.com/google/uuid"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
@@ -30,6 +31,7 @@ import (
"github.com/coder/coder/v2/coderd"
"github.com/coder/coder/v2/coderd/audit"
"github.com/coder/coder/v2/coderd/coderdtest"
"github.com/coder/coder/v2/coderd/coderdtest/oidctest"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/dbgen"
@@ -58,6 +60,175 @@ import (
"github.com/coder/serpent"
)
// TestTokenIsRefreshedEarly creates a fake OIDC IDP that sets expiration times
// of the token to values that are "near expiration". Expiration being 10minutes
// earlier than it needs to be. The `ObtainOIDCAccessToken` should refresh these
// tokens early.
func TestTokenIsRefreshedEarly(t *testing.T) {
t.Parallel()
t.Run("WithCoderd", func(t *testing.T) {
t.Parallel()
tokenRefreshCount := 0
fake := oidctest.NewFakeIDP(t,
oidctest.WithServing(),
oidctest.WithDefaultExpire(time.Minute*8),
oidctest.WithRefresh(func(email string) error {
tokenRefreshCount++
return nil
}),
)
cfg := fake.OIDCConfig(t, nil, func(cfg *coderd.OIDCConfig) {
cfg.AllowSignups = true
})
db, ps := dbtestutil.NewDB(t)
owner := coderdtest.New(t, &coderdtest.Options{
OIDCConfig: cfg,
IncludeProvisionerDaemon: true,
Database: db,
Pubsub: ps,
})
first := coderdtest.CreateFirstUser(t, owner)
version := coderdtest.CreateTemplateVersion(t, owner, first.OrganizationID, nil)
coderdtest.AwaitTemplateVersionJobCompleted(t, owner, version.ID)
template := coderdtest.CreateTemplate(t, owner, first.OrganizationID, version.ID)
// Setup an OIDC user.
client, _ := fake.Login(t, owner, jwt.MapClaims{
"email": "user@unauthorized.com",
"email_verified": true,
"sub": uuid.NewString(),
})
// Creating a workspace should refresh the oidc early.
tokenRefreshCount = 0
wrk := coderdtest.CreateWorkspace(t, client, template.ID)
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, wrk.LatestBuild.ID)
require.Equal(t, 1, tokenRefreshCount)
})
}
//nolint:tparallel,paralleltest // Sub tests need to run sequentially.
func TestTokenIsRefreshedEarlyWithoutCoderd(t *testing.T) {
t.Parallel()
tokenRefreshCount := 0
fake := oidctest.NewFakeIDP(t,
oidctest.WithServing(),
oidctest.WithDefaultExpire(time.Minute*8),
oidctest.WithRefresh(func(email string) error {
tokenRefreshCount++
return nil
}),
)
cfg := fake.OIDCConfig(t, nil)
// Fetch a valid token from the fake OIDC provider
token, err := fake.GenerateAuthenticatedToken(jwt.MapClaims{
"email": "user@unauthorized.com",
"email_verified": true,
"sub": uuid.NewString(),
})
require.NoError(t, err)
db, _ := dbtestutil.NewDB(t)
user := dbgen.User(t, db, database.User{})
dbgen.UserLink(t, db, database.UserLink{
UserID: user.ID,
LoginType: database.LoginTypeOIDC,
LinkedID: "foo",
OAuthAccessToken: token.AccessToken,
OAuthRefreshToken: token.RefreshToken,
// The oauth expiry does not really matter, since each test will manually control
// this value.
OAuthExpiry: dbtime.Now().Add(time.Hour),
})
setLinkExpiration := func(t *testing.T, exp time.Time) database.UserLink {
ctx := testutil.Context(t, testutil.WaitShort)
links, err := db.GetUserLinksByUserID(ctx, user.ID)
require.NoError(t, err)
require.Len(t, links, 1)
link := links[0]
newLink, err := db.UpdateUserLink(ctx, database.UpdateUserLinkParams{
OAuthAccessToken: link.OAuthAccessToken,
OAuthAccessTokenKeyID: link.OAuthAccessTokenKeyID,
OAuthRefreshToken: link.OAuthRefreshToken,
OAuthRefreshTokenKeyID: link.OAuthRefreshTokenKeyID,
OAuthExpiry: exp,
Claims: link.Claims,
UserID: link.UserID,
LoginType: link.LoginType,
})
require.NoError(t, err)
return newLink
}
for _, c := range []struct {
name string
// expires is a function to return a more up to date "now".
// Because the oauth library is calling `time.Now()`, we cannot use
// mocked clocks.
expires func() time.Time
refreshExpected bool
}{
{
name: "ZeroExpiry",
expires: func() time.Time { return time.Time{} },
refreshExpected: false,
},
{
name: "LongExpired",
expires: func() time.Time { return dbtime.Now().Add(-time.Hour) },
refreshExpected: true,
},
{
name: "EdgeExpired",
expires: func() time.Time { return dbtime.Now().Add(-time.Minute * 10) },
refreshExpected: true,
},
{
name: "RecentExpired",
expires: func() time.Time { return dbtime.Now().Add(-time.Second * -1) },
refreshExpected: true,
},
{
name: "Future",
expires: func() time.Time { return dbtime.Now().Add(time.Hour) },
refreshExpected: false,
},
{
name: "FutureWithinRefreshWindow",
expires: func() time.Time { return dbtime.Now().Add(time.Minute * 8) },
refreshExpected: true,
},
} {
t.Run(c.name, func(t *testing.T) {
ctx := testutil.Context(t, testutil.WaitShort)
oldLink := setLinkExpiration(t, c.expires())
tokenRefreshCount = 0
_, err := provisionerdserver.ObtainOIDCAccessToken(ctx, testutil.Logger(t), db, cfg, user.ID)
require.NoError(t, err)
links, err := db.GetUserLinksByUserID(ctx, user.ID)
require.NoError(t, err)
require.Len(t, links, 1)
newLink := links[0]
if c.refreshExpected {
require.Equal(t, 1, tokenRefreshCount)
require.NotEqual(t, oldLink.OAuthAccessToken, newLink.OAuthAccessToken)
require.NotEqual(t, oldLink.OAuthRefreshToken, newLink.OAuthRefreshToken)
} else {
require.Equal(t, 0, tokenRefreshCount)
require.Equal(t, oldLink.OAuthAccessToken, newLink.OAuthAccessToken)
require.Equal(t, oldLink.OAuthRefreshToken, newLink.OAuthRefreshToken)
}
})
}
}
func testTemplateScheduleStore() *atomic.Pointer[schedule.TemplateScheduleStore] {
poitr := &atomic.Pointer[schedule.TemplateScheduleStore]{}
store := schedule.NewAGPLTemplateScheduleStore()
+38 -18
View File
@@ -244,6 +244,7 @@ func SystemRoleName(name string) bool {
type RoleOptions struct {
NoOwnerWorkspaceExec bool
NoWorkspaceSharing bool
}
// ReservedRoleName exists because the database should only allow unique role
@@ -267,12 +268,23 @@ func ReloadBuiltinRoles(opts *RoleOptions) {
opts = &RoleOptions{}
}
denyPermissions := []Permission{}
if opts.NoWorkspaceSharing {
denyPermissions = append(denyPermissions, Permission{
Negate: true,
ResourceType: ResourceWorkspace.Type,
Action: policy.ActionShare,
})
}
ownerWorkspaceActions := ResourceWorkspace.AvailableActions()
if opts.NoOwnerWorkspaceExec {
// Remove ssh and application connect from the owner role. This
// prevents owners from have exec access to all workspaces.
ownerWorkspaceActions = slice.Omit(ownerWorkspaceActions,
policy.ActionApplicationConnect, policy.ActionSSH)
ownerWorkspaceActions = slice.Omit(
ownerWorkspaceActions,
policy.ActionApplicationConnect, policy.ActionSSH,
)
}
// Static roles that never change should be allocated in a closure.
@@ -295,7 +307,8 @@ func ReloadBuiltinRoles(opts *RoleOptions) {
// Explicitly setting PrebuiltWorkspace permissions for clarity.
// Note: even without PrebuiltWorkspace permissions, access is still granted via Workspace permissions.
ResourcePrebuiltWorkspace.Type: {policy.ActionUpdate, policy.ActionDelete},
})...),
})...,
),
User: []Permission{},
ByOrgID: map[string]OrgPermissions{},
}.withCachedRegoValue()
@@ -303,13 +316,17 @@ func ReloadBuiltinRoles(opts *RoleOptions) {
memberRole := Role{
Identifier: RoleMember(),
DisplayName: "Member",
Site: Permissions(map[string][]policy.Action{
ResourceAssignRole.Type: {policy.ActionRead},
// All users can see OAuth2 provider applications.
ResourceOauth2App.Type: {policy.ActionRead},
ResourceWorkspaceProxy.Type: {policy.ActionRead},
}),
User: append(allPermsExcept(ResourceWorkspaceDormant, ResourcePrebuiltWorkspace, ResourceWorkspace, ResourceUser, ResourceOrganizationMember, ResourceOrganizationMember, ResourceBoundaryUsage),
Site: append(
Permissions(map[string][]policy.Action{
ResourceAssignRole.Type: {policy.ActionRead},
// All users can see OAuth2 provider applications.
ResourceOauth2App.Type: {policy.ActionRead},
ResourceWorkspaceProxy.Type: {policy.ActionRead},
}),
denyPermissions...,
),
User: append(
allPermsExcept(ResourceWorkspaceDormant, ResourcePrebuiltWorkspace, ResourceWorkspace, ResourceUser, ResourceOrganizationMember, ResourceOrganizationMember, ResourceBoundaryUsage),
Permissions(map[string][]policy.Action{
// Users cannot do create/update/delete on themselves, but they
// can read their own details.
@@ -433,14 +450,17 @@ func ReloadBuiltinRoles(opts *RoleOptions) {
ByOrgID: map[string]OrgPermissions{
// Org admins should not have workspace exec perms.
organizationID.String(): {
Org: append(allPermsExcept(ResourceWorkspace, ResourceWorkspaceDormant, ResourcePrebuiltWorkspace, ResourceAssignRole, ResourceUserSecret, ResourceBoundaryUsage), Permissions(map[string][]policy.Action{
ResourceWorkspaceDormant.Type: {policy.ActionRead, policy.ActionDelete, policy.ActionCreate, policy.ActionUpdate, policy.ActionWorkspaceStop, policy.ActionCreateAgent, policy.ActionDeleteAgent, policy.ActionUpdateAgent},
ResourceWorkspace.Type: slice.Omit(ResourceWorkspace.AvailableActions(), policy.ActionApplicationConnect, policy.ActionSSH),
// PrebuiltWorkspaces are a subset of Workspaces.
// Explicitly setting PrebuiltWorkspace permissions for clarity.
// Note: even without PrebuiltWorkspace permissions, access is still granted via Workspace permissions.
ResourcePrebuiltWorkspace.Type: {policy.ActionUpdate, policy.ActionDelete},
})...),
Org: append(
allPermsExcept(ResourceWorkspace, ResourceWorkspaceDormant, ResourcePrebuiltWorkspace, ResourceAssignRole, ResourceUserSecret, ResourceBoundaryUsage),
Permissions(map[string][]policy.Action{
ResourceWorkspace.Type: slice.Omit(ResourceWorkspace.AvailableActions(), policy.ActionApplicationConnect, policy.ActionSSH),
ResourceWorkspaceDormant.Type: {policy.ActionRead, policy.ActionDelete, policy.ActionCreate, policy.ActionUpdate, policy.ActionWorkspaceStop, policy.ActionCreateAgent, policy.ActionDeleteAgent, policy.ActionUpdateAgent},
// PrebuiltWorkspaces are a subset of Workspaces.
// Explicitly setting PrebuiltWorkspace permissions for clarity.
// Note: even without PrebuiltWorkspace permissions, access is still granted via Workspace permissions.
ResourcePrebuiltWorkspace.Type: {policy.ActionUpdate, policy.ActionDelete},
})...,
),
Member: []Permission{},
},
},
+2 -1
View File
@@ -40,7 +40,8 @@ var externalLowLevel = map[ScopeName]struct{}{
"file:create": {},
"file:*": {},
// Users (personal profile only)
// Users
"user:read": {},
"user:read_personal": {},
"user:update_personal": {},
"user.*": {},
@@ -62,6 +62,7 @@ func TestIsExternalScope(t *testing.T) {
require.True(t, IsExternalScope("template:use"))
require.True(t, IsExternalScope("workspace:*"))
require.True(t, IsExternalScope("coder:workspaces.create"))
require.True(t, IsExternalScope("user:read"))
require.False(t, IsExternalScope("debug_info:read")) // internal-only
require.False(t, IsExternalScope("unknown:read"))
}
+2 -2
View File
@@ -177,7 +177,7 @@ func generateFromPrompt(prompt string) (TaskName, error) {
// Ensure display name is never empty
displayName = strings.ReplaceAll(name, "-", " ")
}
displayName = strings.ToUpper(displayName[:1]) + displayName[1:]
displayName = strutil.Capitalize(displayName)
return TaskName{
Name: taskName,
@@ -269,7 +269,7 @@ func generateFromAnthropic(ctx context.Context, prompt string, apiKey string, mo
// Ensure display name is never empty
displayName = strings.ReplaceAll(taskNameResponse.Name, "-", " ")
}
displayName = strings.ToUpper(displayName[:1]) + displayName[1:]
displayName = strutil.Capitalize(displayName)
return TaskName{
Name: name,
+13
View File
@@ -49,6 +49,19 @@ func TestGenerate(t *testing.T) {
require.NotEmpty(t, taskName.DisplayName)
})
t.Run("FromPromptMultiByte", func(t *testing.T) {
t.Setenv("ANTHROPIC_API_KEY", "")
ctx := testutil.Context(t, testutil.WaitShort)
taskName := taskname.Generate(ctx, testutil.Logger(t), "über cool feature")
require.NoError(t, codersdk.NameValid(taskName.Name))
require.True(t, len(taskName.DisplayName) > 0)
// The display name must start with "Ü", not corrupted bytes.
require.Equal(t, "Über cool feature", taskName.DisplayName)
})
t.Run("Fallback", func(t *testing.T) {
// Ensure no API key
t.Setenv("ANTHROPIC_API_KEY", "")
+22 -10
View File
@@ -5,6 +5,7 @@ import (
"strconv"
"strings"
"unicode"
"unicode/utf8"
"github.com/acarl005/stripansi"
"github.com/microcosm-cc/bluemonday"
@@ -53,7 +54,7 @@ const (
TruncateWithFullWords TruncateOption = 1 << 1
)
// Truncate truncates s to n characters.
// Truncate truncates s to n runes.
// Additional behaviors can be specified using TruncateOptions.
func Truncate(s string, n int, opts ...TruncateOption) string {
var options TruncateOption
@@ -63,7 +64,8 @@ func Truncate(s string, n int, opts ...TruncateOption) string {
if n < 1 {
return ""
}
if len(s) <= n {
runes := []rune(s)
if len(runes) <= n {
return s
}
@@ -72,18 +74,18 @@ func Truncate(s string, n int, opts ...TruncateOption) string {
maxLen--
}
var sb strings.Builder
// If we need to truncate to full words, find the last word boundary before n.
if options&TruncateWithFullWords != 0 {
lastWordBoundary := strings.LastIndexFunc(s[:maxLen], unicode.IsSpace)
// Convert the rune-safe prefix to a string, then find
// the last word boundary (byte offset within that prefix).
truncated := string(runes[:maxLen])
lastWordBoundary := strings.LastIndexFunc(truncated, unicode.IsSpace)
if lastWordBoundary < 0 {
// We cannot find a word boundary. At this point, we'll truncate the string.
// It's better than nothing.
_, _ = sb.WriteString(s[:maxLen])
} else { // lastWordBoundary <= maxLen
_, _ = sb.WriteString(s[:lastWordBoundary])
_, _ = sb.WriteString(truncated)
} else {
_, _ = sb.WriteString(truncated[:lastWordBoundary])
}
} else {
_, _ = sb.WriteString(s[:maxLen])
_, _ = sb.WriteString(string(runes[:maxLen]))
}
if options&TruncateWithEllipsis != 0 {
@@ -126,3 +128,13 @@ func UISanitize(in string) string {
}
return strings.TrimSpace(b.String())
}
// Capitalize returns s with its first rune upper-cased. It is safe for
// multi-byte UTF-8 characters, unlike naive byte-slicing approaches.
func Capitalize(s string) string {
r, size := utf8.DecodeRuneInString(s)
if size == 0 {
return s
}
return string(unicode.ToUpper(r)) + s[size:]
}
+32
View File
@@ -57,6 +57,17 @@ func TestTruncate(t *testing.T) {
{"foo bar", 1, "…", []strings.TruncateOption{strings.TruncateWithFullWords, strings.TruncateWithEllipsis}},
{"foo bar", 0, "", []strings.TruncateOption{strings.TruncateWithFullWords, strings.TruncateWithEllipsis}},
{"This is a very long task prompt that should be truncated to 160 characters. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", 160, "This is a very long task prompt that should be truncated to 160 characters. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod tempor…", []strings.TruncateOption{strings.TruncateWithFullWords, strings.TruncateWithEllipsis}},
// Multi-byte rune handling.
{"日本語テスト", 3, "日本語", nil},
{"日本語テスト", 4, "日本語テ", nil},
{"日本語テスト", 6, "日本語テスト", nil},
{"日本語テスト", 4, "日本語…", []strings.TruncateOption{strings.TruncateWithEllipsis}},
{"🎉🎊🎈🎁", 2, "🎉🎊", nil},
{"🎉🎊🎈🎁", 3, "🎉🎊…", []strings.TruncateOption{strings.TruncateWithEllipsis}},
// Multi-byte with full-word truncation.
{"hello 日本語", 7, "hello…", []strings.TruncateOption{strings.TruncateWithFullWords, strings.TruncateWithEllipsis}},
{"hello 日本語", 8, "hello 日…", []strings.TruncateOption{strings.TruncateWithEllipsis}},
{"日本語 テスト", 4, "日本語", []strings.TruncateOption{strings.TruncateWithFullWords}},
} {
tName := fmt.Sprintf("%s_%d", tt.s, tt.n)
for _, opt := range tt.options {
@@ -107,3 +118,24 @@ func TestUISanitize(t *testing.T) {
})
}
}
func TestCapitalize(t *testing.T) {
t.Parallel()
tests := []struct {
input string
expected string
}{
{"", ""},
{"hello", "Hello"},
{"über", "Über"},
{"Hello", "Hello"},
{"a", "A"},
}
for _, tt := range tests {
t.Run(fmt.Sprintf("%q", tt.input), func(t *testing.T) {
t.Parallel()
assert.Equal(t, tt.expected, strings.Capitalize(tt.input))
})
}
}
+1 -1
View File
@@ -1015,7 +1015,7 @@ func Test_ResolveRequest(t *testing.T) {
w := rw.Result()
defer w.Body.Close()
require.Equal(t, http.StatusBadGateway, w.StatusCode)
require.Equal(t, http.StatusNotFound, w.StatusCode)
assertConnLogContains(t, rw, r, connLogger, workspace, agentNameUnhealthy, appNameAgentUnhealthy, database.ConnectionTypeWorkspaceApp, me.ID)
require.Len(t, connLogger.ConnectionLogs(), 1)
+2 -2
View File
@@ -77,7 +77,7 @@ func WriteWorkspaceApp500(log slog.Logger, accessURL *url.URL, rw http.ResponseW
})
}
// WriteWorkspaceAppOffline writes a HTML 502 error page for a workspace app. If
// WriteWorkspaceAppOffline writes a HTML 404 error page for a workspace app. If
// appReq is not nil, it will be used to log the request details at debug level.
func WriteWorkspaceAppOffline(log slog.Logger, accessURL *url.URL, rw http.ResponseWriter, r *http.Request, appReq *Request, msg string) {
if appReq != nil {
@@ -94,7 +94,7 @@ func WriteWorkspaceAppOffline(log slog.Logger, accessURL *url.URL, rw http.Respo
}
site.RenderStaticErrorPage(rw, r, site.ErrorPageData{
Status: http.StatusBadGateway,
Status: http.StatusNotFound,
Title: "Application Unavailable",
Description: msg,
Actions: []site.Action{
+4
View File
@@ -5572,6 +5572,10 @@ func TestWorkspaceSharingDisabled(t *testing.T) {
})
t.Run("NoAccessWhenDisabled", func(t *testing.T) {
t.Cleanup(func() {
rbac.ReloadBuiltinRoles(nil)
})
var (
client, db = coderdtest.NewWithDatabase(t, &coderdtest.Options{
DeploymentValues: coderdtest.DeploymentValues(t, func(dv *codersdk.DeploymentValues) {
+4 -19
View File
@@ -6,7 +6,6 @@ import (
"fmt"
"io"
"net/http"
"net/http/cookiejar"
"net/url"
"sync"
"time"
@@ -321,21 +320,15 @@ func (c *Client) connectRPCVersion(ctx context.Context, version *apiversion.APIV
}
rpcURL.RawQuery = q.Encode()
jar, err := cookiejar.New(nil)
if err != nil {
return nil, xerrors.Errorf("create cookie jar: %w", err)
}
jar.SetCookies(rpcURL, []*http.Cookie{{
Name: codersdk.SessionTokenCookie,
Value: c.SDK.SessionToken(),
}})
httpClient := &http.Client{
Jar: jar,
Transport: c.SDK.HTTPClient.Transport,
}
// nolint:bodyclose
conn, res, err := websocket.Dial(ctx, rpcURL.String(), &websocket.DialOptions{
HTTPClient: httpClient,
HTTPHeader: http.Header{
codersdk.SessionTokenHeader: []string{c.SDK.SessionToken()},
},
})
if err != nil {
if res == nil {
@@ -709,16 +702,7 @@ func (c *Client) WaitForReinit(ctx context.Context) (*ReinitializationEvent, err
return nil, xerrors.Errorf("parse url: %w", err)
}
jar, err := cookiejar.New(nil)
if err != nil {
return nil, xerrors.Errorf("create cookie jar: %w", err)
}
jar.SetCookies(rpcURL, []*http.Cookie{{
Name: codersdk.SessionTokenCookie,
Value: c.SDK.SessionToken(),
}})
httpClient := &http.Client{
Jar: jar,
Transport: c.SDK.HTTPClient.Transport,
}
@@ -726,6 +710,7 @@ func (c *Client) WaitForReinit(ctx context.Context) (*ReinitializationEvent, err
if err != nil {
return nil, xerrors.Errorf("build request: %w", err)
}
req.Header[codersdk.SessionTokenHeader] = []string{c.SDK.SessionToken()}
res, err := httpClient.Do(req)
if err != nil {
+2 -3
View File
@@ -335,9 +335,8 @@ type PauseTaskResponse struct {
}
// PauseTask pauses a task by stopping its workspace.
// Experimental: uses the /api/experimental endpoint.
func (c *Client) PauseTask(ctx context.Context, user string, id uuid.UUID) (PauseTaskResponse, error) {
res, err := c.Request(ctx, http.MethodPost, fmt.Sprintf("/api/experimental/tasks/%s/%s/pause", user, id.String()), nil)
res, err := c.Request(ctx, http.MethodPost, fmt.Sprintf("/api/v2/tasks/%s/%s/pause", user, id.String()), nil)
if err != nil {
return PauseTaskResponse{}, err
}
@@ -360,7 +359,7 @@ type ResumeTaskResponse struct {
}
func (c *Client) ResumeTask(ctx context.Context, user string, id uuid.UUID) (ResumeTaskResponse, error) {
res, err := c.Request(ctx, http.MethodPost, fmt.Sprintf("/api/experimental/tasks/%s/%s/resume", user, id.String()), nil)
res, err := c.Request(ctx, http.MethodPost, fmt.Sprintf("/api/v2/tasks/%s/%s/resume", user, id.String()), nil)
if err != nil {
return ResumeTaskResponse{}, err
}
+1
View File
@@ -242,6 +242,7 @@ var PublicAPIKeyScopes = []APIKeyScope{
APIKeyScopeTemplateRead,
APIKeyScopeTemplateUpdate,
APIKeyScopeTemplateUse,
APIKeyScopeUserRead,
APIKeyScopeUserReadPersonal,
APIKeyScopeUserUpdatePersonal,
APIKeyScopeUserSecretAll,
+6 -21
View File
@@ -6,7 +6,6 @@ import (
"fmt"
"io"
"net/http"
"net/http/cookiejar"
"slices"
"strings"
"time"
@@ -239,20 +238,14 @@ func (c *Client) provisionerJobLogsAfter(ctx context.Context, path string, after
if err != nil {
return nil, nil, err
}
jar, err := cookiejar.New(nil)
if err != nil {
return nil, nil, xerrors.Errorf("create cookie jar: %w", err)
}
jar.SetCookies(followURL, []*http.Cookie{{
Name: SessionTokenCookie,
Value: c.SessionToken(),
}})
httpClient := &http.Client{
Jar: jar,
Transport: c.HTTPClient.Transport,
}
conn, res, err := websocket.Dial(ctx, followURL.String(), &websocket.DialOptions{
HTTPClient: httpClient,
HTTPClient: httpClient,
HTTPHeader: http.Header{
SessionTokenHeader: []string{c.SessionToken()},
},
CompressionMode: websocket.CompressionDisabled,
})
if err != nil {
@@ -325,16 +318,8 @@ func (c *Client) ServeProvisionerDaemon(ctx context.Context, req ServeProvisione
headers.Set(ProvisionerDaemonPSK, req.PreSharedKey)
}
if req.ProvisionerKey == "" && req.PreSharedKey == "" {
// use session token if we don't have a PSK or provisioner key.
jar, err := cookiejar.New(nil)
if err != nil {
return nil, xerrors.Errorf("create cookie jar: %w", err)
}
jar.SetCookies(serverURL, []*http.Cookie{{
Name: SessionTokenCookie,
Value: c.SessionToken(),
}})
httpClient.Jar = jar
// Use session token if we don't have a PSK or provisioner key.
headers.Set(SessionTokenHeader, c.SessionToken())
}
conn, res, err := websocket.Dial(ctx, serverURL.String(), &websocket.DialOptions{
+7 -22
View File
@@ -6,7 +6,6 @@ import (
"fmt"
"io"
"net/http"
"net/http/cookiejar"
"strings"
"time"
@@ -580,24 +579,16 @@ func (c *Client) WatchWorkspaceAgentContainers(ctx context.Context, agentID uuid
return nil, nil, err
}
jar, err := cookiejar.New(nil)
if err != nil {
return nil, nil, xerrors.Errorf("create cookie jar: %w", err)
}
jar.SetCookies(reqURL, []*http.Cookie{{
Name: SessionTokenCookie,
Value: c.SessionToken(),
}})
conn, res, err := websocket.Dial(ctx, reqURL.String(), &websocket.DialOptions{
// We want `NoContextTakeover` compression to balance improving
// bandwidth cost/latency with minimal memory usage overhead.
CompressionMode: websocket.CompressionNoContextTakeover,
HTTPClient: &http.Client{
Jar: jar,
Transport: c.HTTPClient.Transport,
},
HTTPHeader: http.Header{
SessionTokenHeader: []string{c.SessionToken()},
},
})
if err != nil {
if res == nil {
@@ -687,20 +678,14 @@ func (c *Client) WorkspaceAgentLogsAfter(ctx context.Context, agentID uuid.UUID,
return ch, closeFunc(func() error { return nil }), nil
}
jar, err := cookiejar.New(nil)
if err != nil {
return nil, nil, xerrors.Errorf("create cookie jar: %w", err)
}
jar.SetCookies(reqURL, []*http.Cookie{{
Name: SessionTokenCookie,
Value: c.SessionToken(),
}})
httpClient := &http.Client{
Jar: jar,
Transport: c.HTTPClient.Transport,
}
conn, res, err := websocket.Dial(ctx, reqURL.String(), &websocket.DialOptions{
HTTPClient: httpClient,
HTTPClient: httpClient,
HTTPHeader: http.Header{
SessionTokenHeader: []string{c.SessionToken()},
},
CompressionMode: websocket.CompressionDisabled,
})
if err != nil {
+13 -17
View File
@@ -6,7 +6,6 @@ import (
"fmt"
"net"
"net/http"
"net/http/cookiejar"
"net/netip"
"os"
"strconv"
@@ -363,26 +362,23 @@ func (c *Client) AgentReconnectingPTY(ctx context.Context, opts WorkspaceAgentRe
}
serverURL.RawQuery = q.Encode()
// If we're not using a signed token, we need to set the session token as a
// cookie.
httpClient := c.client.HTTPClient
// Shallow-clone the HTTP client so we never inherit a caller-provided
// cookie jar. Non-browser websocket auth uses the Coder-Session-Token
// header or a signed-token query param — never cookies. A stale jar
// cookie would take precedence on the server (cookies are checked
// before headers) and cause spurious 401s.
wsHTTPClient := *c.client.HTTPClient
wsHTTPClient.Jar = nil
headers := http.Header{}
// If we're not using a signed token, set the session token header.
if opts.SignedToken == "" {
jar, err := cookiejar.New(nil)
if err != nil {
return nil, xerrors.Errorf("create cookie jar: %w", err)
}
jar.SetCookies(serverURL, []*http.Cookie{{
Name: codersdk.SessionTokenCookie,
Value: c.client.SessionToken(),
}})
httpClient = &http.Client{
Jar: jar,
Transport: c.client.HTTPClient.Transport,
}
headers.Set(codersdk.SessionTokenHeader, c.client.SessionToken())
}
//nolint:bodyclose
conn, res, err := websocket.Dial(ctx, serverURL.String(), &websocket.DialOptions{
HTTPClient: httpClient,
HTTPClient: &wsHTTPClient,
HTTPHeader: headers,
})
if err != nil {
if res == nil {
+28 -3
View File
@@ -122,6 +122,31 @@ deployment. They will always be available from the agent.
| `coder_aibridgeproxyd_inflight_mitm_requests` | gauge | Number of MITM requests currently being processed. | `provider` |
| `coder_aibridgeproxyd_mitm_requests_total` | counter | Total number of MITM requests handled by the proxy. | `provider` |
| `coder_aibridgeproxyd_mitm_responses_total` | counter | Total number of MITM responses by HTTP status code class. | `code` `provider` |
| `coder_derp_server_accepts_total` | counter | Total DERP connections accepted. | |
| `coder_derp_server_average_queue_duration_ms` | gauge | Average queue duration in milliseconds. | |
| `coder_derp_server_bytes_received_total` | counter | Total bytes received. | |
| `coder_derp_server_bytes_sent_total` | counter | Total bytes sent. | |
| `coder_derp_server_clients` | gauge | Total clients (local + remote). | |
| `coder_derp_server_clients_local` | gauge | Local clients. | |
| `coder_derp_server_clients_remote` | gauge | Remote (mesh) clients. | |
| `coder_derp_server_connections` | gauge | Current DERP connections. | |
| `coder_derp_server_got_ping_total` | counter | Total pings received. | |
| `coder_derp_server_home_connections` | gauge | Current home DERP connections. | |
| `coder_derp_server_home_moves_in_total` | counter | Total home moves in. | |
| `coder_derp_server_home_moves_out_total` | counter | Total home moves out. | |
| `coder_derp_server_packets_dropped_reason_total` | counter | Packets dropped by reason. | `reason` |
| `coder_derp_server_packets_dropped_total` | counter | Total packets dropped. | |
| `coder_derp_server_packets_dropped_type_total` | counter | Packets dropped by type. | `type` |
| `coder_derp_server_packets_forwarded_in_total` | counter | Total packets forwarded in from mesh peers. | |
| `coder_derp_server_packets_forwarded_out_total` | counter | Total packets forwarded out to mesh peers. | |
| `coder_derp_server_packets_received_kind_total` | counter | Packets received by kind. | `kind` |
| `coder_derp_server_packets_received_total` | counter | Total packets received. | |
| `coder_derp_server_packets_sent_total` | counter | Total packets sent. | |
| `coder_derp_server_peer_gone_disconnected_total` | counter | Total peer gone (disconnected) frames sent. | |
| `coder_derp_server_peer_gone_not_here_total` | counter | Total peer gone (not here) frames sent. | |
| `coder_derp_server_sent_pong_total` | counter | Total pongs sent. | |
| `coder_derp_server_unknown_frames_total` | counter | Total unknown frames received. | |
| `coder_derp_server_watchers` | gauge | Current watchers. | |
| `coder_pubsub_connected` | gauge | Whether we are connected (1) or not connected (0) to postgres | |
| `coder_pubsub_current_events` | gauge | The current number of pubsub event channels listened for | |
| `coder_pubsub_current_subscribers` | gauge | The current number of active pubsub subscribers | |
@@ -175,9 +200,9 @@ deployment. They will always be available from the agent.
| `coderd_dbpurge_iteration_duration_seconds` | histogram | Duration of each dbpurge iteration in seconds. | `success` |
| `coderd_dbpurge_records_purged_total` | counter | Total number of records purged by type. | `record_type` |
| `coderd_experiments` | gauge | Indicates whether each experiment is enabled (1) or not (0) | `experiment` |
| `coderd_insights_applications_usage_seconds` | gauge | The application usage per template. | `application_name` `slug` `template_name` |
| `coderd_insights_parameters` | gauge | The parameter usage per template. | `parameter_name` `parameter_type` `parameter_value` `template_name` |
| `coderd_insights_templates_active_users` | gauge | The number of active users of the template. | `template_name` |
| `coderd_insights_applications_usage_seconds` | gauge | The application usage per template. | `application_name` `organization_name` `slug` `template_name` |
| `coderd_insights_parameters` | gauge | The parameter usage per template. | `organization_name` `parameter_name` `parameter_type` `parameter_value` `template_name` |
| `coderd_insights_templates_active_users` | gauge | The number of active users of the template. | `organization_name` `template_name` |
| `coderd_license_active_users` | gauge | The number of active users. | |
| `coderd_license_errors` | gauge | The number of active license errors. | |
| `coderd_license_limit_users` | gauge | The user seats limit based on the active Coder license. | |
+424 -2
View File
@@ -372,7 +372,7 @@ To perform this operation, you must be authenticated. [Learn more](authenticatio
```shell
# Example request using curl
curl -X POST http://coder-server:8080/api/v2/tasks/{user}/{task}/pause \
-H 'Accept: */*' \
-H 'Accept: application/json' \
-H 'Coder-Session-Token: API_KEY'
```
@@ -389,6 +389,217 @@ curl -X POST http://coder-server:8080/api/v2/tasks/{user}/{task}/pause \
> 202 Response
```json
{
"workspace_build": {
"build_number": 0,
"created_at": "2019-08-24T14:15:22Z",
"daily_cost": 0,
"deadline": "2019-08-24T14:15:22Z",
"has_ai_task": true,
"has_external_agent": true,
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
"initiator_id": "06588898-9a84-4b35-ba8f-f9cbd64946f3",
"initiator_name": "string",
"job": {
"available_workers": [
"497f6eca-6276-4993-bfeb-53cbbbba6f08"
],
"canceled_at": "2019-08-24T14:15:22Z",
"completed_at": "2019-08-24T14:15:22Z",
"created_at": "2019-08-24T14:15:22Z",
"error": "string",
"error_code": "REQUIRED_TEMPLATE_VARIABLES",
"file_id": "8a0cfb4f-ddc9-436d-91bb-75133c583767",
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
"initiator_id": "06588898-9a84-4b35-ba8f-f9cbd64946f3",
"input": {
"error": "string",
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
"workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478"
},
"logs_overflowed": true,
"metadata": {
"template_display_name": "string",
"template_icon": "string",
"template_id": "c6d67e98-83ea-49f0-8812-e4abae2b68bc",
"template_name": "string",
"template_version_name": "string",
"workspace_id": "0967198e-ec7b-4c6b-b4d3-f71244cadbe9",
"workspace_name": "string"
},
"organization_id": "7c60d51f-b44e-4682-87d6-449835ea4de6",
"queue_position": 0,
"queue_size": 0,
"started_at": "2019-08-24T14:15:22Z",
"status": "pending",
"tags": {
"property1": "string",
"property2": "string"
},
"type": "template_version_import",
"worker_id": "ae5fa6f7-c55b-40c1-b40a-b36ac467652b",
"worker_name": "string"
},
"matched_provisioners": {
"available": 0,
"count": 0,
"most_recently_seen": "2019-08-24T14:15:22Z"
},
"max_deadline": "2019-08-24T14:15:22Z",
"reason": "initiator",
"resources": [
{
"agents": [
{
"api_version": "string",
"apps": [
{
"command": "string",
"display_name": "string",
"external": true,
"group": "string",
"health": "disabled",
"healthcheck": {
"interval": 0,
"threshold": 0,
"url": "string"
},
"hidden": true,
"icon": "string",
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
"open_in": "slim-window",
"sharing_level": "owner",
"slug": "string",
"statuses": [
{
"agent_id": "2b1e3b65-2c04-4fa2-a2d7-467901e98978",
"app_id": "affd1d10-9538-4fc8-9e0b-4594a28c1335",
"created_at": "2019-08-24T14:15:22Z",
"icon": "string",
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
"message": "string",
"needs_user_attention": true,
"state": "working",
"uri": "string",
"workspace_id": "0967198e-ec7b-4c6b-b4d3-f71244cadbe9"
}
],
"subdomain": true,
"subdomain_name": "string",
"tooltip": "string",
"url": "string"
}
],
"architecture": "string",
"connection_timeout_seconds": 0,
"created_at": "2019-08-24T14:15:22Z",
"directory": "string",
"disconnected_at": "2019-08-24T14:15:22Z",
"display_apps": [
"vscode"
],
"environment_variables": {
"property1": "string",
"property2": "string"
},
"expanded_directory": "string",
"first_connected_at": "2019-08-24T14:15:22Z",
"health": {
"healthy": false,
"reason": "agent has lost connection"
},
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
"instance_id": "string",
"last_connected_at": "2019-08-24T14:15:22Z",
"latency": {
"property1": {
"latency_ms": 0,
"preferred": true
},
"property2": {
"latency_ms": 0,
"preferred": true
}
},
"lifecycle_state": "created",
"log_sources": [
{
"created_at": "2019-08-24T14:15:22Z",
"display_name": "string",
"icon": "string",
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
"workspace_agent_id": "7ad2e618-fea7-4c1a-b70a-f501566a72f1"
}
],
"logs_length": 0,
"logs_overflowed": true,
"name": "string",
"operating_system": "string",
"parent_id": {
"uuid": "string",
"valid": true
},
"ready_at": "2019-08-24T14:15:22Z",
"resource_id": "4d5215ed-38bb-48ed-879a-fdb9ca58522f",
"scripts": [
{
"cron": "string",
"display_name": "string",
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
"log_path": "string",
"log_source_id": "4197ab25-95cf-4b91-9c78-f7f2af5d353a",
"run_on_start": true,
"run_on_stop": true,
"script": "string",
"start_blocks_login": true,
"timeout": 0
}
],
"started_at": "2019-08-24T14:15:22Z",
"startup_script_behavior": "blocking",
"status": "connecting",
"subsystems": [
"envbox"
],
"troubleshooting_url": "string",
"updated_at": "2019-08-24T14:15:22Z",
"version": "string"
}
],
"created_at": "2019-08-24T14:15:22Z",
"daily_cost": 0,
"hide": true,
"icon": "string",
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
"job_id": "453bd7d7-5355-4d6d-a38e-d9e7eb218c3f",
"metadata": [
{
"key": "string",
"sensitive": true,
"value": "string"
}
],
"name": "string",
"type": "string",
"workspace_transition": "start"
}
],
"status": "pending",
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
"template_version_name": "string",
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
"transition": "start",
"updated_at": "2019-08-24T14:15:22Z",
"workspace_id": "0967198e-ec7b-4c6b-b4d3-f71244cadbe9",
"workspace_name": "string",
"workspace_owner_avatar_url": "string",
"workspace_owner_id": "e7078695-5279-4c86-8774-3ac2367a2fc7",
"workspace_owner_name": "string"
}
}
```
### Responses
| Status | Meaning | Description | Schema |
@@ -404,7 +615,7 @@ To perform this operation, you must be authenticated. [Learn more](authenticatio
```shell
# Example request using curl
curl -X POST http://coder-server:8080/api/v2/tasks/{user}/{task}/resume \
-H 'Accept: */*' \
-H 'Accept: application/json' \
-H 'Coder-Session-Token: API_KEY'
```
@@ -421,6 +632,217 @@ curl -X POST http://coder-server:8080/api/v2/tasks/{user}/{task}/resume \
> 202 Response
```json
{
"workspace_build": {
"build_number": 0,
"created_at": "2019-08-24T14:15:22Z",
"daily_cost": 0,
"deadline": "2019-08-24T14:15:22Z",
"has_ai_task": true,
"has_external_agent": true,
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
"initiator_id": "06588898-9a84-4b35-ba8f-f9cbd64946f3",
"initiator_name": "string",
"job": {
"available_workers": [
"497f6eca-6276-4993-bfeb-53cbbbba6f08"
],
"canceled_at": "2019-08-24T14:15:22Z",
"completed_at": "2019-08-24T14:15:22Z",
"created_at": "2019-08-24T14:15:22Z",
"error": "string",
"error_code": "REQUIRED_TEMPLATE_VARIABLES",
"file_id": "8a0cfb4f-ddc9-436d-91bb-75133c583767",
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
"initiator_id": "06588898-9a84-4b35-ba8f-f9cbd64946f3",
"input": {
"error": "string",
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
"workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478"
},
"logs_overflowed": true,
"metadata": {
"template_display_name": "string",
"template_icon": "string",
"template_id": "c6d67e98-83ea-49f0-8812-e4abae2b68bc",
"template_name": "string",
"template_version_name": "string",
"workspace_id": "0967198e-ec7b-4c6b-b4d3-f71244cadbe9",
"workspace_name": "string"
},
"organization_id": "7c60d51f-b44e-4682-87d6-449835ea4de6",
"queue_position": 0,
"queue_size": 0,
"started_at": "2019-08-24T14:15:22Z",
"status": "pending",
"tags": {
"property1": "string",
"property2": "string"
},
"type": "template_version_import",
"worker_id": "ae5fa6f7-c55b-40c1-b40a-b36ac467652b",
"worker_name": "string"
},
"matched_provisioners": {
"available": 0,
"count": 0,
"most_recently_seen": "2019-08-24T14:15:22Z"
},
"max_deadline": "2019-08-24T14:15:22Z",
"reason": "initiator",
"resources": [
{
"agents": [
{
"api_version": "string",
"apps": [
{
"command": "string",
"display_name": "string",
"external": true,
"group": "string",
"health": "disabled",
"healthcheck": {
"interval": 0,
"threshold": 0,
"url": "string"
},
"hidden": true,
"icon": "string",
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
"open_in": "slim-window",
"sharing_level": "owner",
"slug": "string",
"statuses": [
{
"agent_id": "2b1e3b65-2c04-4fa2-a2d7-467901e98978",
"app_id": "affd1d10-9538-4fc8-9e0b-4594a28c1335",
"created_at": "2019-08-24T14:15:22Z",
"icon": "string",
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
"message": "string",
"needs_user_attention": true,
"state": "working",
"uri": "string",
"workspace_id": "0967198e-ec7b-4c6b-b4d3-f71244cadbe9"
}
],
"subdomain": true,
"subdomain_name": "string",
"tooltip": "string",
"url": "string"
}
],
"architecture": "string",
"connection_timeout_seconds": 0,
"created_at": "2019-08-24T14:15:22Z",
"directory": "string",
"disconnected_at": "2019-08-24T14:15:22Z",
"display_apps": [
"vscode"
],
"environment_variables": {
"property1": "string",
"property2": "string"
},
"expanded_directory": "string",
"first_connected_at": "2019-08-24T14:15:22Z",
"health": {
"healthy": false,
"reason": "agent has lost connection"
},
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
"instance_id": "string",
"last_connected_at": "2019-08-24T14:15:22Z",
"latency": {
"property1": {
"latency_ms": 0,
"preferred": true
},
"property2": {
"latency_ms": 0,
"preferred": true
}
},
"lifecycle_state": "created",
"log_sources": [
{
"created_at": "2019-08-24T14:15:22Z",
"display_name": "string",
"icon": "string",
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
"workspace_agent_id": "7ad2e618-fea7-4c1a-b70a-f501566a72f1"
}
],
"logs_length": 0,
"logs_overflowed": true,
"name": "string",
"operating_system": "string",
"parent_id": {
"uuid": "string",
"valid": true
},
"ready_at": "2019-08-24T14:15:22Z",
"resource_id": "4d5215ed-38bb-48ed-879a-fdb9ca58522f",
"scripts": [
{
"cron": "string",
"display_name": "string",
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
"log_path": "string",
"log_source_id": "4197ab25-95cf-4b91-9c78-f7f2af5d353a",
"run_on_start": true,
"run_on_stop": true,
"script": "string",
"start_blocks_login": true,
"timeout": 0
}
],
"started_at": "2019-08-24T14:15:22Z",
"startup_script_behavior": "blocking",
"status": "connecting",
"subsystems": [
"envbox"
],
"troubleshooting_url": "string",
"updated_at": "2019-08-24T14:15:22Z",
"version": "string"
}
],
"created_at": "2019-08-24T14:15:22Z",
"daily_cost": 0,
"hide": true,
"icon": "string",
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
"job_id": "453bd7d7-5355-4d6d-a38e-d9e7eb218c3f",
"metadata": [
{
"key": "string",
"sensitive": true,
"value": "string"
}
],
"name": "string",
"type": "string",
"workspace_transition": "start"
}
],
"status": "pending",
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
"template_version_name": "string",
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
"transition": "start",
"updated_at": "2019-08-24T14:15:22Z",
"workspace_id": "0967198e-ec7b-4c6b-b4d3-f71244cadbe9",
"workspace_name": "string",
"workspace_owner_avatar_url": "string",
"workspace_owner_id": "e7078695-5279-4c86-8774-3ac2367a2fc7",
"workspace_owner_name": "string"
}
}
```
### Responses
| Status | Meaning | Description | Schema |
+36 -32
View File
@@ -39,40 +39,44 @@ func (r *RootCmd) Server(_ func()) *serpent.Command {
}
}
// Always generate a mesh key, even if the built-in DERP server is
// disabled. This mesh key is still used by workspace proxies running
// HA.
var meshKey string
err := options.Database.InTx(func(tx database.Store) error {
// This will block until the lock is acquired, and will be
// automatically released when the transaction ends.
err := tx.AcquireLock(ctx, database.LockIDEnterpriseDeploymentSetup)
if err != nil {
return xerrors.Errorf("acquire lock: %w", err)
}
meshKey, err = tx.GetDERPMeshKey(ctx)
if err == nil {
return nil
}
if !errors.Is(err, sql.ErrNoRows) {
return xerrors.Errorf("get DERP mesh key: %w", err)
}
meshKey, err = cryptorand.String(32)
if err != nil {
return xerrors.Errorf("generate DERP mesh key: %w", err)
}
err = tx.InsertDERPMeshKey(ctx, meshKey)
if err != nil {
return xerrors.Errorf("insert DERP mesh key: %w", err)
}
return nil
}, nil)
if err != nil {
return nil, nil, err
}
if meshKey == "" {
return nil, nil, xerrors.New("mesh key is empty")
}
if options.DeploymentValues.DERP.Server.Enable {
options.DERPServer = derp.NewServer(key.NewNode(), tailnet.Logger(options.Logger.Named("derp")))
var meshKey string
err := options.Database.InTx(func(tx database.Store) error {
// This will block until the lock is acquired, and will be
// automatically released when the transaction ends.
err := tx.AcquireLock(ctx, database.LockIDEnterpriseDeploymentSetup)
if err != nil {
return xerrors.Errorf("acquire lock: %w", err)
}
meshKey, err = tx.GetDERPMeshKey(ctx)
if err == nil {
return nil
}
if !errors.Is(err, sql.ErrNoRows) {
return xerrors.Errorf("get DERP mesh key: %w", err)
}
meshKey, err = cryptorand.String(32)
if err != nil {
return xerrors.Errorf("generate DERP mesh key: %w", err)
}
err = tx.InsertDERPMeshKey(ctx, meshKey)
if err != nil {
return xerrors.Errorf("insert DERP mesh key: %w", err)
}
return nil
}, nil)
if err != nil {
return nil, nil, err
}
if meshKey == "" {
return nil, nil, xerrors.New("mesh key is empty")
}
options.DERPServer.SetMeshKey(meshKey)
}
+20 -1
View File
@@ -604,6 +604,25 @@ func (api *API) workspaceProxyRegister(rw http.ResponseWriter, r *http.Request)
return
}
// Load the mesh key directly from the database. We don't retrieve the mesh
// key from the built-in DERP server because it may not be enabled.
//
// The mesh key is always generated at startup by an enterprise coderd
// server.
var meshKey string
if req.DerpEnabled {
var err error
meshKey, err = api.Database.GetDERPMeshKey(ctx)
if err != nil {
httpapi.InternalServerError(rw, xerrors.Errorf("get DERP mesh key: %w", err))
return
}
if meshKey == "" {
httpapi.InternalServerError(rw, xerrors.New("mesh key is empty"))
return
}
}
startingRegionID, _ := getProxyDERPStartingRegionID(api.Options.BaseDERPMap)
// #nosec G115 - Safe conversion as DERP region IDs are small integers expected to be within int32 range
regionID := int32(startingRegionID) + proxy.RegionID
@@ -710,7 +729,7 @@ func (api *API) workspaceProxyRegister(rw http.ResponseWriter, r *http.Request)
}
httpapi.Write(ctx, rw, http.StatusCreated, wsproxysdk.RegisterWorkspaceProxyResponse{
DERPMeshKey: api.DERPServer.MeshKey(),
DERPMeshKey: meshKey,
DERPRegionID: regionID,
DERPMap: api.AGPL.DERPMap(),
DERPForceWebSockets: api.DeploymentValues.DERP.Config.ForceWebSockets.Value(),
+106 -2
View File
@@ -2,12 +2,15 @@ package coderd_test
import (
"database/sql"
"encoding/json"
"fmt"
"net"
"net/http"
"net/http/httptest"
"net/http/httputil"
"net/url"
"os"
"path/filepath"
"runtime"
"testing"
"time"
@@ -16,6 +19,7 @@ import (
"github.com/sqlc-dev/pqtype"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/tailcfg"
"cdr.dev/slog/v3/sloggers/slogtest"
"github.com/coder/coder/v2/agent/agenttest"
@@ -34,6 +38,7 @@ import (
"github.com/coder/coder/v2/enterprise/wsproxy/wsproxysdk"
"github.com/coder/coder/v2/provisioner/echo"
"github.com/coder/coder/v2/testutil"
"github.com/coder/serpent"
)
func TestRegions(t *testing.T) {
@@ -278,10 +283,11 @@ func TestWorkspaceProxyCRUD(t *testing.T) {
func TestProxyRegisterDeregister(t *testing.T) {
t.Parallel()
setup := func(t *testing.T) (*codersdk.Client, database.Store) {
setupWithDeploymentValues := func(t *testing.T, dv *codersdk.DeploymentValues) (*codersdk.Client, database.Store) {
db, pubsub := dbtestutil.NewDB(t)
client, _ := coderdenttest.New(t, &coderdenttest.Options{
Options: &coderdtest.Options{
DeploymentValues: dv,
Database: db,
Pubsub: pubsub,
IncludeProvisionerDaemon: true,
@@ -297,6 +303,11 @@ func TestProxyRegisterDeregister(t *testing.T) {
return client, db
}
setup := func(t *testing.T) (*codersdk.Client, database.Store) {
dv := coderdtest.DeploymentValues(t)
return setupWithDeploymentValues(t, dv)
}
t.Run("OK", func(t *testing.T) {
t.Parallel()
@@ -363,7 +374,7 @@ func TestProxyRegisterDeregister(t *testing.T) {
req = wsproxysdk.RegisterWorkspaceProxyRequest{
AccessURL: "https://cool.proxy.coder.test",
WildcardHostname: "*.cool.proxy.coder.test",
DerpEnabled: false,
DerpEnabled: true,
ReplicaID: req.ReplicaID,
ReplicaHostname: "venus",
ReplicaError: "error",
@@ -608,6 +619,99 @@ func TestProxyRegisterDeregister(t *testing.T) {
require.True(t, ok, "expected to register replica %d", i)
}
})
t.Run("RegisterWithDisabledBuiltInDERP/DerpEnabled", func(t *testing.T) {
	t.Parallel()
	// A proxy with DERP enabled must still receive the deployment's DERP
	// mesh key even when the primary's built-in DERP server is disabled,
	// because the key is generated at startup and stored in the database
	// regardless of whether the built-in server runs.
	//
	// Create a DERP map file. Currently, Coder refuses to start if there
	// are zero DERP regions.
	// TODO: ideally coder can start without any DERP servers if the
	// customer is going to be using DERPs via proxies. We could make it
	// a configuration value to allow an empty DERP map on startup or
	// something.
	tmpDir := t.TempDir()
	derpPath := filepath.Join(tmpDir, "derp.json")
	content, err := json.Marshal(&tailcfg.DERPMap{
		Regions: map[int]*tailcfg.DERPRegion{
			1: {
				Nodes: []*tailcfg.DERPNode{{}},
			},
		},
	})
	require.NoError(t, err)
	require.NoError(t, os.WriteFile(derpPath, content, 0o600))

	dv := coderdtest.DeploymentValues(t)
	dv.DERP.Server.Enable = false // disable built-in DERP server
	dv.DERP.Config.Path = serpent.String(derpPath)
	client, _ := setupWithDeploymentValues(t, dv)

	ctx := testutil.Context(t, testutil.WaitLong)
	createRes, err := client.CreateWorkspaceProxy(ctx, codersdk.CreateWorkspaceProxyRequest{
		Name: "proxy",
	})
	require.NoError(t, err)

	proxyClient := wsproxysdk.New(client.URL, createRes.ProxyToken)
	registerRes, err := proxyClient.RegisterWorkspaceProxy(ctx, wsproxysdk.RegisterWorkspaceProxyRequest{
		AccessURL:           "https://proxy.coder.test",
		WildcardHostname:    "*.proxy.coder.test",
		DerpEnabled:         true,
		ReplicaID:           uuid.New(),
		ReplicaHostname:     "venus",
		ReplicaError:        "",
		ReplicaRelayAddress: "http://127.0.0.1:8080",
		Version:             buildinfo.Version(),
	})
	require.NoError(t, err)
	// Should still be able to retrieve the DERP mesh key from the database,
	// even though the built-in DERP server is disabled.
	// NOTE: testify's require.Equal takes (t, expected, actual); the
	// original had the arguments reversed, which produces misleading
	// "expected/actual" labels in failure output.
	require.Equal(t, coderdtest.DefaultDERPMeshKey, registerRes.DERPMeshKey)
})
// Renamed from ".../DerpEnabled": the original duplicated the previous
// subtest's name even though this case registers with DerpEnabled: false.
// Duplicate t.Run names are silently disambiguated by Go as "...#01",
// which hides the distinction in test output and -run filters.
t.Run("RegisterWithDisabledBuiltInDERP/DerpDisabled", func(t *testing.T) {
	t.Parallel()
	// Same setup as the DerpEnabled case: provide a static DERP map file
	// (Coder refuses to start with zero DERP regions) and disable the
	// built-in DERP server.
	tmpDir := t.TempDir()
	derpPath := filepath.Join(tmpDir, "derp.json")
	content, err := json.Marshal(&tailcfg.DERPMap{
		Regions: map[int]*tailcfg.DERPRegion{
			1: {
				Nodes: []*tailcfg.DERPNode{{}},
			},
		},
	})
	require.NoError(t, err)
	require.NoError(t, os.WriteFile(derpPath, content, 0o600))

	dv := coderdtest.DeploymentValues(t)
	dv.DERP.Server.Enable = false // disable built-in DERP server
	dv.DERP.Config.Path = serpent.String(derpPath)
	client, _ := setupWithDeploymentValues(t, dv)

	ctx := testutil.Context(t, testutil.WaitLong)
	createRes, err := client.CreateWorkspaceProxy(ctx, codersdk.CreateWorkspaceProxyRequest{
		Name: "proxy",
	})
	require.NoError(t, err)

	proxyClient := wsproxysdk.New(client.URL, createRes.ProxyToken)
	registerRes, err := proxyClient.RegisterWorkspaceProxy(ctx, wsproxysdk.RegisterWorkspaceProxyRequest{
		AccessURL:           "https://proxy.coder.test",
		WildcardHostname:    "*.proxy.coder.test",
		DerpEnabled:         false,
		ReplicaID:           uuid.New(),
		ReplicaHostname:     "venus",
		ReplicaError:        "",
		ReplicaRelayAddress: "http://127.0.0.1:8080",
		Version:             buildinfo.Version(),
	})
	require.NoError(t, err)
	// The server shouldn't bother querying or returning the DERP mesh key
	// if the proxy's DERP server is disabled.
	require.Empty(t, registerRes.DERPMeshKey)
})
}
func TestIssueSignedAppToken(t *testing.T) {
+18
View File
@@ -4,6 +4,7 @@ import (
"context"
"crypto/tls"
"errors"
"expvar"
"fmt"
"net/http"
"net/url"
@@ -42,8 +43,14 @@ import (
sharedhttpmw "github.com/coder/coder/v2/httpmw"
"github.com/coder/coder/v2/site"
"github.com/coder/coder/v2/tailnet"
"github.com/coder/coder/v2/tailnet/derpmetrics"
)
// expDERPOnce guards the global expvar.Publish call for the DERP server.
// expvar panics on duplicate registration, and tests may create multiple
// servers in the same process.
var expDERPOnce sync.Once
type Options struct {
Logger slog.Logger
Experiments codersdk.Experiments
@@ -196,6 +203,17 @@ func New(ctx context.Context, opts *Options) (*Server, error) {
return nil, xerrors.Errorf("create DERP mesh tls config: %w", err)
}
derpServer := derp.NewServer(key.NewNode(), tailnet.Logger(opts.Logger.Named("net.derp")))
// Publish DERP stats to expvar, available via the pprof
// debug server (--pprof-enable) at /debug/vars. This avoids
// exposing expvar on the public HTTP router.
expDERPOnce.Do(func() {
if expvar.Get("derp") == nil {
expvar.Publish("derp", derpServer.ExpVar())
}
})
if opts.PrometheusRegistry != nil {
opts.PrometheusRegistry.MustRegister(derpmetrics.NewDERPExpvarCollector(derpServer))
}
ctx, cancel := context.WithCancel(context.Background())
+52
View File
@@ -1223,3 +1223,55 @@ func createProxyReplicas(ctx context.Context, t *testing.T, opts *createProxyRep
return proxies
}
// TestWorkspaceProxyDERPMetrics verifies that a workspace proxy replica
// registers its embedded DERP server's metrics with the proxy's Prometheus
// registry under the coder_derp_server_* names.
func TestWorkspaceProxyDERPMetrics(t *testing.T) {
	t.Parallel()

	// Stand up an enterprise coderd licensed for workspace proxies, with a
	// trusted real-IP configuration, then attach a single proxy replica.
	dv := coderdtest.DeploymentValues(t)
	dv.Experiments = []string{"*"}
	client, closer, api, _ := coderdenttest.NewWithAPI(t, &coderdenttest.Options{
		Options: &coderdtest.Options{
			DeploymentValues:         dv,
			AppHostname:              "*.primary.test.coder.com",
			IncludeProvisionerDaemon: true,
			RealIPConfig: &httpmw.RealIPConfig{
				TrustedOrigins: []*net.IPNet{{
					IP:   net.ParseIP("127.0.0.1"),
					Mask: net.CIDRMask(8, 32),
				}},
				TrustedHeaders: []string{
					"CF-Connecting-IP",
				},
			},
		},
		LicenseOptions: &coderdenttest.LicenseOptions{
			Features: license.Features{
				codersdk.FeatureWorkspaceProxy: 1,
			},
		},
	})
	t.Cleanup(func() {
		_ = closer.Close()
	})

	proxy := coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{
		Name: "metrics-test-proxy",
	})

	// Gather metrics from the wsproxy's Prometheus registry.
	families, err := proxy.PrometheusRegistry.Gather()
	require.NoError(t, err)
	seen := make(map[string]struct{}, len(families))
	for _, fam := range families {
		seen[fam.GetName()] = struct{}{}
	}

	// A representative sample of the DERP server metric families.
	for _, want := range []string{
		"coder_derp_server_connections",
		"coder_derp_server_bytes_received_total",
		"coder_derp_server_packets_dropped_reason_total",
	} {
		assert.Contains(t, seen, want, "expected "+want+" to be registered")
	}
}
+16 -11
View File
@@ -36,7 +36,7 @@ replace github.com/tcnksm/go-httpstat => github.com/coder/go-httpstat v0.0.0-202
// There are a few minor changes we make to Tailscale that we're slowly upstreaming. Compare here:
// https://github.com/tailscale/tailscale/compare/main...coder:tailscale:main
replace tailscale.com => github.com/coder/tailscale v1.1.1-0.20250829055706-6eafe0f9199e
replace tailscale.com => github.com/coder/tailscale v1.1.1-0.20260306035934-af5c6fc52433
// This is replaced to include
// 1. a fix for a data race: c.f. https://github.com/tailscale/wireguard-go/pull/25
@@ -107,7 +107,7 @@ require (
github.com/coder/wgtunnel v0.2.0
github.com/coreos/go-oidc/v3 v3.17.0
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
github.com/creack/pty v1.1.21
github.com/creack/pty v1.1.24
github.com/dave/dst v0.27.2
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/dblohm7/wingoes v0.0.0-20240820181039-f2b84150679e
@@ -277,13 +277,12 @@ require (
github.com/chromedp/sysutil v1.1.0 // indirect
github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
github.com/clbanning/mxj/v2 v2.7.0 // indirect
github.com/cloudflare/circl v1.6.1 // indirect
github.com/cloudflare/circl v1.6.3 // indirect
github.com/containerd/continuity v0.4.5 // indirect
github.com/coreos/go-iptables v0.6.0 // indirect
github.com/dlclark/regexp2 v1.11.5 // indirect
github.com/docker/cli v28.3.2+incompatible // indirect
github.com/docker/docker v28.3.3+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/cli v29.2.0+incompatible // indirect
github.com/docker/go-connections v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dop251/goja v0.0.0-20241024094426-79f3a7efcdbd // indirect
github.com/dustin/go-humanize v1.0.1
@@ -324,7 +323,7 @@ require (
github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect
github.com/googleapis/gax-go/v2 v2.17.0 // indirect
github.com/gorilla/css v1.0.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-cty v1.5.0 // indirect
@@ -439,7 +438,7 @@ require (
go.opentelemetry.io/contrib v1.19.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0
go.opentelemetry.io/otel/metric v1.39.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
go.opentelemetry.io/proto/otlp v1.9.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect
@@ -473,7 +472,7 @@ require (
github.com/anthropics/anthropic-sdk-go v1.19.0
github.com/brianvoe/gofakeit/v7 v7.14.0
github.com/coder/agentapi-sdk-go v0.0.0-20250505131810-560d1d88d225
github.com/coder/aibridge v1.0.6
github.com/coder/aibridge v1.0.10
github.com/coder/aisdk-go v0.0.9
github.com/coder/boundary v0.8.3
github.com/coder/preview v1.0.4
@@ -523,10 +522,13 @@ require (
github.com/clipperhouse/uax29/v2 v2.5.0 // indirect
github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect
github.com/coder/paralleltestctx v0.0.1 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
github.com/daixiang0/gci v0.13.7 // indirect
github.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/envoyproxy/go-control-plane/envoy v1.37.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.3.3 // indirect
github.com/esiqveland/notify v0.13.3 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.6.2 // indirect
@@ -544,6 +546,8 @@ require (
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
github.com/landlock-lsm/go-landlock v0.0.0-20251103212306-430f8e5cd97c // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
github.com/moby/moby/api v1.54.0 // indirect
github.com/moby/moby/client v0.3.0 // indirect
github.com/moby/sys/user v0.4.0 // indirect
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 // indirect
github.com/openai/openai-go v1.12.0 // indirect
@@ -573,6 +577,7 @@ require (
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.39.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
go.yaml.in/yaml/v4 v4.0.0-rc.3 // indirect
golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4 // indirect
google.golang.org/genai v1.12.0 // indirect
+26 -20
View File
@@ -909,8 +909,8 @@ github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfa
github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA=
github.com/clipperhouse/uax29/v2 v2.5.0 h1:x7T0T4eTHDONxFJsL94uKNKPHrclyFI0lm7+w94cO8U=
github.com/clipperhouse/uax29/v2 v2.5.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8=
github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -928,8 +928,8 @@ github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/T
github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI=
github.com/coder/agentapi-sdk-go v0.0.0-20250505131810-560d1d88d225 h1:tRIViZ5JRmzdOEo5wUWngaGEFBG8OaE1o2GIHN5ujJ8=
github.com/coder/agentapi-sdk-go v0.0.0-20250505131810-560d1d88d225/go.mod h1:rNLVpYgEVeu1Zk29K64z6Od8RBP9DwqCu9OfCzh8MR4=
github.com/coder/aibridge v1.0.6 h1:RVcJCutgWAd8MOxNI5MNVBl+ttqShVsmMQvUAkfuU9Q=
github.com/coder/aibridge v1.0.6/go.mod h1:c7Of2xfAksZUrPWN180Eh60fiKgzs7dyOjniTjft6AE=
github.com/coder/aibridge v1.0.10 h1:pKMzRIDmIoFdXHrJXI2AnDdJhPqkSj0ql11jvgsy7zs=
github.com/coder/aibridge v1.0.10/go.mod h1:c7Of2xfAksZUrPWN180Eh60fiKgzs7dyOjniTjft6AE=
github.com/coder/aisdk-go v0.0.9 h1:Vzo/k2qwVGLTR10ESDeP2Ecek1SdPfZlEjtTfMveiVo=
github.com/coder/aisdk-go v0.0.9/go.mod h1:KF6/Vkono0FJJOtWtveh5j7yfNrSctVTpwgweYWSp5M=
github.com/coder/boundary v0.8.3 h1:QOb5WYKieRH/gwyUgofC9FDHSSJHpdw1jTrB5zsHovA=
@@ -963,8 +963,8 @@ github.com/coder/serpent v0.14.0 h1:g7vt2zBMp3nWyAvyhvQduaI53Ku65U3wITMi01+/8pU=
github.com/coder/serpent v0.14.0/go.mod h1:7OIvFBYMd+OqarMy5einBl8AtRr8LliopVU7pyrwucY=
github.com/coder/ssh v0.0.0-20231128192721-70855dedb788 h1:YoUSJ19E8AtuUFVYBpXuOD6a/zVP3rcxezNsoDseTUw=
github.com/coder/ssh v0.0.0-20231128192721-70855dedb788/go.mod h1:aGQbuCLyhRLMzZF067xc84Lh7JDs1FKwCmF1Crl9dxQ=
github.com/coder/tailscale v1.1.1-0.20250829055706-6eafe0f9199e h1:9RKGKzGLHtTvVBQublzDGtCtal3cXP13diCHoAIGPeI=
github.com/coder/tailscale v1.1.1-0.20250829055706-6eafe0f9199e/go.mod h1:jU9T1vEs+DOs8NtGp1F2PT0/TOGVwtg/JCCKYRgvMOs=
github.com/coder/tailscale v1.1.1-0.20260306035934-af5c6fc52433 h1:NxqWSEZFuCeIR/N7lZ9cx+434urbNvrrA7ZyNPTwnmc=
github.com/coder/tailscale v1.1.1-0.20260306035934-af5c6fc52433/go.mod h1:q+R4UL4pPb0CpaSNVUTDsg0kZeL/OlqjRNO9XbJxU5g=
github.com/coder/terraform-config-inspect v0.0.0-20250107175719-6d06d90c630e h1:JNLPDi2P73laR1oAclY6jWzAbucf70ASAvf5mh2cME0=
github.com/coder/terraform-config-inspect v0.0.0-20250107175719-6d06d90c630e/go.mod h1:Gz/z9Hbn+4KSp8A2FBtNszfLSdT2Tn/uAKGuVqqWmDI=
github.com/coder/terraform-provider-coder/v2 v2.13.1 h1:dtPaJUvueFm+XwBPUMWQCc5Z1QUQBW4B4RNyzX4h4y8=
@@ -999,8 +999,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6N
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0=
github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
github.com/cyphar/filepath-securejoin v0.5.1 h1:eYgfMq5yryL4fbWfkLpFFy2ukSELzaJOTaUTuh+oF48=
github.com/cyphar/filepath-securejoin v0.5.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
github.com/daixiang0/gci v0.13.7 h1:+0bG5eK9vlI08J+J/NWGbWPTNiXPG4WhNLJOkSxWITQ=
@@ -1035,12 +1035,12 @@ github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5
github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/docker/cli v28.3.2+incompatible h1:mOt9fcLE7zaACbxW1GeS65RI67wIJrTnqS3hP2huFsY=
github.com/docker/cli v28.3.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v29.2.0+incompatible h1:9oBd9+YM7rxjZLfyMGxjraKBKE4/nVyvVfN4qNl9XRM=
github.com/docker/cli v29.2.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI=
github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
@@ -1078,16 +1078,16 @@ github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJ
github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q=
github.com/envoyproxy/go-control-plane v0.14.0 h1:hbG2kr4RuFj222B6+7T83thSPqLjwBIfQawTkC++2HA=
github.com/envoyproxy/go-control-plane v0.14.0/go.mod h1:NcS5X47pLl/hfqxU70yPwL9ZMkUlwlKxtAohpi2wBEU=
github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g=
github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98=
github.com/envoyproxy/go-control-plane/envoy v1.37.0 h1:u3riX6BoYRfF4Dr7dwSOroNfdSbEPe9Yyl09/B6wBrQ=
github.com/envoyproxy/go-control-plane/envoy v1.37.0/go.mod h1:DReE9MMrmecPy+YvQOAOHNYMALuowAnbjjEMkkWOi6A=
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4=
github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA=
github.com/envoyproxy/protoc-gen-validate v1.3.3 h1:MVQghNeW+LZcmXe7SY1V36Z+WFMDjpqGAGacLe2T0ds=
github.com/envoyproxy/protoc-gen-validate v1.3.3/go.mod h1:TsndJ/ngyIdQRhMcVVGDDHINPLWB7C82oDArY51KfB0=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
github.com/esiqveland/notify v0.13.3 h1:QCMw6o1n+6rl+oLUfg8P1IIDSFsDEb2WlXvVvIJbI/o=
@@ -1393,8 +1393,8 @@ github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w7
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
github.com/hairyhenderson/go-codeowners v0.7.0 h1:s0W4wF8bdsBEjTWzwzSlsatSthWtTAF2xLgo4a4RwAo=
github.com/hairyhenderson/go-codeowners v0.7.0/go.mod h1:wUlNgQ3QjqC4z8DnM5nnCYVq/icpqXJyJOukKx5U8/Q=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -1627,6 +1627,10 @@ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3N
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo=
github.com/moby/moby/api v1.54.0 h1:7kbUgyiKcoBhm0UrWbdrMs7RX8dnwzURKVbZGy2GnL0=
github.com/moby/moby/api v1.54.0/go.mod h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc=
github.com/moby/moby/client v0.3.0 h1:UUGL5okry+Aomj3WhGt9Aigl3ZOxZGqR7XPo+RLPlKs=
github.com/moby/moby/client v0.3.0/go.mod h1:HJgFbJRvogDQjbM8fqc1MCEm4mIAGMLjXbgwoZp6jCQ=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
@@ -2066,8 +2070,8 @@ go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4Etq
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
@@ -2889,6 +2893,8 @@ modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8=
mvdan.cc/gofumpt v0.8.0 h1:nZUCeC2ViFaerTcYKstMmfysj6uhQrA2vJe+2vwGU6k=
mvdan.cc/gofumpt v0.8.0/go.mod h1:vEYnSzyGPmjvFkqJWtXkh79UwPWP9/HMxQdGEXZHjpg=
pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk=
pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/qr v0.2.0 h1:6vBLea5/NRMVTz8V66gipeLycZMl/+UlFmk8DvqQ6WY=
+78 -3
View File
@@ -1,3 +1,78 @@
# HELP coder_derp_server_accepts_total Total DERP connections accepted.
# TYPE coder_derp_server_accepts_total counter
coder_derp_server_accepts_total 0
# HELP coder_derp_server_average_queue_duration_ms Average queue duration in milliseconds.
# TYPE coder_derp_server_average_queue_duration_ms gauge
coder_derp_server_average_queue_duration_ms 0
# HELP coder_derp_server_bytes_received_total Total bytes received.
# TYPE coder_derp_server_bytes_received_total counter
coder_derp_server_bytes_received_total 0
# HELP coder_derp_server_bytes_sent_total Total bytes sent.
# TYPE coder_derp_server_bytes_sent_total counter
coder_derp_server_bytes_sent_total 0
# HELP coder_derp_server_clients Total clients (local + remote).
# TYPE coder_derp_server_clients gauge
coder_derp_server_clients 0
# HELP coder_derp_server_clients_local Local clients.
# TYPE coder_derp_server_clients_local gauge
coder_derp_server_clients_local 0
# HELP coder_derp_server_clients_remote Remote (mesh) clients.
# TYPE coder_derp_server_clients_remote gauge
coder_derp_server_clients_remote 0
# HELP coder_derp_server_connections Current DERP connections.
# TYPE coder_derp_server_connections gauge
coder_derp_server_connections 0
# HELP coder_derp_server_got_ping_total Total pings received.
# TYPE coder_derp_server_got_ping_total counter
coder_derp_server_got_ping_total 0
# HELP coder_derp_server_home_connections Current home DERP connections.
# TYPE coder_derp_server_home_connections gauge
coder_derp_server_home_connections 0
# HELP coder_derp_server_home_moves_in_total Total home moves in.
# TYPE coder_derp_server_home_moves_in_total counter
coder_derp_server_home_moves_in_total 0
# HELP coder_derp_server_home_moves_out_total Total home moves out.
# TYPE coder_derp_server_home_moves_out_total counter
coder_derp_server_home_moves_out_total 0
# HELP coder_derp_server_packets_dropped_reason_total Packets dropped by reason.
# TYPE coder_derp_server_packets_dropped_reason_total counter
coder_derp_server_packets_dropped_reason_total{reason=""} 0
# HELP coder_derp_server_packets_dropped_total Total packets dropped.
# TYPE coder_derp_server_packets_dropped_total counter
coder_derp_server_packets_dropped_total 0
# HELP coder_derp_server_packets_dropped_type_total Packets dropped by type.
# TYPE coder_derp_server_packets_dropped_type_total counter
coder_derp_server_packets_dropped_type_total{type=""} 0
# HELP coder_derp_server_packets_forwarded_in_total Total packets forwarded in from mesh peers.
# TYPE coder_derp_server_packets_forwarded_in_total counter
coder_derp_server_packets_forwarded_in_total 0
# HELP coder_derp_server_packets_forwarded_out_total Total packets forwarded out to mesh peers.
# TYPE coder_derp_server_packets_forwarded_out_total counter
coder_derp_server_packets_forwarded_out_total 0
# HELP coder_derp_server_packets_received_kind_total Packets received by kind.
# TYPE coder_derp_server_packets_received_kind_total counter
coder_derp_server_packets_received_kind_total{kind=""} 0
# HELP coder_derp_server_packets_received_total Total packets received.
# TYPE coder_derp_server_packets_received_total counter
coder_derp_server_packets_received_total 0
# HELP coder_derp_server_packets_sent_total Total packets sent.
# TYPE coder_derp_server_packets_sent_total counter
coder_derp_server_packets_sent_total 0
# HELP coder_derp_server_peer_gone_disconnected_total Total peer gone (disconnected) frames sent.
# TYPE coder_derp_server_peer_gone_disconnected_total counter
coder_derp_server_peer_gone_disconnected_total 0
# HELP coder_derp_server_peer_gone_not_here_total Total peer gone (not here) frames sent.
# TYPE coder_derp_server_peer_gone_not_here_total counter
coder_derp_server_peer_gone_not_here_total 0
# HELP coder_derp_server_sent_pong_total Total pongs sent.
# TYPE coder_derp_server_sent_pong_total counter
coder_derp_server_sent_pong_total 0
# HELP coder_derp_server_unknown_frames_total Total unknown frames received.
# TYPE coder_derp_server_unknown_frames_total counter
coder_derp_server_unknown_frames_total 0
# HELP coder_derp_server_watchers Current watchers.
# TYPE coder_derp_server_watchers gauge
coder_derp_server_watchers 0
# HELP coder_pubsub_connected Whether we are connected (1) or not connected (0) to postgres
# TYPE coder_pubsub_connected gauge
coder_pubsub_connected 0
@@ -159,13 +234,13 @@ coderd_dbpurge_records_purged_total{record_type=""} 0
coderd_experiments{experiment=""} 0
# HELP coderd_insights_applications_usage_seconds The application usage per template.
# TYPE coderd_insights_applications_usage_seconds gauge
coderd_insights_applications_usage_seconds{template_name="",application_name="",slug=""} 0
coderd_insights_applications_usage_seconds{template_name="",application_name="",slug="",organization_name=""} 0
# HELP coderd_insights_parameters The parameter usage per template.
# TYPE coderd_insights_parameters gauge
coderd_insights_parameters{template_name="",parameter_name="",parameter_type="",parameter_value=""} 0
coderd_insights_parameters{template_name="",parameter_name="",parameter_type="",parameter_value="",organization_name=""} 0
# HELP coderd_insights_templates_active_users The number of active users of the template.
# TYPE coderd_insights_templates_active_users gauge
coderd_insights_templates_active_users{template_name=""} 0
coderd_insights_templates_active_users{template_name="",organization_name=""} 0
# HELP coderd_license_active_users The number of active users.
# TYPE coderd_license_active_users gauge
coderd_license_active_users 0
+1
View File
@@ -30,6 +30,7 @@ var scanDirs = []string{
"coderd",
"enterprise",
"provisionerd",
"tailnet",
}
// skipPaths lists files that should be excluded from scanning. Their metrics
+15
View File
@@ -16,6 +16,21 @@ deploy_branch=main
# Determine the current branch name and check that it is one of the supported
# branch names.
branch_name=$(git branch --show-current)
# --- BEGIN TEMPORARY SHORT-CIRCUIT ---
# Forces deployment of main. Remove after 2026-03-04T12:00Z.
if [[ "$branch_name" == "main" ]]; then
log "TEMPORARY SHORT-CIRCUIT: deploying main"
log "VERDICT: DEPLOY"
echo "DEPLOY"
exit 0
else
log "VERDICT: DO NOT DEPLOY"
echo "NOOP"
exit 0
fi
# --- END TEMPORARY SHORT-CIRCUIT ---
if [[ "$branch_name" != "main" && ! "$branch_name" =~ ^release/[0-9]+\.[0-9]+$ ]]; then
error "Current branch '$branch_name' is not a supported branch name for dogfood, must be 'main' or 'release/x.y'"
fi
+1 -1
View File
@@ -248,7 +248,7 @@ export const patchRoleSyncSettings = (
};
};
const getWorkspaceSharingSettingsKey = (organization: string) => [
export const getWorkspaceSharingSettingsKey = (organization: string) => [
"organization",
organization,
"workspaceSharingSettings",
+2 -13
View File
@@ -17,10 +17,7 @@ export const taskLogs = (user: string, taskId: string) => ({
export const pauseTask = (task: Task, queryClient: QueryClient) => {
return {
mutationFn: async () => {
if (!task.workspace_id) {
throw new Error("Task has no workspace");
}
return API.stopWorkspace(task.workspace_id);
return API.pauseTask(task.owner_name, task.id);
},
onSuccess: async () => {
await queryClient.invalidateQueries({ queryKey: ["tasks"] });
@@ -31,15 +28,7 @@ export const pauseTask = (task: Task, queryClient: QueryClient) => {
export const resumeTask = (task: Task, queryClient: QueryClient) => {
return {
mutationFn: async () => {
if (!task.workspace_id) {
throw new Error("Task has no workspace");
}
return API.startWorkspace(
task.workspace_id,
task.template_version_id,
undefined,
undefined,
);
return API.resumeTask(task.owner_name, task.id);
},
onSuccess: async () => {
await queryClient.invalidateQueries({ queryKey: ["tasks"] });
+1 -1
View File
@@ -479,7 +479,7 @@ export const workspacePermissions = (workspace?: Workspace) => {
checks: workspace ? workspaceChecks(workspace) : {},
}),
queryKey: ["workspaces", workspace?.id, "permissions"],
enabled: !!workspace,
enabled: Boolean(workspace),
staleTime: Number.POSITIVE_INFINITY,
};
};
@@ -0,0 +1,25 @@
import {
MockWorkspace,
MockWorkspaceAgent,
MockWorkspaceApp,
} from "testHelpers/entities";
import { renderWithAuth } from "testHelpers/renderHelpers";
import { screen } from "@testing-library/react";
import { AppLink } from "./AppLink";
const renderAppLink = (app: typeof MockWorkspaceApp) => {
return renderWithAuth(
<AppLink app={app} workspace={MockWorkspace} agent={MockWorkspaceAgent} />,
);
};
// Regression test for https://github.com/coder/coder/issues/18573:
// open_in="tab" was not opening links in a new tab.
describe("AppLink", () => {
it("sets target=_blank and rel=noreferrer when open_in is tab", async () => {
renderAppLink({ ...MockWorkspaceApp, open_in: "tab" });
const link = await screen.findByRole("link");
expect(link).toHaveAttribute("target", "_blank");
expect(link).toHaveAttribute("rel", "noreferrer");
});
});
+12 -2
View File
@@ -135,7 +135,12 @@ export const AppLink: FC<AppLinkProps> = ({
const button = grouped ? (
<DropdownMenuItem asChild>
<a href={canClick ? link.href : undefined} onClick={link.onClick}>
<a
href={canClick ? link.href : undefined}
onClick={link.onClick}
target={app.open_in === "tab" ? "_blank" : undefined}
rel={app.open_in === "tab" ? "noreferrer" : undefined}
>
{icon}
{link.label}
{ShareIcon && <ShareIcon />}
@@ -143,7 +148,12 @@ export const AppLink: FC<AppLinkProps> = ({
</DropdownMenuItem>
) : (
<AgentButton asChild>
<a href={canClick ? link.href : undefined} onClick={link.onClick}>
<a
href={canClick ? link.href : undefined}
onClick={link.onClick}
target={app.open_in === "tab" ? "_blank" : undefined}
rel={app.open_in === "tab" ? "noreferrer" : undefined}
>
{icon}
{link.label}
{ShareIcon && <ShareIcon />}
@@ -1,3 +1,4 @@
import { workspaceSharingSettings } from "api/queries/organizations";
import type {
Group,
WorkspaceACL,
@@ -37,6 +38,7 @@ import { TableLoader } from "components/TableLoader/TableLoader";
import { EllipsisVertical, UserPlusIcon } from "lucide-react";
import { getGroupSubtitle } from "modules/groups";
import type { FC, ReactNode } from "react";
import { useQuery } from "react-query";
interface RoleSelectProps {
value: WorkspaceRole;
@@ -139,6 +141,7 @@ export const RoleSelectField: FC<RoleSelectFieldProps> = ({
};
interface WorkspaceSharingFormProps {
organizationId: string;
workspaceACL: WorkspaceACL | undefined;
canUpdatePermissions: boolean;
isTaskWorkspace: boolean;
@@ -155,6 +158,7 @@ interface WorkspaceSharingFormProps {
}
export const WorkspaceSharingForm: FC<WorkspaceSharingFormProps> = ({
organizationId,
workspaceACL,
canUpdatePermissions,
isTaskWorkspace,
@@ -169,6 +173,46 @@ export const WorkspaceSharingForm: FC<WorkspaceSharingFormProps> = ({
isCompact,
showRestartWarning,
}) => {
const sharingSettingsQuery = useQuery(
workspaceSharingSettings(organizationId),
);
if (sharingSettingsQuery.isLoading) {
return (
<TableBody>
<TableLoader />
</TableBody>
);
}
if (!sharingSettingsQuery.data) {
return (
<TableBody>
<TableRow>
<TableCell colSpan={999}>
<ErrorAlert error={sharingSettingsQuery.error} />
</TableCell>
</TableRow>
</TableBody>
);
}
if (sharingSettingsQuery.data.sharing_disabled) {
return (
<TableBody>
<TableRow>
<TableCell colSpan={999}>
<EmptyState
message="This workspace cannot be shared"
description="Workspace sharing has been disabled for this organization."
isCompact={isCompact}
/>
</TableCell>
</TableRow>
</TableBody>
);
}
const isEmpty = Boolean(
workspaceACL &&
workspaceACL.users.length === 0 &&
+8 -8
View File
@@ -10,6 +10,14 @@ export const workspaceChecks = (workspace: Workspace) =>
},
action: "read",
},
shareWorkspace: {
object: {
resource_type: "workspace",
resource_id: workspace.id,
owner_id: workspace.owner_id,
},
action: "share",
},
updateWorkspace: {
object: {
resource_type: "workspace",
@@ -34,14 +42,6 @@ export const workspaceChecks = (workspace: Workspace) =>
},
action: "update",
},
// To run a build in debug mode we need to be able to read the deployment
// config (enable_terraform_debug_mode).
deploymentConfig: {
object: {
resource_type: "deployment_config",
},
action: "read",
},
}) satisfies Record<string, AuthorizationCheck>;
export type WorkspacePermissions = Record<
+6 -6
View File
@@ -739,9 +739,9 @@ export const TaskResuming: Story = {
spyOn(API, "getWorkspaceByOwnerAndName").mockResolvedValue(
MockStoppedWorkspace,
);
spyOn(API, "startWorkspace").mockResolvedValue(
MockStartingWorkspace.latest_build,
);
spyOn(API, "resumeTask").mockResolvedValue({
workspace_build: MockStartingWorkspace.latest_build,
});
spyOn(API, "getTaskLogs").mockResolvedValue(MockTaskLogsResponse);
},
parameters: {
@@ -766,7 +766,7 @@ export const TaskResuming: Story = {
await userEvent.click(resumeButton);
await waitFor(async () => {
expect(API.startWorkspace).toBeCalled();
expect(API.resumeTask).toBeCalled();
});
},
};
@@ -781,7 +781,7 @@ export const TaskResumeFailure: Story = {
spyOn(API, "getWorkspaceByOwnerAndName").mockResolvedValue(
MockStoppedWorkspace,
);
spyOn(API, "startWorkspace").mockRejectedValue(
spyOn(API, "resumeTask").mockRejectedValue(
new Error("Some unexpected error"),
);
spyOn(API, "getTaskLogs").mockResolvedValue(MockTaskLogsResponse);
@@ -820,7 +820,7 @@ export const TaskResumeFailureWithDialog: Story = {
spyOn(API, "getWorkspaceByOwnerAndName").mockResolvedValue(
MockStoppedWorkspace,
);
spyOn(API, "startWorkspace").mockRejectedValue({
spyOn(API, "resumeTask").mockRejectedValue({
...mockApiError({
message: "Bad Request",
detail: "Invalid build parameters provided",
+13 -8
View File
@@ -361,7 +361,9 @@ export const PauseTask: Story = {
spyOn(API, "getTasks").mockResolvedValue([
{ ...MockTask, status: "active" },
]);
spyOn(API, "stopWorkspace").mockResolvedValue(MockWorkspaceBuildStop);
spyOn(API, "pauseTask").mockResolvedValue({
workspace_build: MockWorkspaceBuildStop,
});
},
play: async ({ canvasElement }) => {
const canvas = within(canvasElement);
@@ -370,7 +372,10 @@ export const PauseTask: Story = {
});
await userEvent.click(pauseButton);
await waitFor(() => {
expect(API.stopWorkspace).toHaveBeenCalledWith(MockTask.workspace_id);
expect(API.pauseTask).toHaveBeenCalledWith(
MockTask.owner_name,
MockTask.id,
);
});
},
};
@@ -394,7 +399,9 @@ export const ResumeTask: Story = {
spyOn(API, "getTasks").mockResolvedValue([
{ ...MockTask, status: "paused" },
]);
spyOn(API, "startWorkspace").mockResolvedValue(MockWorkspaceBuildStop);
spyOn(API, "resumeTask").mockResolvedValue({
workspace_build: MockWorkspaceBuildStop,
});
},
play: async ({ canvasElement }) => {
const canvas = within(canvasElement);
@@ -403,11 +410,9 @@ export const ResumeTask: Story = {
});
await userEvent.click(resumeButton);
await waitFor(() => {
expect(API.startWorkspace).toHaveBeenCalledWith(
MockTask.workspace_id,
MockTask.template_version_id,
undefined,
undefined,
expect(API.resumeTask).toHaveBeenCalledWith(
MockTask.owner_name,
MockTask.id,
);
});
},
@@ -24,9 +24,9 @@ const createTimestamp = (
const permissions: WorkspacePermissions = {
readWorkspace: true,
shareWorkspace: true,
updateWorkspace: true,
updateWorkspaceVersion: true,
deploymentConfig: true,
deleteFailedWorkspace: true,
};
@@ -57,7 +57,6 @@ export const Workspace: FC<WorkspaceProps> = ({
latestVersion,
permissions,
timings,
sharingDisabled,
handleStart,
handleStop,
handleRestart,
@@ -111,7 +110,6 @@ export const Workspace: FC<WorkspaceProps> = ({
latestVersion={latestVersion}
isUpdating={isUpdating}
isRestarting={isRestarting}
sharingDisabled={sharingDisabled}
handleStart={handleStart}
handleStop={handleStop}
handleRestart={handleRestart}
@@ -38,6 +38,7 @@ export const ShareButton: FC<ShareButtonProps> = ({
<FeatureStageBadge contentType="beta" size="sm" />
</div>
<WorkspaceSharingForm
organizationId={workspace.organization_id}
workspaceACL={sharing.workspaceACL}
canUpdatePermissions={canUpdatePermissions}
isTaskWorkspace={Boolean(workspace.task_id)}
@@ -16,11 +16,11 @@ const meta: Meta<typeof WorkspaceActions> = {
args: {
isUpdating: false,
permissions: {
deleteFailedWorkspace: true,
deploymentConfig: true,
readWorkspace: true,
shareWorkspace: true,
updateWorkspace: true,
updateWorkspaceVersion: true,
deleteFailedWorkspace: true,
},
},
decorators: [withDashboardProvider, withDesktopViewport, withAuthProvider],
@@ -172,11 +172,11 @@ export const FailedWithDebug: Story = {
args: {
workspace: Mocks.MockFailedWorkspace,
permissions: {
deploymentConfig: true,
deleteFailedWorkspace: true,
readWorkspace: true,
shareWorkspace: true,
updateWorkspace: true,
updateWorkspaceVersion: true,
deleteFailedWorkspace: true,
},
},
};
@@ -29,7 +29,6 @@ interface WorkspaceActionsProps {
isUpdating: boolean;
isRestarting: boolean;
permissions: WorkspacePermissions;
sharingDisabled?: boolean;
handleToggleFavorite: () => void;
handleStart: (buildParameters?: WorkspaceBuildParameter[]) => void;
handleStop: () => void;
@@ -46,7 +45,6 @@ export const WorkspaceActions: FC<WorkspaceActionsProps> = ({
isUpdating,
isRestarting,
permissions,
sharingDisabled,
handleToggleFavorite,
handleStart,
handleStop,
@@ -57,10 +55,13 @@ export const WorkspaceActions: FC<WorkspaceActionsProps> = ({
handleDebug,
handleDormantActivate,
}) => {
const { user } = useAuthenticated();
const {
permissions: { viewDeploymentConfig },
user,
} = useAuthenticated();
const { data: deployment } = useQuery({
...deploymentConfig(),
enabled: permissions.deploymentConfig,
enabled: viewDeploymentConfig,
});
const { actions, canCancel, canAcceptJobs } = abilitiesByWorkspaceStatus(
workspace,
@@ -191,7 +192,7 @@ export const WorkspaceActions: FC<WorkspaceActionsProps> = ({
onToggle={handleToggleFavorite}
/>
{!sharingDisabled && (
{permissions.shareWorkspace && (
<ShareButton
workspace={workspace}
canUpdatePermissions={permissions.updateWorkspace}
@@ -14,9 +14,9 @@ import { WorkspaceNotifications } from "./WorkspaceNotifications";
export const defaultPermissions: WorkspacePermissions = {
readWorkspace: true,
updateWorkspaceVersion: true,
shareWorkspace: true,
updateWorkspace: true,
deploymentConfig: true,
updateWorkspaceVersion: true,
deleteFailedWorkspace: true,
};
@@ -125,11 +125,11 @@ describe("WorkspacePage", () => {
server.use(
http.post("/api/v2/authcheck", async () => {
const permissions: WorkspacePermissions = {
deleteFailedWorkspace: true,
deploymentConfig: true,
readWorkspace: true,
shareWorkspace: true,
updateWorkspace: true,
updateWorkspaceVersion: true,
deleteFailedWorkspace: true,
};
return HttpResponse.json(permissions);
}),
@@ -1,5 +1,4 @@
import { watchWorkspace } from "api/api";
import { workspaceSharingSettings } from "api/queries/organizations";
import { template as templateQueryOptions } from "api/queries/templates";
import { workspaceBuildsKey } from "api/queries/workspaceBuilds";
import {
@@ -45,12 +44,6 @@ const WorkspacePage: FC = () => {
const permissionsQuery = useQuery(workspacePermissions(workspace));
const permissions = permissionsQuery.data;
const sharingSettingsQuery = useQuery({
...workspaceSharingSettings(workspace?.organization_id ?? ""),
enabled: !!workspace,
});
const sharingDisabled = sharingSettingsQuery.data?.sharing_disabled ?? false;
// Watch workspace changes
const updateWorkspaceData = useEffectEvent(
async (newWorkspaceData: Workspace) => {
@@ -121,7 +114,6 @@ const WorkspacePage: FC = () => {
workspace={workspace}
template={template}
permissions={permissions}
sharingDisabled={sharingDisabled}
/>
);
};
@@ -34,14 +34,12 @@ interface WorkspaceReadyPageProps {
template: TypesGen.Template;
workspace: TypesGen.Workspace;
permissions: WorkspacePermissions;
sharingDisabled?: boolean;
}
export const WorkspaceReadyPage: FC<WorkspaceReadyPageProps> = ({
workspace,
template,
permissions,
sharingDisabled,
}) => {
const queryClient = useQueryClient();
@@ -285,7 +283,6 @@ export const WorkspaceReadyPage: FC<WorkspaceReadyPageProps> = ({
template={template}
buildLogs={buildLogs}
timings={timingsQuery.data}
sharingDisabled={sharingDisabled}
handleStart={async (buildParameters) => {
const { hasEphemeral, ephemeralParameters } =
await checkEphemeralParameters(buildParameters);
@@ -35,9 +35,9 @@ const meta: Meta<typeof WorkspaceTopbar> = {
latestVersion: MockTemplateVersion,
permissions: {
readWorkspace: true,
updateWorkspaceVersion: true,
shareWorkspace: true,
updateWorkspace: true,
deploymentConfig: true,
updateWorkspaceVersion: true,
deleteFailedWorkspace: true,
},
},
@@ -44,7 +44,6 @@ interface WorkspaceProps {
template: TypesGen.Template;
permissions: WorkspacePermissions;
latestVersion?: TypesGen.TemplateVersion;
sharingDisabled?: boolean;
handleStart: (buildParameters?: TypesGen.WorkspaceBuildParameter[]) => void;
handleStop: () => void;
handleRestart: (buildParameters?: TypesGen.WorkspaceBuildParameter[]) => void;
@@ -63,7 +62,6 @@ export const WorkspaceTopbar: FC<WorkspaceProps> = ({
permissions,
isUpdating,
isRestarting,
sharingDisabled,
handleStart,
handleStop,
handleRestart,
@@ -238,7 +236,6 @@ export const WorkspaceTopbar: FC<WorkspaceProps> = ({
permissions={permissions}
isUpdating={isUpdating}
isRestarting={isRestarting}
sharingDisabled={sharingDisabled}
handleStart={handleStart}
handleStop={handleStop}
handleRestart={handleRestart}
@@ -1,4 +1,3 @@
import type { Workspace } from "api/typesGenerated";
import { Avatar } from "components/Avatar/Avatar";
import { FeatureStageBadge } from "components/FeatureStageBadge/FeatureStageBadge";
import {
@@ -12,19 +11,11 @@ import {
TimerIcon as ScheduleIcon,
Users as SharingIcon,
} from "lucide-react";
import type { FC } from "react";
import { useWorkspaceSettings } from "./WorkspaceSettingsLayout";
interface SidebarProps {
username: string;
workspace: Workspace;
sharingDisabled?: boolean;
}
export const Sidebar: React.FC = () => {
const { owner, workspace, permissions } = useWorkspaceSettings();
export const Sidebar: FC<SidebarProps> = ({
username,
workspace,
sharingDisabled,
}) => {
return (
<BaseSidebar>
<SidebarHeader
@@ -36,7 +27,7 @@ export const Sidebar: FC<SidebarProps> = ({
/>
}
title={workspace.name}
linkTo={`/@${username}/${workspace.name}`}
linkTo={`/@${owner}/${workspace.name}`}
subtitle={workspace.template_display_name ?? workspace.template_name}
/>
@@ -49,7 +40,7 @@ export const Sidebar: FC<SidebarProps> = ({
<SidebarNavItem href="schedule" icon={ScheduleIcon}>
Schedule
</SidebarNavItem>
{!sharingDisabled && (
{permissions?.shareWorkspace && (
<SidebarNavItem href="sharing" icon={SharingIcon}>
Sharing
<FeatureStageBadge contentType="beta" size="sm" />
@@ -4,7 +4,7 @@ import WorkspaceParametersPage from "./WorkspaceParametersPage";
import WorkspaceParametersPageExperimental from "./WorkspaceParametersPageExperimental";
const WorkspaceParametersExperimentRouter: FC = () => {
const workspace = useWorkspaceSettings();
const { workspace } = useWorkspaceSettings();
return (
<>
@@ -29,7 +29,7 @@ import {
} from "./WorkspaceParametersForm";
const WorkspaceParametersPage: FC = () => {
const workspace = useWorkspaceSettings();
const { workspace } = useWorkspaceSettings();
const build = workspace.latest_build;
const { data: templateVersionParameters } = useQuery(
richParameters(build.template_version_id),
@@ -33,7 +33,7 @@ import { useWorkspaceSettings } from "../WorkspaceSettingsLayout";
import { WorkspaceParametersPageViewExperimental } from "./WorkspaceParametersPageViewExperimental";
const WorkspaceParametersPageExperimental: FC = () => {
const workspace = useWorkspaceSettings();
const { workspace } = useWorkspaceSettings();
const navigate = useNavigate();
const [searchParams] = useSearchParams();
const templateVersionId = searchParams.get("templateVersionId") ?? undefined;
@@ -6,10 +6,10 @@ import {
} from "testHelpers/entities";
import { withAuthProvider, withDashboardProvider } from "testHelpers/storybook";
import type { Meta, StoryObj } from "@storybook/react-vite";
import { getAuthorizationKey } from "api/queries/authCheck";
import { templateByNameKey } from "api/queries/templates";
import { workspaceByOwnerAndNameKey } from "api/queries/workspaces";
import type { Workspace } from "api/typesGenerated";
import type { WorkspacePermissions } from "modules/workspaces/permissions";
import {
reactRouterOutlet,
reactRouterParameters,
@@ -68,19 +68,14 @@ function workspaceQueries(workspace: Workspace) {
data: workspace,
},
{
key: getAuthorizationKey({
checks: {
updateWorkspace: {
object: {
resource_type: "workspace",
resource_id: MockWorkspace.id,
owner_id: MockWorkspace.owner_id,
},
action: "update",
},
},
}),
data: { updateWorkspace: true },
key: ["workspaces", workspace.id, "permissions"],
data: {
readWorkspace: true,
shareWorkspace: true,
updateWorkspace: true,
updateWorkspaceVersion: true,
deleteFailedWorkspace: true,
} satisfies WorkspacePermissions,
},
{
key: templateByNameKey(
@@ -1,5 +1,4 @@
import { API } from "api/api";
import { checkAuthorization } from "api/queries/authCheck";
import { templateByName } from "api/queries/templates";
import { workspaceByOwnerAndNameKey } from "api/queries/workspaces";
import type * as TypesGen from "api/typesGenerated";
@@ -28,28 +27,13 @@ import {
} from "./formToRequest";
import { WorkspaceScheduleForm } from "./WorkspaceScheduleForm";
const permissionsToCheck = (workspace: TypesGen.Workspace) =>
({
updateWorkspace: {
object: {
resource_type: "workspace",
resource_id: workspace.id,
owner_id: workspace.owner_id,
},
action: "update",
},
}) as const;
const WorkspaceSchedulePage: FC = () => {
const params = useParams() as { username: string; workspace: string };
const navigate = useNavigate();
const username = params.username.replace("@", "");
const workspaceName = params.workspace;
const queryClient = useQueryClient();
const workspace = useWorkspaceSettings();
const { data: permissions, error: checkPermissionsError } = useQuery(
checkAuthorization({ checks: permissionsToCheck(workspace) }),
);
const { permissions, workspace } = useWorkspaceSettings();
const { data: template, error: getTemplateError } = useQuery(
templateByName(workspace.organization_id, workspace.template_name),
);
@@ -66,8 +50,8 @@ const WorkspaceSchedulePage: FC = () => {
},
onError: () => displayError("Failed to update workspace schedule"),
});
const error = checkPermissionsError || getTemplateError;
const isLoading = !template || !permissions;
const error = getTemplateError;
const isLoading = !template;
const [isConfirmingApply, setIsConfirmingApply] = useState(false);
const { mutate: updateWorkspace } = useMutation({
@@ -1,17 +1,28 @@
import { workspaceSharingSettings } from "api/queries/organizations";
import { workspaceByOwnerAndName } from "api/queries/workspaces";
import {
workspaceByOwnerAndName,
workspacePermissions,
} from "api/queries/workspaces";
import type { Workspace } from "api/typesGenerated";
import { ErrorAlert } from "components/Alert/ErrorAlert";
import { Loader } from "components/Loader/Loader";
import { Margins } from "components/Margins/Margins";
import { Stack } from "components/Stack/Stack";
import type { WorkspacePermissions } from "modules/workspaces/permissions";
import { createContext, type FC, Suspense, useContext } from "react";
import { useQuery } from "react-query";
import { Outlet, useParams } from "react-router";
import { pageTitle } from "utils/page";
import { Sidebar } from "./Sidebar";
const WorkspaceSettings = createContext<Workspace | undefined>(undefined);
type WorkspaceSettingsContext = {
owner: string;
workspace: Workspace;
permissions?: WorkspacePermissions;
};
const WorkspaceSettings = createContext<WorkspaceSettingsContext | undefined>(
undefined,
);
export function useWorkspaceSettings() {
const value = useContext(WorkspaceSettings);
@@ -31,39 +42,36 @@ export const WorkspaceSettingsLayout: FC = () => {
};
const workspaceName = params.workspace;
const username = params.username.replace("@", "");
const {
data: workspace,
error,
isLoading,
isError,
} = useQuery(workspaceByOwnerAndName(username, workspaceName));
const workspaceQuery = useQuery(
workspaceByOwnerAndName(username, workspaceName),
);
const sharingSettingsQuery = useQuery({
...workspaceSharingSettings(workspace?.organization_id ?? ""),
enabled: !!workspace,
});
const sharingDisabled = sharingSettingsQuery.data?.sharing_disabled ?? false;
const permissionsQuery = useQuery(workspacePermissions(workspaceQuery.data));
if (isLoading) {
if (workspaceQuery.isLoading) {
return <Loader />;
}
const error = workspaceQuery.error || permissionsQuery.error;
return (
<>
<title>{pageTitle(workspaceName, "Settings")}</title>
<Margins>
<Stack css={{ padding: "48px 0" }} direction="row" spacing={10}>
{isError ? (
{error ? (
<ErrorAlert error={error} />
) : (
workspace && (
<WorkspaceSettings.Provider value={workspace}>
<Sidebar
workspace={workspace}
username={username}
sharingDisabled={sharingDisabled}
/>
workspaceQuery.data && (
<WorkspaceSettings.Provider
value={{
owner: username,
workspace: workspaceQuery.data,
permissions: permissionsQuery.data,
}}
>
<Sidebar />
<Suspense fallback={<Loader />}>
<main css={{ width: "100%" }}>
<Outlet />
@@ -15,7 +15,7 @@ const WorkspaceSettingsPage: FC = () => {
};
const workspaceName = params.workspace;
const username = params.username.replace("@", "");
const workspace = useWorkspaceSettings();
const { workspace } = useWorkspaceSettings();
const navigate = useNavigate();
const mutation = useMutation({
@@ -11,7 +11,7 @@ import { useWorkspaceSettings } from "../WorkspaceSettingsLayout";
import { WorkspaceSharingPageView } from "./WorkspaceSharingPageView";
const WorkspaceSharingPage: FC = () => {
const workspace = useWorkspaceSettings();
const { workspace } = useWorkspaceSettings();
const sharing = useWorkspaceSharing(workspace);
const checks = workspaceChecks(workspace);
@@ -25,7 +25,7 @@ const WorkspaceSharingPage: FC = () => {
sharing.error ?? permissionsQuery.error ?? sharing.mutationError;
return (
<div className="flex flex-col gap-12 max-w-screen-md">
<div className="flex flex-col gap-12">
<title>{pageTitle(workspace.name, "Sharing")}</title>
<header className="flex flex-col">
@@ -7,6 +7,7 @@ import {
mockApiError,
} from "testHelpers/entities";
import type { Meta, StoryObj } from "@storybook/react-vite";
import { getWorkspaceSharingSettingsKey } from "api/queries/organizations";
import type {
WorkspaceACL,
WorkspaceGroup,
@@ -63,6 +64,14 @@ const aclWithUsersAndGroups: WorkspaceACL = {
const meta: Meta<typeof WorkspaceSharingPageView> = {
title: "pages/WorkspaceSharingPageView",
component: WorkspaceSharingPageView,
parameters: {
queries: [
{
key: getWorkspaceSharingSettingsKey(MockWorkspace.organization_id),
data: { sharing_disabled: false },
},
],
},
args: {
workspace: MockWorkspace,
workspaceACL: emptyACL,
@@ -52,6 +52,7 @@ export const WorkspaceSharingPageView: FC<WorkspaceSharingPageViewProps> = ({
}) => {
return (
<WorkspaceSharingForm
organizationId={workspace.organization_id}
workspaceACL={workspaceACL}
canUpdatePermissions={canUpdatePermissions}
isTaskWorkspace={Boolean(workspace.task_id)}
+1 -1
View File
@@ -1429,7 +1429,7 @@ func (c *Controller) Run(ctx context.Context) {
tailnetClients, err := c.Dialer.Dial(c.ctx, c.ResumeTokenCtrl)
if err != nil {
if xerrors.Is(err, context.Canceled) || xerrors.Is(err, context.DeadlineExceeded) {
if c.ctx.Err() != nil {
return
}
+103
View File
@@ -1075,6 +1075,84 @@ func TestController_Disconnects(t *testing.T) {
_ = testutil.TryReceive(testCtx, t, uut.Closed())
}
// TestController_RetriesWrappedDeadlineExceeded verifies that a dial
// failure carrying context.DeadlineExceeded wrapped inside a transport
// error (*net.OpError, as produced by a TCP dial timeout) is treated as
// retryable: the controller must dial again rather than shut down. Only
// cancellation of the controller's own context should end the loop.
func TestController_RetriesWrappedDeadlineExceeded(t *testing.T) {
	t.Parallel()
	testCtx := testutil.Context(t, testutil.WaitShort)
	ctx, cancel := context.WithCancel(testCtx)
	defer cancel()
	logger := testutil.Logger(t)
	dialer := &scriptedDialer{
		attempts: make(chan int, 10),
		dialFn: func(ctx context.Context, attempt int) (tailnet.ControlProtocolClients, error) {
			// First attempt: simulate a transport dial timeout, i.e. a
			// DeadlineExceeded wrapped in a net.OpError rather than
			// returned bare.
			if attempt == 1 {
				return tailnet.ControlProtocolClients{}, &net.OpError{
					Op:  "dial",
					Net: "tcp",
					Err: context.DeadlineExceeded,
				}
			}
			// Later attempts: block until the test cancels the context.
			<-ctx.Done()
			return tailnet.ControlProtocolClients{}, ctx.Err()
		},
	}
	uut := tailnet.NewController(logger.Named("ctrl"), dialer)
	uut.Run(ctx)
	// The wrapped timeout on attempt 1 must be followed by attempt 2.
	require.Equal(t, 1, testutil.TryReceive(testCtx, t, dialer.attempts))
	require.Equal(t, 2, testutil.TryReceive(testCtx, t, dialer.attempts))
	// The controller must still be running after the retryable error.
	select {
	case <-uut.Closed():
		t.Fatal("controller exited after wrapped deadline exceeded")
	default:
	}
	cancel()
	_ = testutil.TryReceive(testCtx, t, uut.Closed())
}
// TestController_DoesNotRedialAfterCancel verifies that once the
// controller's context is canceled, the controller closes the active
// client connection and exits without queuing another dial attempt.
func TestController_DoesNotRedialAfterCancel(t *testing.T) {
	t.Parallel()
	testCtx := testutil.Context(t, testutil.WaitShort)
	ctx, cancel := context.WithCancel(testCtx)
	logger := testutil.Logger(t)
	fClient := newFakeWorkspaceUpdateClient(testCtx, t)
	dialer := &scriptedDialer{
		attempts: make(chan int, 10),
		// Every dial succeeds immediately with a fake updates client.
		dialFn: func(_ context.Context, _ int) (tailnet.ControlProtocolClients, error) {
			return tailnet.ControlProtocolClients{
				WorkspaceUpdates: fClient,
				Closer:           fakeCloser{},
			}, nil
		},
	}
	fCtrl := newFakeUpdatesController(testCtx, t)
	uut := tailnet.NewController(logger.Named("ctrl"), dialer)
	uut.WorkspaceUpdatesCtrl = fCtrl
	uut.Run(ctx)
	// The first dial happens and the updates controller receives the
	// dialed client.
	require.Equal(t, 1, testutil.TryReceive(testCtx, t, dialer.attempts))
	call := testutil.TryReceive(testCtx, t, fCtrl.calls)
	require.Equal(t, fClient, call.client)
	testutil.RequireSend[tailnet.CloserWaiter](testCtx, t, call.resp, newFakeCloserWaiter())
	// Cancel while connected: the controller should close the client and
	// then shut down for good.
	cancel()
	closeCall := testutil.TryReceive(testCtx, t, fClient.close)
	testutil.RequireSend(testCtx, t, closeCall, nil)
	_ = testutil.TryReceive(testCtx, t, uut.Closed())
	// No further dial attempt may appear after cancellation.
	select {
	case attempt := <-dialer.attempts:
		t.Fatalf("unexpected redial attempt after cancel: %d", attempt)
	default:
	}
}
func TestController_TelemetrySuccess(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitShort)
@@ -2070,6 +2148,31 @@ func newFakeCloserWaiter() *fakeCloserWaiter {
}
}
// scriptedDialer is a test double for the controller's dialer. Each Dial
// call is delegated to dialFn together with a 1-based attempt number,
// which is also reported on the attempts channel when that channel is
// non-nil, letting tests observe exactly how many dials occurred.
type scriptedDialer struct {
	// attempts receives the 1-based number of each dial attempt; may be nil.
	attempts chan int
	// dialFn produces the scripted result for a given attempt number.
	dialFn   func(context.Context, int) (tailnet.ControlProtocolClients, error)
	// mu guards attemptN against concurrent Dial calls.
	mu       sync.Mutex
	// attemptN counts how many times Dial has been invoked.
	attemptN int
}
// Dial increments the attempt counter under the mutex, reports the
// attempt number on d.attempts when that channel is set (giving up if
// ctx is canceled first), and then delegates to the scripted dialFn.
func (d *scriptedDialer) Dial(ctx context.Context, _ tailnet.ResumeTokenController) (tailnet.ControlProtocolClients, error) {
	d.mu.Lock()
	d.attemptN++
	n := d.attemptN
	d.mu.Unlock()

	if d.attempts == nil {
		return d.dialFn(ctx, n)
	}
	select {
	case d.attempts <- n:
		return d.dialFn(ctx, n)
	case <-ctx.Done():
		return tailnet.ControlProtocolClients{}, ctx.Err()
	}
}
// fakeWorkspaceUpdatesDialer is a minimal dialer stub that hands back a
// canned workspace-updates client.
type fakeWorkspaceUpdatesDialer struct {
	// client is returned to the caller in place of a real connection.
	client tailnet.WorkspaceUpdatesClient
}
+214
View File
@@ -0,0 +1,214 @@
package derpmetrics
import (
"expvar"
"strconv"
"github.com/prometheus/client_golang/prometheus"
"tailscale.com/derp"
)
// DERPExpvarCollector exports a DERP server's expvar stats as
// properly typed Prometheus metrics. It implements prometheus.Collector:
// all descriptors are built once at construction, and the live values
// are read from the server's expvar tree on every scrape (see Collect).
type DERPExpvarCollector struct {
	// server is the DERP server whose ExpVar() output is scraped.
	server *derp.Server
	// Counters: monotonically increasing totals, emitted as CounterValue.
	accepts *prometheus.Desc
	bytesReceived *prometheus.Desc
	bytesSent *prometheus.Desc
	packetsReceived *prometheus.Desc
	packetsSent *prometheus.Desc
	packetsDropped *prometheus.Desc
	packetsForwardedIn *prometheus.Desc
	packetsForwardedOut *prometheus.Desc
	homeMovesIn *prometheus.Desc
	homeMovesOut *prometheus.Desc
	gotPing *prometheus.Desc
	sentPong *prometheus.Desc
	peerGoneDisconnected *prometheus.Desc
	peerGoneNotHere *prometheus.Desc
	unknownFrames *prometheus.Desc
	// Labeled counters: one metric per sub-key of a map-style expvar
	// (label names: "reason", "type", "kind" respectively).
	packetsDroppedByReason *prometheus.Desc
	packetsDroppedByType *prometheus.Desc
	packetsReceivedByKind *prometheus.Desc
	// Gauges: point-in-time values that can go up or down.
	connections *prometheus.Desc
	homeConnections *prometheus.Desc
	clientsTotal *prometheus.Desc
	clientsLocal *prometheus.Desc
	clientsRemote *prometheus.Desc
	watchers *prometheus.Desc
	avgQueueDurMS *prometheus.Desc
}
// NewDERPExpvarCollector creates a Prometheus collector that reads
// stats from a DERP server's expvar on each scrape. Descriptors are
// allocated once here and reused for the collector's lifetime.
func NewDERPExpvarCollector(server *derp.Server) *DERPExpvarCollector {
	// Small helpers to cut down on the repetition of NewDesc calls.
	desc := func(name, help string) *prometheus.Desc {
		return prometheus.NewDesc(name, help, nil, nil)
	}
	labeled := func(name, help, label string) *prometheus.Desc {
		return prometheus.NewDesc(name, help, []string{label}, nil)
	}

	return &DERPExpvarCollector{
		server: server,

		accepts:              desc("coder_derp_server_accepts_total", "Total DERP connections accepted."),
		bytesReceived:        desc("coder_derp_server_bytes_received_total", "Total bytes received."),
		bytesSent:            desc("coder_derp_server_bytes_sent_total", "Total bytes sent."),
		packetsReceived:      desc("coder_derp_server_packets_received_total", "Total packets received."),
		packetsSent:          desc("coder_derp_server_packets_sent_total", "Total packets sent."),
		packetsDropped:       desc("coder_derp_server_packets_dropped_total", "Total packets dropped."),
		packetsForwardedIn:   desc("coder_derp_server_packets_forwarded_in_total", "Total packets forwarded in from mesh peers."),
		packetsForwardedOut:  desc("coder_derp_server_packets_forwarded_out_total", "Total packets forwarded out to mesh peers."),
		homeMovesIn:          desc("coder_derp_server_home_moves_in_total", "Total home moves in."),
		homeMovesOut:         desc("coder_derp_server_home_moves_out_total", "Total home moves out."),
		gotPing:              desc("coder_derp_server_got_ping_total", "Total pings received."),
		sentPong:             desc("coder_derp_server_sent_pong_total", "Total pongs sent."),
		peerGoneDisconnected: desc("coder_derp_server_peer_gone_disconnected_total", "Total peer gone (disconnected) frames sent."),
		peerGoneNotHere:      desc("coder_derp_server_peer_gone_not_here_total", "Total peer gone (not here) frames sent."),
		unknownFrames:        desc("coder_derp_server_unknown_frames_total", "Total unknown frames received."),

		packetsDroppedByReason: labeled("coder_derp_server_packets_dropped_reason_total", "Packets dropped by reason.", "reason"),
		packetsDroppedByType:   labeled("coder_derp_server_packets_dropped_type_total", "Packets dropped by type.", "type"),
		packetsReceivedByKind:  labeled("coder_derp_server_packets_received_kind_total", "Packets received by kind.", "kind"),

		connections:     desc("coder_derp_server_connections", "Current DERP connections."),
		homeConnections: desc("coder_derp_server_home_connections", "Current home DERP connections."),
		clientsTotal:    desc("coder_derp_server_clients", "Total clients (local + remote)."),
		clientsLocal:    desc("coder_derp_server_clients_local", "Local clients."),
		clientsRemote:   desc("coder_derp_server_clients_remote", "Remote (mesh) clients."),
		watchers:        desc("coder_derp_server_watchers", "Current watchers."),
		avgQueueDurMS:   desc("coder_derp_server_average_queue_duration_ms", "Average queue duration in milliseconds."),
	}
}
// Describe sends every descriptor owned by the collector to ch,
// satisfying the prometheus.Collector interface.
func (c *DERPExpvarCollector) Describe(ch chan<- *prometheus.Desc) {
	all := []*prometheus.Desc{
		c.accepts,
		c.bytesReceived,
		c.bytesSent,
		c.packetsReceived,
		c.packetsSent,
		c.packetsDropped,
		c.packetsForwardedIn,
		c.packetsForwardedOut,
		c.homeMovesIn,
		c.homeMovesOut,
		c.gotPing,
		c.sentPong,
		c.peerGoneDisconnected,
		c.peerGoneNotHere,
		c.unknownFrames,
		c.packetsDroppedByReason,
		c.packetsDroppedByType,
		c.packetsReceivedByKind,
		c.connections,
		c.homeConnections,
		c.clientsTotal,
		c.clientsLocal,
		c.clientsRemote,
		c.watchers,
		c.avgQueueDurMS,
	}
	for _, d := range all {
		ch <- d
	}
}
// Collect reads the DERP server's expvar stats and emits them as
// Prometheus metrics. Called on each /metrics scrape. Expvar keys that
// are not recognized are silently ignored.
func (c *DERPExpvarCollector) Collect(ch chan<- prometheus.Metric) {
	vars, ok := c.server.ExpVar().(interface {
		Do(func(expvar.KeyValue))
	})
	if !ok {
		return
	}

	// Route each known expvar key to its descriptor and metric shape.
	counters := map[string]*prometheus.Desc{
		"accepts":                       c.accepts,
		"bytes_received":                c.bytesReceived,
		"bytes_sent":                    c.bytesSent,
		"packets_received":              c.packetsReceived,
		"packets_sent":                  c.packetsSent,
		"packets_dropped":               c.packetsDropped,
		"packets_forwarded_in":          c.packetsForwardedIn,
		"packets_forwarded_out":         c.packetsForwardedOut,
		"home_moves_in":                 c.homeMovesIn,
		"home_moves_out":                c.homeMovesOut,
		"got_ping":                      c.gotPing,
		"sent_pong":                     c.sentPong,
		"peer_gone_disconnected_frames": c.peerGoneDisconnected,
		"peer_gone_not_here_frames":     c.peerGoneNotHere,
		"unknown_frames":                c.unknownFrames,
	}
	labeled := map[string]*prometheus.Desc{
		"counter_packets_dropped_reason": c.packetsDroppedByReason,
		"counter_packets_dropped_type":   c.packetsDroppedByType,
		"counter_packets_received_kind":  c.packetsReceivedByKind,
	}
	gauges := map[string]*prometheus.Desc{
		"gauge_current_connections":      c.connections,
		"gauge_current_home_connections": c.homeConnections,
		"gauge_clients_total":            c.clientsTotal,
		"gauge_clients_local":            c.clientsLocal,
		"gauge_clients_remote":           c.clientsRemote,
		"gauge_watchers":                 c.watchers,
		"average_queue_duration_ms":      c.avgQueueDurMS,
	}

	vars.Do(func(kv expvar.KeyValue) {
		if d, found := counters[kv.Key]; found {
			emitCounter(ch, d, kv.Value)
			return
		}
		if d, found := labeled[kv.Key]; found {
			emitLabeledCounters(ch, d, kv.Value)
			return
		}
		if d, found := gauges[kv.Key]; found {
			emitGauge(ch, d, kv.Value)
		}
	})
}
// emitCounter sends v on ch as a counter metric, skipping values that
// cannot be interpreted as a number.
func emitCounter(ch chan<- prometheus.Metric, desc *prometheus.Desc, v expvar.Var) {
	value, numeric := parseExpvarFloat(v)
	if !numeric {
		return
	}
	ch <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue, value)
}
// emitGauge sends v on ch as a gauge metric, skipping values that
// cannot be interpreted as a number.
func emitGauge(ch chan<- prometheus.Metric, desc *prometheus.Desc, v expvar.Var) {
	value, numeric := parseExpvarFloat(v)
	if !numeric {
		return
	}
	ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, value)
}
// emitLabeledCounters expands a map-style expvar (anything exposing a
// Do iterator) into one counter metric per entry, using the entry key
// as the label value. Non-numeric entries are skipped.
func emitLabeledCounters(ch chan<- prometheus.Metric, desc *prometheus.Desc, v expvar.Var) {
	type iterable interface{ Do(func(expvar.KeyValue)) }
	m, isMap := v.(iterable)
	if !isMap {
		return
	}
	m.Do(func(entry expvar.KeyValue) {
		value, numeric := parseExpvarFloat(entry.Value)
		if !numeric {
			return
		}
		ch <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue, value, entry.Key)
	})
}
func parseExpvarFloat(v expvar.Var) (float64, bool) {
switch val := v.(type) {
case *expvar.Int:
return float64(val.Value()), true
case *expvar.Float:
return val.Value(), true
default:
f, err := strconv.ParseFloat(v.String(), 64)
return f, err == nil
}
}
+177
View File
@@ -0,0 +1,177 @@
package derpmetrics_test
import (
"testing"
"github.com/prometheus/client_golang/prometheus"
ptestutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/derp"
"tailscale.com/types/key"
"github.com/coder/coder/v2/tailnet/derpmetrics"
)
// TestDERPExpvarCollector exercises the expvar-backed DERP Prometheus
// collector against a real (idle) derp.Server: registration, expected
// metric names, counter vs. gauge typing, labeled sub-metrics, and
// rejection of duplicate registration.
func TestDERPExpvarCollector(t *testing.T) {
	t.Parallel()
	t.Run("RegistersAndCollects", func(t *testing.T) {
		t.Parallel()
		// An idle DERP server with a no-op logger is enough for the
		// collector to have stats to export.
		server := derp.NewServer(key.NewNode(), func(format string, args ...any) {})
		defer server.Close()
		reg := prometheus.NewRegistry()
		collector := derpmetrics.NewDERPExpvarCollector(server)
		require.NoError(t, reg.Register(collector))
		// Verify we can gather without error.
		metrics, err := reg.Gather()
		require.NoError(t, err)
		require.NotEmpty(t, metrics, "expected at least one metric family")
		// Verify expected metric names are present.
		names := make(map[string]struct{})
		for _, m := range metrics {
			names[m.GetName()] = struct{}{}
		}
		expectedCounters := []string{
			"coder_derp_server_accepts_total",
			"coder_derp_server_bytes_received_total",
			"coder_derp_server_bytes_sent_total",
			"coder_derp_server_packets_received_total",
			"coder_derp_server_packets_sent_total",
			"coder_derp_server_packets_dropped_total",
			"coder_derp_server_packets_forwarded_in_total",
			"coder_derp_server_packets_forwarded_out_total",
			"coder_derp_server_home_moves_in_total",
			"coder_derp_server_home_moves_out_total",
			"coder_derp_server_got_ping_total",
			"coder_derp_server_sent_pong_total",
			"coder_derp_server_peer_gone_disconnected_total",
			"coder_derp_server_peer_gone_not_here_total",
			"coder_derp_server_unknown_frames_total",
		}
		expectedGauges := []string{
			"coder_derp_server_connections",
			"coder_derp_server_home_connections",
			"coder_derp_server_clients",
			"coder_derp_server_clients_local",
			"coder_derp_server_clients_remote",
			"coder_derp_server_watchers",
			"coder_derp_server_average_queue_duration_ms",
		}
		expectedLabeled := []string{
			"coder_derp_server_packets_dropped_reason_total",
			"coder_derp_server_packets_dropped_type_total",
			"coder_derp_server_packets_received_kind_total",
		}
		for _, name := range expectedCounters {
			assert.Contains(t, names, name, "missing counter %s", name)
		}
		for _, name := range expectedGauges {
			assert.Contains(t, names, name, "missing gauge %s", name)
		}
		for _, name := range expectedLabeled {
			assert.Contains(t, names, name, "missing labeled counter %s", name)
		}
	})
	t.Run("CounterTypes", func(t *testing.T) {
		t.Parallel()
		server := derp.NewServer(key.NewNode(), func(format string, args ...any) {})
		defer server.Close()
		reg := prometheus.NewRegistry()
		collector := derpmetrics.NewDERPExpvarCollector(server)
		require.NoError(t, reg.Register(collector))
		// Counters should report as counter type.
		count := ptestutil.CollectAndCount(collector)
		assert.Greater(t, count, 0, "expected metrics to be collected")
		// Verify a known counter starts at zero on an idle server.
		metrics, err := reg.Gather()
		require.NoError(t, err)
		for _, m := range metrics {
			if m.GetName() == "coder_derp_server_bytes_received_total" {
				require.Len(t, m.GetMetric(), 1)
				assert.Equal(t, float64(0), m.GetMetric()[0].GetCounter().GetValue())
				return
			}
		}
		t.Fatal("coder_derp_server_bytes_received_total not found")
	})
	t.Run("GaugeTypes", func(t *testing.T) {
		t.Parallel()
		server := derp.NewServer(key.NewNode(), func(format string, args ...any) {})
		defer server.Close()
		reg := prometheus.NewRegistry()
		collector := derpmetrics.NewDERPExpvarCollector(server)
		require.NoError(t, reg.Register(collector))
		metrics, err := reg.Gather()
		require.NoError(t, err)
		for _, m := range metrics {
			if m.GetName() == "coder_derp_server_connections" {
				require.Len(t, m.GetMetric(), 1)
				// Gauge type check — GetGauge should be non-nil.
				assert.NotNil(t, m.GetMetric()[0].GetGauge())
				assert.Equal(t, float64(0), m.GetMetric()[0].GetGauge().GetValue())
				return
			}
		}
		t.Fatal("coder_derp_server_connections not found")
	})
	t.Run("LabeledCounters", func(t *testing.T) {
		t.Parallel()
		server := derp.NewServer(key.NewNode(), func(format string, args ...any) {})
		defer server.Close()
		reg := prometheus.NewRegistry()
		collector := derpmetrics.NewDERPExpvarCollector(server)
		require.NoError(t, reg.Register(collector))
		metrics, err := reg.Gather()
		require.NoError(t, err)
		for _, m := range metrics {
			if m.GetName() == "coder_derp_server_packets_dropped_reason_total" {
				// Should have labeled sub-metrics (one per reason).
				require.NotEmpty(t, m.GetMetric(), "expected labeled metrics for drop reasons")
				// Each metric should have a "reason" label.
				for _, metric := range m.GetMetric() {
					labels := metric.GetLabel()
					require.Len(t, labels, 1)
					assert.Equal(t, "reason", labels[0].GetName())
				}
				return
			}
		}
		t.Fatal("coder_derp_server_packets_dropped_reason_total not found")
	})
	t.Run("NoDuplicateRegistration", func(t *testing.T) {
		t.Parallel()
		server := derp.NewServer(key.NewNode(), func(format string, args ...any) {})
		defer server.Close()
		reg := prometheus.NewRegistry()
		c1 := derpmetrics.NewDERPExpvarCollector(server)
		require.NoError(t, reg.Register(c1))
		// A second collector for the same server emits identical
		// descriptors, which the registry must reject.
		c2 := derpmetrics.NewDERPExpvarCollector(server)
		err := reg.Register(c2)
		assert.Error(t, err, "registering a second collector should fail")
	})
}