Compare commits

...

7 Commits

Author SHA1 Message Date
Danielle Maywood 4793806569 chore: upgrade coder/clistat to v1.1.1 (#20322) (#20324)
coder/clistat has received a handful of bug fixes, so we're back-porting
these to 2.26

---

Cherry-picked from 9bef5de30d
2025-10-16 15:29:05 +01:00
Dean Sheather 03440f6ae2 fix: avoid connection logging crashes in agent [2.26] (#20306)
# For release 2.26

- Ignore errors when reporting a connection from the server, just log
them instead
- Translate connection log IP `localhost` to `127.0.0.1` on both the
server and the agent
- Temporary fix: convert invalid IPs to `127.0.0.1` since the database
forbids NULL

Relates to #20194
2025-10-16 01:28:10 +11:00
Cian Johnston 7afe6c813b fix(coderd): ensure agent WebSocket conn is cleaned up (#19711) (#20094)
Co-authored-by: Danielle Maywood <danielle@themaywoods.com>
2025-10-01 15:37:56 -05:00
Kacper Sawicki 536920459d [2.26 backport] perf(enterprise): remove expensive GetWorkspaces query from entitlements (#19747) (#19756)
This PR addresses the significant database load issue where the
GetWorkspaces query was causing performance problems in the license
entitlements code.

cherry picked from commit 3074547
2025-09-11 09:48:32 +02:00
Kacper Sawicki c0f1b9d73e [2.26 backport] fix: pin pg_dump version when generating schema (#19696) (#19765)
This is required by #19756 

The latest release of all `pg_dump` major versions, going back to 13,
started inserting `\restrict` and `\unrestrict` keywords into dumps. This
currently breaks sqlc in `gen/dump` and our check migration script. Full
details of the postgres change are available here:
https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=575f54d4c

To fix, we'll always use the `pg_dump` binary from our postgres 13.21 Docker
image for schema dumps, instead of whatever is on the runner/local machine.

Coder doesn't restore from postgres dumps, so we're not vulnerable to
attacks that would be patched by the latest postgres version.
Regardless, we'll unpin ASAP.

Once sqlc is updated to handle these keywords, we need to start
stripping them when comparing the schema in the migration check script,
and then we can unpin the pg_dump version. This is being tracked at
https://github.com/coder/internal/issues/965

Co-authored-by: Ethan <39577870+ethanndickson@users.noreply.github.com>
2025-09-11 09:32:58 +02:00
Stephen Kirby a056cb6577 chore: add last commit from cherry-pick list for release (#19679)
Co-authored-by: Spike Curtis <spike@coder.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-02 19:19:04 -05:00
Stephen Kirby 0a73f842b3 fix: merge cherry-picked items for v2.26.0 (#19678)
Co-authored-by: Cian Johnston <cian@coder.com>
Co-authored-by: Mathias Fredriksson <mafredri@gmail.com>
Co-authored-by: Hugo Dutka <hugo@coder.com>
Co-authored-by: Kacper Sawicki <kacper@coder.com>
Co-authored-by: Atif Ali <atif@coder.com>
Co-authored-by: Danielle Maywood <danielle@themaywoods.com>
Co-authored-by: Susana Ferreira <susana@coder.com>
Co-authored-by: Brett Kolodny <brettkolodny@gmail.com>
Co-authored-by: Dean Sheather <dean@deansheather.com>
Co-authored-by: Steven Masley <Emyrk@users.noreply.github.com>
2025-09-02 16:19:17 -05:00
59 changed files with 2648 additions and 369 deletions
+1
View File
@@ -33,6 +33,7 @@ updates:
- dependency-name: "*"
update-types:
- version-update:semver-patch
- dependency-name: "github.com/mark3labs/mcp-go"
# Update our Dockerfile.
- package-ecosystem: "docker"
+14 -3
View File
@@ -790,11 +790,15 @@ func (a *agent) reportConnectionsLoop(ctx context.Context, aAPI proto.DRPCAgentC
logger.Debug(ctx, "reporting connection")
_, err := aAPI.ReportConnection(ctx, payload)
if err != nil {
return xerrors.Errorf("failed to report connection: %w", err)
// Do not fail the loop if we fail to report a connection, just
// log a warning.
// Related to https://github.com/coder/coder/issues/20194
logger.Warn(ctx, "failed to report connection to server", slog.Error(err))
// no continue here, we still need to remove it from the slice
} else {
logger.Debug(ctx, "successfully reported connection")
}
logger.Debug(ctx, "successfully reported connection")
// Remove the payload we sent.
a.reportConnectionsMu.Lock()
a.reportConnections[0] = nil // Release the pointer from the underlying array.
@@ -825,6 +829,13 @@ func (a *agent) reportConnection(id uuid.UUID, connectionType proto.Connection_T
ip = host
}
// If the IP is "localhost" (which it can be in some cases), set it to
// 127.0.0.1 instead.
// Related to https://github.com/coder/coder/issues/20194
if ip == "localhost" {
ip = "127.0.0.1"
}
a.reportConnectionsMu.Lock()
defer a.reportConnectionsMu.Unlock()
+12 -6
View File
@@ -62,12 +62,6 @@ import (
"github.com/coder/serpent"
"github.com/coder/wgtunnel/tunnelsdk"
"github.com/coder/coder/v2/coderd/entitlements"
"github.com/coder/coder/v2/coderd/notifications/reports"
"github.com/coder/coder/v2/coderd/runtimeconfig"
"github.com/coder/coder/v2/coderd/webpush"
"github.com/coder/coder/v2/codersdk/drpcsdk"
"github.com/coder/coder/v2/buildinfo"
"github.com/coder/coder/v2/cli/clilog"
"github.com/coder/coder/v2/cli/cliui"
@@ -83,15 +77,19 @@ import (
"github.com/coder/coder/v2/coderd/database/migrations"
"github.com/coder/coder/v2/coderd/database/pubsub"
"github.com/coder/coder/v2/coderd/devtunnel"
"github.com/coder/coder/v2/coderd/entitlements"
"github.com/coder/coder/v2/coderd/externalauth"
"github.com/coder/coder/v2/coderd/gitsshkey"
"github.com/coder/coder/v2/coderd/httpmw"
"github.com/coder/coder/v2/coderd/jobreaper"
"github.com/coder/coder/v2/coderd/notifications"
"github.com/coder/coder/v2/coderd/notifications/reports"
"github.com/coder/coder/v2/coderd/oauthpki"
"github.com/coder/coder/v2/coderd/prometheusmetrics"
"github.com/coder/coder/v2/coderd/prometheusmetrics/insights"
"github.com/coder/coder/v2/coderd/promoauth"
"github.com/coder/coder/v2/coderd/provisionerdserver"
"github.com/coder/coder/v2/coderd/runtimeconfig"
"github.com/coder/coder/v2/coderd/schedule"
"github.com/coder/coder/v2/coderd/telemetry"
"github.com/coder/coder/v2/coderd/tracing"
@@ -99,9 +97,11 @@ import (
"github.com/coder/coder/v2/coderd/util/ptr"
"github.com/coder/coder/v2/coderd/util/slice"
stringutil "github.com/coder/coder/v2/coderd/util/strings"
"github.com/coder/coder/v2/coderd/webpush"
"github.com/coder/coder/v2/coderd/workspaceapps/appurl"
"github.com/coder/coder/v2/coderd/workspacestats"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/codersdk/drpcsdk"
"github.com/coder/coder/v2/cryptorand"
"github.com/coder/coder/v2/provisioner/echo"
"github.com/coder/coder/v2/provisioner/terraform"
@@ -280,6 +280,12 @@ func enablePrometheus(
}
}
provisionerdserverMetrics := provisionerdserver.NewMetrics(logger)
if err := provisionerdserverMetrics.Register(options.PrometheusRegistry); err != nil {
return nil, xerrors.Errorf("failed to register provisionerd_server metrics: %w", err)
}
options.ProvisionerdServerMetrics = provisionerdserverMetrics
//nolint:revive
return ServeHandler(
ctx, logger, promhttp.InstrumentMetricHandler(
+24 -1
View File
@@ -3,9 +3,11 @@ package agentapi
import (
"context"
"database/sql"
"net"
"sync/atomic"
"github.com/google/uuid"
"github.com/sqlc-dev/pqtype"
"golang.org/x/xerrors"
"google.golang.org/protobuf/types/known/emptypb"
@@ -61,6 +63,27 @@ func (a *ConnLogAPI) ReportConnection(ctx context.Context, req *agentproto.Repor
return nil, xerrors.Errorf("get workspace by agent id: %w", err)
}
// Some older clients may incorrectly report "localhost" as the IP address.
// Related to https://github.com/coder/coder/issues/20194
logIPRaw := req.GetConnection().GetIp()
if logIPRaw == "localhost" {
logIPRaw = "127.0.0.1"
}
// TEMPORARY FIX for https://github.com/coder/coder/issues/20194
logIP := database.ParseIP(logIPRaw)
if !logIP.Valid {
// In older versions of Coder, NULL IPs are not permitted in the DB, so
// use 127.0.0.1 instead.
logIP = pqtype.Inet{
IPNet: net.IPNet{
IP: net.IPv4(127, 0, 0, 1),
Mask: net.CIDRMask(32, 32),
},
Valid: true,
}
}
reason := req.GetConnection().GetReason()
connLogger := *a.ConnectionLogger.Load()
err = connLogger.Upsert(ctx, database.UpsertConnectionLogParams{
@@ -73,7 +96,7 @@ func (a *ConnLogAPI) ReportConnection(ctx context.Context, req *agentproto.Repor
AgentName: workspaceAgent.Name,
Type: connectionType,
Code: code,
Ip: database.ParseIP(req.GetConnection().GetIp()),
Ip: logIP,
ConnectionID: uuid.NullUUID{
UUID: connectionID,
Valid: true,
+5
View File
@@ -110,6 +110,11 @@ func TestConnectionLog(t *testing.T) {
mDB := dbmock.NewMockStore(gomock.NewController(t))
mDB.EXPECT().GetWorkspaceByAgentID(gomock.Any(), agent.ID).Return(workspace, nil)
// TEMPORARY FIX for https://github.com/coder/coder/issues/20194
if tt.ip == "" {
tt.ip = "127.0.0.1"
}
api := &agentapi.ConnLogAPI{
ConnectionLogger: asAtomicPointer[connectionlog.ConnectionLogger](connLogger),
Database: mDB,
-8
View File
@@ -11,7 +11,6 @@ import (
"github.com/go-chi/chi/v5"
"github.com/google/uuid"
"golang.org/x/xerrors"
"cdr.dev/slog"
@@ -196,13 +195,6 @@ func (api *API) tasksCreate(rw http.ResponseWriter, r *http.Request) {
// prompts and mapping status/state. This method enforces that only AI task
// workspaces are given.
func (api *API) tasksFromWorkspaces(ctx context.Context, apiWorkspaces []codersdk.Workspace) ([]codersdk.Task, error) {
// Enforce that only AI task workspaces are given.
for _, ws := range apiWorkspaces {
if ws.LatestBuild.HasAITask == nil || !*ws.LatestBuild.HasAITask {
return nil, xerrors.Errorf("workspace %s is not an AI task workspace", ws.ID)
}
}
// Fetch prompts for each workspace build and map by build ID.
buildIDs := make([]uuid.UUID, 0, len(apiWorkspaces))
for _, ws := range apiWorkspaces {
+33 -3
View File
@@ -12,6 +12,8 @@ import (
"github.com/moby/moby/pkg/namesgenerator"
"golang.org/x/xerrors"
"cdr.dev/slog"
"github.com/coder/coder/v2/coderd/apikey"
"github.com/coder/coder/v2/coderd/audit"
"github.com/coder/coder/v2/coderd/database"
@@ -56,6 +58,14 @@ func (api *API) postToken(rw http.ResponseWriter, r *http.Request) {
return
}
// TODO(Cian): System users technically just have the 'member' role
// and we don't want to disallow all members from creating API keys.
if user.IsSystem {
api.Logger.Warn(ctx, "disallowed creating api key for system user", slog.F("user_id", user.ID))
httpapi.Forbidden(rw)
return
}
scope := database.APIKeyScopeAll
if scope != "" {
scope = database.APIKeyScope(createToken.Scope)
@@ -121,10 +131,29 @@ func (api *API) postToken(rw http.ResponseWriter, r *http.Request) {
// @Success 201 {object} codersdk.GenerateAPIKeyResponse
// @Router /users/{user}/keys [post]
func (api *API) postAPIKey(rw http.ResponseWriter, r *http.Request) {
ctx := r.Context()
user := httpmw.UserParam(r)
var (
ctx = r.Context()
user = httpmw.UserParam(r)
auditor = api.Auditor.Load()
aReq, commitAudit = audit.InitRequest[database.APIKey](rw, &audit.RequestParams{
Audit: *auditor,
Log: api.Logger,
Request: r,
Action: database.AuditActionCreate,
})
)
aReq.Old = database.APIKey{}
defer commitAudit()
cookie, _, err := api.createAPIKey(ctx, apikey.CreateParams{
// TODO(Cian): System users technically just have the 'member' role
// and we don't want to disallow all members from creating API keys.
if user.IsSystem {
api.Logger.Warn(ctx, "disallowed creating api key for system user", slog.F("user_id", user.ID))
httpapi.Forbidden(rw)
return
}
cookie, key, err := api.createAPIKey(ctx, apikey.CreateParams{
UserID: user.ID,
DefaultLifetime: api.DeploymentValues.Sessions.DefaultTokenDuration.Value(),
LoginType: database.LoginTypePassword,
@@ -138,6 +167,7 @@ func (api *API) postAPIKey(rw http.ResponseWriter, r *http.Request) {
return
}
aReq.New = *key
// We intentionally do not set the cookie on the response here.
// Setting the cookie will couple the browser session to the API
// key we return here, meaning logging out of the website would
+54 -2
View File
@@ -2,6 +2,7 @@ package coderd_test
import (
"context"
"encoding/json"
"net/http"
"strings"
"testing"
@@ -13,8 +14,10 @@ import (
"github.com/coder/coder/v2/coderd/audit"
"github.com/coder/coder/v2/coderd/coderdtest"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbgen"
"github.com/coder/coder/v2/coderd/database/dbtestutil"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/testutil"
"github.com/coder/serpent"
@@ -301,14 +304,32 @@ func TestSessionExpiry(t *testing.T) {
func TestAPIKey_OK(t *testing.T) {
t.Parallel()
// Given: a deployment with auditing enabled
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
_ = coderdtest.CreateFirstUser(t, client)
auditor := audit.NewMock()
client := coderdtest.New(t, &coderdtest.Options{Auditor: auditor})
owner := coderdtest.CreateFirstUser(t, client)
auditor.ResetLogs()
// When: an API key is created
res, err := client.CreateAPIKey(ctx, codersdk.Me)
require.NoError(t, err)
require.Greater(t, len(res.Key), 2)
// Then: an audit log is generated
als := auditor.AuditLogs()
require.Len(t, als, 1)
al := als[0]
assert.Equal(t, owner.UserID, al.UserID)
assert.Equal(t, database.AuditActionCreate, al.Action)
assert.Equal(t, database.ResourceTypeApiKey, al.ResourceType)
// Then: the diff MUST NOT contain the generated key.
raw, err := json.Marshal(al)
require.NoError(t, err)
require.NotContains(t, res.Key, string(raw))
}
func TestAPIKey_Deleted(t *testing.T) {
@@ -351,3 +372,34 @@ func TestAPIKey_SetDefault(t *testing.T) {
require.NoError(t, err)
require.EqualValues(t, dc.Sessions.DefaultTokenDuration.Value().Seconds(), apiKey1.LifetimeSeconds)
}
func TestAPIKey_PrebuildsNotAllowed(t *testing.T) {
t.Parallel()
db, pubsub := dbtestutil.NewDB(t)
dc := coderdtest.DeploymentValues(t)
dc.Sessions.DefaultTokenDuration = serpent.Duration(time.Hour * 12)
client := coderdtest.New(t, &coderdtest.Options{
Database: db,
Pubsub: pubsub,
DeploymentValues: dc,
})
ctx := testutil.Context(t, testutil.WaitLong)
// Given: an existing api token for the prebuilds user
_, prebuildsToken := dbgen.APIKey(t, db, database.APIKey{
UserID: database.PrebuildsSystemUserID,
})
client.SetSessionToken(prebuildsToken)
// When: the prebuilds user tries to create an API key
_, err := client.CreateAPIKey(ctx, database.PrebuildsSystemUserID.String())
// Then: denied.
require.ErrorContains(t, err, httpapi.ResourceForbiddenResponse.Message)
// When: the prebuilds user tries to create a token
_, err = client.CreateToken(ctx, database.PrebuildsSystemUserID.String(), codersdk.CreateTokenRequest{})
// Then: also denied.
require.ErrorContains(t, err, httpapi.ResourceForbiddenResponse.Message)
}
+3
View File
@@ -241,6 +241,8 @@ type Options struct {
UpdateAgentMetrics func(ctx context.Context, labels prometheusmetrics.AgentMetricLabels, metrics []*agentproto.Stats_Metric)
StatsBatcher workspacestats.Batcher
ProvisionerdServerMetrics *provisionerdserver.Metrics
// WorkspaceAppAuditSessionTimeout allows changing the timeout for audit
// sessions. Raising or lowering this value will directly affect the write
// load of the audit log table. This is used for testing. Default 1 hour.
@@ -1930,6 +1932,7 @@ func (api *API) CreateInMemoryTaggedProvisionerDaemon(dialCtx context.Context, n
},
api.NotificationsEnqueuer,
&api.PrebuildsReconciler,
api.ProvisionerdServerMetrics,
)
if err != nil {
return nil, err
+3
View File
@@ -184,6 +184,8 @@ type Options struct {
OIDCConvertKeyCache cryptokeys.SigningKeycache
Clock quartz.Clock
TelemetryReporter telemetry.Reporter
ProvisionerdServerMetrics *provisionerdserver.Metrics
}
// New constructs a codersdk client connected to an in-memory API instance.
@@ -604,6 +606,7 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can
Clock: options.Clock,
AppEncryptionKeyCache: options.APIKeyEncryptionCache,
OIDCConvertKeyCache: options.OIDCConvertKeyCache,
ProvisionerdServerMetrics: options.ProvisionerdServerMetrics,
}
}
+29 -8
View File
@@ -1777,6 +1777,13 @@ func (q *querier) EnqueueNotificationMessage(ctx context.Context, arg database.E
return q.db.EnqueueNotificationMessage(ctx, arg)
}
func (q *querier) ExpirePrebuildsAPIKeys(ctx context.Context, now time.Time) error {
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceApiKey); err != nil {
return err
}
return q.db.ExpirePrebuildsAPIKeys(ctx, now)
}
func (q *querier) FavoriteWorkspace(ctx context.Context, id uuid.UUID) error {
fetch := func(ctx context.Context, id uuid.UUID) (database.Workspace, error) {
return q.db.GetWorkspaceByID(ctx, id)
@@ -2242,14 +2249,6 @@ func (q *querier) GetLogoURL(ctx context.Context) (string, error) {
return q.db.GetLogoURL(ctx)
}
func (q *querier) GetManagedAgentCount(ctx context.Context, arg database.GetManagedAgentCountParams) (int64, error) {
// Must be able to read all workspaces to check usage.
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceWorkspace); err != nil {
return 0, xerrors.Errorf("authorize read all workspaces: %w", err)
}
return q.db.GetManagedAgentCount(ctx, arg)
}
func (q *querier) GetNotificationMessagesByStatus(ctx context.Context, arg database.GetNotificationMessagesByStatusParams) ([]database.NotificationMessage, error) {
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceNotificationMessage); err != nil {
return nil, err
@@ -2689,6 +2688,13 @@ func (q *querier) GetQuotaConsumedForUser(ctx context.Context, params database.G
return q.db.GetQuotaConsumedForUser(ctx, params)
}
func (q *querier) GetRegularWorkspaceCreateMetrics(ctx context.Context) ([]database.GetRegularWorkspaceCreateMetricsRow, error) {
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceWorkspace.All()); err != nil {
return nil, err
}
return q.db.GetRegularWorkspaceCreateMetrics(ctx)
}
func (q *querier) GetReplicaByID(ctx context.Context, id uuid.UUID) (database.Replica, error) {
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err != nil {
return database.Replica{}, err
@@ -3041,6 +3047,13 @@ func (q *querier) GetTemplatesWithFilter(ctx context.Context, arg database.GetTe
return q.db.GetAuthorizedTemplates(ctx, arg, prep)
}
func (q *querier) GetTotalUsageDCManagedAgentsV1(ctx context.Context, arg database.GetTotalUsageDCManagedAgentsV1Params) (int64, error) {
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceUsageEvent); err != nil {
return 0, err
}
return q.db.GetTotalUsageDCManagedAgentsV1(ctx, arg)
}
func (q *querier) GetUnexpiredLicenses(ctx context.Context) ([]database.License, error) {
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceLicense); err != nil {
return nil, err
@@ -3711,6 +3724,14 @@ func (q *querier) GetWorkspacesEligibleForTransition(ctx context.Context, now ti
}
func (q *querier) InsertAPIKey(ctx context.Context, arg database.InsertAPIKeyParams) (database.APIKey, error) {
// TODO(Cian): ideally this would be encoded in the policy, but system users are just members and we
// don't currently have a capability to conditionally deny creating resources by owner ID in a role.
// We also need to enrich rbac.Actor with IsSystem so that we can distinguish all system users.
// For now, there is only one system user (prebuilds).
if act, ok := ActorFromContext(ctx); ok && act.ID == database.PrebuildsSystemUserID.String() {
return database.APIKey{}, logNotAuthorizedError(ctx, q.log, NotAuthorizedError{Err: xerrors.Errorf("prebuild user may not create api keys")})
}
return insert(q.log, q.auth,
rbac.ResourceApiKey.WithOwner(arg.UserID.String()),
q.db.InsertAPIKey)(ctx, arg)
+33 -6
View File
@@ -18,6 +18,7 @@ import (
"golang.org/x/xerrors"
"cdr.dev/slog"
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/coder/v2/coderd/coderdtest"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/db2sdk"
@@ -806,12 +807,6 @@ func (s *MethodTestSuite) TestLicense() {
dbm.EXPECT().GetAnnouncementBanners(gomock.Any()).Return("value", nil).AnyTimes()
check.Args().Asserts().Returns("value")
}))
s.Run("GetManagedAgentCount", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
start := dbtime.Now()
end := start.Add(time.Hour)
dbm.EXPECT().GetManagedAgentCount(gomock.Any(), database.GetManagedAgentCountParams{StartTime: start, EndTime: end}).Return(int64(0), nil).AnyTimes()
check.Args(database.GetManagedAgentCountParams{StartTime: start, EndTime: end}).Asserts(rbac.ResourceWorkspace, policy.ActionRead).Returns(int64(0))
}))
}
func (s *MethodTestSuite) TestOrganization() {
@@ -1502,6 +1497,10 @@ func (s *MethodTestSuite) TestUser() {
dbm.EXPECT().DeleteAPIKeysByUserID(gomock.Any(), key.UserID).Return(nil).AnyTimes()
check.Args(key.UserID).Asserts(rbac.ResourceApiKey.WithOwner(key.UserID.String()), policy.ActionDelete).Returns()
}))
s.Run("ExpirePrebuildsAPIKeys", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
dbm.EXPECT().ExpirePrebuildsAPIKeys(gomock.Any(), gomock.Any()).Times(1).Return(nil)
check.Args(dbtime.Now()).Asserts(rbac.ResourceApiKey, policy.ActionDelete).Returns()
}))
s.Run("GetQuotaAllowanceForUser", s.Mocked(func(dbm *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
u := testutil.Fake(s.T(), faker, database.User{})
arg := database.GetQuotaAllowanceForUserParams{UserID: u.ID, OrganizationID: uuid.New()}
@@ -3223,6 +3222,10 @@ func (s *MethodTestSuite) TestWorkspace() {
d := dbgen.WorkspaceAgentDevcontainer(s.T(), db, database.WorkspaceAgentDevcontainer{WorkspaceAgentID: agt.ID})
check.Args(agt.ID).Asserts(w, policy.ActionRead).Returns([]database.WorkspaceAgentDevcontainer{d})
}))
s.Run("GetRegularWorkspaceCreateMetrics", s.Subtest(func(_ database.Store, check *expects) {
check.Args().
Asserts(rbac.ResourceWorkspace.All(), policy.ActionRead)
}))
}
func (s *MethodTestSuite) TestWorkspacePortSharing() {
@@ -5673,4 +5676,28 @@ func (s *MethodTestSuite) TestUsageEvents() {
db.EXPECT().UpdateUsageEventsPostPublish(gomock.Any(), params).Return(nil)
check.Args(params).Asserts(rbac.ResourceUsageEvent, policy.ActionUpdate)
}))
s.Run("GetTotalUsageDCManagedAgentsV1", s.Mocked(func(db *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
db.EXPECT().GetTotalUsageDCManagedAgentsV1(gomock.Any(), gomock.Any()).Return(int64(1), nil)
check.Args(database.GetTotalUsageDCManagedAgentsV1Params{
StartDate: time.Time{},
EndDate: time.Time{},
}).Asserts(rbac.ResourceUsageEvent, policy.ActionRead)
}))
}
// Ensures that the prebuilds actor may never insert an api key.
func TestInsertAPIKey_AsPrebuildsUser(t *testing.T) {
t.Parallel()
prebuildsSubj := rbac.Subject{
ID: database.PrebuildsSystemUserID.String(),
}
ctx := dbauthz.As(testutil.Context(t, testutil.WaitShort), prebuildsSubj)
mDB := dbmock.NewMockStore(gomock.NewController(t))
log := slogtest.Make(t, nil)
mDB.EXPECT().Wrappers().Times(1).Return([]string{})
dbz := dbauthz.New(mDB, nil, log, nil)
faker := gofakeit.New(0)
_, err := dbz.InsertAPIKey(ctx, testutil.Fake(t, faker, database.InsertAPIKeyParams{}))
require.True(t, dbauthz.IsNotAuthorizedError(err))
}
+7 -3
View File
@@ -157,7 +157,7 @@ func Template(t testing.TB, db database.Store, seed database.Template) database.
return template
}
func APIKey(t testing.TB, db database.Store, seed database.APIKey) (key database.APIKey, token string) {
func APIKey(t testing.TB, db database.Store, seed database.APIKey, munge ...func(*database.InsertAPIKeyParams)) (key database.APIKey, token string) {
id, _ := cryptorand.String(10)
secret, _ := cryptorand.String(22)
hashed := sha256.Sum256([]byte(secret))
@@ -173,7 +173,7 @@ func APIKey(t testing.TB, db database.Store, seed database.APIKey) (key database
}
}
key, err := db.InsertAPIKey(genCtx, database.InsertAPIKeyParams{
params := database.InsertAPIKeyParams{
ID: takeFirst(seed.ID, id),
// 0 defaults to 86400 at the db layer
LifetimeSeconds: takeFirst(seed.LifetimeSeconds, 0),
@@ -187,7 +187,11 @@ func APIKey(t testing.TB, db database.Store, seed database.APIKey) (key database
LoginType: takeFirst(seed.LoginType, database.LoginTypePassword),
Scope: takeFirst(seed.Scope, database.APIKeyScopeAll),
TokenName: takeFirst(seed.TokenName),
})
}
for _, fn := range munge {
fn(&params)
}
key, err := db.InsertAPIKey(genCtx, params)
require.NoError(t, err, "insert api key")
return key, fmt.Sprintf("%s-%s", key.ID, secret)
}
+21 -7
View File
@@ -523,6 +523,13 @@ func (m queryMetricsStore) EnqueueNotificationMessage(ctx context.Context, arg d
return r0
}
func (m queryMetricsStore) ExpirePrebuildsAPIKeys(ctx context.Context, now time.Time) error {
start := time.Now()
r0 := m.s.ExpirePrebuildsAPIKeys(ctx, now)
m.queryLatencies.WithLabelValues("ExpirePrebuildsAPIKeys").Observe(time.Since(start).Seconds())
return r0
}
func (m queryMetricsStore) FavoriteWorkspace(ctx context.Context, arg uuid.UUID) error {
start := time.Now()
r0 := m.s.FavoriteWorkspace(ctx, arg)
@@ -978,13 +985,6 @@ func (m queryMetricsStore) GetLogoURL(ctx context.Context) (string, error) {
return url, err
}
func (m queryMetricsStore) GetManagedAgentCount(ctx context.Context, arg database.GetManagedAgentCountParams) (int64, error) {
start := time.Now()
r0, r1 := m.s.GetManagedAgentCount(ctx, arg)
m.queryLatencies.WithLabelValues("GetManagedAgentCount").Observe(time.Since(start).Seconds())
return r0, r1
}
func (m queryMetricsStore) GetNotificationMessagesByStatus(ctx context.Context, arg database.GetNotificationMessagesByStatusParams) ([]database.NotificationMessage, error) {
start := time.Now()
r0, r1 := m.s.GetNotificationMessagesByStatus(ctx, arg)
@@ -1356,6 +1356,13 @@ func (m queryMetricsStore) GetQuotaConsumedForUser(ctx context.Context, ownerID
return consumed, err
}
func (m queryMetricsStore) GetRegularWorkspaceCreateMetrics(ctx context.Context) ([]database.GetRegularWorkspaceCreateMetricsRow, error) {
start := time.Now()
r0, r1 := m.s.GetRegularWorkspaceCreateMetrics(ctx)
m.queryLatencies.WithLabelValues("GetRegularWorkspaceCreateMetrics").Observe(time.Since(start).Seconds())
return r0, r1
}
func (m queryMetricsStore) GetReplicaByID(ctx context.Context, id uuid.UUID) (database.Replica, error) {
start := time.Now()
replica, err := m.s.GetReplicaByID(ctx, id)
@@ -1608,6 +1615,13 @@ func (m queryMetricsStore) GetTemplatesWithFilter(ctx context.Context, arg datab
return templates, err
}
func (m queryMetricsStore) GetTotalUsageDCManagedAgentsV1(ctx context.Context, arg database.GetTotalUsageDCManagedAgentsV1Params) (int64, error) {
start := time.Now()
r0, r1 := m.s.GetTotalUsageDCManagedAgentsV1(ctx, arg)
m.queryLatencies.WithLabelValues("GetTotalUsageDCManagedAgentsV1").Observe(time.Since(start).Seconds())
return r0, r1
}
func (m queryMetricsStore) GetUnexpiredLicenses(ctx context.Context) ([]database.License, error) {
start := time.Now()
licenses, err := m.s.GetUnexpiredLicenses(ctx)
+44 -15
View File
@@ -962,6 +962,20 @@ func (mr *MockStoreMockRecorder) EnqueueNotificationMessage(ctx, arg any) *gomoc
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnqueueNotificationMessage", reflect.TypeOf((*MockStore)(nil).EnqueueNotificationMessage), ctx, arg)
}
// ExpirePrebuildsAPIKeys mocks base method.
func (m *MockStore) ExpirePrebuildsAPIKeys(ctx context.Context, now time.Time) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ExpirePrebuildsAPIKeys", ctx, now)
ret0, _ := ret[0].(error)
return ret0
}
// ExpirePrebuildsAPIKeys indicates an expected call of ExpirePrebuildsAPIKeys.
func (mr *MockStoreMockRecorder) ExpirePrebuildsAPIKeys(ctx, now any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExpirePrebuildsAPIKeys", reflect.TypeOf((*MockStore)(nil).ExpirePrebuildsAPIKeys), ctx, now)
}
// FavoriteWorkspace mocks base method.
func (m *MockStore) FavoriteWorkspace(ctx context.Context, id uuid.UUID) error {
m.ctrl.T.Helper()
@@ -2041,21 +2055,6 @@ func (mr *MockStoreMockRecorder) GetLogoURL(ctx any) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogoURL", reflect.TypeOf((*MockStore)(nil).GetLogoURL), ctx)
}
// GetManagedAgentCount mocks base method.
func (m *MockStore) GetManagedAgentCount(ctx context.Context, arg database.GetManagedAgentCountParams) (int64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetManagedAgentCount", ctx, arg)
ret0, _ := ret[0].(int64)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetManagedAgentCount indicates an expected call of GetManagedAgentCount.
func (mr *MockStoreMockRecorder) GetManagedAgentCount(ctx, arg any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetManagedAgentCount", reflect.TypeOf((*MockStore)(nil).GetManagedAgentCount), ctx, arg)
}
// GetNotificationMessagesByStatus mocks base method.
func (m *MockStore) GetNotificationMessagesByStatus(ctx context.Context, arg database.GetNotificationMessagesByStatusParams) ([]database.NotificationMessage, error) {
m.ctrl.T.Helper()
@@ -2851,6 +2850,21 @@ func (mr *MockStoreMockRecorder) GetQuotaConsumedForUser(ctx, arg any) *gomock.C
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetQuotaConsumedForUser", reflect.TypeOf((*MockStore)(nil).GetQuotaConsumedForUser), ctx, arg)
}
// GetRegularWorkspaceCreateMetrics mocks base method.
func (m *MockStore) GetRegularWorkspaceCreateMetrics(ctx context.Context) ([]database.GetRegularWorkspaceCreateMetricsRow, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetRegularWorkspaceCreateMetrics", ctx)
ret0, _ := ret[0].([]database.GetRegularWorkspaceCreateMetricsRow)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetRegularWorkspaceCreateMetrics indicates an expected call of GetRegularWorkspaceCreateMetrics.
func (mr *MockStoreMockRecorder) GetRegularWorkspaceCreateMetrics(ctx any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRegularWorkspaceCreateMetrics", reflect.TypeOf((*MockStore)(nil).GetRegularWorkspaceCreateMetrics), ctx)
}
// GetReplicaByID mocks base method.
func (m *MockStore) GetReplicaByID(ctx context.Context, id uuid.UUID) (database.Replica, error) {
m.ctrl.T.Helper()
@@ -3421,6 +3435,21 @@ func (mr *MockStoreMockRecorder) GetTemplatesWithFilter(ctx, arg any) *gomock.Ca
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTemplatesWithFilter", reflect.TypeOf((*MockStore)(nil).GetTemplatesWithFilter), ctx, arg)
}
// GetTotalUsageDCManagedAgentsV1 mocks base method.
func (m *MockStore) GetTotalUsageDCManagedAgentsV1(ctx context.Context, arg database.GetTotalUsageDCManagedAgentsV1Params) (int64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetTotalUsageDCManagedAgentsV1", ctx, arg)
ret0, _ := ret[0].(int64)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetTotalUsageDCManagedAgentsV1 indicates an expected call of GetTotalUsageDCManagedAgentsV1.
func (mr *MockStoreMockRecorder) GetTotalUsageDCManagedAgentsV1(ctx, arg any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTotalUsageDCManagedAgentsV1", reflect.TypeOf((*MockStore)(nil).GetTotalUsageDCManagedAgentsV1), ctx, arg)
}
// GetUnexpiredLicenses mocks base method.
func (m *MockStore) GetUnexpiredLicenses(ctx context.Context) ([]database.License, error) {
m.ctrl.T.Helper()
+3
View File
@@ -68,6 +68,9 @@ func New(ctx context.Context, logger slog.Logger, db database.Store, clk quartz.
if err := tx.DeleteOldNotificationMessages(ctx); err != nil {
return xerrors.Errorf("failed to delete old notification messages: %w", err)
}
if err := tx.ExpirePrebuildsAPIKeys(ctx, dbtime.Time(start)); err != nil {
return xerrors.Errorf("failed to expire prebuilds user api keys: %w", err)
}
deleteOldAuditLogConnectionEventsBefore := start.Add(-maxAuditLogConnectionEventAge)
if err := tx.DeleteOldAuditLogConnectionEvents(ctx, database.DeleteOldAuditLogConnectionEventsParams{
+66
View File
@@ -27,6 +27,7 @@ import (
"github.com/coder/coder/v2/coderd/database/dbrollup"
"github.com/coder/coder/v2/coderd/database/dbtestutil"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/provisionerdserver"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/provisionerd/proto"
"github.com/coder/coder/v2/provisionersdk"
@@ -638,3 +639,68 @@ func TestDeleteOldAuditLogConnectionEventsLimit(t *testing.T) {
require.Len(t, logs, 0)
}
// TestExpireOldAPIKeys verifies that ExpirePrebuildsAPIKeys expires only the
// intended subset of the prebuilds system user's API keys: unnamed keys and
// workspace-session keys whose workspace has been claimed away from the
// prebuilds user. Regular users' keys and explicitly-named prebuilds keys
// must be left untouched.
func TestExpireOldAPIKeys(t *testing.T) {
	t.Parallel()

	// Given: a number of workspaces and API keys owned by a regular user and the prebuilds system user.
	var (
		ctx   = testutil.Context(t, testutil.WaitShort)
		now   = dbtime.Now()
		db, _ = dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure())
		org   = dbgen.Organization(t, db, database.Organization{})
		user  = dbgen.User(t, db, database.User{})
		tpl   = dbgen.Template(t, db, database.Template{OrganizationID: org.ID, CreatedBy: user.ID})
		// One workspace owned by the regular user, one still owned by the
		// prebuilds system user.
		userWs = dbgen.Workspace(t, db, database.WorkspaceTable{
			OwnerID:    user.ID,
			TemplateID: tpl.ID,
		})
		prebuildsWs = dbgen.Workspace(t, db, database.WorkspaceTable{
			OwnerID:    database.PrebuildsSystemUserID,
			TemplateID: tpl.ID,
		})
		// createAPIKey inserts a key for the given user with the given token
		// name. All keys start unexpired (one hour in the future) so the test
		// can distinguish "expired by the query" from "expired to begin with".
		createAPIKey = func(userID uuid.UUID, name string) database.APIKey {
			k, _ := dbgen.APIKey(t, db, database.APIKey{UserID: userID, TokenName: name, ExpiresAt: now.Add(time.Hour)}, func(iap *database.InsertAPIKeyParams) {
				iap.TokenName = name
			})
			return k
		}
		// assertKeyActive asserts the key's expiry is still in the future,
		// i.e. ExpirePrebuildsAPIKeys did not touch it.
		assertKeyActive = func(kid string) {
			k, err := db.GetAPIKeyByID(ctx, kid)
			require.NoError(t, err)
			assert.True(t, k.ExpiresAt.After(now))
		}
		// assertKeyExpired asserts the key's expiry was rewritten to exactly
		// `now`, which is what ExpirePrebuildsAPIKeys(ctx, now) does.
		assertKeyExpired = func(kid string) {
			k, err := db.GetAPIKeyByID(ctx, kid)
			require.NoError(t, err)
			assert.True(t, k.ExpiresAt.Equal(now))
		}
		unnamedUserAPIKey      = createAPIKey(user.ID, "")
		unnamedPrebuildsAPIKey = createAPIKey(database.PrebuildsSystemUserID, "")
		namedUserAPIKey        = createAPIKey(user.ID, "my-token")
		namedPrebuildsAPIKey   = createAPIKey(database.PrebuildsSystemUserID, "also-my-token")
		// Workspace session tokens use the provisionerdserver naming scheme
		// ("<user>_<workspace>_session_token"), which is the pattern the
		// expiry query matches on.
		userWorkspaceAPIKey1      = createAPIKey(user.ID, provisionerdserver.WorkspaceSessionTokenName(user.ID, userWs.ID))
		userWorkspaceAPIKey2      = createAPIKey(user.ID, provisionerdserver.WorkspaceSessionTokenName(user.ID, prebuildsWs.ID))
		prebuildsWorkspaceAPIKey1 = createAPIKey(database.PrebuildsSystemUserID, provisionerdserver.WorkspaceSessionTokenName(database.PrebuildsSystemUserID, prebuildsWs.ID))
		prebuildsWorkspaceAPIKey2 = createAPIKey(database.PrebuildsSystemUserID, provisionerdserver.WorkspaceSessionTokenName(database.PrebuildsSystemUserID, userWs.ID))
	)

	// When: we call ExpirePrebuildsAPIKeys
	err := db.ExpirePrebuildsAPIKeys(ctx, now)
	// Then: no errors is reported.
	require.NoError(t, err)

	// We do not touch user API keys.
	assertKeyActive(unnamedUserAPIKey.ID)
	assertKeyActive(namedUserAPIKey.ID)
	assertKeyActive(userWorkspaceAPIKey1.ID)
	assertKeyActive(userWorkspaceAPIKey2.ID)

	// Unnamed prebuilds API keys get expired.
	assertKeyExpired(unnamedPrebuildsAPIKey.ID)

	// API keys for workspaces still owned by prebuilds user remain active until claimed.
	assertKeyActive(prebuildsWorkspaceAPIKey1.ID)

	// API keys for workspaces no longer owned by prebuilds user get expired.
	assertKeyExpired(prebuildsWorkspaceAPIKey2.ID)

	// Out of an abundance of caution, we do not expire explicitly named prebuilds API keys.
	assertKeyActive(namedPrebuildsAPIKey.ID)
}
+21 -17
View File
@@ -10,7 +10,6 @@ import (
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"testing"
"time"
@@ -251,26 +250,31 @@ func PGDump(dbURL string) ([]byte, error) {
return stdout.Bytes(), nil
}
const minimumPostgreSQLVersion = 13
const (
minimumPostgreSQLVersion = 13
postgresImageSha = "sha256:467e7f2fb97b2f29d616e0be1d02218a7bbdfb94eb3cda7461fd80165edfd1f7"
)
// PGDumpSchemaOnly is for use by gen/dump only.
// It runs pg_dump against dbURL and sets a consistent timezone and encoding.
func PGDumpSchemaOnly(dbURL string) ([]byte, error) {
hasPGDump := false
if _, err := exec.LookPath("pg_dump"); err == nil {
out, err := exec.Command("pg_dump", "--version").Output()
if err == nil {
// Parse output:
// pg_dump (PostgreSQL) 14.5 (Ubuntu 14.5-0ubuntu0.22.04.1)
parts := strings.Split(string(out), " ")
if len(parts) > 2 {
version, err := strconv.Atoi(strings.Split(parts[2], ".")[0])
if err == nil && version >= minimumPostgreSQLVersion {
hasPGDump = true
}
}
}
}
// TODO: Temporarily pin pg_dump to the docker image until
// https://github.com/sqlc-dev/sqlc/issues/4065 is resolved.
// if _, err := exec.LookPath("pg_dump"); err == nil {
// out, err := exec.Command("pg_dump", "--version").Output()
// if err == nil {
// // Parse output:
// // pg_dump (PostgreSQL) 14.5 (Ubuntu 14.5-0ubuntu0.22.04.1)
// parts := strings.Split(string(out), " ")
// if len(parts) > 2 {
// version, err := strconv.Atoi(strings.Split(parts[2], ".")[0])
// if err == nil && version >= minimumPostgreSQLVersion {
// hasPGDump = true
// }
// }
// }
// }
cmdArgs := []string{
"pg_dump",
@@ -295,7 +299,7 @@ func PGDumpSchemaOnly(dbURL string) ([]byte, error) {
"run",
"--rm",
"--network=host",
fmt.Sprintf("%s:%d", postgresImage, minimumPostgreSQLVersion),
fmt.Sprintf("%s:%d@%s", postgresImage, minimumPostgreSQLVersion, postgresImageSha),
}, cmdArgs...)
}
cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...) //#nosec
+47
View File
@@ -361,6 +361,38 @@ CREATE TYPE workspace_transition AS ENUM (
'delete'
);
CREATE FUNCTION aggregate_usage_event() RETURNS trigger
LANGUAGE plpgsql
AS $$
BEGIN
-- Check for supported event types and throw error for unknown types
IF NEW.event_type NOT IN ('dc_managed_agents_v1') THEN
RAISE EXCEPTION 'Unhandled usage event type in aggregate_usage_event: %', NEW.event_type;
END IF;
INSERT INTO usage_events_daily (day, event_type, usage_data)
VALUES (
-- Extract the date from the created_at timestamp, always using UTC for
-- consistency
date_trunc('day', NEW.created_at AT TIME ZONE 'UTC')::date,
NEW.event_type,
NEW.event_data
)
ON CONFLICT (day, event_type) DO UPDATE SET
usage_data = CASE
-- Handle simple counter events by summing the count
WHEN NEW.event_type IN ('dc_managed_agents_v1') THEN
jsonb_build_object(
'count',
COALESCE((usage_events_daily.usage_data->>'count')::bigint, 0) +
COALESCE((NEW.event_data->>'count')::bigint, 0)
)
END;
RETURN NEW;
END;
$$;
CREATE FUNCTION check_workspace_agent_name_unique() RETURNS trigger
LANGUAGE plpgsql
AS $$
@@ -1860,6 +1892,16 @@ COMMENT ON COLUMN usage_events.published_at IS 'Set to a timestamp when the even
COMMENT ON COLUMN usage_events.failure_message IS 'Set to an error message when the event is temporarily or permanently unsuccessfully published to the usage collector service.';
CREATE TABLE usage_events_daily (
day date NOT NULL,
event_type text NOT NULL,
usage_data jsonb NOT NULL
);
COMMENT ON TABLE usage_events_daily IS 'usage_events_daily is a daily rollup of usage events. It stores the total usage for each event type by day.';
COMMENT ON COLUMN usage_events_daily.day IS 'The date of the summed usage events, always in UTC.';
CREATE TABLE user_configs (
user_id uuid NOT NULL,
key character varying(256) NOT NULL,
@@ -2711,6 +2753,9 @@ ALTER TABLE ONLY template_versions
ALTER TABLE ONLY templates
ADD CONSTRAINT templates_pkey PRIMARY KEY (id);
ALTER TABLE ONLY usage_events_daily
ADD CONSTRAINT usage_events_daily_pkey PRIMARY KEY (day, event_type);
ALTER TABLE ONLY usage_events
ADD CONSTRAINT usage_events_pkey PRIMARY KEY (id);
@@ -3034,6 +3079,8 @@ CREATE TRIGGER tailnet_notify_peer_change AFTER INSERT OR DELETE OR UPDATE ON ta
CREATE TRIGGER tailnet_notify_tunnel_change AFTER INSERT OR DELETE OR UPDATE ON tailnet_tunnels FOR EACH ROW EXECUTE FUNCTION tailnet_notify_tunnel_change();
CREATE TRIGGER trigger_aggregate_usage_event AFTER INSERT ON usage_events FOR EACH ROW EXECUTE FUNCTION aggregate_usage_event();
CREATE TRIGGER trigger_delete_group_members_on_org_member_delete BEFORE DELETE ON organization_members FOR EACH ROW EXECUTE FUNCTION delete_group_members_on_org_member_delete();
CREATE TRIGGER trigger_delete_oauth2_provider_app_token AFTER DELETE ON oauth2_provider_app_tokens FOR EACH ROW EXECUTE FUNCTION delete_deleted_oauth2_provider_app_token_api_key();
@@ -0,0 +1,3 @@
-- Revert the usage_events_daily rollup introduced by the corresponding "up"
-- migration: drop the trigger first, then the function it invokes, then the
-- rollup table itself (dependency order).
DROP TRIGGER IF EXISTS trigger_aggregate_usage_event ON usage_events;
DROP FUNCTION IF EXISTS aggregate_usage_event();
DROP TABLE IF EXISTS usage_events_daily;
@@ -0,0 +1,65 @@
CREATE TABLE usage_events_daily (
    day date NOT NULL, -- always grouped by day in UTC
    event_type text NOT NULL,
    usage_data jsonb NOT NULL,
    PRIMARY KEY (day, event_type)
);

COMMENT ON TABLE usage_events_daily IS 'usage_events_daily is a daily rollup of usage events. It stores the total usage for each event type by day.';
COMMENT ON COLUMN usage_events_daily.day IS 'The date of the summed usage events, always in UTC.';

-- Function to handle usage event aggregation
CREATE OR REPLACE FUNCTION aggregate_usage_event()
RETURNS TRIGGER AS $$
BEGIN
    -- Check for supported event types and throw error for unknown types.
    -- Raising here aborts the INSERT into usage_events as well, so unknown
    -- events can never be silently dropped from the rollup.
    IF NEW.event_type NOT IN ('dc_managed_agents_v1') THEN
        RAISE EXCEPTION 'Unhandled usage event type in aggregate_usage_event: %', NEW.event_type;
    END IF;

    INSERT INTO usage_events_daily (day, event_type, usage_data)
    VALUES (
        -- Extract the date from the created_at timestamp, always using UTC for
        -- consistency
        date_trunc('day', NEW.created_at AT TIME ZONE 'UTC')::date,
        NEW.event_type,
        NEW.event_data
    )
    ON CONFLICT (day, event_type) DO UPDATE SET
        usage_data = CASE
            -- Handle simple counter events by summing the count
            WHEN NEW.event_type IN ('dc_managed_agents_v1') THEN
                jsonb_build_object(
                    'count',
                    COALESCE((usage_events_daily.usage_data->>'count')::bigint, 0) +
                    COALESCE((NEW.event_data->>'count')::bigint, 0)
                )
        END;

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Create trigger to automatically aggregate usage events
CREATE TRIGGER trigger_aggregate_usage_event
    AFTER INSERT ON usage_events
    FOR EACH ROW
    EXECUTE FUNCTION aggregate_usage_event();

-- Populate usage_events_daily with existing data. This backfill runs once at
-- migration time; events inserted afterwards are handled by the trigger above.
INSERT INTO
    usage_events_daily (day, event_type, usage_data)
SELECT
    date_trunc('day', created_at AT TIME ZONE 'UTC')::date AS day,
    event_type,
    jsonb_build_object('count', SUM((event_data->>'count')::bigint)) AS usage_data
FROM
    usage_events
WHERE
    -- The only event type we currently support is dc_managed_agents_v1
    event_type = 'dc_managed_agents_v1'
GROUP BY
    date_trunc('day', created_at AT TIME ZONE 'UTC')::date,
    event_type
ON CONFLICT (day, event_type) DO UPDATE SET
    usage_data = EXCLUDED.usage_data;
+106
View File
@@ -9,17 +9,20 @@ import (
"slices"
"sync"
"testing"
"time"
"github.com/golang-migrate/migrate/v4"
migratepostgres "github.com/golang-migrate/migrate/v4/database/postgres"
"github.com/golang-migrate/migrate/v4/source"
"github.com/golang-migrate/migrate/v4/source/iofs"
"github.com/golang-migrate/migrate/v4/source/stub"
"github.com/google/uuid"
"github.com/lib/pq"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
"golang.org/x/sync/errgroup"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbtestutil"
"github.com/coder/coder/v2/coderd/database/migrations"
"github.com/coder/coder/v2/testutil"
@@ -363,3 +366,106 @@ func TestMigrateUpWithFixtures(t *testing.T) {
})
}
}
// TestMigration000362AggregateUsageEvents tests the migration that aggregates
// usage events into daily rows correctly.
func TestMigration000362AggregateUsageEvents(t *testing.T) {
	t.Parallel()

	const migrationVersion = 362

	// Similarly to the other test, this test will probably time out in CI.
	ctx := testutil.Context(t, testutil.WaitSuperLong)

	sqlDB := testSQLDB(t)
	db := database.New(sqlDB)

	// Migrate up to the migration before the one that aggregates usage events,
	// so the fixture events below exist pre-migration and must be back-filled
	// by the migration itself (not by the trigger the migration installs).
	next, err := migrations.Stepper(sqlDB)
	require.NoError(t, err)
	for {
		version, more, err := next()
		require.NoError(t, err)
		if !more {
			t.Fatalf("migration %d not found", migrationVersion)
		}
		if version == migrationVersion-1 {
			break
		}
	}

	locSydney, err := time.LoadLocation("Australia/Sydney")
	require.NoError(t, err)

	usageEvents := []struct {
		// The only possible event type is dc_managed_agents_v1 when this
		// migration gets applied.
		eventData []byte
		createdAt time.Time
	}{
		{
			eventData: []byte(`{"count": 41}`),
			createdAt: time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC),
		},
		{
			eventData: []byte(`{"count": 1}`),
			// 2025-01-01 in UTC
			createdAt: time.Date(2025, 1, 2, 8, 38, 57, 0, locSydney),
		},
		{
			eventData: []byte(`{"count": 1}`),
			createdAt: time.Date(2025, 1, 2, 0, 0, 0, 0, time.UTC),
		},
	}
	// Expected rollup: the first two events fall on the same UTC day
	// (41 + 1 = 42), the third lands on the following day.
	expectedDailyRows := []struct {
		day       time.Time
		usageData []byte
	}{
		{
			day:       time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC),
			usageData: []byte(`{"count": 42}`),
		},
		{
			day:       time.Date(2025, 1, 2, 0, 0, 0, 0, time.UTC),
			usageData: []byte(`{"count": 1}`),
		},
	}

	for _, usageEvent := range usageEvents {
		err := db.InsertUsageEvent(ctx, database.InsertUsageEventParams{
			ID:        uuid.New().String(),
			EventType: "dc_managed_agents_v1",
			EventData: usageEvent.eventData,
			CreatedAt: usageEvent.createdAt,
		})
		require.NoError(t, err)
	}

	// Migrate up to the migration that aggregates usage events.
	version, _, err := next()
	require.NoError(t, err)
	require.EqualValues(t, migrationVersion, version)

	// Get all of the newly created daily rows. This query is not exposed in the
	// querier interface intentionally.
	rows, err := sqlDB.QueryContext(ctx, "SELECT day, event_type, usage_data FROM usage_events_daily ORDER BY day ASC")
	require.NoError(t, err, "perform query")
	defer rows.Close()
	var out []database.UsageEventsDaily
	for rows.Next() {
		var row database.UsageEventsDaily
		err := rows.Scan(&row.Day, &row.EventType, &row.UsageData)
		require.NoError(t, err, "scan row")
		out = append(out, row)
	}

	// Verify that the daily rows match our expectations.
	require.Len(t, out, len(expectedDailyRows))
	for i, row := range out {
		require.Equal(t, "dc_managed_agents_v1", row.EventType)
		// The read row might be `+0000` rather than `UTC` specifically, so just
		// ensure it's within 1 second of the expected time.
		require.WithinDuration(t, expectedDailyRows[i].day, row.Day, time.Second)
		require.JSONEq(t, string(expectedDailyRows[i].usageData), string(row.UsageData))
	}
}
+8
View File
@@ -3778,6 +3778,14 @@ type UsageEvent struct {
FailureMessage sql.NullString `db:"failure_message" json:"failure_message"`
}
// usage_events_daily is a daily rollup of usage events. It stores the total usage for each event type by day.
type UsageEventsDaily struct {
	// The date of the summed usage events, always in UTC.
	Day time.Time `db:"day" json:"day"`
	// EventType mirrors usage_events.event_type (e.g. "dc_managed_agents_v1").
	EventType string `db:"event_type" json:"event_type"`
	// UsageData is the aggregated JSON payload for the day, e.g. {"count": N}
	// for counter-style events.
	UsageData json.RawMessage `db:"usage_data" json:"usage_data"`
}
type User struct {
ID uuid.UUID `db:"id" json:"id"`
Email string `db:"email" json:"email"`
+17 -2
View File
@@ -130,6 +130,11 @@ type sqlcQuerier interface {
// of the test-only in-memory database. Do not use this in new code.
DisableForeignKeysAndTriggers(ctx context.Context) error
EnqueueNotificationMessage(ctx context.Context, arg EnqueueNotificationMessageParams) error
// Firstly, collect api_keys owned by the prebuilds user that correlate
// to workspaces no longer owned by the prebuilds user.
// Next, collect api_keys that belong to the prebuilds user but have no token name.
// These were most likely created via 'coder login' as the prebuilds user.
ExpirePrebuildsAPIKeys(ctx context.Context, now time.Time) error
FavoriteWorkspace(ctx context.Context, id uuid.UUID) error
FetchMemoryResourceMonitorsByAgentID(ctx context.Context, agentID uuid.UUID) (WorkspaceAgentMemoryResourceMonitor, error)
FetchMemoryResourceMonitorsUpdatedAfter(ctx context.Context, updatedAt time.Time) ([]WorkspaceAgentMemoryResourceMonitor, error)
@@ -222,8 +227,6 @@ type sqlcQuerier interface {
GetLicenseByID(ctx context.Context, id int32) (License, error)
GetLicenses(ctx context.Context) ([]License, error)
GetLogoURL(ctx context.Context) (string, error)
// This isn't strictly a license query, but it's related to license enforcement.
GetManagedAgentCount(ctx context.Context, arg GetManagedAgentCountParams) (int64, error)
GetNotificationMessagesByStatus(ctx context.Context, arg GetNotificationMessagesByStatusParams) ([]NotificationMessage, error)
// Fetch the notification report generator log indicating recent activity.
GetNotificationReportGeneratorLogByTemplate(ctx context.Context, templateID uuid.UUID) (NotificationReportGeneratorLog, error)
@@ -306,6 +309,9 @@ type sqlcQuerier interface {
GetProvisionerLogsAfterID(ctx context.Context, arg GetProvisionerLogsAfterIDParams) ([]ProvisionerJobLog, error)
GetQuotaAllowanceForUser(ctx context.Context, arg GetQuotaAllowanceForUserParams) (int64, error)
GetQuotaConsumedForUser(ctx context.Context, arg GetQuotaConsumedForUserParams) (int64, error)
// Count regular workspaces: only those whose first successful 'start' build
// was not initiated by the prebuild system user.
GetRegularWorkspaceCreateMetrics(ctx context.Context) ([]GetRegularWorkspaceCreateMetricsRow, error)
GetReplicaByID(ctx context.Context, id uuid.UUID) (Replica, error)
GetReplicasUpdatedAfter(ctx context.Context, updatedAt time.Time) ([]Replica, error)
GetRunningPrebuiltWorkspaces(ctx context.Context) ([]GetRunningPrebuiltWorkspacesRow, error)
@@ -369,6 +375,15 @@ type sqlcQuerier interface {
GetTemplateVersionsCreatedAfter(ctx context.Context, createdAt time.Time) ([]TemplateVersion, error)
GetTemplates(ctx context.Context) ([]Template, error)
GetTemplatesWithFilter(ctx context.Context, arg GetTemplatesWithFilterParams) ([]Template, error)
// Gets the total number of managed agents created between two dates. Uses the
// aggregate table to avoid large scans or a complex index on the usage_events
// table.
//
// This has the trade off that we can't count accurately between two exact
// timestamps. The provided timestamps will be converted to UTC and truncated to
// the events that happened on and between the two dates. Both dates are
// inclusive.
GetTotalUsageDCManagedAgentsV1(ctx context.Context, arg GetTotalUsageDCManagedAgentsV1Params) (int64, error)
GetUnexpiredLicenses(ctx context.Context) ([]License, error)
// GetUserActivityInsights returns the ranking with top active users.
// The result can be filtered on template_ids, meaning only user data
+128
View File
@@ -6652,3 +6652,131 @@ func TestGetLatestWorkspaceBuildsByWorkspaceIDs(t *testing.T) {
require.Equal(t, expB.BuildNumber, b.BuildNumber, "unexpected build number")
}
}
// TestUsageEventsTrigger exercises the aggregate_usage_event trigger on the
// usage_events table: inserts must be rolled up into usage_events_daily by
// UTC day, and inserts with unknown event types must be rejected outright.
func TestUsageEventsTrigger(t *testing.T) {
	t.Parallel()

	// This is not exposed in the querier interface intentionally.
	getDailyRows := func(ctx context.Context, sqlDB *sql.DB) []database.UsageEventsDaily {
		t.Helper()
		rows, err := sqlDB.QueryContext(ctx, "SELECT day, event_type, usage_data FROM usage_events_daily ORDER BY day ASC")
		require.NoError(t, err, "perform query")
		defer rows.Close()
		var out []database.UsageEventsDaily
		for rows.Next() {
			var row database.UsageEventsDaily
			err := rows.Scan(&row.Day, &row.EventType, &row.UsageData)
			require.NoError(t, err, "scan row")
			out = append(out, row)
		}
		return out
	}

	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		ctx := testutil.Context(t, testutil.WaitLong)
		db, _, sqlDB := dbtestutil.NewDBWithSQLDB(t)

		// Assert there are no daily rows.
		rows := getDailyRows(ctx, sqlDB)
		require.Len(t, rows, 0)

		// Insert a usage event.
		err := db.InsertUsageEvent(ctx, database.InsertUsageEventParams{
			ID:        "1",
			EventType: "dc_managed_agents_v1",
			EventData: []byte(`{"count": 41}`),
			CreatedAt: time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC),
		})
		require.NoError(t, err)

		// Assert there is one daily row that contains the correct data.
		rows = getDailyRows(ctx, sqlDB)
		require.Len(t, rows, 1)
		require.Equal(t, "dc_managed_agents_v1", rows[0].EventType)
		require.JSONEq(t, `{"count": 41}`, string(rows[0].UsageData))
		// The read row might be `+0000` rather than `UTC` specifically, so just
		// ensure it's within 1 second of the expected time.
		require.WithinDuration(t, time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC), rows[0].Day, time.Second)

		// Insert a new usage event on the same UTC day, should increment the count.
		locSydney, err := time.LoadLocation("Australia/Sydney")
		require.NoError(t, err)
		err = db.InsertUsageEvent(ctx, database.InsertUsageEventParams{
			ID:        "2",
			EventType: "dc_managed_agents_v1",
			EventData: []byte(`{"count": 1}`),
			// Insert it at a random point during the same day. Sydney is +1000 or
			// +1100, so 8am in Sydney is the previous day in UTC.
			CreatedAt: time.Date(2025, 1, 2, 8, 38, 57, 0, locSydney),
		})
		require.NoError(t, err)

		// There should still be only one daily row with the incremented count.
		rows = getDailyRows(ctx, sqlDB)
		require.Len(t, rows, 1)
		require.Equal(t, "dc_managed_agents_v1", rows[0].EventType)
		require.JSONEq(t, `{"count": 42}`, string(rows[0].UsageData))
		require.WithinDuration(t, time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC), rows[0].Day, time.Second)

		// TODO: when we have a new event type, we should test that adding an
		// event with a different event type on the same day creates a new daily
		// row.

		// Insert a new usage event on a different day, should create a new daily
		// row.
		err = db.InsertUsageEvent(ctx, database.InsertUsageEventParams{
			ID:        "3",
			EventType: "dc_managed_agents_v1",
			EventData: []byte(`{"count": 1}`),
			CreatedAt: time.Date(2025, 1, 2, 0, 0, 0, 0, time.UTC),
		})
		require.NoError(t, err)

		// There should now be two daily rows.
		rows = getDailyRows(ctx, sqlDB)
		require.Len(t, rows, 2)
		// Output is sorted by day ascending, so the first row should be the
		// previous day's row.
		require.Equal(t, "dc_managed_agents_v1", rows[0].EventType)
		require.JSONEq(t, `{"count": 42}`, string(rows[0].UsageData))
		require.WithinDuration(t, time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC), rows[0].Day, time.Second)
		require.Equal(t, "dc_managed_agents_v1", rows[1].EventType)
		require.JSONEq(t, `{"count": 1}`, string(rows[1].UsageData))
		require.WithinDuration(t, time.Date(2025, 1, 2, 0, 0, 0, 0, time.UTC), rows[1].Day, time.Second)
	})

	t.Run("UnknownEventType", func(t *testing.T) {
		t.Parallel()

		ctx := testutil.Context(t, testutil.WaitLong)
		db, _, sqlDB := dbtestutil.NewDBWithSQLDB(t)

		// Relax the usage_events.event_type check constraint to see what
		// happens when we insert a usage event that the trigger doesn't know
		// about.
		_, err := sqlDB.ExecContext(ctx, "ALTER TABLE usage_events DROP CONSTRAINT usage_event_type_check")
		require.NoError(t, err)

		// Insert a usage event with an unknown event type.
		err = db.InsertUsageEvent(ctx, database.InsertUsageEventParams{
			ID:        "broken",
			EventType: "dean's cool event",
			EventData: []byte(`{"my": "cool json"}`),
			CreatedAt: time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC),
		})
		require.ErrorContains(t, err, "Unhandled usage event type in aggregate_usage_event")

		// The event should've been blocked. The trigger raised an exception,
		// which rolls back the triggering INSERT as well.
		var count int
		err = sqlDB.QueryRowContext(ctx, "SELECT COUNT(*) FROM usage_events WHERE id = 'broken'").Scan(&count)
		require.NoError(t, err)
		require.Equal(t, 0, count)

		// We should not have any daily rows.
		rows := getDailyRows(ctx, sqlDB)
		require.Len(t, rows, 0)
	})
}
+146 -41
View File
@@ -148,6 +148,46 @@ func (q *sqlQuerier) DeleteApplicationConnectAPIKeysByUserID(ctx context.Context
return err
}
const expirePrebuildsAPIKeys = `-- name: ExpirePrebuildsAPIKeys :exec
WITH unexpired_prebuilds_workspace_session_tokens AS (
SELECT id, SUBSTRING(token_name FROM 38 FOR 36)::uuid AS workspace_id
FROM api_keys
WHERE user_id = 'c42fdf75-3097-471c-8c33-fb52454d81c0'::uuid
AND expires_at > $1::timestamptz
AND token_name SIMILAR TO 'c42fdf75-3097-471c-8c33-fb52454d81c0_[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}_session_token'
),
stale_prebuilds_workspace_session_tokens AS (
SELECT upwst.id
FROM unexpired_prebuilds_workspace_session_tokens upwst
LEFT JOIN workspaces w
ON w.id = upwst.workspace_id
WHERE w.owner_id <> 'c42fdf75-3097-471c-8c33-fb52454d81c0'::uuid
),
unnamed_prebuilds_api_keys AS (
SELECT id
FROM api_keys
WHERE user_id = 'c42fdf75-3097-471c-8c33-fb52454d81c0'::uuid
AND token_name = ''
AND expires_at > $1::timestamptz
)
UPDATE api_keys
SET expires_at = $1::timestamptz
WHERE id IN (
SELECT id FROM stale_prebuilds_workspace_session_tokens
UNION
SELECT id FROM unnamed_prebuilds_api_keys
)
`
// Firstly, collect api_keys owned by the prebuilds user that correlate
// to workspaces no longer owned by the prebuilds user.
// Next, collect api_keys that belong to the prebuilds user but have no token name.
// These were most likely created via 'coder login' as the prebuilds user.
func (q *sqlQuerier) ExpirePrebuildsAPIKeys(ctx context.Context, now time.Time) error {
	// The UPDATE's result set is not needed; only surface execution errors.
	if _, execErr := q.db.ExecContext(ctx, expirePrebuildsAPIKeys, now); execErr != nil {
		return execErr
	}
	return nil
}
const getAPIKeyByID = `-- name: GetAPIKeyByID :one
SELECT
id, hashed_secret, user_id, last_used, expires_at, created_at, updated_at, login_type, lifetime_seconds, ip_address, scope, token_name
@@ -4334,44 +4374,6 @@ func (q *sqlQuerier) GetLicenses(ctx context.Context) ([]License, error) {
return items, nil
}
const getManagedAgentCount = `-- name: GetManagedAgentCount :one
SELECT
COUNT(DISTINCT wb.id) AS count
FROM
workspace_builds AS wb
JOIN
provisioner_jobs AS pj
ON
wb.job_id = pj.id
WHERE
wb.transition = 'start'::workspace_transition
AND wb.has_ai_task = true
-- Only count jobs that are pending, running or succeeded. Other statuses
-- like cancel(ed|ing), failed or unknown are not considered as managed
-- agent usage. These workspace builds are typically unusable anyway.
AND pj.job_status IN (
'pending'::provisioner_job_status,
'running'::provisioner_job_status,
'succeeded'::provisioner_job_status
)
-- Jobs are counted at the time they are created, not when they are
-- completed, as pending jobs haven't completed yet.
AND wb.created_at BETWEEN $1::timestamptz AND $2::timestamptz
`
type GetManagedAgentCountParams struct {
StartTime time.Time `db:"start_time" json:"start_time"`
EndTime time.Time `db:"end_time" json:"end_time"`
}
// This isn't strictly a license query, but it's related to license enforcement.
func (q *sqlQuerier) GetManagedAgentCount(ctx context.Context, arg GetManagedAgentCountParams) (int64, error) {
row := q.db.QueryRowContext(ctx, getManagedAgentCount, arg.StartTime, arg.EndTime)
var count int64
err := row.Scan(&count)
return count, err
}
const getUnexpiredLicenses = `-- name: GetUnexpiredLicenses :many
SELECT id, uploaded_at, jwt, exp, uuid
FROM licenses
@@ -7301,7 +7303,7 @@ const getPrebuildMetrics = `-- name: GetPrebuildMetrics :many
SELECT
t.name as template_name,
tvp.name as preset_name,
o.name as organization_name,
o.name as organization_name,
COUNT(*) as created_count,
COUNT(*) FILTER (WHERE pj.job_status = 'failed'::provisioner_job_status) as failed_count,
COUNT(*) FILTER (
@@ -13626,6 +13628,40 @@ func (q *sqlQuerier) DisableForeignKeysAndTriggers(ctx context.Context) error {
return err
}
const getTotalUsageDCManagedAgentsV1 = `-- name: GetTotalUsageDCManagedAgentsV1 :one
SELECT
-- The first cast is necessary since you can't sum strings, and the second
-- cast is necessary to make sqlc happy.
COALESCE(SUM((usage_data->>'count')::bigint), 0)::bigint AS total_count
FROM
usage_events_daily
WHERE
event_type = 'dc_managed_agents_v1'
-- Parentheses are necessary to avoid sqlc from generating an extra
-- argument.
AND day BETWEEN date_trunc('day', ($1::timestamptz) AT TIME ZONE 'UTC')::date AND date_trunc('day', ($2::timestamptz) AT TIME ZONE 'UTC')::date
`
type GetTotalUsageDCManagedAgentsV1Params struct {
StartDate time.Time `db:"start_date" json:"start_date"`
EndDate time.Time `db:"end_date" json:"end_date"`
}
// Gets the total number of managed agents created between two dates. Uses the
// aggregate table to avoid large scans or a complex index on the usage_events
// table.
//
// This has the trade off that we can't count accurately between two exact
// timestamps. The provided timestamps will be converted to UTC and truncated to
// the events that happened on and between the two dates. Both dates are
// inclusive.
func (q *sqlQuerier) GetTotalUsageDCManagedAgentsV1(ctx context.Context, arg GetTotalUsageDCManagedAgentsV1Params) (int64, error) {
	var totalCount int64
	// Single-row aggregate query; scan the summed count directly.
	err := q.db.QueryRowContext(ctx, getTotalUsageDCManagedAgentsV1, arg.StartDate, arg.EndDate).Scan(&totalCount)
	return totalCount, err
}
const insertUsageEvent = `-- name: InsertUsageEvent :exec
INSERT INTO
usage_events (
@@ -13685,7 +13721,7 @@ WITH usage_events AS (
-- than an hour ago. This is so we can retry publishing
-- events where the replica exited or couldn't update the
-- row.
-- The parenthesis around @now::timestamptz are necessary to
-- The parentheses around @now::timestamptz are necessary to
-- avoid sqlc from generating an extra argument.
OR potential_event.publish_started_at < ($1::timestamptz) - INTERVAL '1 hour'
)
@@ -13693,7 +13729,7 @@ WITH usage_events AS (
-- always permanently reject these events anyways. This is to
-- avoid duplicate events being billed to customers, as
-- Metronome will only deduplicate events within 34 days.
-- Also, the same parenthesis thing here as above.
-- Also, the same parentheses thing here as above.
AND potential_event.created_at > ($1::timestamptz) - INTERVAL '30 days'
ORDER BY potential_event.created_at ASC
FOR UPDATE SKIP LOCKED
@@ -20123,6 +20159,75 @@ func (q *sqlQuerier) GetDeploymentWorkspaceStats(ctx context.Context) (GetDeploy
return i, err
}
const getRegularWorkspaceCreateMetrics = `-- name: GetRegularWorkspaceCreateMetrics :many
WITH first_success_build AS (
-- Earliest successful 'start' build per workspace
SELECT DISTINCT ON (wb.workspace_id)
wb.workspace_id,
wb.template_version_preset_id,
wb.initiator_id
FROM workspace_builds wb
JOIN provisioner_jobs pj ON pj.id = wb.job_id
WHERE
wb.transition = 'start'::workspace_transition
AND pj.job_status = 'succeeded'::provisioner_job_status
ORDER BY wb.workspace_id, wb.build_number, wb.id
)
SELECT
t.name AS template_name,
COALESCE(tvp.name, '') AS preset_name,
o.name AS organization_name,
COUNT(*) AS created_count
FROM first_success_build fsb
JOIN workspaces w ON w.id = fsb.workspace_id
JOIN templates t ON t.id = w.template_id
LEFT JOIN template_version_presets tvp ON tvp.id = fsb.template_version_preset_id
JOIN organizations o ON o.id = w.organization_id
WHERE
NOT t.deleted
-- Exclude workspaces whose first successful start was the prebuilds system user
AND fsb.initiator_id != 'c42fdf75-3097-471c-8c33-fb52454d81c0'::uuid
GROUP BY t.name, COALESCE(tvp.name, ''), o.name
ORDER BY t.name, preset_name, o.name
`
type GetRegularWorkspaceCreateMetricsRow struct {
TemplateName string `db:"template_name" json:"template_name"`
PresetName string `db:"preset_name" json:"preset_name"`
OrganizationName string `db:"organization_name" json:"organization_name"`
CreatedCount int64 `db:"created_count" json:"created_count"`
}
// Count regular workspaces: only those whose first successful 'start' build
// was not initiated by the prebuild system user.
func (q *sqlQuerier) GetRegularWorkspaceCreateMetrics(ctx context.Context) ([]GetRegularWorkspaceCreateMetricsRow, error) {
	rows, err := q.db.QueryContext(ctx, getRegularWorkspaceCreateMetrics)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var metrics []GetRegularWorkspaceCreateMetricsRow
	for rows.Next() {
		var row GetRegularWorkspaceCreateMetricsRow
		if err := rows.Scan(
			&row.TemplateName,
			&row.PresetName,
			&row.OrganizationName,
			&row.CreatedCount,
		); err != nil {
			return nil, err
		}
		metrics = append(metrics, row)
	}
	// Surface errors from both closing the rows and iterating them, matching
	// the sqlc-generated contract.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return metrics, nil
}
const getWorkspaceACLByID = `-- name: GetWorkspaceACLByID :one
SELECT
group_acl as groups,
+34
View File
@@ -83,3 +83,37 @@ DELETE FROM
api_keys
WHERE
user_id = $1;
-- name: ExpirePrebuildsAPIKeys :exec
-- Firstly, collect api_keys owned by the prebuilds user that correlate
-- to workspaces no longer owned by the prebuilds user.
WITH unexpired_prebuilds_workspace_session_tokens AS (
SELECT id, SUBSTRING(token_name FROM 38 FOR 36)::uuid AS workspace_id
FROM api_keys
WHERE user_id = 'c42fdf75-3097-471c-8c33-fb52454d81c0'::uuid
AND expires_at > @now::timestamptz
AND token_name SIMILAR TO 'c42fdf75-3097-471c-8c33-fb52454d81c0_[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}_session_token'
),
stale_prebuilds_workspace_session_tokens AS (
SELECT upwst.id
FROM unexpired_prebuilds_workspace_session_tokens upwst
LEFT JOIN workspaces w
ON w.id = upwst.workspace_id
WHERE w.owner_id <> 'c42fdf75-3097-471c-8c33-fb52454d81c0'::uuid
),
-- Next, collect api_keys that belong to the prebuilds user but have no token name.
-- These were most likely created via 'coder login' as the prebuilds user.
unnamed_prebuilds_api_keys AS (
SELECT id
FROM api_keys
WHERE user_id = 'c42fdf75-3097-471c-8c33-fb52454d81c0'::uuid
AND token_name = ''
AND expires_at > @now::timestamptz
)
UPDATE api_keys
SET expires_at = @now::timestamptz
WHERE id IN (
SELECT id FROM stale_prebuilds_workspace_session_tokens
UNION
SELECT id FROM unnamed_prebuilds_api_keys
);
-25
View File
@@ -35,28 +35,3 @@ DELETE
FROM licenses
WHERE id = $1
RETURNING id;
-- name: GetManagedAgentCount :one
-- This isn't strictly a license query, but it's related to license enforcement.
SELECT
COUNT(DISTINCT wb.id) AS count
FROM
workspace_builds AS wb
JOIN
provisioner_jobs AS pj
ON
wb.job_id = pj.id
WHERE
wb.transition = 'start'::workspace_transition
AND wb.has_ai_task = true
-- Only count jobs that are pending, running or succeeded. Other statuses
-- like cancel(ed|ing), failed or unknown are not considered as managed
-- agent usage. These workspace builds are typically unusable anyway.
AND pj.job_status IN (
'pending'::provisioner_job_status,
'running'::provisioner_job_status,
'succeeded'::provisioner_job_status
)
-- Jobs are counted at the time they are created, not when they are
-- completed, as pending jobs haven't completed yet.
AND wb.created_at BETWEEN @start_time::timestamptz AND @end_time::timestamptz;
+1 -1
View File
@@ -230,7 +230,7 @@ HAVING COUNT(*) = @hard_limit::bigint;
SELECT
t.name as template_name,
tvp.name as preset_name,
o.name as organization_name,
o.name as organization_name,
COUNT(*) as created_count,
COUNT(*) FILTER (WHERE pj.job_status = 'failed'::provisioner_job_status) as failed_count,
COUNT(*) FILTER (
+23 -2
View File
@@ -39,7 +39,7 @@ WITH usage_events AS (
-- than an hour ago. This is so we can retry publishing
-- events where the replica exited or couldn't update the
-- row.
-- The parenthesis around @now::timestamptz are necessary to
-- The parentheses around @now::timestamptz are necessary to
-- avoid sqlc from generating an extra argument.
OR potential_event.publish_started_at < (@now::timestamptz) - INTERVAL '1 hour'
)
@@ -47,7 +47,7 @@ WITH usage_events AS (
-- always permanently reject these events anyways. This is to
-- avoid duplicate events being billed to customers, as
-- Metronome will only deduplicate events within 34 days.
-- Also, the same parenthesis thing here as above.
-- Also, the same parentheses thing here as above.
AND potential_event.created_at > (@now::timestamptz) - INTERVAL '30 days'
ORDER BY potential_event.created_at ASC
FOR UPDATE SKIP LOCKED
@@ -84,3 +84,24 @@ WHERE
-- zero, so this is the best we can do.
AND cardinality(@ids::text[]) = cardinality(@failure_messages::text[])
AND cardinality(@ids::text[]) = cardinality(@set_published_ats::boolean[]);
-- name: GetTotalUsageDCManagedAgentsV1 :one
-- Gets the total number of managed agents created between two dates. Uses the
-- aggregate table to avoid large scans or a complex index on the usage_events
-- table.
--
-- This has the trade off that we can't count accurately between two exact
-- timestamps. The provided timestamps will be converted to UTC and truncated to
-- the events that happened on and between the two dates. Both dates are
-- inclusive.
SELECT
-- The first cast is necessary since you can't sum strings, and the second
-- cast is necessary to make sqlc happy.
COALESCE(SUM((usage_data->>'count')::bigint), 0)::bigint AS total_count
FROM
usage_events_daily
WHERE
event_type = 'dc_managed_agents_v1'
-- Parentheses are necessary to avoid sqlc from generating an extra
-- argument.
AND day BETWEEN date_trunc('day', (@start_date::timestamptz) AT TIME ZONE 'UTC')::date AND date_trunc('day', (@end_date::timestamptz) AT TIME ZONE 'UTC')::date;
+33
View File
@@ -923,3 +923,36 @@ SET
user_acl = @user_acl
WHERE
id = @id;
-- name: GetRegularWorkspaceCreateMetrics :many
-- Count regular workspaces: only those whose first successful 'start' build
-- was not initiated by the prebuild system user.
WITH first_success_build AS (
-- Earliest successful 'start' build per workspace
SELECT DISTINCT ON (wb.workspace_id)
wb.workspace_id,
wb.template_version_preset_id,
wb.initiator_id
FROM workspace_builds wb
JOIN provisioner_jobs pj ON pj.id = wb.job_id
WHERE
wb.transition = 'start'::workspace_transition
AND pj.job_status = 'succeeded'::provisioner_job_status
ORDER BY wb.workspace_id, wb.build_number, wb.id
)
SELECT
t.name AS template_name,
COALESCE(tvp.name, '') AS preset_name,
o.name AS organization_name,
COUNT(*) AS created_count
FROM first_success_build fsb
JOIN workspaces w ON w.id = fsb.workspace_id
JOIN templates t ON t.id = w.template_id
LEFT JOIN template_version_presets tvp ON tvp.id = fsb.template_version_preset_id
JOIN organizations o ON o.id = w.organization_id
WHERE
NOT t.deleted
-- Exclude workspaces whose first successful start was the prebuilds system user
AND fsb.initiator_id != 'c42fdf75-3097-471c-8c33-fb52454d81c0'::uuid
GROUP BY t.name, COALESCE(tvp.name, ''), o.name
ORDER BY t.name, preset_name, o.name;
+1
View File
@@ -67,6 +67,7 @@ const (
UniqueTemplateVersionsPkey UniqueConstraint = "template_versions_pkey" // ALTER TABLE ONLY template_versions ADD CONSTRAINT template_versions_pkey PRIMARY KEY (id);
UniqueTemplateVersionsTemplateIDNameKey UniqueConstraint = "template_versions_template_id_name_key" // ALTER TABLE ONLY template_versions ADD CONSTRAINT template_versions_template_id_name_key UNIQUE (template_id, name);
UniqueTemplatesPkey UniqueConstraint = "templates_pkey" // ALTER TABLE ONLY templates ADD CONSTRAINT templates_pkey PRIMARY KEY (id);
UniqueUsageEventsDailyPkey UniqueConstraint = "usage_events_daily_pkey" // ALTER TABLE ONLY usage_events_daily ADD CONSTRAINT usage_events_daily_pkey PRIMARY KEY (day, event_type);
UniqueUsageEventsPkey UniqueConstraint = "usage_events_pkey" // ALTER TABLE ONLY usage_events ADD CONSTRAINT usage_events_pkey PRIMARY KEY (id);
UniqueUserConfigsPkey UniqueConstraint = "user_configs_pkey" // ALTER TABLE ONLY user_configs ADD CONSTRAINT user_configs_pkey PRIMARY KEY (user_id, key);
UniqueUserDeletedPkey UniqueConstraint = "user_deleted_pkey" // ALTER TABLE ONLY user_deleted ADD CONSTRAINT user_deleted_pkey PRIMARY KEY (id);
+8 -9
View File
@@ -261,20 +261,19 @@ func TestCache_BuildTime(t *testing.T) {
wantTransition := codersdk.WorkspaceTransition(tt.args.transition)
require.Eventuallyf(t, func() bool {
stats := cache.TemplateBuildTimeStats(template.ID)
return stats[wantTransition] != codersdk.TransitionStats{}
ts := stats[wantTransition]
return ts.P50 != nil && *ts.P50 == tt.want.buildTimeMs
}, testutil.WaitLong, testutil.IntervalMedium,
"BuildTime never populated",
"P50 never reached expected value for %v", wantTransition,
)
gotStats = cache.TemplateBuildTimeStats(template.ID)
for transition, stats := range gotStats {
gotStats := cache.TemplateBuildTimeStats(template.ID)
for transition, ts := range gotStats {
if transition == wantTransition {
require.Equal(t, tt.want.buildTimeMs, *stats.P50)
} else {
require.Empty(
t, stats, "%v", transition,
)
// Checked above
continue
}
require.Empty(t, ts, "%v", transition)
}
} else {
var stats codersdk.TemplateBuildTimeStats
+10 -6
View File
@@ -194,15 +194,19 @@ func verifyCollectedMetrics(t *testing.T, expected []*agentproto.Stats_Metric, a
var d dto.Metric
err := actual[i].Write(&d)
require.NoError(t, err)
assert.NoError(t, err)
switch e.Type {
case agentproto.Stats_Metric_COUNTER:
require.Equal(t, e.Value, d.Counter.GetValue())
if e.Value != d.Counter.GetValue() {
return false
}
case agentproto.Stats_Metric_GAUGE:
require.Equal(t, e.Value, d.Gauge.GetValue())
if e.Value != d.Gauge.GetValue() {
return false
}
default:
require.Failf(t, "unsupported type: %s", string(e.Type))
assert.Failf(t, "unsupported type: %s", string(e.Type))
}
expectedLabels := make([]*agentproto.Stats_Metric_Label, len(e.Labels))
@@ -215,7 +219,7 @@ func verifyCollectedMetrics(t *testing.T, expected []*agentproto.Stats_Metric, a
}
sort.Slice(expectedLabels, sortFn)
sort.Slice(dtoLabels, sortFn)
require.Equal(t, expectedLabels, dtoLabels, d.String())
assert.Equal(t, expectedLabels, dtoLabels, d.String())
}
return true
}
@@ -229,7 +233,7 @@ func prometheusMetricToString(t *testing.T, m prometheus.Metric) string {
var d dto.Metric
err := m.Write(&d)
require.NoError(t, err)
assert.NoError(t, err)
dtoLabels := asMetricAgentLabels(d.GetLabel())
sort.Slice(dtoLabels, func(i, j int) bool {
return dtoLabels[i].Name < dtoLabels[j].Name
@@ -165,6 +165,18 @@ func Workspaces(ctx context.Context, logger slog.Logger, registerer prometheus.R
return nil, err
}
workspaceCreationTotal := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "coderd",
Name: "workspace_creation_total",
Help: "Total regular (non-prebuilt) workspace creations by organization, template, and preset.",
},
[]string{"organization_name", "template_name", "preset_name"},
)
if err := registerer.Register(workspaceCreationTotal); err != nil {
return nil, err
}
ctx, cancelFunc := context.WithCancel(ctx)
done := make(chan struct{})
@@ -200,6 +212,27 @@ func Workspaces(ctx context.Context, logger slog.Logger, registerer prometheus.R
string(w.LatestBuildTransition),
).Add(1)
}
// Update regular workspaces (without a prebuild transition) creation counter
regularWorkspaces, err := db.GetRegularWorkspaceCreateMetrics(ctx)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
workspaceCreationTotal.Reset()
} else {
logger.Warn(ctx, "failed to load regular workspaces for metrics", slog.Error(err))
}
return
}
workspaceCreationTotal.Reset()
for _, regularWorkspace := range regularWorkspaces {
workspaceCreationTotal.WithLabelValues(
regularWorkspace.OrganizationName,
regularWorkspace.TemplateName,
regularWorkspace.PresetName,
).Add(float64(regularWorkspace.CreatedCount))
}
}
// Use time.Nanosecond to force an initial tick. It will be reset to the
@@ -424,6 +424,107 @@ func TestWorkspaceLatestBuildStatuses(t *testing.T) {
}
}
// TestWorkspaceCreationTotal verifies the coderd_workspace_creation_total
// counter exposed by prometheusmetrics.Workspaces: it should count only
// successfully created regular workspaces, exclude prebuilt workspaces, and
// include deleted workspaces.
func TestWorkspaceCreationTotal(t *testing.T) {
	t.Parallel()
	for _, tc := range []struct {
		Name string
		// Database seeds and returns the store the metrics loop reads from.
		Database           func() database.Store
		ExpectedWorkspaces int
	}{
		{
			Name: "None",
			Database: func() database.Store {
				db, _ := dbtestutil.NewDB(t)
				return db
			},
			ExpectedWorkspaces: 0,
		},
		{
			// Should count only the successfully created workspaces
			Name: "Multiple",
			Database: func() database.Store {
				db, _ := dbtestutil.NewDB(t)
				u := dbgen.User(t, db, database.User{})
				org := dbgen.Organization(t, db, database.Organization{})
				insertTemplates(t, db, u, org)
				insertCanceled(t, db, u, org)
				insertFailed(t, db, u, org)
				insertFailed(t, db, u, org)
				insertSuccess(t, db, u, org)
				insertSuccess(t, db, u, org)
				insertSuccess(t, db, u, org)
				insertRunning(t, db, u, org)
				return db
			},
			ExpectedWorkspaces: 3,
		},
		{
			// Should not include prebuilt workspaces
			Name: "MultipleWithPrebuild",
			Database: func() database.Store {
				ctx := context.Background()
				db, _ := dbtestutil.NewDB(t)
				u := dbgen.User(t, db, database.User{})
				// The prebuilds system user is pre-seeded by the database fixture.
				prebuildUser, err := db.GetUserByID(ctx, database.PrebuildsSystemUserID)
				require.NoError(t, err)
				org := dbgen.Organization(t, db, database.Organization{})
				insertTemplates(t, db, u, org)
				insertCanceled(t, db, u, org)
				insertFailed(t, db, u, org)
				insertSuccess(t, db, u, org)
				// Initiated by the prebuilds user, so it must not be counted.
				insertSuccess(t, db, prebuildUser, org)
				insertRunning(t, db, u, org)
				return db
			},
			ExpectedWorkspaces: 1,
		},
		{
			// Should include deleted workspaces
			Name: "MultipleWithDeleted",
			Database: func() database.Store {
				db, _ := dbtestutil.NewDB(t)
				u := dbgen.User(t, db, database.User{})
				org := dbgen.Organization(t, db, database.Organization{})
				insertTemplates(t, db, u, org)
				insertCanceled(t, db, u, org)
				insertFailed(t, db, u, org)
				insertSuccess(t, db, u, org)
				insertRunning(t, db, u, org)
				insertDeleted(t, db, u, org)
				return db
			},
			ExpectedWorkspaces: 2,
		},
	} {
		t.Run(tc.Name, func(t *testing.T) {
			t.Parallel()
			registry := prometheus.NewRegistry()
			closeFunc, err := prometheusmetrics.Workspaces(context.Background(), testutil.Logger(t), registry, tc.Database(), testutil.IntervalFast)
			require.NoError(t, err)
			t.Cleanup(closeFunc)
			// The metrics loop updates asynchronously; poll the registry until
			// the summed counter matches the expectation.
			require.Eventually(t, func() bool {
				metrics, err := registry.Gather()
				assert.NoError(t, err)
				sum := 0
				for _, m := range metrics {
					if m.GetName() != "coderd_workspace_creation_total" {
						continue
					}
					for _, metric := range m.Metric {
						sum += int(metric.GetCounter().GetValue())
					}
				}
				t.Logf("count = %d, expected == %d", sum, tc.ExpectedWorkspaces)
				return sum == tc.ExpectedWorkspaces
			}, testutil.WaitShort, testutil.IntervalFast)
		})
	}
}
func TestAgents(t *testing.T) {
t.Parallel()
@@ -897,6 +998,7 @@ func insertRunning(t *testing.T, db database.Store, u database.User, org databas
Transition: database.WorkspaceTransitionStart,
Reason: database.BuildReasonInitiator,
TemplateVersionID: templateVersionID,
InitiatorID: u.ID,
})
require.NoError(t, err)
// This marks the job as started.
+158
View File
@@ -0,0 +1,158 @@
package provisionerdserver
import (
"context"
"time"
"github.com/prometheus/client_golang/prometheus"
"cdr.dev/slog"
)
// Metrics holds the Prometheus collectors used by the provisioner daemon
// server to record workspace build timings.
type Metrics struct {
	logger slog.Logger
	// Time to create a workspace (regular or prebuild), labeled by
	// organization, template, preset, and type.
	workspaceCreationTimings *prometheus.HistogramVec
	// Time to claim an existing prebuilt workspace, labeled by
	// organization, template, and preset.
	workspaceClaimTimings *prometheus.HistogramVec
}

// WorkspaceTimingType classifies which timing metric a workspace build
// contributes to. See getWorkspaceTimingType.
type WorkspaceTimingType int

const (
	// Unsupported means the build matches none of the tracked timing types.
	Unsupported WorkspaceTimingType = iota
	// WorkspaceCreation is the first build of a regular (non-prebuilt) workspace.
	WorkspaceCreation
	// PrebuildCreation is the build that creates a prebuilt workspace.
	PrebuildCreation
	// PrebuildClaim is the build that claims an existing prebuilt workspace.
	PrebuildClaim
)

// Values for the "type" label on workspaceCreationTimings.
const (
	workspaceTypeRegular  = "regular"
	workspaceTypePrebuild = "prebuild"
)

// WorkspaceTimingFlags describes the kind of workspace build being recorded.
type WorkspaceTimingFlags struct {
	IsPrebuild   bool // build creates a prebuilt workspace
	IsClaim      bool // build claims an existing prebuilt workspace
	IsFirstBuild bool // first build of the workspace
}
// NewMetrics constructs the provisioner daemon server metrics. The returned
// Metrics must be registered with a prometheus.Registerer via
// (*Metrics).Register before any observations are recorded.
func NewMetrics(logger slog.Logger) *Metrics {
	log := logger.Named("provisionerd_server_metrics")
	return &Metrics{
		logger: log,
		workspaceCreationTimings: prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Namespace: "coderd",
			Name:      "workspace_creation_duration_seconds",
			Help:      "Time to create a workspace by organization, template, preset, and type (regular or prebuild).",
			Buckets: []float64{
				1, // 1s
				10,
				30,
				60, // 1min
				60 * 5,
				60 * 10,
				60 * 30, // 30min
				60 * 60, // 1hr
			},
			NativeHistogramBucketFactor: 1.1,
			// Max number of native buckets kept at once to bound memory.
			NativeHistogramMaxBucketNumber: 100,
			// Merge/flush small buckets periodically to control churn.
			NativeHistogramMinResetDuration: time.Hour,
			// Treat tiny values as zero (helps with noisy near-zero latencies).
			NativeHistogramZeroThreshold:    0,
			NativeHistogramMaxZeroThreshold: 0,
		}, []string{"organization_name", "template_name", "preset_name", "type"}),
		workspaceClaimTimings: prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Namespace: "coderd",
			Name:      "prebuilt_workspace_claim_duration_seconds",
			Help:      "Time to claim a prebuilt workspace by organization, template, and preset.",
			// Higher resolution between 1s and 5m to show typical prebuild
			// claim times. Cap at 5m since longer claims diminish prebuild value.
			Buckets: []float64{
				1, // 1s
				5,
				10,
				20,
				30,
				60,  // 1m
				120, // 2m
				180, // 3m
				240, // 4m
				300, // 5m
			},
			NativeHistogramBucketFactor: 1.1,
			// Max number of native buckets kept at once to bound memory.
			NativeHistogramMaxBucketNumber: 100,
			// Merge/flush small buckets periodically to control churn.
			NativeHistogramMinResetDuration: time.Hour,
			// Treat tiny values as zero (helps with noisy near-zero latencies).
			NativeHistogramZeroThreshold:    0,
			NativeHistogramMaxZeroThreshold: 0,
		}, []string{"organization_name", "template_name", "preset_name"}),
	}
}
// Register registers every collector owned by m with reg, returning the
// first registration error encountered (if any).
func (m *Metrics) Register(reg prometheus.Registerer) error {
	collectors := []prometheus.Collector{
		m.workspaceCreationTimings,
		m.workspaceClaimTimings,
	}
	for _, collector := range collectors {
		if err := reg.Register(collector); err != nil {
			return err
		}
	}
	return nil
}
// getWorkspaceTimingType classifies a workspace build:
//   - PrebuildCreation: creation of a prebuilt workspace
//   - PrebuildClaim: claim of an existing prebuilt workspace
//   - WorkspaceCreation: first build of a regular (non-prebuilt) workspace
//
// Note: order matters. Creating a prebuilt workspace is also a first build
// (IsPrebuild && IsFirstBuild). IsPrebuild is checked before IsFirstBuild so
// prebuilds take precedence. This is the only case where two flags can be true.
func getWorkspaceTimingType(flags WorkspaceTimingFlags) WorkspaceTimingType {
	if flags.IsPrebuild {
		return PrebuildCreation
	}
	if flags.IsClaim {
		return PrebuildClaim
	}
	if flags.IsFirstBuild {
		return WorkspaceCreation
	}
	return Unsupported
}
// UpdateWorkspaceTimingsMetrics updates the workspace timing metrics based on
// the workspace build type derived from flags. buildTime is the build duration
// in seconds. Builds that match no tracked type (see getWorkspaceTimingType)
// are logged at warn level and dropped.
func (m *Metrics) UpdateWorkspaceTimingsMetrics(
	ctx context.Context,
	flags WorkspaceTimingFlags,
	organizationName string,
	templateName string,
	presetName string,
	buildTime float64,
) {
	m.logger.Debug(ctx, "update workspace timings metrics",
		"organizationName", organizationName,
		"templateName", templateName,
		"presetName", presetName,
		"isPrebuild", flags.IsPrebuild,
		"isClaim", flags.IsClaim,
		"isWorkspaceFirstBuild", flags.IsFirstBuild)
	workspaceTimingType := getWorkspaceTimingType(flags)
	switch workspaceTimingType {
	case WorkspaceCreation:
		// Regular workspace creation (without prebuild pool)
		m.workspaceCreationTimings.
			WithLabelValues(organizationName, templateName, presetName, workspaceTypeRegular).Observe(buildTime)
	case PrebuildCreation:
		// Prebuilt workspace creation duration
		m.workspaceCreationTimings.
			WithLabelValues(organizationName, templateName, presetName, workspaceTypePrebuild).Observe(buildTime)
	case PrebuildClaim:
		// Prebuilt workspace claim duration
		m.workspaceClaimTimings.
			WithLabelValues(organizationName, templateName, presetName).Observe(buildTime)
	default:
		// Not a first build, prebuild creation, or prebuild claim: nothing to record.
		m.logger.Warn(ctx, "unsupported workspace timing flags")
	}
}
@@ -129,6 +129,8 @@ type server struct {
heartbeatInterval time.Duration
heartbeatFn func(ctx context.Context) error
metrics *Metrics
}
// We use the null byte (0x00) in generating a canonical map key for tags, so
@@ -178,6 +180,7 @@ func NewServer(
options Options,
enqueuer notifications.Enqueuer,
prebuildsOrchestrator *atomic.Pointer[prebuilds.ReconciliationOrchestrator],
metrics *Metrics,
) (proto.DRPCProvisionerDaemonServer, error) {
// Fail-fast if pointers are nil
if lifecycleCtx == nil {
@@ -248,6 +251,7 @@ func NewServer(
heartbeatFn: options.HeartbeatFn,
PrebuildsOrchestrator: prebuildsOrchestrator,
UsageInserter: usageInserter,
metrics: metrics,
}
if s.heartbeatFn == nil {
@@ -1995,6 +1999,37 @@ func (s *server) completeWorkspaceBuildJob(ctx context.Context, job database.Pro
sidebarAppID = uuid.NullUUID{UUID: id, Valid: true}
}
// This is a hacky workaround for the issue with tasks 'disappearing' on stop:
// reuse has_ai_task and sidebar_app_id from the previous build.
// This workaround should be removed as soon as possible.
if workspaceBuild.Transition == database.WorkspaceTransitionStop && workspaceBuild.BuildNumber > 1 {
if prevBuild, err := s.Database.GetWorkspaceBuildByWorkspaceIDAndBuildNumber(ctx, database.GetWorkspaceBuildByWorkspaceIDAndBuildNumberParams{
WorkspaceID: workspaceBuild.WorkspaceID,
BuildNumber: workspaceBuild.BuildNumber - 1,
}); err == nil {
hasAITask = prevBuild.HasAITask.Bool
sidebarAppID = prevBuild.AITaskSidebarAppID
warnUnknownSidebarAppID = false
s.Logger.Debug(ctx, "task workaround: reused has_ai_task and sidebar_app_id from previous build to keep track of task",
slog.F("job_id", job.ID.String()),
slog.F("build_number", prevBuild.BuildNumber),
slog.F("workspace_id", workspace.ID),
slog.F("workspace_build_id", workspaceBuild.ID),
slog.F("transition", string(workspaceBuild.Transition)),
slog.F("sidebar_app_id", sidebarAppID.UUID),
slog.F("has_ai_task", hasAITask),
)
} else {
s.Logger.Error(ctx, "task workaround: tracking via has_ai_task and sidebar_app from previous build failed",
slog.Error(err),
slog.F("job_id", job.ID.String()),
slog.F("workspace_id", workspace.ID),
slog.F("workspace_build_id", workspaceBuild.ID),
slog.F("transition", string(workspaceBuild.Transition)),
)
}
}
if warnUnknownSidebarAppID {
// Ref: https://github.com/coder/coder/issues/18776
// This can happen for a number of reasons:
@@ -2250,6 +2285,50 @@ func (s *server) completeWorkspaceBuildJob(ctx context.Context, job database.Pro
}
}
// Update workspace (regular and prebuild) timing metrics
if s.metrics != nil {
// Only consider 'start' workspace builds
if workspaceBuild.Transition == database.WorkspaceTransitionStart {
// Get the updated job to report the metrics with correct data
updatedJob, err := s.Database.GetProvisionerJobByID(ctx, jobID)
if err != nil {
s.Logger.Error(ctx, "get updated job from database", slog.Error(err))
} else
// Only consider 'succeeded' provisioner jobs
if updatedJob.JobStatus == database.ProvisionerJobStatusSucceeded {
presetName := ""
if workspaceBuild.TemplateVersionPresetID.Valid {
preset, err := s.Database.GetPresetByID(ctx, workspaceBuild.TemplateVersionPresetID.UUID)
if err != nil {
if !errors.Is(err, sql.ErrNoRows) {
s.Logger.Error(ctx, "get preset by ID for workspace timing metrics", slog.Error(err))
}
} else {
presetName = preset.Name
}
}
buildTime := updatedJob.CompletedAt.Time.Sub(updatedJob.StartedAt.Time).Seconds()
s.metrics.UpdateWorkspaceTimingsMetrics(
ctx,
WorkspaceTimingFlags{
// Is a prebuilt workspace creation build
IsPrebuild: input.PrebuiltWorkspaceBuildStage.IsPrebuild(),
// Is a prebuilt workspace claim build
IsClaim: input.PrebuiltWorkspaceBuildStage.IsPrebuiltWorkspaceClaim(),
// Is a regular workspace creation build
// Only consider the first build number for regular workspaces
IsFirstBuild: workspaceBuild.BuildNumber == 1,
},
workspace.OrganizationName,
workspace.TemplateName,
presetName,
buildTime,
)
}
}
}
msg, err := json.Marshal(wspubsub.WorkspaceEvent{
Kind: wspubsub.WorkspaceEventKindStateChange,
WorkspaceID: workspace.ID,
@@ -2876,15 +2955,23 @@ func InsertWorkspaceResource(ctx context.Context, db database.Store, jobID uuid.
return nil
}
func workspaceSessionTokenName(workspace database.Workspace) string {
return fmt.Sprintf("%s_%s_session_token", workspace.OwnerID, workspace.ID)
func WorkspaceSessionTokenName(ownerID, workspaceID uuid.UUID) string {
return fmt.Sprintf("%s_%s_session_token", ownerID, workspaceID)
}
func (s *server) regenerateSessionToken(ctx context.Context, user database.User, workspace database.Workspace) (string, error) {
// NOTE(Cian): Once a workspace is claimed, there's no reason for the session token to be valid any longer.
// Not generating any session token at all for a system user may unintentionally break existing templates,
// which we want to avoid. If there's no session token for the workspace belonging to the prebuilds user,
// then there's nothing for us to worry about here.
// TODO(Cian): Update this to handle _all_ system users. At the time of writing, only one system user exists.
if err := deleteSessionTokenForUserAndWorkspace(ctx, s.Database, database.PrebuildsSystemUserID, workspace.ID); err != nil && !errors.Is(err, sql.ErrNoRows) {
s.Logger.Error(ctx, "failed to delete prebuilds session token", slog.Error(err), slog.F("workspace_id", workspace.ID))
}
newkey, sessionToken, err := apikey.Generate(apikey.CreateParams{
UserID: user.ID,
LoginType: user.LoginType,
TokenName: workspaceSessionTokenName(workspace),
TokenName: WorkspaceSessionTokenName(workspace.OwnerID, workspace.ID),
DefaultLifetime: s.DeploymentValues.Sessions.DefaultTokenDuration.Value(),
LifetimeSeconds: int64(s.DeploymentValues.Sessions.MaximumTokenDuration.Value().Seconds()),
})
@@ -2912,10 +2999,14 @@ func (s *server) regenerateSessionToken(ctx context.Context, user database.User,
}
func deleteSessionToken(ctx context.Context, db database.Store, workspace database.Workspace) error {
return deleteSessionTokenForUserAndWorkspace(ctx, db, workspace.OwnerID, workspace.ID)
}
func deleteSessionTokenForUserAndWorkspace(ctx context.Context, db database.Store, userID, workspaceID uuid.UUID) error {
err := db.InTx(func(tx database.Store) error {
key, err := tx.GetAPIKeyByName(ctx, database.GetAPIKeyByNameParams{
UserID: workspace.OwnerID,
TokenName: workspaceSessionTokenName(workspace),
UserID: userID,
TokenName: WorkspaceSessionTokenName(userID, workspaceID),
})
if err == nil {
err = tx.DeleteAPIKeyByID(ctx, key.ID)
@@ -2842,9 +2842,12 @@ func TestCompleteJob(t *testing.T) {
// has_ai_task has a default value of nil, but once the workspace build completes it will have a value;
// it is set to "true" if the related template has any coder_ai_task resources defined, and its sidebar app ID
// will be set as well in that case.
// HACK(johnstcn): we also set it to "true" if any _previous_ workspace builds ever had it set to "true".
// This is to avoid tasks "disappearing" when you stop them.
t.Run("WorkspaceBuild", func(t *testing.T) {
type testcase struct {
name string
seedFunc func(context.Context, testing.TB, database.Store) error // If you need to insert other resources
transition database.WorkspaceTransition
input *proto.CompletedJob_WorkspaceBuild
expectHasAiTask bool
@@ -2944,6 +2947,17 @@ func TestCompleteJob(t *testing.T) {
expectHasAiTask: true,
expectUsageEvent: false,
},
{
name: "current build does not have ai task but previous build did",
seedFunc: seedPreviousWorkspaceStartWithAITask,
transition: database.WorkspaceTransitionStop,
input: &proto.CompletedJob_WorkspaceBuild{
AiTasks: []*sdkproto.AITask{},
Resources: []*sdkproto.Resource{},
},
expectHasAiTask: true,
expectUsageEvent: false,
},
} {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
@@ -2980,6 +2994,9 @@ func TestCompleteJob(t *testing.T) {
})
ctx := testutil.Context(t, testutil.WaitShort)
if tc.seedFunc != nil {
require.NoError(t, tc.seedFunc(ctx, t, db))
}
buildJobID := uuid.New()
wsBuildID := uuid.New()
@@ -2999,8 +3016,13 @@ func TestCompleteJob(t *testing.T) {
Tags: pd.Tags,
})
require.NoError(t, err)
var buildNum int32
if latestBuild, err := db.GetLatestWorkspaceBuildByWorkspaceID(ctx, workspaceTable.ID); err == nil {
buildNum = latestBuild.BuildNumber
}
build := dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
ID: wsBuildID,
BuildNumber: buildNum + 1,
JobID: buildJobID,
WorkspaceID: workspaceTable.ID,
TemplateVersionID: version.ID,
@@ -3038,7 +3060,7 @@ func TestCompleteJob(t *testing.T) {
require.True(t, build.HasAITask.Valid) // We ALWAYS expect a value to be set, therefore not nil, i.e. valid = true.
require.Equal(t, tc.expectHasAiTask, build.HasAITask.Bool)
if tc.expectHasAiTask {
if tc.expectHasAiTask && build.Transition != database.WorkspaceTransitionStop {
require.Equal(t, sidebarAppID, build.AITaskSidebarAppID.UUID.String())
}
@@ -3977,6 +3999,70 @@ func TestNotifications(t *testing.T) {
})
}
// TestServer_ExpirePrebuildsSessionToken verifies that when a prebuilt
// workspace's build job is acquired (i.e. the prebuild is being claimed), the
// session-token API key previously created for the prebuilds system user is
// deleted.
func TestServer_ExpirePrebuildsSessionToken(t *testing.T) {
	t.Parallel()
	// Given: a prebuilt workspace where an API key was previously created for the prebuilds user.
	var (
		ctx             = testutil.Context(t, testutil.WaitShort)
		srv, db, ps, pd = setup(t, false, nil)
		user            = dbgen.User(t, db, database.User{})
		template        = dbgen.Template(t, db, database.Template{
			OrganizationID: pd.OrganizationID,
			CreatedBy:      user.ID,
		})
		version = dbgen.TemplateVersion(t, db, database.TemplateVersion{
			TemplateID:     uuid.NullUUID{UUID: template.ID, Valid: true},
			OrganizationID: pd.OrganizationID,
			CreatedBy:      user.ID,
		})
		// Workspace is owned by the prebuilds system user, marking it as a prebuild.
		workspace = dbgen.Workspace(t, db, database.WorkspaceTable{
			OrganizationID: pd.OrganizationID,
			TemplateID:     template.ID,
			OwnerID:        database.PrebuildsSystemUserID,
		})
		workspaceBuildID = uuid.New()
		buildJob         = dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{
			OrganizationID: pd.OrganizationID,
			FileID:         dbgen.File(t, db, database.File{CreatedBy: user.ID}).ID,
			Type:           database.ProvisionerJobTypeWorkspaceBuild,
			Input: must(json.Marshal(provisionerdserver.WorkspaceProvisionJob{
				WorkspaceBuildID: workspaceBuildID,
			})),
			InitiatorID: database.PrebuildsSystemUserID,
			Tags:        pd.Tags,
		})
		_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
			ID:                workspaceBuildID,
			WorkspaceID:       workspace.ID,
			TemplateVersionID: version.ID,
			JobID:             buildJob.ID,
			Transition:        database.WorkspaceTransitionStart,
			InitiatorID:       database.PrebuildsSystemUserID,
		})
		// API key named with the canonical workspace session-token name for
		// the prebuilds user; this is what should be expired on claim.
		existingKey, _ = dbgen.APIKey(t, db, database.APIKey{
			UserID:    database.PrebuildsSystemUserID,
			TokenName: provisionerdserver.WorkspaceSessionTokenName(database.PrebuildsSystemUserID, workspace.ID),
		})
	)
	// When: the prebuild claim job is acquired
	fs := newFakeStream(ctx)
	err := srv.AcquireJobWithCancel(fs)
	require.NoError(t, err)
	job, err := fs.waitForJob()
	require.NoError(t, err)
	require.NotNil(t, job)
	workspaceBuildJob := job.Type.(*proto.AcquiredJob_WorkspaceBuild_).WorkspaceBuild
	require.NotNil(t, workspaceBuildJob.Metadata)
	// Assert test invariant: we acquired the expected build job
	require.Equal(t, workspaceBuildID.String(), workspaceBuildJob.WorkspaceBuildId)
	// Then: The session token should be deleted
	_, err = db.GetAPIKeyByID(ctx, existingKey.ID)
	require.ErrorIs(t, err, sql.ErrNoRows, "api key for prebuilds user should be deleted")
}
type overrides struct {
ctx context.Context
deploymentValues *codersdk.DeploymentValues
@@ -4122,6 +4208,7 @@ func setup(t *testing.T, ignoreLogErrors bool, ov *overrides) (proto.DRPCProvisi
},
notifEnq,
&op,
provisionerdserver.NewMetrics(logger),
)
require.NoError(t, err)
return srv, db, ps, daemon
@@ -4244,3 +4331,63 @@ func (f *fakeUsageInserter) InsertDiscreteUsageEvent(_ context.Context, _ databa
f.collectedEvents = append(f.collectedEvents, event)
return nil
}
// seedPreviousWorkspaceStartWithAITask seeds a prior successful 'start' build
// (build number 1) with has_ai_task=true and a sidebar app for the single
// workspace already present in the store. Used by the "previous build had an
// AI task" test case to exercise the stop-build workaround that carries
// has_ai_task forward.
func seedPreviousWorkspaceStartWithAITask(ctx context.Context, t testing.TB, db database.Store) error {
	t.Helper()
	// If the below looks slightly convoluted, that's because it is.
	// The workspace doesn't yet have a latest build, so querying all
	// workspaces will fail.
	tpls, err := db.GetTemplates(ctx)
	if err != nil {
		return xerrors.Errorf("seedFunc: get template: %w", err)
	}
	if len(tpls) != 1 {
		return xerrors.Errorf("seedFunc: expected exactly one template, got %d", len(tpls))
	}
	ws, err := db.GetWorkspacesByTemplateID(ctx, tpls[0].ID)
	if err != nil {
		return xerrors.Errorf("seedFunc: get workspaces: %w", err)
	}
	if len(ws) != 1 {
		return xerrors.Errorf("seedFunc: expected exactly one workspace, got %d", len(ws))
	}
	w := ws[0]
	prevJob := dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
		OrganizationID: w.OrganizationID,
		InitiatorID:    w.OwnerID,
		Type:           database.ProvisionerJobTypeWorkspaceBuild,
	})
	tvs, err := db.GetTemplateVersionsByTemplateID(ctx, database.GetTemplateVersionsByTemplateIDParams{
		TemplateID: tpls[0].ID,
	})
	if err != nil {
		return xerrors.Errorf("seedFunc: get template version: %w", err)
	}
	if len(tvs) != 1 {
		return xerrors.Errorf("seedFunc: expected exactly one template version, got %d", len(tvs))
	}
	if tpls[0].ActiveVersionID == uuid.Nil {
		return xerrors.Errorf("seedFunc: active version id is nil")
	}
	// Resource -> agent -> app chain so the build has a sidebar app to reference.
	res := dbgen.WorkspaceResource(t, db, database.WorkspaceResource{
		JobID: prevJob.ID,
	})
	agt := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{
		ResourceID: res.ID,
	})
	wa := dbgen.WorkspaceApp(t, db, database.WorkspaceApp{
		AgentID: agt.ID,
	})
	_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
		BuildNumber:        1,
		HasAITask:          sql.NullBool{Valid: true, Bool: true},
		AITaskSidebarAppID: uuid.NullUUID{Valid: true, UUID: wa.ID},
		// NOTE(review): the build ID reuses the workspace ID (w.ID). This is
		// unique within this test so it works, but it looks unintentional —
		// confirm whether a fresh uuid.New() was intended here.
		ID:                w.ID,
		InitiatorID:       w.OwnerID,
		JobID:             prevJob.ID,
		TemplateVersionID: tvs[0].ID,
		Transition:        database.WorkspaceTransitionStart,
		WorkspaceID:       w.ID,
	})
	return nil
}
+15 -9
View File
@@ -24,7 +24,7 @@ const (
Requirements:
- Only lowercase letters, numbers, and hyphens
- Start with "task-"
- Maximum 28 characters total
- Maximum 27 characters total
- Descriptive of the main task
Examples:
@@ -145,17 +145,23 @@ func Generate(ctx context.Context, prompt string, opts ...Option) (string, error
return "", ErrNoNameGenerated
}
generatedName := acc.Messages()[0].Content
if err := codersdk.NameValid(generatedName); err != nil {
return "", xerrors.Errorf("generated name %v not valid: %w", generatedName, err)
}
if generatedName == "task-unnamed" {
taskName := acc.Messages()[0].Content
if taskName == "task-unnamed" {
return "", ErrNoNameGenerated
}
return fmt.Sprintf("%s-%s", generatedName, generateSuffix()), nil
// We append a suffix to the end of the task name to reduce
// the chance of collisions. We truncate the task name to
// to a maximum of 27 bytes, so that when we append the
// 5 byte suffix (`-` and 4 byte hex slug), it should
// remain within the 32 byte workspace name limit.
taskName = taskName[:min(len(taskName), 27)]
taskName = fmt.Sprintf("%s-%s", taskName, generateSuffix())
if err := codersdk.NameValid(taskName); err != nil {
return "", xerrors.Errorf("generated name %v not valid: %w", taskName, err)
}
return taskName, nil
}
func anthropicDataStream(ctx context.Context, client anthropic.Client, model anthropic.Model, input []aisdk.Message) (aisdk.DataStream, error) {
+8 -5
View File
@@ -817,12 +817,13 @@ func (api *API) watchWorkspaceAgentContainers(rw http.ResponseWriter, r *http.Re
var (
ctx = r.Context()
workspaceAgent = httpmw.WorkspaceAgentParam(r)
logger = api.Logger.Named("agent_container_watcher").With(slog.F("agent_id", workspaceAgent.ID))
)
// If the agent is unreachable, the request will hang. Assume that if we
// don't get a response after 30s that the agent is unreachable.
dialCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
dialCtx, dialCancel := context.WithTimeout(ctx, 30*time.Second)
defer dialCancel()
apiAgent, err := db2sdk.WorkspaceAgent(
api.DERPMap(),
*api.TailnetCoordinator.Load(),
@@ -857,8 +858,7 @@ func (api *API) watchWorkspaceAgentContainers(rw http.ResponseWriter, r *http.Re
}
defer release()
watcherLogger := api.Logger.Named("agent_container_watcher").With(slog.F("agent_id", workspaceAgent.ID))
containersCh, closer, err := agentConn.WatchContainers(ctx, watcherLogger)
containersCh, closer, err := agentConn.WatchContainers(ctx, logger)
if err != nil {
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
Message: "Internal error watching agent's containers.",
@@ -877,6 +877,9 @@ func (api *API) watchWorkspaceAgentContainers(rw http.ResponseWriter, r *http.Re
return
}
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
// Here we close the websocket for reading, so that the websocket library will handle pings and
// close frames.
_ = conn.CloseRead(context.Background())
@@ -884,7 +887,7 @@ func (api *API) watchWorkspaceAgentContainers(rw http.ResponseWriter, r *http.Re
ctx, wsNetConn := codersdk.WebsocketNetConn(ctx, conn, websocket.MessageText)
defer wsNetConn.Close()
go httpapi.Heartbeat(ctx, conn)
go httpapi.HeartbeatClose(ctx, logger, cancel, conn)
encoder := json.NewEncoder(wsNetConn)
+136 -1
View File
@@ -59,10 +59,145 @@ func (fakeAgentProvider) Close() error {
return nil
}
// channelCloser adapts an arbitrary callback to the io.Closer interface,
// letting tests hook teardown behavior (such as closing a channel) into
// APIs that hand back a Closer.
type channelCloser struct {
	closeFn func()
}

// Close runs the configured callback. It always reports success.
func (cc *channelCloser) Close() error {
	cc.closeFn()
	return nil
}
func TestWatchAgentContainers(t *testing.T) {
t.Parallel()
t.Run("WebSocketClosesProperly", func(t *testing.T) {
t.Run("CoderdWebSocketCanHandleClientClosing", func(t *testing.T) {
t.Parallel()
// This test ensures that the agent containers `/watch` websocket can gracefully
// handle the client websocket closing. This test was created in
// response to this issue: https://github.com/coder/coder/issues/19449
var (
ctx = testutil.Context(t, testutil.WaitLong)
logger = slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug).Named("coderd")
mCtrl = gomock.NewController(t)
mDB = dbmock.NewMockStore(mCtrl)
mCoordinator = tailnettest.NewMockCoordinator(mCtrl)
mAgentConn = agentconnmock.NewMockAgentConn(mCtrl)
fAgentProvider = fakeAgentProvider{
agentConn: func(ctx context.Context, agentID uuid.UUID) (_ workspacesdk.AgentConn, release func(), _ error) {
return mAgentConn, func() {}, nil
},
}
workspaceID = uuid.New()
agentID = uuid.New()
resourceID = uuid.New()
jobID = uuid.New()
buildID = uuid.New()
containersCh = make(chan codersdk.WorkspaceAgentListContainersResponse)
r = chi.NewMux()
api = API{
ctx: ctx,
Options: &Options{
AgentInactiveDisconnectTimeout: testutil.WaitShort,
Database: mDB,
Logger: logger,
DeploymentValues: &codersdk.DeploymentValues{},
TailnetCoordinator: tailnettest.NewFakeCoordinator(),
},
}
)
var tailnetCoordinator tailnet.Coordinator = mCoordinator
api.TailnetCoordinator.Store(&tailnetCoordinator)
api.agentProvider = fAgentProvider
// Setup: Allow `ExtractWorkspaceAgentParams` to complete.
mDB.EXPECT().GetWorkspaceAgentByID(gomock.Any(), agentID).Return(database.WorkspaceAgent{
ID: agentID,
ResourceID: resourceID,
LifecycleState: database.WorkspaceAgentLifecycleStateReady,
FirstConnectedAt: sql.NullTime{Valid: true, Time: dbtime.Now()},
LastConnectedAt: sql.NullTime{Valid: true, Time: dbtime.Now()},
}, nil)
mDB.EXPECT().GetWorkspaceResourceByID(gomock.Any(), resourceID).Return(database.WorkspaceResource{
ID: resourceID,
JobID: jobID,
}, nil)
mDB.EXPECT().GetProvisionerJobByID(gomock.Any(), jobID).Return(database.ProvisionerJob{
ID: jobID,
Type: database.ProvisionerJobTypeWorkspaceBuild,
}, nil)
mDB.EXPECT().GetWorkspaceBuildByJobID(gomock.Any(), jobID).Return(database.WorkspaceBuild{
WorkspaceID: workspaceID,
ID: buildID,
}, nil)
// And: Allow `db2sdk.WorkspaceAgent` to complete.
mCoordinator.EXPECT().Node(gomock.Any()).Return(nil)
// And: Allow `WatchContainers` to be called, returning our `containersCh` channel.
mAgentConn.EXPECT().WatchContainers(gomock.Any(), gomock.Any()).
DoAndReturn(func(_ context.Context, _ slog.Logger) (<-chan codersdk.WorkspaceAgentListContainersResponse, io.Closer, error) {
return containersCh, &channelCloser{closeFn: func() {
close(containersCh)
}}, nil
})
// And: We mount the HTTP Handler
r.With(httpmw.ExtractWorkspaceAgentParam(mDB)).
Get("/workspaceagents/{workspaceagent}/containers/watch", api.watchWorkspaceAgentContainers)
// Given: We create the HTTP server
srv := httptest.NewServer(r)
defer srv.Close()
// And: Dial the WebSocket
wsURL := strings.Replace(srv.URL, "http://", "ws://", 1)
conn, resp, err := websocket.Dial(ctx, fmt.Sprintf("%s/workspaceagents/%s/containers/watch", wsURL, agentID), nil)
require.NoError(t, err)
if resp.Body != nil {
defer resp.Body.Close()
}
// And: Create a streaming decoder
decoder := wsjson.NewDecoder[codersdk.WorkspaceAgentListContainersResponse](conn, websocket.MessageText, logger)
defer decoder.Close()
decodeCh := decoder.Chan()
// And: We can successfully send through the channel.
testutil.RequireSend(ctx, t, containersCh, codersdk.WorkspaceAgentListContainersResponse{
Containers: []codersdk.WorkspaceAgentContainer{{
ID: "test-container-id",
}},
})
// And: Receive the data.
containerResp := testutil.RequireReceive(ctx, t, decodeCh)
require.Len(t, containerResp.Containers, 1)
require.Equal(t, "test-container-id", containerResp.Containers[0].ID)
// When: We close the WebSocket
conn.Close(websocket.StatusNormalClosure, "test closing connection")
// Then: We expect `containersCh` to be closed.
select {
case <-ctx.Done():
t.Fail()
case _, ok := <-containersCh:
require.False(t, ok, "channel is expected to be closed")
}
})
t.Run("CoderdWebSocketCanHandleAgentClosing", func(t *testing.T) {
t.Parallel()
// This test ensures that the agent containers `/watch` websocket can gracefully
+19
View File
@@ -143,9 +143,12 @@ deployment. They will always be available from the agent.
| `coderd_oauth2_external_requests_rate_limit_total` | gauge | DEPRECATED: use coderd_oauth2_external_requests_rate_limit instead | `name` `resource` |
| `coderd_oauth2_external_requests_rate_limit_used` | gauge | The number of requests made in this interval. | `name` `resource` |
| `coderd_oauth2_external_requests_total` | counter | The total number of api calls made to external oauth2 providers. 'status_code' will be 0 if the request failed with no response. | `name` `source` `status_code` |
| `coderd_prebuilt_workspace_claim_duration_seconds` | histogram | Time to claim a prebuilt workspace by organization, template, and preset. | `organization_name` `preset_name` `template_name` |
| `coderd_provisionerd_job_timings_seconds` | histogram | The provisioner job time duration in seconds. | `provisioner` `status` |
| `coderd_provisionerd_jobs_current` | gauge | The number of currently running provisioner jobs. | `provisioner` |
| `coderd_workspace_builds_total` | counter | The number of workspaces started, updated, or deleted. | `action` `owner_email` `status` `template_name` `template_version` `workspace_name` |
| `coderd_workspace_creation_duration_seconds` | histogram | Time to create a workspace by organization, template, preset, and type (regular or prebuild). | `organization_name` `preset_name` `template_name` `type` |
| `coderd_workspace_creation_total` | counter | Total regular (non-prebuilt) workspace creations by organization, template, and preset. | `organization_name` `preset_name` `template_name` |
| `coderd_workspace_latest_build_status` | gauge | The current workspace statuses by template, transition, and owner. | `status` `template_name` `template_version` `workspace_owner` `workspace_transition` |
| `go_gc_duration_seconds` | summary | A summary of the pause duration of garbage collection cycles. | |
| `go_goroutines` | gauge | Number of goroutines that currently exist. | |
@@ -185,3 +188,19 @@ deployment. They will always be available from the agent.
| `promhttp_metric_handler_requests_total` | counter | Total number of scrapes by HTTP status code. | `code` |
<!-- End generated by 'make docs/admin/integrations/prometheus.md'. -->
### Note on Prometheus native histogram support
The following metrics support native histograms:
* `coderd_workspace_creation_duration_seconds`
* `coderd_prebuilt_workspace_claim_duration_seconds`
Native histograms are an **experimental** Prometheus feature that removes the need to predefine bucket boundaries and allows higher-resolution buckets that adapt to deployment characteristics.
Whether a metric is exposed as classic or native depends entirely on the Prometheus server configuration (see [Prometheus docs](https://prometheus.io/docs/specs/native_histograms/) for details):
* If native histograms are enabled, Prometheus ingests the high-resolution histogram.
* If not, it falls back to the predefined buckets.
⚠️ Important: classic and native histograms cannot be aggregated together. If Prometheus is switched from classic to native at a certain point in time, dashboards may need to account for that transition.
For this reason, it's recommended to follow [Prometheus migration guidelines](https://prometheus.io/docs/specs/native_histograms/#migration-considerations) when moving from classic to native histograms.
@@ -294,6 +294,7 @@ Coder provides several metrics to monitor your prebuilt workspaces:
- `coderd_prebuilt_workspaces_desired` (gauge): Target number of prebuilt workspaces that should be available.
- `coderd_prebuilt_workspaces_running` (gauge): Current number of prebuilt workspaces in a `running` state.
- `coderd_prebuilt_workspaces_eligible` (gauge): Current number of prebuilt workspaces eligible to be claimed.
- `coderd_prebuilt_workspace_claim_duration_seconds` ([_native histogram_](https://prometheus.io/docs/specs/native_histograms) support): Time to claim a prebuilt workspace from the prebuild pool.
#### Logs
+1
View File
@@ -61,5 +61,6 @@ needs of different teams.
changes are reviewed and tested.
- [Permissions and Policies](./template-permissions.md): Control who may access
and modify your template.
- [External Workspaces](./managing-templates/external-workspaces.md): Learn how to connect your existing infrastructure to Coder workspaces.
<children></children>
@@ -0,0 +1,131 @@
# External Workspaces
External workspaces allow you to seamlessly connect externally managed infrastructure as Coder workspaces. This enables you to integrate existing servers, on-premises systems, or any capable machine with the Coder environment, ensuring a smooth and efficient development workflow without requiring Coder to provision additional compute resources.
## Prerequisites
- Access to external compute resources that can run the Coder agent:
- **Windows**: amd64 or arm64 architecture
- **Linux**: amd64, arm64, or armv7 architecture
- **macOS**: amd64 or arm64 architecture
- **Examples**: VMs, bare-metal servers, Kubernetes nodes, or any machine meeting the above requirements.
- Networking access to your Coder deployment.
- A workspace template that includes a [`coder_external_agent`](https://registry.terraform.io/providers/coder/coder/latest/docs/resources/external_agent) resource.
We provide an example template on how to set up external workspaces in the [Coder Registry](https://registry.coder.com/templates/coder-labs/externally-managed-workspace)
## Benefits
External workspaces offer flexibility and control in complex environments:
- **Incremental adoption of Coder**
Integrate with existing infrastructure gradually without needing to migrate everything at once. This is particularly useful when gradually migrating workloads to Coder without refactoring current infrastructure.
- **Flexibility**
Attach cloud, hybrid, or on-premises machines as developer workspaces. This enables connecting existing on-premises GPU servers for ML development or bringing manually provisioned VMs in restricted networks under Coder's workspace management.
- **Separation of concerns**
Provision compute resources externally (using your existing IaC or manual processes) while managing workspace configuration (apps, scripts) with Terraform. This approach is ideal for running agents in CI pipelines to provision short-lived, externally managed workspaces for testing or build automation.
## Known limitations
- **Lifecycle control**
Start/stop/restart actions in the Coder UI are disabled for external workspaces.
- **No automatic deprovisioning**
Deleting an external workspace in Coder removes the agent token and record, but does not delete the underlying compute resource.
- **Manual agent management**
Administrators are responsible for deploying and maintaining agents on external resources.
- **Limited UI indicators**
External workspaces are marked in the UI, but underlying infrastructure health is not monitored by Coder.
## When to use it?
Use external workspaces if:
- You have compute resources provisioned outside of Coder's Terraform flows.
- You want to connect specialized or legacy systems to your Coder deployment.
- You are migrating incrementally to Coder and need hybrid support.
- You need finer control over how and where agents run, while still benefiting from Coder's workspace experience.
## How to use it?
You can create and manage external workspaces using either the **CLI** or the **UI**.
<div class="tabs">
## CLI
1. **Create an external workspace**
```bash
coder external-workspaces create hello-world \
--template=externally-managed-workspace -y
```
- Validates that the template includes a `coder_external_agent` resource.
- Once created, the workspace is registered in Coder but marked as requiring an external agent.
2. **List external workspaces**
```bash
coder external-workspaces list
```
Example output:
```bash
WORKSPACE TEMPLATE STATUS HEALTHY LAST BUILT CURRENT VERSION OUTDATED
hello-world externally-managed-workspace Started true 15m happy_mendel9 false
```
3. **Retrieve agent connection instructions**
Use this command to query the script you must run on the external machine:
```bash
coder external-workspaces agent-instructions hello-world
```
Example:
```bash
Please run the following command to attach external agent to the workspace hello-world:
curl -fsSL "https://<DEPLOYMENT_URL>/api/v2/init-script/linux/amd64" | CODER_AGENT_TOKEN="<token>" sh
```
You can also output JSON for automation:
```bash
coder external-workspaces agent-instructions hello-world --output=json
```
```json
{
"workspace_name": "hello-world",
"agent_name": "main",
"auth_type": "token",
"auth_token": "<token>",
"init_script": "curl -fsSL \"https://<DEPLOYMENT_URL>/api/v2/init-script/linux/arm64\" | CODER_AGENT_TOKEN=\"<token>\" sh"
}
```
## UI
1. Import the external workspace template (see prerequisites).
2. In the Coder UI, go to **Workspaces → New workspace** and select the imported template.
3. Once the workspace is created, Coder will display **connection details** with the command users need to run on the external machine to start the agent.
4. The workspace will appear in the dashboard, but with the following differences:
- **Start**, **Stop**, and **Restart** actions are disabled.
- Users are provided with instructions for launching the agent manually on the external machine.
![External Workspace View](../../../images/admin/templates/external-workspace.png)
</div>
Binary file not shown.

After

Width:  |  Height:  |  Size: 85 KiB

+6
View File
@@ -537,6 +537,12 @@
"title": "Workspace Scheduling",
"description": "Learn how to control how workspaces are started and stopped",
"path": "./admin/templates/managing-templates/schedule.md"
},
{
"title": "External Workspaces",
"description": "Learn how to manage external workspaces",
"path": "./admin/templates/managing-templates/external-workspaces.md",
"state": ["premium", "early access"]
}
]
},
+4 -4
View File
@@ -984,10 +984,10 @@ func (api *API) CheckBuildUsage(ctx context.Context, store database.Store, templ
// This check is intentionally not committed to the database. It's fine if
// it's not 100% accurate or allows for minor breaches due to build races.
// nolint:gocritic // Requires permission to read all workspaces to read managed agent count.
managedAgentCount, err := store.GetManagedAgentCount(agpldbauthz.AsSystemRestricted(ctx), database.GetManagedAgentCountParams{
StartTime: managedAgentLimit.UsagePeriod.Start,
EndTime: managedAgentLimit.UsagePeriod.End,
// nolint:gocritic // Requires permission to read all usage events.
managedAgentCount, err := store.GetTotalUsageDCManagedAgentsV1(agpldbauthz.AsSystemRestricted(ctx), database.GetTotalUsageDCManagedAgentsV1Params{
StartDate: managedAgentLimit.UsagePeriod.Start,
EndDate: managedAgentLimit.UsagePeriod.End,
})
if err != nil {
return wsbuilder.UsageCheckResponse{}, xerrors.Errorf("get managed agent count: %w", err)
@@ -5,6 +5,7 @@ import (
"crypto/ed25519"
"crypto/rand"
"crypto/tls"
"database/sql"
"io"
"net/http"
"os/exec"
@@ -23,10 +24,13 @@ import (
"github.com/coder/coder/v2/coderd/coderdtest"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/pubsub"
"github.com/coder/coder/v2/coderd/prebuilds"
"github.com/coder/coder/v2/coderd/util/ptr"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/codersdk/drpcsdk"
"github.com/coder/coder/v2/enterprise/coderd"
"github.com/coder/coder/v2/enterprise/coderd/license"
entprebuilds "github.com/coder/coder/v2/enterprise/coderd/prebuilds"
"github.com/coder/coder/v2/enterprise/dbcrypt"
"github.com/coder/coder/v2/provisioner/echo"
"github.com/coder/coder/v2/provisioner/terraform"
@@ -446,3 +450,98 @@ func newExternalProvisionerDaemon(t testing.TB, client *codersdk.Client, org uui
return closer
}
// GetRunningPrebuilds polls the database until exactly desiredPrebuilds
// prebuilt workspaces are running, then returns them. Each polled prebuild's
// agents are forced into the "ready" lifecycle state, since tests have no
// real agent processes reporting state and prebuilds are only eligible for
// claiming once their agents are ready.
func GetRunningPrebuilds(
	ctx context.Context,
	t *testing.T,
	db database.Store,
	desiredPrebuilds int,
) []database.GetRunningPrebuiltWorkspacesRow {
	t.Helper()

	var runningPrebuilds []database.GetRunningPrebuiltWorkspacesRow
	testutil.Eventually(ctx, t, func(context.Context) bool {
		// Reset on every poll. Previously the accumulator was only ever
		// appended to, so each retry re-added every running prebuild,
		// producing duplicate rows and a length that could overshoot
		// desiredPrebuilds and never satisfy the condition.
		runningPrebuilds = runningPrebuilds[:0]

		prebuiltWorkspaces, err := db.GetRunningPrebuiltWorkspaces(ctx)
		assert.NoError(t, err, "failed to get running prebuilds")
		for _, prebuild := range prebuiltWorkspaces {
			runningPrebuilds = append(runningPrebuilds, prebuild)

			agents, err := db.GetWorkspaceAgentsInLatestBuildByWorkspaceID(ctx, prebuild.ID)
			assert.NoError(t, err, "failed to get agents")

			// Manually mark all agents as ready since tests don't have real agent processes
			// that would normally report their lifecycle state. Prebuilt workspaces are only
			// eligible for claiming when their agents reach the "ready" state.
			for _, agent := range agents {
				err = db.UpdateWorkspaceAgentLifecycleStateByID(ctx, database.UpdateWorkspaceAgentLifecycleStateByIDParams{
					ID:             agent.ID,
					LifecycleState: database.WorkspaceAgentLifecycleStateReady,
					// NOTE(review): StartedAt is an hour in the future while
					// ReadyAt is an hour in the past (as in the original);
					// presumably only ReadyAt matters for eligibility — confirm.
					StartedAt: sql.NullTime{Time: time.Now().Add(time.Hour), Valid: true},
					ReadyAt:   sql.NullTime{Time: time.Now().Add(-1 * time.Hour), Valid: true},
				})
				assert.NoError(t, err, "failed to update agent")
			}
		}

		t.Logf("found %d running prebuilds so far, want %d", len(runningPrebuilds), desiredPrebuilds)
		return len(runningPrebuilds) == desiredPrebuilds
	}, testutil.IntervalSlow, "found %d running prebuilds, expected %d", len(runningPrebuilds), desiredPrebuilds)

	return runningPrebuilds
}
// MustRunReconciliationLoopForPreset snapshots prebuild state, computes the
// reconciliation actions for the given preset, runs one reconciliation pass
// against it, and returns the computed actions. Any failure aborts the test.
func MustRunReconciliationLoopForPreset(
	ctx context.Context,
	t *testing.T,
	db database.Store,
	reconciler *entprebuilds.StoreReconciler,
	preset codersdk.Preset,
) []*prebuilds.ReconciliationActions {
	t.Helper()

	// Take a point-in-time snapshot of global prebuild state.
	snapshot, err := reconciler.SnapshotState(ctx, db)
	require.NoError(t, err)

	// Narrow the snapshot down to just the preset under test.
	presetSnapshot, err := snapshot.FilterByPreset(preset.ID)
	require.NoError(t, err)
	require.NotNil(t, presetSnapshot)

	// Compute what the reconciler would do, then actually do it.
	plannedActions, err := reconciler.CalculateActions(ctx, *presetSnapshot)
	require.NoError(t, err)
	require.NotNil(t, plannedActions)
	require.NoError(t, reconciler.ReconcilePreset(ctx, *presetSnapshot))

	return plannedActions
}
// MustClaimPrebuild creates a workspace for username from the given template
// version and preset (claiming a prebuild when one is eligible), waits for
// the build job to succeed, and asserts the workspace ends up started.
// An optional autostart schedule may be supplied as the first variadic
// argument; when omitted, an empty schedule is sent.
func MustClaimPrebuild(
	ctx context.Context,
	t *testing.T,
	client *codersdk.Client,
	userClient *codersdk.Client,
	username string,
	version codersdk.TemplateVersion,
	presetID uuid.UUID,
	autostartSchedule ...string,
) codersdk.Workspace {
	t.Helper()

	schedule := ""
	if len(autostartSchedule) != 0 {
		schedule = autostartSchedule[0]
	}

	// Workspace names may not contain underscores, so sanitize the random name.
	name := strings.ReplaceAll(testutil.GetRandomName(t), "_", "-")
	claimed, err := userClient.CreateUserWorkspace(ctx, username, codersdk.CreateWorkspaceRequest{
		TemplateVersionID:       version.ID,
		Name:                    name,
		TemplateVersionPresetID: presetID,
		AutostartSchedule:       ptr.Ref(schedule),
	})
	require.NoError(t, err)

	build := coderdtest.AwaitWorkspaceBuildJobCompleted(t, userClient, claimed.LatestBuild.ID)
	require.Equal(t, codersdk.ProvisionerJobSucceeded, build.Job.Status)

	workspace := coderdtest.MustWorkspace(t, client, claimed.ID)
	require.Equal(t, codersdk.WorkspaceTransitionStart, workspace.LatestBuild.Transition)
	return workspace
}
+126 -44
View File
@@ -6,6 +6,7 @@ import (
"database/sql"
"fmt"
"math"
"sort"
"time"
"github.com/golang-jwt/jwt/v4"
@@ -95,17 +96,6 @@ func Entitlements(
return codersdk.Entitlements{}, xerrors.Errorf("query active user count: %w", err)
}
// nolint:gocritic // Getting external workspaces is a system function.
externalWorkspaces, err := db.GetWorkspaces(dbauthz.AsSystemRestricted(ctx), database.GetWorkspacesParams{
HasExternalAgent: sql.NullBool{
Bool: true,
Valid: true,
},
})
if err != nil {
return codersdk.Entitlements{}, xerrors.Errorf("query external workspaces: %w", err)
}
// nolint:gocritic // Getting external templates is a system function.
externalTemplates, err := db.GetTemplatesWithFilter(dbauthz.AsSystemRestricted(ctx), database.GetTemplatesWithFilterParams{
HasExternalAgent: sql.NullBool{
@@ -118,16 +108,24 @@ func Entitlements(
}
entitlements, err := LicensesEntitlements(ctx, now, licenses, enablements, keys, FeatureArguments{
ActiveUserCount: activeUserCount,
ReplicaCount: replicaCount,
ExternalAuthCount: externalAuthCount,
ExternalWorkspaceCount: int64(len(externalWorkspaces)),
ExternalTemplateCount: int64(len(externalTemplates)),
ActiveUserCount: activeUserCount,
ReplicaCount: replicaCount,
ExternalAuthCount: externalAuthCount,
ExternalTemplateCount: int64(len(externalTemplates)),
ManagedAgentCountFn: func(ctx context.Context, startTime time.Time, endTime time.Time) (int64, error) {
// This is not super accurate, as the start and end times will be
// truncated to the date in UTC timezone. This is an optimization
// so we can use an aggregate table instead of scanning the usage
// events table.
//
// High accuracy is not super necessary, as we give buffers in our
// licenses (e.g. higher hard limit) to account for additional
// usage.
//
// nolint:gocritic // Requires permission to read all workspaces to read managed agent count.
return db.GetManagedAgentCount(dbauthz.AsSystemRestricted(ctx), database.GetManagedAgentCountParams{
StartTime: startTime,
EndTime: endTime,
return db.GetTotalUsageDCManagedAgentsV1(dbauthz.AsSystemRestricted(ctx), database.GetTotalUsageDCManagedAgentsV1Params{
StartDate: startTime,
EndDate: endTime,
})
},
})
@@ -139,11 +137,10 @@ func Entitlements(
}
type FeatureArguments struct {
ActiveUserCount int64
ReplicaCount int
ExternalAuthCount int
ExternalWorkspaceCount int64
ExternalTemplateCount int64
ActiveUserCount int64
ReplicaCount int
ExternalAuthCount int
ExternalTemplateCount int64
// Unfortunately, managed agent count is not a simple count of the current
// state of the world, but a count between two points in time determined by
// the licenses.
@@ -192,6 +189,13 @@ func LicensesEntitlements(
})
}
// nextLicenseValidityPeriod holds the current or next contiguous period
// where there will be at least one active license. This is used for
// generating license expiry warnings. Previously we would generate licenses
// expiry warnings for each license, but it means that the warning will show
// even if you've loaded up a new license that doesn't have any gap.
nextLicenseValidityPeriod := &licenseValidityPeriod{}
// TODO: License specific warnings and errors should be tied to the license, not the
// 'Entitlements' group as a whole.
for _, license := range licenses {
@@ -201,6 +205,17 @@ func LicensesEntitlements(
// The license isn't valid yet. We don't consider any entitlements contained in it, but
// it's also not an error. Just skip it silently. This can happen if an administrator
// uploads a license for a new term that hasn't started yet.
//
// We still want to factor this into our validity period, though.
// This ensures we can suppress license expiry warnings for expiring
// licenses while a new license is ready to take its place.
//
// claims is nil, so reparse the claims with the IgnoreNbf function.
claims, err = ParseClaimsIgnoreNbf(license.JWT, keys)
if err != nil {
continue
}
nextLicenseValidityPeriod.ApplyClaims(claims)
continue
}
if err != nil {
@@ -209,6 +224,10 @@ func LicensesEntitlements(
continue
}
// Obviously, valid licenses should be considered for the license
// validity period.
nextLicenseValidityPeriod.ApplyClaims(claims)
usagePeriodStart := claims.NotBefore.Time // checked not-nil when validating claims
usagePeriodEnd := claims.ExpiresAt.Time // checked not-nil when validating claims
if usagePeriodStart.After(usagePeriodEnd) {
@@ -237,10 +256,6 @@ func LicensesEntitlements(
entitlement = codersdk.EntitlementGracePeriod
}
// Will add a warning if the license is expiring soon.
// This warning can be raised multiple times if there is more than 1 license.
licenseExpirationWarning(&entitlements, now, claims)
// 'claims.AllFeature' is the legacy way to set 'claims.FeatureSet = codersdk.FeatureSetEnterprise'
// If both are set, ignore the legacy 'claims.AllFeature'
if claims.AllFeatures && claims.FeatureSet == "" {
@@ -405,6 +420,10 @@ func LicensesEntitlements(
// Now the license specific warnings and errors are added to the entitlements.
// Add a single warning if we are currently in the license validity period
// and it's expiring soon.
nextLicenseValidityPeriod.LicenseExpirationWarning(&entitlements, now)
// If HA is enabled, ensure the feature is entitled.
if featureArguments.ReplicaCount > 1 {
feature := entitlements.Features[codersdk.FeatureHighAvailability]
@@ -445,18 +464,6 @@ func LicensesEntitlements(
}
}
if featureArguments.ExternalWorkspaceCount > 0 {
feature := entitlements.Features[codersdk.FeatureWorkspaceExternalAgent]
switch feature.Entitlement {
case codersdk.EntitlementNotEntitled:
entitlements.Errors = append(entitlements.Errors,
"You have external workspaces but your license is not entitled to this feature.")
case codersdk.EntitlementGracePeriod:
entitlements.Warnings = append(entitlements.Warnings,
"You have external workspaces but your license is expired.")
}
}
if featureArguments.ExternalTemplateCount > 0 {
feature := entitlements.Features[codersdk.FeatureWorkspaceExternalAgent]
switch feature.Entitlement {
@@ -742,10 +749,85 @@ func keyFunc(keys map[string]ed25519.PublicKey) func(*jwt.Token) (interface{}, e
}
}
// licenseExpirationWarning adds a warning message if the license is expiring soon.
func licenseExpirationWarning(entitlements *codersdk.Entitlements, now time.Time, claims *Claims) {
// Add warning if license is expiring soon
daysToExpire := int(math.Ceil(claims.LicenseExpires.Sub(now).Hours() / 24))
// licenseValidityPeriod keeps track of all license validity periods, and
// generates warnings over contiguous periods across multiple licenses.
//
// Note: this does not track the actual entitlements of each license to ensure
// newer licenses cover the same features as older licenses before merging. It
// is assumed that all licenses cover the same features.
//
// The zero value is ready to use.
type licenseValidityPeriod struct {
	// parts contains all tracked license periods prior to merging.
	// Each element is a {start, end} pair of timestamps.
	parts [][2]time.Time
}
// ApplyClaims records the validity window described by claims. This should
// only be called with valid (including not-yet-valid), unexpired licenses.
// Claims missing either boundary are silently ignored as bad data.
func (p *licenseValidityPeriod) ApplyClaims(claims *Claims) {
	if claims == nil {
		return
	}
	start, end := claims.NotBefore, claims.LicenseExpires
	if start == nil || end == nil {
		// Bad data.
		return
	}
	p.Apply(start.Time, end.Time)
}
// Apply records a single [start, end] validity window. Windows whose end
// precedes their start are discarded as bad data.
func (p *licenseValidityPeriod) Apply(start, end time.Time) {
	if start.After(end) {
		// Bad data.
		return
	}
	p.parts = append(p.parts, [2]time.Time{start, end})
}
// merged collapses the recorded validity windows into sorted, maximal
// contiguous blocks. Windows that touch or overlap are folded together;
// disjoint windows remain separate entries in the result.
func (p *licenseValidityPeriod) merged() [][2]time.Time {
	if len(p.parts) == 0 {
		return nil
	}

	// Sort a copy by start time so the receiver's recorded parts stay intact.
	periods := make([][2]time.Time, len(p.parts))
	copy(periods, p.parts)
	sort.Slice(periods, func(i, j int) bool {
		return periods[i][0].Before(periods[j][0])
	})

	out := make([][2]time.Time, 0, len(periods))
	active := periods[0]
	for _, candidate := range periods[1:] {
		if candidate[0].After(active[1]) {
			// Disjoint: seal the active block and start a new one.
			out = append(out, active)
			active = candidate
			continue
		}
		// Overlapping or touching: extend the active block if needed.
		if candidate[1].After(active[1]) {
			active[1] = candidate[1]
		}
	}
	// Seal the final block.
	return append(out, active)
}
// LicenseExpirationWarning adds a warning message if we are currently in the
// license validity period and it's expiring soon.
func (p *licenseValidityPeriod) LicenseExpirationWarning(entitlements *codersdk.Entitlements, now time.Time) {
merged := p.merged()
if len(merged) == 0 {
// No licenses
return
}
end := merged[0][1]
daysToExpire := int(math.Ceil(end.Sub(now).Hours() / 24))
showWarningDays := 30
isTrial := entitlements.Trial
if isTrial {
@@ -0,0 +1,140 @@
package license
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestNextLicenseValidityPeriod verifies that licenseValidityPeriod merges
// applied validity windows into sorted, contiguous blocks regardless of the
// order in which the windows are applied.
func TestNextLicenseValidityPeriod(t *testing.T) {
	t.Parallel()
	t.Run("Apply", func(t *testing.T) {
		t.Parallel()
		testCases := []struct {
			name            string
			licensePeriods  [][2]time.Time
			expectedPeriods [][2]time.Time
		}{
			{
				name:           "None",
				licensePeriods: [][2]time.Time{},
				// merged() returns nil (not an empty slice) when no periods
				// have been applied.
				expectedPeriods: nil,
			},
			{
				name: "One",
				licensePeriods: [][2]time.Time{
					{time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 2, 0, 0, 0, 0, time.UTC)},
				},
				expectedPeriods: [][2]time.Time{
					{time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 2, 0, 0, 0, 0, time.UTC)},
				},
			},
			{
				name: "TwoOverlapping",
				licensePeriods: [][2]time.Time{
					{time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 3, 0, 0, 0, 0, time.UTC)},
					{time.Date(2025, 1, 2, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 4, 0, 0, 0, 0, time.UTC)},
				},
				expectedPeriods: [][2]time.Time{
					{time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 4, 0, 0, 0, 0, time.UTC)},
				},
			},
			{
				name: "TwoNonOverlapping",
				licensePeriods: [][2]time.Time{
					{time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 2, 0, 0, 0, 0, time.UTC)},
					{time.Date(2025, 1, 3, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 4, 0, 0, 0, 0, time.UTC)},
				},
				expectedPeriods: [][2]time.Time{
					{time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 2, 0, 0, 0, 0, time.UTC)},
					{time.Date(2025, 1, 3, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 4, 0, 0, 0, 0, time.UTC)},
				},
			},
			{
				name: "ThreeOverlapping",
				licensePeriods: [][2]time.Time{
					{time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 3, 0, 0, 0, 0, time.UTC)},
					{time.Date(2025, 1, 2, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 5, 0, 0, 0, 0, time.UTC)},
					{time.Date(2025, 1, 4, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 6, 0, 0, 0, 0, time.UTC)},
				},
				expectedPeriods: [][2]time.Time{
					{time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 6, 0, 0, 0, 0, time.UTC)},
				},
			},
			{
				name: "ThreeNonOverlapping",
				licensePeriods: [][2]time.Time{
					{time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 2, 0, 0, 0, 0, time.UTC)},
					{time.Date(2025, 1, 3, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 4, 0, 0, 0, 0, time.UTC)},
					{time.Date(2025, 1, 5, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 6, 0, 0, 0, 0, time.UTC)},
				},
				expectedPeriods: [][2]time.Time{
					{time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 2, 0, 0, 0, 0, time.UTC)},
					{time.Date(2025, 1, 3, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 4, 0, 0, 0, 0, time.UTC)},
					{time.Date(2025, 1, 5, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 6, 0, 0, 0, 0, time.UTC)},
				},
			},
			{
				name: "PeriodContainsAnotherPeriod",
				licensePeriods: [][2]time.Time{
					{time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 8, 0, 0, 0, 0, time.UTC)},
					{time.Date(2025, 1, 3, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 6, 0, 0, 0, 0, time.UTC)},
				},
				expectedPeriods: [][2]time.Time{
					{time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 8, 0, 0, 0, 0, time.UTC)},
				},
			},
			{
				name: "EndBeforeStart",
				licensePeriods: [][2]time.Time{
					{time.Date(2025, 1, 2, 0, 0, 0, 0, time.UTC), time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC)},
				},
				expectedPeriods: nil,
			},
		}
		for _, tc := range testCases {
			t.Run(tc.name, func(t *testing.T) {
				t.Parallel()
				// Test with all possible permutations of the periods to ensure
				// consistency regardless of the order.
				ps := permutations(tc.licensePeriods)
				if len(ps) == 0 {
					// permutations yields no permutations for an empty input,
					// which would otherwise silently skip the assertion below.
					// Run the zero-period case once explicitly.
					ps = [][][2]time.Time{nil}
				}
				for _, p := range ps {
					t.Logf("permutation: %v", p)
					period := &licenseValidityPeriod{}
					for _, times := range p {
						t.Logf("applying %v", times)
						period.Apply(times[0], times[1])
					}
					assert.Equal(t, tc.expectedPeriods, period.merged(), "merged")
				}
			})
		}
	})
}
// permutations returns every ordering of arr, generated in place via
// swap-and-backtrack recursion. Note that an empty input produces nil (zero
// permutations), not a single empty permutation. The input slice is mutated
// while generating but restored to its original order before returning.
func permutations[T any](arr []T) [][]T {
	var result [][]T
	var permute func(work []T, pos int)
	permute = func(work []T, pos int) {
		if pos == len(work)-1 {
			// Snapshot the current arrangement; work keeps mutating as the
			// recursion backtracks.
			snapshot := append([]T(nil), work...)
			result = append(result, snapshot)
			return
		}
		for k := pos; k < len(work); k++ {
			work[pos], work[k] = work[k], work[pos]
			permute(work, pos+1)
			work[pos], work[k] = work[k], work[pos] // undo the swap
		}
	}
	permute(arr, 0)
	return result
}
+124 -20
View File
@@ -180,6 +180,121 @@ func TestEntitlements(t *testing.T) {
)
})
t.Run("Expiration warning suppressed if new license covers gap", func(t *testing.T) {
t.Parallel()
db, _ := dbtestutil.NewDB(t)
// Insert the expiring license
graceDate := dbtime.Now().AddDate(0, 0, 1)
_, err := db.InsertLicense(context.Background(), database.InsertLicenseParams{
JWT: coderdenttest.GenerateLicense(t, coderdenttest.LicenseOptions{
Features: license.Features{
codersdk.FeatureUserLimit: 100,
codersdk.FeatureAuditLog: 1,
},
FeatureSet: codersdk.FeatureSetPremium,
GraceAt: graceDate,
ExpiresAt: dbtime.Now().AddDate(0, 0, 5),
}),
Exp: time.Now().AddDate(0, 0, 5),
})
require.NoError(t, err)
// Warning should be generated.
entitlements, err := license.Entitlements(context.Background(), db, 1, 1, coderdenttest.Keys, all)
require.NoError(t, err)
require.True(t, entitlements.HasLicense)
require.False(t, entitlements.Trial)
require.Equal(t, codersdk.EntitlementEntitled, entitlements.Features[codersdk.FeatureAuditLog].Entitlement)
require.Len(t, entitlements.Warnings, 1)
require.Contains(t, entitlements.Warnings, "Your license expires in 1 day.")
// Insert the new, not-yet-valid license that starts BEFORE the expiring
// license expires.
_, err = db.InsertLicense(context.Background(), database.InsertLicenseParams{
JWT: coderdenttest.GenerateLicense(t, coderdenttest.LicenseOptions{
Features: license.Features{
codersdk.FeatureUserLimit: 100,
codersdk.FeatureAuditLog: 1,
},
FeatureSet: codersdk.FeatureSetPremium,
NotBefore: graceDate.Add(-time.Hour), // contiguous, and also in the future
GraceAt: dbtime.Now().AddDate(1, 0, 0),
ExpiresAt: dbtime.Now().AddDate(1, 0, 5),
}),
Exp: dbtime.Now().AddDate(1, 0, 5),
})
require.NoError(t, err)
// Warning should be suppressed.
entitlements, err = license.Entitlements(context.Background(), db, 1, 1, coderdenttest.Keys, all)
require.NoError(t, err)
require.True(t, entitlements.HasLicense)
require.False(t, entitlements.Trial)
require.Equal(t, codersdk.EntitlementEntitled, entitlements.Features[codersdk.FeatureAuditLog].Entitlement)
require.Len(t, entitlements.Warnings, 0) // suppressed
})
t.Run("Expiration warning not suppressed if new license has gap", func(t *testing.T) {
t.Parallel()
db, _ := dbtestutil.NewDB(t)
// Insert the expiring license
graceDate := dbtime.Now().AddDate(0, 0, 1)
_, err := db.InsertLicense(context.Background(), database.InsertLicenseParams{
JWT: coderdenttest.GenerateLicense(t, coderdenttest.LicenseOptions{
Features: license.Features{
codersdk.FeatureUserLimit: 100,
codersdk.FeatureAuditLog: 1,
},
FeatureSet: codersdk.FeatureSetPremium,
GraceAt: graceDate,
ExpiresAt: dbtime.Now().AddDate(0, 0, 5),
}),
Exp: time.Now().AddDate(0, 0, 5),
})
require.NoError(t, err)
// Should generate a warning.
entitlements, err := license.Entitlements(context.Background(), db, 1, 1, coderdenttest.Keys, all)
require.NoError(t, err)
require.True(t, entitlements.HasLicense)
require.False(t, entitlements.Trial)
require.Equal(t, codersdk.EntitlementEntitled, entitlements.Features[codersdk.FeatureAuditLog].Entitlement)
require.Len(t, entitlements.Warnings, 1)
require.Contains(t, entitlements.Warnings, "Your license expires in 1 day.")
// Insert the new, not-yet-valid license that starts AFTER the expiring
// license expires (e.g. there's a gap)
_, err = db.InsertLicense(context.Background(), database.InsertLicenseParams{
JWT: coderdenttest.GenerateLicense(t, coderdenttest.LicenseOptions{
Features: license.Features{
codersdk.FeatureUserLimit: 100,
codersdk.FeatureAuditLog: 1,
},
FeatureSet: codersdk.FeatureSetPremium,
NotBefore: graceDate.Add(time.Minute), // gap of 1 second!
GraceAt: dbtime.Now().AddDate(1, 0, 0),
ExpiresAt: dbtime.Now().AddDate(1, 0, 5),
}),
Exp: dbtime.Now().AddDate(1, 0, 5),
})
require.NoError(t, err)
// Warning should still be generated.
entitlements, err = license.Entitlements(context.Background(), db, 1, 1, coderdenttest.Keys, all)
require.NoError(t, err)
require.True(t, entitlements.HasLicense)
require.False(t, entitlements.Trial)
require.Equal(t, codersdk.EntitlementEntitled, entitlements.Features[codersdk.FeatureAuditLog].Entitlement)
require.Len(t, entitlements.Warnings, 1)
require.Contains(t, entitlements.Warnings, "Your license expires in 1 day.")
})
t.Run("Expiration warning for trials", func(t *testing.T) {
t.Parallel()
db, _ := dbtestutil.NewDB(t)
@@ -712,20 +827,22 @@ func TestEntitlements(t *testing.T) {
GetActiveUserCount(gomock.Any(), false).
Return(int64(1), nil)
mDB.EXPECT().
GetManagedAgentCount(gomock.Any(), gomock.Cond(func(params database.GetManagedAgentCountParams) bool {
// gomock doesn't seem to compare times very nicely.
if !assert.WithinDuration(t, licenseOpts.NotBefore, params.StartTime, time.Second) {
GetTotalUsageDCManagedAgentsV1(gomock.Any(), gomock.Cond(func(params database.GetTotalUsageDCManagedAgentsV1Params) bool {
// gomock doesn't seem to compare times very nicely, so check
// them manually.
//
// The query truncates these times to the date in UTC timezone,
// but we still check that we're passing in the correct
// timestamp in the first place.
if !assert.WithinDuration(t, licenseOpts.NotBefore, params.StartDate, time.Second) {
return false
}
if !assert.WithinDuration(t, licenseOpts.ExpiresAt, params.EndTime, time.Second) {
if !assert.WithinDuration(t, licenseOpts.ExpiresAt, params.EndDate, time.Second) {
return false
}
return true
})).
Return(int64(175), nil)
mDB.EXPECT().
GetWorkspaces(gomock.Any(), gomock.Any()).
Return([]database.GetWorkspacesRow{}, nil)
mDB.EXPECT().
GetTemplatesWithFilter(gomock.Any(), gomock.Any()).
Return([]database.Template{}, nil)
@@ -1116,19 +1233,6 @@ func TestLicenseEntitlements(t *testing.T) {
assert.Equal(t, int64(200), *feature.Actual)
},
},
{
Name: "ExternalWorkspace",
Licenses: []*coderdenttest.LicenseOptions{
enterpriseLicense().UserLimit(100),
},
Arguments: license.FeatureArguments{
ExternalWorkspaceCount: 1,
},
AssertEntitlements: func(t *testing.T, entitlements codersdk.Entitlements) {
assert.Equal(t, codersdk.EntitlementEntitled, entitlements.Features[codersdk.FeatureWorkspaceExternalAgent].Entitlement)
assert.True(t, entitlements.Features[codersdk.FeatureWorkspaceExternalAgent].Enabled)
},
},
{
Name: "ExternalTemplate",
Licenses: []*coderdenttest.LicenseOptions{
+1
View File
@@ -361,6 +361,7 @@ func (api *API) provisionerDaemonServe(rw http.ResponseWriter, r *http.Request)
},
api.NotificationsEnqueuer,
&api.AGPL.PrebuildsReconciler,
api.ProvisionerdServerMetrics,
)
if err != nil {
if !xerrors.Is(err, context.Canceled) {
+137
View File
@@ -26,6 +26,7 @@ import (
"github.com/coder/coder/v2/coderd/audit"
"github.com/coder/coder/v2/coderd/autobuild"
"github.com/coder/coder/v2/coderd/coderdtest"
"github.com/coder/coder/v2/coderd/coderdtest/promhelp"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/dbfake"
@@ -2873,6 +2874,142 @@ func TestPrebuildActivityBump(t *testing.T) {
require.Zero(t, workspace.LatestBuild.MaxDeadline)
}
func TestWorkspaceProvisionerdServerMetrics(t *testing.T) {
t.Parallel()
// Setup
clock := quartz.NewMock(t)
ctx := testutil.Context(t, testutil.WaitSuperLong)
db, pb := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure())
logger := testutil.Logger(t)
reg := prometheus.NewRegistry()
provisionerdserverMetrics := provisionerdserver.NewMetrics(logger)
err := provisionerdserverMetrics.Register(reg)
require.NoError(t, err)
client, _, api, owner := coderdenttest.NewWithAPI(t, &coderdenttest.Options{
Options: &coderdtest.Options{
Database: db,
Pubsub: pb,
IncludeProvisionerDaemon: true,
Clock: clock,
ProvisionerdServerMetrics: provisionerdserverMetrics,
},
})
// Setup Prebuild reconciler
cache := files.New(prometheus.NewRegistry(), &coderdtest.FakeAuthorizer{})
reconciler := prebuilds.NewStoreReconciler(
db, pb, cache,
codersdk.PrebuildsConfig{},
logger,
clock,
prometheus.NewRegistry(),
notifications.NewNoopEnqueuer(),
api.AGPL.BuildUsageChecker,
)
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer(db)
api.AGPL.PrebuildsClaimer.Store(&claimer)
organizationName, err := client.Organization(ctx, owner.OrganizationID)
require.NoError(t, err)
userClient, user := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID, rbac.RoleMember())
// Setup template and template version with a preset with 1 prebuild instance
versionPrebuild := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, templateWithAgentAndPresetsWithPrebuilds(1))
coderdtest.AwaitTemplateVersionJobCompleted(t, client, versionPrebuild.ID)
templatePrebuild := coderdtest.CreateTemplate(t, client, owner.OrganizationID, versionPrebuild.ID)
presetsPrebuild, err := client.TemplateVersionPresets(ctx, versionPrebuild.ID)
require.NoError(t, err)
require.Len(t, presetsPrebuild, 1)
// Setup template and template version with a preset without prebuild instances
versionNoPrebuild := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, templateWithAgentAndPresetsWithPrebuilds(0))
coderdtest.AwaitTemplateVersionJobCompleted(t, client, versionNoPrebuild.ID)
templateNoPrebuild := coderdtest.CreateTemplate(t, client, owner.OrganizationID, versionNoPrebuild.ID)
presetsNoPrebuild, err := client.TemplateVersionPresets(ctx, versionNoPrebuild.ID)
require.NoError(t, err)
require.Len(t, presetsNoPrebuild, 1)
// Given: no histogram value for prebuilt workspaces creation
prebuildCreationMetric := promhelp.MetricValue(t, reg, "coderd_workspace_creation_duration_seconds", prometheus.Labels{
"organization_name": organizationName.Name,
"template_name": templatePrebuild.Name,
"preset_name": presetsPrebuild[0].Name,
"type": "prebuild",
})
require.Nil(t, prebuildCreationMetric)
// Given: reconciliation loop runs and starts prebuilt workspace
coderdenttest.MustRunReconciliationLoopForPreset(ctx, t, db, reconciler, presetsPrebuild[0])
runningPrebuilds := coderdenttest.GetRunningPrebuilds(ctx, t, db, 1)
require.Len(t, runningPrebuilds, 1)
// Then: the histogram value for prebuilt workspace creation should be updated
prebuildCreationHistogram := promhelp.HistogramValue(t, reg, "coderd_workspace_creation_duration_seconds", prometheus.Labels{
"organization_name": organizationName.Name,
"template_name": templatePrebuild.Name,
"preset_name": presetsPrebuild[0].Name,
"type": "prebuild",
})
require.NotNil(t, prebuildCreationHistogram)
require.Equal(t, uint64(1), prebuildCreationHistogram.GetSampleCount())
// Given: a running prebuilt workspace, ready to be claimed
prebuild := coderdtest.MustWorkspace(t, client, runningPrebuilds[0].ID)
require.Equal(t, codersdk.WorkspaceTransitionStart, prebuild.LatestBuild.Transition)
require.Nil(t, prebuild.DormantAt)
require.Nil(t, prebuild.DeletingAt)
// Given: no histogram value for prebuilt workspaces claim
prebuildClaimMetric := promhelp.MetricValue(t, reg, "coderd_prebuilt_workspace_claim_duration_seconds", prometheus.Labels{
"organization_name": organizationName.Name,
"template_name": templatePrebuild.Name,
"preset_name": presetsPrebuild[0].Name,
})
require.Nil(t, prebuildClaimMetric)
// Given: the prebuilt workspace is claimed by a user
workspace := coderdenttest.MustClaimPrebuild(ctx, t, client, userClient, user.Username, versionPrebuild, presetsPrebuild[0].ID)
require.Equal(t, prebuild.ID, workspace.ID)
// Then: the histogram value for prebuilt workspace claim should be updated
prebuildClaimHistogram := promhelp.HistogramValue(t, reg, "coderd_prebuilt_workspace_claim_duration_seconds", prometheus.Labels{
"organization_name": organizationName.Name,
"template_name": templatePrebuild.Name,
"preset_name": presetsPrebuild[0].Name,
})
require.NotNil(t, prebuildClaimHistogram)
require.Equal(t, uint64(1), prebuildClaimHistogram.GetSampleCount())
// Given: no histogram value for regular workspaces creation
regularWorkspaceHistogramMetric := promhelp.MetricValue(t, reg, "coderd_workspace_creation_duration_seconds", prometheus.Labels{
"organization_name": organizationName.Name,
"template_name": templateNoPrebuild.Name,
"preset_name": presetsNoPrebuild[0].Name,
"type": "regular",
})
require.Nil(t, regularWorkspaceHistogramMetric)
// Given: a user creates a regular workspace (without prebuild pool)
regularWorkspace, err := client.CreateUserWorkspace(ctx, user.ID.String(), codersdk.CreateWorkspaceRequest{
TemplateVersionID: versionNoPrebuild.ID,
TemplateVersionPresetID: presetsNoPrebuild[0].ID,
Name: coderdtest.RandomUsername(t),
})
require.NoError(t, err)
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, regularWorkspace.LatestBuild.ID)
// Then: the histogram value for regular workspace creation should be updated
regularWorkspaceHistogram := promhelp.HistogramValue(t, reg, "coderd_workspace_creation_duration_seconds", prometheus.Labels{
"organization_name": organizationName.Name,
"template_name": templateNoPrebuild.Name,
"preset_name": presetsNoPrebuild[0].Name,
"type": "regular",
})
require.NotNil(t, regularWorkspaceHistogram)
require.Equal(t, uint64(1), regularWorkspaceHistogram.GetSampleCount())
}
// TestWorkspaceTemplateParamsChange tests a workspace with a parameter that
// validation changes on apply. The params used in create workspace are invalid
// according to the static params on import.
+39 -36
View File
@@ -36,7 +36,7 @@ replace github.com/tcnksm/go-httpstat => github.com/coder/go-httpstat v0.0.0-202
// There are a few minor changes we make to Tailscale that we're slowly upstreaming. Compare here:
// https://github.com/tailscale/tailscale/compare/main...coder:tailscale:main
replace tailscale.com => github.com/coder/tailscale v1.1.1-0.20250729141742-067f1e5d9716
replace tailscale.com => github.com/coder/tailscale v1.1.1-0.20250829055706-6eafe0f9199e
// This is replaced to include
// 1. a fix for a data race: c.f. https://github.com/tailscale/wireguard-go/pull/25
@@ -66,7 +66,7 @@ replace github.com/charmbracelet/bubbletea => github.com/coder/bubbletea v1.2.2-
// Trivy has some issues that we're floating patches for, and will hopefully
// be upstreamed eventually.
replace github.com/aquasecurity/trivy => github.com/coder/trivy v0.0.0-20250527170238-9416a59d7019
replace github.com/aquasecurity/trivy => github.com/coder/trivy v0.0.0-20250807211036-0bb0acd620a8
// afero/tarfs has a bug that breaks our usage. A PR has been submitted upstream.
// https://github.com/spf13/afero/pull/487
@@ -126,7 +126,7 @@ require (
github.com/go-jose/go-jose/v4 v4.1.1
github.com/go-logr/logr v1.4.3
github.com/go-playground/validator/v10 v10.27.0
github.com/gofrs/flock v0.12.0
github.com/gofrs/flock v0.12.1
github.com/gohugoio/hugo v0.148.1
github.com/golang-jwt/jwt/v4 v4.5.2
github.com/golang-migrate/migrate/v4 v4.18.1
@@ -158,7 +158,7 @@ require (
github.com/mocktools/go-smtp-mock/v2 v2.5.0
github.com/muesli/termenv v0.16.0
github.com/natefinch/atomic v1.0.1
github.com/open-policy-agent/opa v1.4.2
github.com/open-policy-agent/opa v1.6.0
github.com/ory/dockertest/v3 v3.12.0
github.com/pion/udp v0.1.4
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c
@@ -170,12 +170,12 @@ require (
github.com/prometheus/common v0.65.0
github.com/quasilyte/go-ruleguard/dsl v0.3.22
github.com/robfig/cron/v3 v3.0.1
github.com/shirou/gopsutil/v4 v4.25.4
github.com/shirou/gopsutil/v4 v4.25.5
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
github.com/spf13/afero v1.14.0
github.com/spf13/pflag v1.0.6
github.com/spf13/pflag v1.0.7
github.com/sqlc-dev/pqtype v0.3.0
github.com/stretchr/testify v1.10.0
github.com/stretchr/testify v1.11.1
github.com/swaggo/http-swagger/v2 v2.0.1
github.com/swaggo/swag v1.16.2
github.com/tidwall/gjson v1.18.0
@@ -187,8 +187,8 @@ require (
go.mozilla.org/pkcs7 v0.9.0
go.nhat.io/otelsql v0.16.0
go.opentelemetry.io/otel v1.37.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0
go.opentelemetry.io/otel/sdk v1.37.0
go.opentelemetry.io/otel/trace v1.37.0
go.uber.org/atomic v1.11.0
@@ -196,7 +196,7 @@ require (
go.uber.org/mock v0.6.0
go4.org/netipx v0.0.0-20230728180743-ad4cb58a6516
golang.org/x/crypto v0.41.0
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476
golang.org/x/mod v0.27.0
golang.org/x/net v0.43.0
golang.org/x/oauth2 v0.30.0
@@ -225,7 +225,7 @@ require (
cloud.google.com/go/longrunning v0.6.7 // indirect
dario.cat/mergo v1.0.1 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/DataDog/appsec-internal-go v1.11.2 // indirect
github.com/DataDog/datadog-agent/pkg/obfuscate v0.64.2 // indirect
github.com/DataDog/datadog-agent/pkg/proto v0.64.2 // indirect
@@ -244,7 +244,7 @@ require (
github.com/KyleBanks/depth v1.2.1 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/ProtonMail/go-crypto v1.1.6 // indirect
github.com/ProtonMail/go-crypto v1.3.0 // indirect
github.com/agext/levenshtein v1.2.3 // indirect
github.com/agnivade/levenshtein v1.2.1 // indirect
github.com/akutz/memconn v0.1.0 // indirect
@@ -274,7 +274,7 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/bep/godartsass/v2 v2.5.0 // indirect
github.com/bep/golibsass v1.2.0 // indirect
github.com/bmatcuk/doublestar/v4 v4.8.1 // indirect
github.com/bmatcuk/doublestar/v4 v4.9.0 // indirect
github.com/charmbracelet/x/ansi v0.8.0 // indirect
github.com/charmbracelet/x/term v0.2.1 // indirect
github.com/chromedp/sysutil v1.1.0 // indirect
@@ -284,14 +284,14 @@ require (
github.com/containerd/continuity v0.4.5 // indirect
github.com/coreos/go-iptables v0.6.0 // indirect
github.com/dlclark/regexp2 v1.11.5 // indirect
github.com/docker/cli v28.1.1+incompatible // indirect
github.com/docker/docker v28.1.1+incompatible // indirect
github.com/docker/cli v28.3.2+incompatible // indirect
github.com/docker/docker v28.3.3+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dop251/goja v0.0.0-20241024094426-79f3a7efcdbd // indirect
github.com/dustin/go-humanize v1.0.1
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 // indirect
github.com/ebitengine/purego v0.8.3 // indirect
github.com/ebitengine/purego v0.8.4 // indirect
github.com/elastic/go-windows v1.0.0 // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
@@ -308,7 +308,6 @@ require (
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
github.com/go-test/deep v1.1.0 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gobwas/httphead v0.1.0 // indirect
@@ -322,19 +321,18 @@ require (
github.com/google/btree v1.1.3 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/nftables v0.2.0 // indirect
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
github.com/googleapis/gax-go/v2 v2.15.0 // indirect
github.com/gorilla/css v1.0.1 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-cty v1.5.0 // indirect
github.com/hashicorp/go-hclog v1.6.3 // indirect
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
github.com/hashicorp/go-terraform-address v0.0.0-20240523040243-ccea9d309e0c
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/hashicorp/hcl v1.0.1-vault-7 // indirect
@@ -366,14 +364,14 @@ require (
github.com/mdlayher/sdnotify v1.0.0 // indirect
github.com/mdlayher/socket v0.5.0 // indirect
github.com/microcosm-cc/bluemonday v1.0.27
github.com/miekg/dns v1.1.57 // indirect
github.com/miekg/dns v1.1.58 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-ps v1.0.0 // indirect
github.com/mitchellh/go-testing-interface v1.14.1 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/moby/term v0.5.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
@@ -445,7 +443,7 @@ require (
go.opentelemetry.io/contrib v1.19.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
go.opentelemetry.io/otel/metric v1.37.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect
@@ -461,10 +459,10 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
howett.net/plist v1.0.0 // indirect
kernel.org/pub/linux/libs/security/libcap/psx v1.2.73 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
sigs.k8s.io/yaml v1.5.0 // indirect
)
require github.com/coder/clistat v1.0.0
require github.com/coder/clistat v1.1.1
require github.com/SherClockHolmes/webpush-go v1.4.0
@@ -472,7 +470,7 @@ require (
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
github.com/charmbracelet/x/cellbuf v0.0.13 // indirect
github.com/go-json-experiment/json v0.0.0-20250725192818-e39067aee2d2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.3 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
)
@@ -481,10 +479,10 @@ require (
github.com/brianvoe/gofakeit/v7 v7.4.0
github.com/coder/agentapi-sdk-go v0.0.0-20250505131810-560d1d88d225
github.com/coder/aisdk-go v0.0.9
github.com/coder/preview v1.0.3
github.com/coder/preview v1.0.4
github.com/fsnotify/fsnotify v1.9.0
github.com/go-git/go-git/v5 v5.16.2
github.com/mark3labs/mcp-go v0.38.0
github.com/mark3labs/mcp-go v0.32.0
)
require (
@@ -501,12 +499,15 @@ require (
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 // indirect
github.com/Masterminds/semver/v3 v3.3.1 // indirect
github.com/alecthomas/chroma v0.10.0 // indirect
github.com/aquasecurity/go-version v0.0.1 // indirect
github.com/aquasecurity/trivy v0.58.2 // indirect
github.com/aquasecurity/iamgo v0.0.10 // indirect
github.com/aquasecurity/jfather v0.0.8 // indirect
github.com/aquasecurity/trivy v0.61.1-0.20250407075540-f1329c7ea1aa // indirect
github.com/aquasecurity/trivy-checks v1.11.3-0.20250604022615-9a7efa7c9169 // indirect
github.com/aws/aws-sdk-go v1.55.7 // indirect
github.com/bahlo/generic-list-go v0.2.0 // indirect
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
github.com/buger/jsonparser v1.1.1 // indirect
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
github.com/charmbracelet/x/exp/slice v0.0.0-20250327172914-2fdc97757edf // indirect
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da // indirect
@@ -515,31 +516,33 @@ require (
github.com/esiqveland/notify v0.13.3 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.6.2 // indirect
github.com/google/go-containerregistry v0.20.6 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/hashicorp/go-getter v1.7.9 // indirect
github.com/hashicorp/go-safetemp v1.0.0 // indirect
github.com/invopop/jsonschema v0.13.0 // indirect
github.com/jackmordaunt/icns/v3 v3.0.1 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
github.com/moby/sys/user v0.4.0 // indirect
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 // indirect
github.com/openai/openai-go v1.7.0 // indirect
github.com/package-url/packageurl-go v0.1.3 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
github.com/samber/lo v1.50.0 // indirect
github.com/samber/lo v1.51.0 // indirect
github.com/sergeymakinen/go-bmp v1.0.0 // indirect
github.com/sergeymakinen/go-ico v1.0.0-beta.0 // indirect
github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
github.com/tidwall/sjson v1.2.5 // indirect
github.com/tmaxmax/go-sse v0.10.0 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
github.com/ulikunitz/xz v0.5.15 // indirect
github.com/vektah/gqlparser/v2 v2.5.28 // indirect
github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
github.com/zeebo/xxh3 v1.0.2 // indirect
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
google.golang.org/genai v1.12.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect
+89 -78
View File
@@ -624,8 +624,8 @@ gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zum
git.sr.ht/~jackmordaunt/go-toast v1.1.2 h1:/yrfI55LRt1M7H1vkaw+NaH1+L1CDxrqDltwm5euVuE=
git.sr.ht/~jackmordaunt/go-toast v1.1.2/go.mod h1:jA4OqHKTQ4AFBdwrSnwnskUIIS3HYzlJSgdzCKqfavo=
git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/locker v0.0.0-20171006230638-a6e239ea1c69 h1:+tu3HOoMXB7RXEINRVIpxJCT+KdYiI7LAEAUrOw3dIU=
github.com/BurntSushi/locker v0.0.0-20171006230638-a6e239ea1c69/go.mod h1:L1AbZdiDllfyYH5l5OkAaZtk7VkWe89bPJFmnDBNHxg=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -687,8 +687,8 @@ github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw=
github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
github.com/SherClockHolmes/webpush-go v1.4.0 h1:ocnzNKWN23T9nvHi6IfyrQjkIc0oJWv1B1pULsf9i3s=
github.com/SherClockHolmes/webpush-go v1.4.0/go.mod h1:XSq8pKX11vNV8MJEMwjrlTkxhAj1zKfxmyhdV7Pd6UA=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
@@ -715,6 +715,8 @@ github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3Uu
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/ammario/tlru v0.4.0 h1:sJ80I0swN3KOX2YxC6w8FbCqpQucWdbb+J36C05FPuU=
github.com/ammario/tlru v0.4.0/go.mod h1:aYzRFu0XLo4KavE9W8Lx7tzjkX+pAApz+NgcKYIFUBQ=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
@@ -737,6 +739,8 @@ github.com/aquasecurity/iamgo v0.0.10 h1:t/HG/MI1eSephztDc+Rzh/YfgEa+NqgYRSfr6pH
github.com/aquasecurity/iamgo v0.0.10/go.mod h1:GI9IQJL2a+C+V2+i3vcwnNKuIJXZ+HAfqxZytwy+cPk=
github.com/aquasecurity/jfather v0.0.8 h1:tUjPoLGdlkJU0qE7dSzd1MHk2nQFNPR0ZfF+6shaExE=
github.com/aquasecurity/jfather v0.0.8/go.mod h1:Ag+L/KuR/f8vn8okUi8Wc1d7u8yOpi2QTaGX10h71oY=
github.com/aquasecurity/trivy-checks v1.11.3-0.20250604022615-9a7efa7c9169 h1:TckzIxUX7lZaU9f2lNxCN0noYYP8fzmSQf6a4JdV83w=
github.com/aquasecurity/trivy-checks v1.11.3-0.20250604022615-9a7efa7c9169/go.mod h1:nT69xgRcBD4NlHwTBpWMYirpK5/Zpl8M+XDOgmjMn2k=
github.com/aquasecurity/trivy-iac v0.8.0 h1:NKFhk/BTwQ0jIh4t74V8+6UIGUvPlaxO9HPlSMQi3fo=
github.com/aquasecurity/trivy-iac v0.8.0/go.mod h1:ARiMeNqcaVWOXJmp8hmtMnNm/Jd836IOmDBUW5r4KEk=
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
@@ -790,8 +794,6 @@ github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWp
github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA=
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bep/clocks v0.5.0 h1:hhvKVGLPQWRVsBP/UB7ErrHYIO42gINVbvqxvYTPVps=
@@ -822,8 +824,8 @@ github.com/bep/tmc v0.5.1 h1:CsQnSC6MsomH64gw0cT5f+EwQDcvZz4AazKunFwTpuI=
github.com/bep/tmc v0.5.1/go.mod h1:tGYHN8fS85aJPhDLgXETVKp+PR382OvFi2+q2GkGsq0=
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
github.com/bmatcuk/doublestar/v4 v4.8.1 h1:54Bopc5c2cAvhLRAzqOGCYHYyhcDHsFF4wWIR5wKP38=
github.com/bmatcuk/doublestar/v4 v4.8.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/bmatcuk/doublestar/v4 v4.9.0 h1:DBvuZxjdKkRP/dr4GVV4w2fnmrk5Hxc90T51LZjv0JA=
github.com/bmatcuk/doublestar/v4 v4.9.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/bool64/shared v0.1.5 h1:fp3eUhBsrSjNCQPcSdQqZxxh9bBwrYiZ+zOKFkM0/2E=
github.com/bool64/shared v0.1.5/go.mod h1:081yz68YC9jeFB3+Bbmno2RFWvGKv1lPKkMP6MHJlPs=
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
@@ -832,14 +834,14 @@ github.com/bramvdbogaerde/go-scp v1.5.0 h1:a9BinAjTfQh273eh7vd3qUgmBC+bx+3TRDtkZ
github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ=
github.com/brianvoe/gofakeit/v7 v7.4.0 h1:Q7R44v1E9vkath1SxBqxXzhLnyOcGm/Ex3CQwjudJuI=
github.com/brianvoe/gofakeit/v7 v7.4.0/go.mod h1:QXuPeBw164PJCzCUZVmgpgHJ3Llj49jSLVkKPMtxtxA=
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA=
github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q=
github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 h1:BjkPE3785EwPhhyuFkbINB+2a1xATwk8SNDWnJiD41g=
github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5/go.mod h1:jtAfVaU/2cu1+wdSRPWE2c1N2qeAA3K4RH9pYgqwets=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
@@ -907,8 +909,8 @@ github.com/coder/aisdk-go v0.0.9 h1:Vzo/k2qwVGLTR10ESDeP2Ecek1SdPfZlEjtTfMveiVo=
github.com/coder/aisdk-go v0.0.9/go.mod h1:KF6/Vkono0FJJOtWtveh5j7yfNrSctVTpwgweYWSp5M=
github.com/coder/bubbletea v1.2.2-0.20241212190825-007a1cdb2c41 h1:SBN/DA63+ZHwuWwPHPYoCZ/KLAjHv5g4h2MS4f2/MTI=
github.com/coder/bubbletea v1.2.2-0.20241212190825-007a1cdb2c41/go.mod h1:I9ULxr64UaOSUv7hcb3nX4kowodJCVS7vt7VVJk/kW4=
github.com/coder/clistat v1.0.0 h1:MjiS7qQ1IobuSSgDnxcCSyBPESs44hExnh2TEqMcGnA=
github.com/coder/clistat v1.0.0/go.mod h1:F+gLef+F9chVrleq808RBxdaoq52R4VLopuLdAsh8Y4=
github.com/coder/clistat v1.1.1 h1:T45dlwr7fSmjLPGLk7QRKgynnDeMOPoraHSGtLIHY3s=
github.com/coder/clistat v1.1.1/go.mod h1:F+gLef+F9chVrleq808RBxdaoq52R4VLopuLdAsh8Y4=
github.com/coder/flog v1.1.0 h1:kbAes1ai8fIS5OeV+QAnKBQE22ty1jRF/mcAwHpLBa4=
github.com/coder/flog v1.1.0/go.mod h1:UQlQvrkJBvnRGo69Le8E24Tcl5SJleAAR7gYEHzAmdQ=
github.com/coder/glog v1.0.1-0.20220322161911-7365fe7f2cd1/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
@@ -922,8 +924,8 @@ github.com/coder/pq v1.10.5-0.20250807075151-6ad9b0a25151 h1:YAxwg3lraGNRwoQ18H7
github.com/coder/pq v1.10.5-0.20250807075151-6ad9b0a25151/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/coder/pretty v0.0.0-20230908205945-e89ba86370e0 h1:3A0ES21Ke+FxEM8CXx9n47SZOKOpgSE1bbJzlE4qPVs=
github.com/coder/pretty v0.0.0-20230908205945-e89ba86370e0/go.mod h1:5UuS2Ts+nTToAMeOjNlnHFkPahrtDkmpydBen/3wgZc=
github.com/coder/preview v1.0.3 h1:et0/frnLB68PPwsGaa1KAZQdBKBxNSqzMplYKsBpcNA=
github.com/coder/preview v1.0.3/go.mod h1:hQtBEqOFMJ3SHl9Q9pVvDA9CpeCEXBwbONNK29+3MLk=
github.com/coder/preview v1.0.4 h1:f506bnyhHtI3ICl/8Eb/gemcKvm/AGzQ91uyxjF+D9k=
github.com/coder/preview v1.0.4/go.mod h1:PpLayC3ngQQ0iUhW2yVRFszOooto4JrGGMomv1rqUvA=
github.com/coder/quartz v0.2.1 h1:QgQ2Vc1+mvzewg2uD/nj8MJ9p9gE+QhGJm+Z+NGnrSE=
github.com/coder/quartz v0.2.1/go.mod h1:vsiCc+AHViMKH2CQpGIpFgdHIEQsxwm8yCscqKmzbRA=
github.com/coder/retry v1.5.1 h1:iWu8YnD8YqHs3XwqrqsjoBTAVqT9ml6z9ViJ2wlMiqc=
@@ -932,14 +934,14 @@ github.com/coder/serpent v0.10.0 h1:ofVk9FJXSek+SmL3yVE3GoArP83M+1tX+H7S4t8BSuM=
github.com/coder/serpent v0.10.0/go.mod h1:cZFW6/fP+kE9nd/oRkEHJpG6sXCtQ+AX7WMMEHv0Y3Q=
github.com/coder/ssh v0.0.0-20231128192721-70855dedb788 h1:YoUSJ19E8AtuUFVYBpXuOD6a/zVP3rcxezNsoDseTUw=
github.com/coder/ssh v0.0.0-20231128192721-70855dedb788/go.mod h1:aGQbuCLyhRLMzZF067xc84Lh7JDs1FKwCmF1Crl9dxQ=
github.com/coder/tailscale v1.1.1-0.20250729141742-067f1e5d9716 h1:hi7o0sA+RPBq8Rvvz+hNrC/OTL2897OKREMIRIuQeTs=
github.com/coder/tailscale v1.1.1-0.20250729141742-067f1e5d9716/go.mod h1:l7ml5uu7lFh5hY28lGYM4b/oFSmuPHYX6uk4RAu23Lc=
github.com/coder/tailscale v1.1.1-0.20250829055706-6eafe0f9199e h1:9RKGKzGLHtTvVBQublzDGtCtal3cXP13diCHoAIGPeI=
github.com/coder/tailscale v1.1.1-0.20250829055706-6eafe0f9199e/go.mod h1:jU9T1vEs+DOs8NtGp1F2PT0/TOGVwtg/JCCKYRgvMOs=
github.com/coder/terraform-config-inspect v0.0.0-20250107175719-6d06d90c630e h1:JNLPDi2P73laR1oAclY6jWzAbucf70ASAvf5mh2cME0=
github.com/coder/terraform-config-inspect v0.0.0-20250107175719-6d06d90c630e/go.mod h1:Gz/z9Hbn+4KSp8A2FBtNszfLSdT2Tn/uAKGuVqqWmDI=
github.com/coder/terraform-provider-coder/v2 v2.10.0 h1:cGPMfARGHKb80kZsbDX/t/YKwMOwI5zkIyVCQziHR2M=
github.com/coder/terraform-provider-coder/v2 v2.10.0/go.mod h1:f8xPh0riDTRwqoPWkjas5VgIBaiRiWH+STb0TZw2fgY=
github.com/coder/trivy v0.0.0-20250527170238-9416a59d7019 h1:MHkv/W7l9eRAN9gOG0qZ1TLRGWIIfNi92273vPAQ8Fs=
github.com/coder/trivy v0.0.0-20250527170238-9416a59d7019/go.mod h1:eqk+w9RLBmbd/cB5XfPZFuVn77cf/A6fB7qmEVeSmXk=
github.com/coder/trivy v0.0.0-20250807211036-0bb0acd620a8 h1:VYB/6cIIKsVkwXOAWbqpj4Ux+WwF/XTnRyvHcwfHZ7A=
github.com/coder/trivy v0.0.0-20250807211036-0bb0acd620a8/go.mod h1:O73tP+UvJlI2GQZD060Jt0sf+6alKcGAgORh6sgB0+M=
github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE=
github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
github.com/coder/wgtunnel v0.1.13-0.20240522110300-ade90dfb2da0 h1:C2/eCr+r0a5Auuw3YOiSyLNHkdMtyCZHPFBx7syN4rk=
@@ -948,6 +950,10 @@ github.com/coder/wireguard-go v0.0.0-20240522052547-769cdd7f7818 h1:bNhUTaKl3q0b
github.com/coder/wireguard-go v0.0.0-20240522052547-769cdd7f7818/go.mod h1:fAlLM6hUgnf4Sagxn2Uy5Us0PBgOYWz+63HwHUVGEbw=
github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4=
github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/platforms v1.0.0-rc.1 h1:83KIq4yy1erSRgOVHNk1HYdPvzdJ5CnsWaRoJX4C41E=
@@ -990,12 +996,13 @@ github.com/disintegration/gift v1.2.1 h1:Y005a1X4Z7Uc+0gLpSAsKhWi4qLtsdEcMIbbdvd
github.com/disintegration/gift v1.2.1/go.mod h1:Jh2i7f7Q2BM7Ezno3PhfezbR1xpUg9dUg3/RlKGr4HI=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k=
github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v28.1.1+incompatible h1:49M11BFLsVO1gxY9UX9p/zwkE/rswggs8AdFmXQw51I=
github.com/docker/docker v28.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/cli v28.3.2+incompatible h1:mOt9fcLE7zaACbxW1GeS65RI67wIJrTnqS3hP2huFsY=
github.com/docker/cli v28.3.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI=
github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -1008,8 +1015,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 h1:8EXxF+tCLqaVk8AOC29zl2mnhQjwyLxxOTuhUazWRsg=
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4/go.mod h1:I5sHm0Y0T1u5YjlyqC5GVArM7aNZRUYtTjmJ8mPJFds=
github.com/ebitengine/purego v0.8.3 h1:K+0AjQp63JEZTEMZiwsI9g0+hAMNohwUOtY0RPGexmc=
github.com/ebitengine/purego v0.8.3/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/elastic/go-sysinfo v1.15.1 h1:zBmTnFEXxIQ3iwcQuk7MzaUotmKRp3OabbbWM8TdzIQ=
github.com/elastic/go-sysinfo v1.15.1/go.mod h1:jPSuTgXG+dhhh0GKIyI2Cso+w5lPJ5PvVqKlL8LV/Hk=
github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY=
@@ -1152,8 +1159,8 @@ github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyL
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4=
@@ -1169,8 +1176,8 @@ github.com/gobwas/ws v1.4.0/go.mod h1:G3gNqMNtPppf5XUz7O4shetPpcZ1VJ7zt18dlUeakr
github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.12.0 h1:xHW8t8GPAiGtqz7KxiSqfOEXwpOaqhpYZrTE2MQBgXY=
github.com/gofrs/flock v0.12.0/go.mod h1:FirDy1Ing0mI2+kB6wk+vyyAH+e6xiE+EYA0jnzV9jc=
github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=
github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
@@ -1194,8 +1201,8 @@ github.com/gohugoio/localescompressed v1.0.1/go.mod h1:jBF6q8D7a0vaEmcWPNcAjUZLJ
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-jwt/jwt/v5 v5.2.3 h1:kkGXqQOBSDDWRhWNXTFpqGSCMyh/PLnqUvMGJPDJDs0=
github.com/golang-jwt/jwt/v5 v5.2.3/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-migrate/migrate/v4 v4.18.1 h1:JML/k+t4tpHCpQTCAD62Nu43NUFzHY4CV3uAuvHGC+Y=
github.com/golang-migrate/migrate/v4 v4.18.1/go.mod h1:HAX6m3sQgcdO81tdjn5exv20+3Kb13cmGli1hrD6hks=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
@@ -1265,6 +1272,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU=
github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y=
github.com/google/go-github/v43 v43.0.1-0.20220414155304-00e42332e405 h1:DdHws/YnnPrSywrjNYu2lEHqYHWp/LnEx56w59esd54=
github.com/google/go-github/v43 v43.0.1-0.20220414155304-00e42332e405/go.mod h1:4RgUDSnsxP19d65zJWqvqJ/poJxBCvmna50eXmIvoR8=
github.com/google/go-github/v61 v61.0.0 h1:VwQCBwhyE9JclCI+22/7mLB1PuU9eowCXKY5pNlu1go=
@@ -1299,8 +1308,8 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18=
github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
@@ -1333,15 +1342,13 @@ github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/hairyhenderson/go-codeowners v0.7.0 h1:s0W4wF8bdsBEjTWzwzSlsatSthWtTAF2xLgo4a4RwAo=
github.com/hairyhenderson/go-codeowners v0.7.0/go.mod h1:wUlNgQ3QjqC4z8DnM5nnCYVq/icpqXJyJOukKx5U8/Q=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -1363,8 +1370,8 @@ github.com/hashicorp/go-plugin v1.6.3 h1:xgHB+ZUSYeuJi96WtxEjzi23uh7YQpznjGh0U0U
github.com/hashicorp/go-plugin v1.6.3/go.mod h1:MRobyh+Wc/nYy1V4KAXUiYfzxoYhs7V1mlH1Z7iY2h0=
github.com/hashicorp/go-reap v0.0.0-20170704170343-bf58d8a43e7b h1:3GrpnZQBxcMj1gCXQLelfjCT1D5MPGTuGMKHVzSIH6A=
github.com/hashicorp/go-reap v0.0.0-20170704170343-bf58d8a43e7b/go.mod h1:qIFzeFcJU3OIFk/7JreWXcUjFmcCaeHTH9KoNyHYVCs=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
github.com/hashicorp/go-terraform-address v0.0.0-20240523040243-ccea9d309e0c h1:5v6L/m/HcAZYbrLGYBpPkcCVtDWwIgFxq2+FUmfPxPk=
@@ -1419,8 +1426,6 @@ github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwso
github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE=
github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA=
github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI=
github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E=
github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0=
github.com/jackmordaunt/icns/v3 v3.0.1 h1:xxot6aNuGrU+lNgxz5I5H0qSeCjNKp8uTXB1j8D4S3o=
github.com/jackmordaunt/icns/v3 v3.0.1/go.mod h1:5sHL59nqTd2ynTnowxB/MDQFhKNqkK8X687uKNygaSQ=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
@@ -1511,8 +1516,8 @@ github.com/makeworld-the-better-one/dither/v2 v2.4.0 h1:Az/dYXiTcwcRSe59Hzw4RI1r
github.com/makeworld-the-better-one/dither/v2 v2.4.0/go.mod h1:VBtN8DXO7SNtyGmLiGA7IsFeKrBkQPze1/iAeM95arc=
github.com/marekm4/color-extractor v1.2.1 h1:3Zb2tQsn6bITZ8MBVhc33Qn1k5/SEuZ18mrXGUqIwn0=
github.com/marekm4/color-extractor v1.2.1/go.mod h1:90VjmiHI6M8ez9eYUaXLdcKnS+BAOp7w+NpwBdkJmpA=
github.com/mark3labs/mcp-go v0.38.0 h1:E5tmJiIXkhwlV0pLAwAT0O5ZjUZSISE/2Jxg+6vpq4I=
github.com/mark3labs/mcp-go v0.38.0/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g=
github.com/mark3labs/mcp-go v0.32.0 h1:fgwmbfL2gbd67obg57OfV2Dnrhs1HtSdlY/i5fn7MU8=
github.com/mark3labs/mcp-go v0.32.0/go.mod h1:rXqOudj/djTORU/ThxYx8fqEVj/5pvTuuebQ2RC7uk4=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -1544,8 +1549,8 @@ github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI
github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI=
github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM=
github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk=
github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
@@ -1576,8 +1581,8 @@ github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
github.com/mocktools/go-smtp-mock/v2 v2.5.0 h1:0wUW3YhTHUO6SEqWczCHpLynwIfXieGtxpWJa44YVCM=
github.com/mocktools/go-smtp-mock/v2 v2.5.0/go.mod h1:h9AOf/IXLSU2m/1u4zsjtOM/WddPwdOUBz56dV9f81M=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -1619,8 +1624,8 @@ github.com/olekukonko/ll v0.0.8 h1:sbGZ1Fx4QxJXEqL/6IG8GEFnYojUSQ45dJVwN2FH2fc=
github.com/olekukonko/ll v0.0.8/go.mod h1:En+sEW0JNETl26+K8eZ6/W4UQ7CYSrrgg/EdIYT2H8g=
github.com/olekukonko/tablewriter v1.0.8 h1:f6wJzHg4QUtJdvrVPKco4QTrAylgaU0+b9br/lJxEiQ=
github.com/olekukonko/tablewriter v1.0.8/go.mod h1:H428M+HzoUXC6JU2Abj9IT9ooRmdq9CxuDmKMtrOCMs=
github.com/open-policy-agent/opa v1.4.2 h1:ag4upP7zMsa4WE2p1pwAFeG4Pn3mNwfAx9DLhhJfbjU=
github.com/open-policy-agent/opa v1.4.2/go.mod h1:DNzZPKqKh4U0n0ANxcCVlw8lCSv2c+h5G/3QvSYdWZ8=
github.com/open-policy-agent/opa v1.6.0 h1:/S/cnNQJ2MUMNzizHPbisTWBHowmLkPrugY5jjkPlRQ=
github.com/open-policy-agent/opa v1.6.0/go.mod h1:zFmw4P+W62+CWGYRDDswfVYSCnPo6oYaktQnfIaRFC4=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.120.1 h1:lK/3zr73guK9apbXTcnDnYrC0YCQ25V3CIULYz3k2xU=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.120.1/go.mod h1:01TvyaK8x640crO2iFwW/6CFCZgNsOvOGH3B5J239m0=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.120.1 h1:TCyOus9tym82PD1VYtthLKMVMlVyRwtDI4ck4SR2+Ok=
@@ -1641,6 +1646,8 @@ github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCy
github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXRvO7KCwWVjE=
github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0=
github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac=
github.com/package-url/packageurl-go v0.1.3 h1:4juMED3hHiz0set3Vq3KeQ75KD1avthoXLtmE3I0PLs=
github.com/package-url/packageurl-go v0.1.3/go.mod h1:nKAWB8E6uk1MHqiS/lQb9pYBGH2+mdJ2PJc2s50dQY0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
@@ -1719,8 +1726,8 @@ github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0t
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
github.com/samber/lo v1.50.0 h1:XrG0xOeHs+4FQ8gJR97zDz5uOFMW7OwFWiFVzqopKgY=
github.com/samber/lo v1.50.0/go.mod h1:RjZyNk6WSnUFRKK6EyOhsRJMqft3G+pg7dCWHQCWvsc=
github.com/samber/lo v1.51.0 h1:kysRYLbHy/MB7kQZf5DSN50JHmMsNEdeY24VzJFu7wI=
github.com/samber/lo v1.51.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM=
github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc=
@@ -1731,8 +1738,8 @@ github.com/sergeymakinen/go-ico v1.0.0-beta.0 h1:m5qKH7uPKLdrygMWxbamVn+tl2HfiA3
github.com/sergeymakinen/go-ico v1.0.0-beta.0/go.mod h1:wQ47mTczswBO5F0NoDt7O0IXgnV4Xy3ojrroMQzyhUk=
github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/shirou/gopsutil/v4 v4.25.4 h1:cdtFO363VEOOFrUCjZRh4XVJkb548lyF0q0uTeMqYPw=
github.com/shirou/gopsutil/v4 v4.25.4/go.mod h1:xbuxyoZj+UsgnZrENu3lQivsngRR5BdjbJwf2fv4szA=
github.com/shirou/gopsutil/v4 v4.25.5 h1:rtd9piuSMGeU8g1RMXjZs9y9luK5BwtnG7dZaQUJAsc=
github.com/shirou/gopsutil/v4 v4.25.5/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
@@ -1747,8 +1754,8 @@ github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0b
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE=
github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
github.com/sqlc-dev/pqtype v0.3.0 h1:b09TewZ3cSnO5+M1Kqq05y0+OjqIptxELaSayg7bmqk=
@@ -1771,8 +1778,8 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/swaggest/assertjson v1.9.0 h1:dKu0BfJkIxv/xe//mkCrK5yZbs79jL7OVf9Ija7o2xQ=
github.com/swaggest/assertjson v1.9.0/go.mod h1:b+ZKX2VRiUjxfUIal0HDN85W0nHPAYUbYH5WkkSsFsU=
github.com/swaggo/files/v2 v2.0.0 h1:hmAt8Dkynw7Ssz46F6pn8ok6YmGZqHSVLZ+HQM7i0kw=
@@ -1803,10 +1810,10 @@ github.com/tdewolff/parse/v2 v2.8.1 h1:J5GSHru6o3jF1uLlEKVXkDxxcVx6yzOlIVIotK4w2
github.com/tdewolff/parse/v2 v2.8.1/go.mod h1:Hwlni2tiVNKyzR1o6nUs4FOF07URA+JLBLd6dlIXYqo=
github.com/tdewolff/test v1.0.11 h1:FdLbwQVHxqG16SlkGveC0JVyrJN62COWTRyUFzfbtBE=
github.com/tdewolff/test v1.0.11/go.mod h1:XPuWBzvdUzhCuxWO1ojpXsyzsA5bFoS3tO/Q3kFuTG8=
github.com/testcontainers/testcontainers-go v0.37.0 h1:L2Qc0vkTw2EHWQ08djon0D2uw7Z/PtHS/QzZZ5Ra/hg=
github.com/testcontainers/testcontainers-go v0.37.0/go.mod h1:QPzbxZhQ6Bclip9igjLFj6z0hs01bU8lrl2dHQmgFGM=
github.com/testcontainers/testcontainers-go/modules/localstack v0.37.0 h1:nPuxUYseqS0eYJg7KDJd95PhoMhdpTnSNtkDLwWFngo=
github.com/testcontainers/testcontainers-go/modules/localstack v0.37.0/go.mod h1:Mw+N4qqJ5iWbg45yWsdLzICfeCEwvYNudfAHHFqCU8Q=
github.com/testcontainers/testcontainers-go v0.38.0 h1:d7uEapLcv2P8AvH8ahLqDMMxda2W9gQN1nRbHS28HBw=
github.com/testcontainers/testcontainers-go v0.38.0/go.mod h1:C52c9MoHpWO+C4aqmgSU+hxlR5jlEayWtgYrb8Pzz1w=
github.com/testcontainers/testcontainers-go/modules/localstack v0.38.0 h1:3ljIy6FmHtFhZsZwsaMIj/27nCRm0La7N/dl5Jou8AA=
github.com/testcontainers/testcontainers-go/modules/localstack v0.38.0/go.mod h1:BTsbqWC9huPV8Jg8k46Jz4x1oRAA9XGxneuuOOIrtKY=
github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I=
github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM=
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
@@ -1834,14 +1841,16 @@ github.com/u-root/u-root v0.14.0/go.mod h1:hAyZorapJe4qzbLWlAkmSVCJGbfoU9Pu4jpJ1
github.com/u-root/uio v0.0.0-20240209044354-b3d14b93376a h1:BH1SOPEvehD2kVrndDnGJiUF0TrBpNs+iyYocu6h0og=
github.com/u-root/uio v0.0.0-20240209044354-b3d14b93376a/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY=
github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/unrolled/secure v1.17.0 h1:Io7ifFgo99Bnh0J7+Q+qcMzWM6kaDPCA5FroFZEdbWU=
github.com/unrolled/secure v1.17.0/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.65.0 h1:j/u3uzFEGFfRxw79iYzJN+TteTJwbYkru9uDp3d0Yf8=
github.com/valyala/fasthttp v1.65.0/go.mod h1:P/93/YkKPMsKSnATEeELUCkG8a7Y+k99uxNHVbKINr4=
github.com/vektah/gqlparser/v2 v2.5.28 h1:bIulcl3LF69ba6EiZVGD88y4MkM+Jxrf3P2MX8xLRkY=
github.com/vektah/gqlparser/v2 v2.5.28/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo=
github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=
github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
@@ -1860,8 +1869,6 @@ github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAh
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/wagslane/go-password-validator v0.3.0 h1:vfxOPzGHkz5S146HDpavl0cw1DSVP061Ry2PX0/ON6I=
github.com/wagslane/go-password-validator v0.3.0/go.mod h1:TI1XJ6T5fRdRnHqHt14pvy1tNVnrwe7m3/f1f2fDphQ=
github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc=
github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw=
github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
@@ -1959,12 +1966,12 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:
go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs=
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.37.0 h1:6VjV6Et+1Hd2iLZEPtdV7vie80Yyqf7oikJLjQ/myi0=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.37.0/go.mod h1:u8hcp8ji5gaM/RfcOo8z9NMnf1pVLfVY7lBY2VOGuUU=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 h1:SNhVp/9q4Go/XHBkQ1/d5u9P/U+L1yaGPoi0x+mStaI=
@@ -1982,8 +1989,8 @@ go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXe
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os=
go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
@@ -1995,6 +2002,10 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE=
go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI=
go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8=
go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g=
go4.org/netipx v0.0.0-20230728180743-ad4cb58a6516 h1:X66ZEoMN2SuaoI/dfZVYobB6E5zjZyyHUMWlCA7MgGE=
@@ -2031,8 +2042,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -2750,8 +2761,8 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4=
k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA=
k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
kernel.org/pub/linux/libs/security/libcap/cap v1.2.73 h1:Th2b8jljYqkyZKS3aD3N9VpYsQpHuXLgea+SZUIfODA=
@@ -2798,8 +2809,8 @@ rsc.io/qr v0.2.0 h1:6vBLea5/NRMVTz8V66gipeLycZMl/+UlFmk8DvqQ6WY=
rsc.io/qr v0.2.0/go.mod h1:IF+uZjkb9fqyeF/4tlBoynqmQxUoPfWEKh921coOuXs=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
sigs.k8s.io/yaml v1.5.0 h1:M10b2U7aEUY6hRtU870n2VTPgR5RZiL/I6Lcc2F4NUQ=
sigs.k8s.io/yaml v1.5.0/go.mod h1:wZs27Rbxoai4C0f8/9urLZtZtF3avA3gKvGyPdDqTO4=
software.sslmate.com/src/go-pkcs12 v0.2.0 h1:nlFkj7bTysH6VkC4fGphtjXRbezREPgrHuJG20hBGPE=
software.sslmate.com/src/go-pkcs12 v0.2.0/go.mod h1:23rNcYsMabIc1otwLpTkCCPwUq6kQsTyowttG/as0kQ=
storj.io/drpc v0.0.33 h1:yCGZ26r66ZdMP0IcTYsj7WDAUIIjzXk6DJhbhvt9FHI=
+31
View File
@@ -715,6 +715,37 @@ coderd_workspace_latest_build_status{status="failed",template_name="docker",temp
coderd_workspace_builds_total{action="START",owner_email="admin@coder.com",status="failed",template_name="docker",template_version="gallant_wright0",workspace_name="test1"} 1
coderd_workspace_builds_total{action="START",owner_email="admin@coder.com",status="success",template_name="docker",template_version="gallant_wright0",workspace_name="test1"} 1
coderd_workspace_builds_total{action="STOP",owner_email="admin@coder.com",status="success",template_name="docker",template_version="gallant_wright0",workspace_name="test1"} 1
# HELP coderd_workspace_creation_total Total regular (non-prebuilt) workspace creations by organization, template, and preset.
# TYPE coderd_workspace_creation_total counter
coderd_workspace_creation_total{organization_name="{organization}",preset_name="",template_name="docker"} 1
# HELP coderd_workspace_creation_duration_seconds Time to create a workspace by organization, template, preset, and type (regular or prebuild).
# TYPE coderd_workspace_creation_duration_seconds histogram
coderd_workspace_creation_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild",le="1"} 0
coderd_workspace_creation_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild",le="10"} 1
coderd_workspace_creation_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild",le="30"} 1
coderd_workspace_creation_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild",le="60"} 1
coderd_workspace_creation_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild",le="300"} 1
coderd_workspace_creation_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild",le="600"} 1
coderd_workspace_creation_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild",le="1800"} 1
coderd_workspace_creation_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild",le="3600"} 1
coderd_workspace_creation_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild",le="+Inf"} 1
coderd_workspace_creation_duration_seconds_sum{organization_name="{organization}",preset_name="Falkenstein",template_name="template-example",type="prebuild"} 4.406214
coderd_workspace_creation_duration_seconds_count{organization_name="{organization}",preset_name="Falkenstein",template_name="template-example",type="prebuild"} 1
# HELP coderd_prebuilt_workspace_claim_duration_seconds Time to claim a prebuilt workspace by organization, template, and preset.
# TYPE coderd_prebuilt_workspace_claim_duration_seconds histogram
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="1"} 0
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="5"} 1
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="10"} 1
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="20"} 1
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="30"} 1
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="60"} 1
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="120"} 1
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="180"} 1
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="240"} 1
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="300"} 1
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="+Inf"} 1
coderd_prebuilt_workspace_claim_duration_seconds_sum{organization_name="{organization}",preset_name="Falkenstein",template_name="docker"} 4.860075
coderd_prebuilt_workspace_claim_duration_seconds_count{organization_name="{organization}",preset_name="Falkenstein",template_name="docker"} 1
# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 2.4056e-05
@@ -30,7 +30,7 @@ export const RadioGroupItem = React.forwardRef<
<RadioGroupPrimitive.Item
ref={ref}
className={cn(
`aspect-square h-4 w-4 rounded-full border border-solid border-border text-content-primary bg-surface-primary
`relative aspect-square h-4 w-4 rounded-full border border-solid border-border text-content-primary bg-surface-primary
focus:outline-none focus-visible:ring-2 focus-visible:ring-content-link
focus-visible:ring-offset-4 focus-visible:ring-offset-surface-primary
disabled:cursor-not-allowed disabled:opacity-25 disabled:border-surface-invert-primary