Compare commits

...

11 Commits

Author SHA1 Message Date
Dean Sheather e0ebeebb29 chore: apply Dockerfile architecture fixes (#17601) 2025-04-29 09:34:51 -05:00
gcp-cherry-pick-bot[bot] dd50c4ecc9 fix(scripts/release): handle cherry-pick bot titles in check commit metadata (cherry-pick #17535) (#17537)
Co-authored-by: Mathias Fredriksson <mafredri@gmail.com>
2025-04-23 17:46:46 +05:00
gcp-cherry-pick-bot[bot] bda202f3f1 feat: add path & method labels to prometheus metrics (cherry-pick #17362) (#17416) 2025-04-18 21:37:19 +02:00
Michael Suchacz 0f27da0359 feat: extend request logs with auth & DB info and log long lived connections early (#17422) 2025-04-16 19:37:59 +02:00
gcp-cherry-pick-bot[bot] 7c4c5048bc chore: fix gpg forwarding test (cherry-pick #17355) (#17429)
Cherry-picked chore: fix gpg forwarding test (#17355)

Co-authored-by: Dean Sheather <dean@deansheather.com>
2025-04-16 19:11:13 +02:00
gcp-cherry-pick-bot[bot] 17dbb517ad chore: ignore commit metadata check in release script (cherry-pick #16495) (#16831)
Cherry-picked chore: ignore commit metadata check in release script
(#16495)

The `scripts/release/check_commit_metadata.sh` check was too strict for
our new cherry-picking process. This turns the error into a warning log.

Co-authored-by: Stephen Kirby <58410745+stirby@users.noreply.github.com>
2025-03-06 11:00:57 -08:00
Jon Ayers d31c994018 chore: upgrade terraform to 1.10.5 (#16519) (#16806)
- Updates `terraform` to
[v1.10.5](https://github.com/hashicorp/terraform/blob/v1.10.5/CHANGELOG.md#1105-january-22-2025)
- Updates provider to >=2.0.0 in provider testdata fixtures
- Fixes provider to required release version for resource monitors
- Fixes missing leading / in volumes in resource monitor tests

---------

Co-authored-by: Colin Adler <colin1adler@gmail.com>
Co-authored-by: Cian Johnston <cian@coder.com>
2025-03-04 14:12:12 -08:00
gcp-cherry-pick-bot[bot] 552c4cd93d fix: handle undefined job while updating build progress (cherry-pick #16732) (#16741)
Cherry-picked fix: handle undefined job while updating build progress
(#16732)

Fixes: https://github.com/coder/coder/issues/15444

Co-authored-by: Marcin Tojek <mtojek@users.noreply.github.com>
2025-02-28 15:08:59 +05:00
gcp-cherry-pick-bot[bot] fb71cb5f96 fix: fix broken troubleshooting link (cherry-pick #16469) (#16472)
Co-authored-by: Marcin Tojek <mtojek@users.noreply.github.com>
fix: fix broken troubleshooting link (#16469)
Fixes: https://github.com/coder/coder/issues/16468
2025-02-06 14:51:07 +05:00
gcp-cherry-pick-bot[bot] 2f32b11831 fix(site): fix agent and web terminal troubleshooting links (cherry-pick #16353) (#16405)
Co-authored-by: M Atif Ali <atif@coder.com>
2025-02-04 12:53:51 +05:00
Stephen Kirby a9775fa3d5 chore: cherry-pick items for 2.19 (#16412)
Co-authored-by: Hugo Dutka <hugo@coder.com>
Co-authored-by: Danielle Maywood <danielle@themaywoods.com>
Co-authored-by: Edward Angert <EdwardAngert@users.noreply.github.com>
Co-authored-by: ケイラ <mckayla@hey.com>
2025-02-03 17:06:05 -06:00
150 changed files with 6167 additions and 1122 deletions
+1 -1
View File
@@ -7,5 +7,5 @@ runs:
- name: Install Terraform
uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3.1.2
with:
terraform_version: 1.9.8
terraform_version: 1.10.5
terraform_wrapper: false
+3
View File
@@ -78,3 +78,6 @@ result
# Zed
.zed_server
# dlv debug binaries for go tests
__debug_bin*
+6 -2
View File
@@ -563,8 +563,8 @@ GEN_FILES := \
site/e2e/provisionerGenerated.ts \
examples/examples.gen.json \
$(TAILNETTEST_MOCKS) \
coderd/database/pubsub/psmock/psmock.go
coderd/database/pubsub/psmock/psmock.go \
coderd/httpmw/loggermw/loggermock/loggermock.go
# all gen targets should be added here and to gen/mark-fresh
gen: gen/db $(GEN_FILES)
@@ -598,6 +598,7 @@ gen/mark-fresh:
examples/examples.gen.json \
$(TAILNETTEST_MOCKS) \
coderd/database/pubsub/psmock/psmock.go \
coderd/httpmw/loggermw/loggermock/loggermock.go \
"
for file in $$files; do
@@ -629,6 +630,9 @@ coderd/database/dbmock/dbmock.go: coderd/database/db.go coderd/database/querier.
coderd/database/pubsub/psmock/psmock.go: coderd/database/pubsub/pubsub.go
go generate ./coderd/database/pubsub/psmock
coderd/httpmw/loggermw/loggermock/loggermock.go: coderd/httpmw/loggermw/logger.go
go generate ./coderd/httpmw/loggermw/loggermock/
$(TAILNETTEST_MOCKS): tailnet/coordinator.go tailnet/service.go
go generate ./tailnet/tailnettest/
+4 -4
View File
@@ -120,7 +120,7 @@ func Agent(ctx context.Context, writer io.Writer, agentID uuid.UUID, opts AgentO
if agent.Status == codersdk.WorkspaceAgentTimeout {
now := time.Now()
sw.Log(now, codersdk.LogLevelInfo, "The workspace agent is having trouble connecting, wait for it to connect or restart your workspace.")
sw.Log(now, codersdk.LogLevelInfo, troubleshootingMessage(agent, fmt.Sprintf("%s/templates#agent-connection-issues", opts.DocsURL)))
sw.Log(now, codersdk.LogLevelInfo, troubleshootingMessage(agent, fmt.Sprintf("%s/admin/templates/troubleshooting#agent-connection-issues", opts.DocsURL)))
for agent.Status == codersdk.WorkspaceAgentTimeout {
if agent, err = fetch(); err != nil {
return xerrors.Errorf("fetch: %w", err)
@@ -225,13 +225,13 @@ func Agent(ctx context.Context, writer io.Writer, agentID uuid.UUID, opts AgentO
sw.Fail(stage, safeDuration(sw, agent.ReadyAt, agent.StartedAt))
// Use zero time (omitted) to separate these from the startup logs.
sw.Log(time.Time{}, codersdk.LogLevelWarn, "Warning: A startup script exited with an error and your workspace may be incomplete.")
sw.Log(time.Time{}, codersdk.LogLevelWarn, troubleshootingMessage(agent, fmt.Sprintf("%s/templates#startup-script-exited-with-an-error", opts.DocsURL)))
sw.Log(time.Time{}, codersdk.LogLevelWarn, troubleshootingMessage(agent, fmt.Sprintf("%s/admin/templates/troubleshooting#startup-script-exited-with-an-error", opts.DocsURL)))
default:
switch {
case agent.LifecycleState.Starting():
// Use zero time (omitted) to separate these from the startup logs.
sw.Log(time.Time{}, codersdk.LogLevelWarn, "Notice: The startup scripts are still running and your workspace may be incomplete.")
sw.Log(time.Time{}, codersdk.LogLevelWarn, troubleshootingMessage(agent, fmt.Sprintf("%s/templates#your-workspace-may-be-incomplete", opts.DocsURL)))
sw.Log(time.Time{}, codersdk.LogLevelWarn, troubleshootingMessage(agent, fmt.Sprintf("%s/admin/templates/troubleshooting#your-workspace-may-be-incomplete", opts.DocsURL)))
// Note: We don't complete or fail the stage here, it's
// intentionally left open to indicate this stage didn't
// complete.
@@ -253,7 +253,7 @@ func Agent(ctx context.Context, writer io.Writer, agentID uuid.UUID, opts AgentO
stage := "The workspace agent lost connection"
sw.Start(stage)
sw.Log(time.Now(), codersdk.LogLevelWarn, "Wait for it to reconnect or restart your workspace.")
sw.Log(time.Now(), codersdk.LogLevelWarn, troubleshootingMessage(agent, fmt.Sprintf("%s/templates#agent-connection-issues", opts.DocsURL)))
sw.Log(time.Now(), codersdk.LogLevelWarn, troubleshootingMessage(agent, fmt.Sprintf("%s/admin/templates/troubleshooting#agent-connection-issues", opts.DocsURL)))
disconnectedAt := agent.DisconnectedAt
for agent.Status == codersdk.WorkspaceAgentDisconnected {
+1 -1
View File
@@ -159,7 +159,7 @@ func (r *RootCmd) ping() *serpent.Command {
LocalNetInfo: ni,
Verbose: r.verbose,
PingP2P: didP2p,
TroubleshootingURL: appearanceConfig.DocsURL + "/networking/troubleshooting",
TroubleshootingURL: appearanceConfig.DocsURL + "/admin/networking/troubleshooting",
}
awsRanges, err := cliutil.FetchAWSIPRanges(diagCtx, cliutil.AWSIPRangesURL)
+35 -33
View File
@@ -781,40 +781,42 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
// This should be output before the logs start streaming.
cliui.Infof(inv.Stdout, "\n==> Logs will stream in below (press ctrl+c to gracefully exit):")
if vals.Telemetry.Enable {
vals, err := vals.WithoutSecrets()
if err != nil {
return xerrors.Errorf("remove secrets from deployment values: %w", err)
}
options.Telemetry, err = telemetry.New(telemetry.Options{
BuiltinPostgres: builtinPostgres,
DeploymentID: deploymentID,
Database: options.Database,
Logger: logger.Named("telemetry"),
URL: vals.Telemetry.URL.Value(),
Tunnel: tunnel != nil,
DeploymentConfig: vals,
ParseLicenseJWT: func(lic *telemetry.License) error {
// This will be nil when running in AGPL-only mode.
if options.ParseLicenseClaims == nil {
return nil
}
email, trial, err := options.ParseLicenseClaims(lic.JWT)
if err != nil {
return err
}
if email != "" {
lic.Email = &email
}
lic.Trial = &trial
deploymentConfigWithoutSecrets, err := vals.WithoutSecrets()
if err != nil {
return xerrors.Errorf("remove secrets from deployment values: %w", err)
}
telemetryReporter, err := telemetry.New(telemetry.Options{
Disabled: !vals.Telemetry.Enable.Value(),
BuiltinPostgres: builtinPostgres,
DeploymentID: deploymentID,
Database: options.Database,
Logger: logger.Named("telemetry"),
URL: vals.Telemetry.URL.Value(),
Tunnel: tunnel != nil,
DeploymentConfig: deploymentConfigWithoutSecrets,
ParseLicenseJWT: func(lic *telemetry.License) error {
// This will be nil when running in AGPL-only mode.
if options.ParseLicenseClaims == nil {
return nil
},
})
if err != nil {
return xerrors.Errorf("create telemetry reporter: %w", err)
}
defer options.Telemetry.Close()
}
email, trial, err := options.ParseLicenseClaims(lic.JWT)
if err != nil {
return err
}
if email != "" {
lic.Email = &email
}
lic.Trial = &trial
return nil
},
})
if err != nil {
return xerrors.Errorf("create telemetry reporter: %w", err)
}
defer telemetryReporter.Close()
if vals.Telemetry.Enable.Value() {
options.Telemetry = telemetryReporter
} else {
logger.Warn(ctx, fmt.Sprintf(`telemetry disabled, unable to notify of security issues. Read more: %s/admin/setup/telemetry`, vals.DocsURL.String()))
}
+168 -18
View File
@@ -39,6 +39,7 @@ import (
"tailscale.com/types/key"
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/coder/v2/buildinfo"
"github.com/coder/coder/v2/cli"
"github.com/coder/coder/v2/cli/clitest"
"github.com/coder/coder/v2/cli/config"
@@ -947,36 +948,40 @@ func TestServer(t *testing.T) {
t.Run("Telemetry", func(t *testing.T) {
t.Parallel()
deployment := make(chan struct{}, 64)
snapshot := make(chan *telemetry.Snapshot, 64)
r := chi.NewRouter()
r.Post("/deployment", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusAccepted)
deployment <- struct{}{}
})
r.Post("/snapshot", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusAccepted)
ss := &telemetry.Snapshot{}
err := json.NewDecoder(r.Body).Decode(ss)
require.NoError(t, err)
snapshot <- ss
})
server := httptest.NewServer(r)
defer server.Close()
telemetryServerURL, deployment, snapshot := mockTelemetryServer(t)
inv, _ := clitest.New(t,
inv, cfg := clitest.New(t,
"server",
"--in-memory",
"--http-address", ":0",
"--access-url", "http://example.com",
"--telemetry",
"--telemetry-url", server.URL,
"--telemetry-url", telemetryServerURL.String(),
"--cache-dir", t.TempDir(),
)
clitest.Start(t, inv)
<-deployment
<-snapshot
accessURL := waitAccessURL(t, cfg)
ctx := testutil.Context(t, testutil.WaitMedium)
client := codersdk.New(accessURL)
body, err := client.Request(ctx, http.MethodGet, "/", nil)
require.NoError(t, err)
require.NoError(t, body.Body.Close())
require.Eventually(t, func() bool {
snap := <-snapshot
htmlFirstServedFound := false
for _, item := range snap.TelemetryItems {
if item.Key == string(telemetry.TelemetryItemKeyHTMLFirstServedAt) {
htmlFirstServedFound = true
}
}
return htmlFirstServedFound
}, testutil.WaitMedium, testutil.IntervalFast, "no html_first_served telemetry item")
})
t.Run("Prometheus", func(t *testing.T) {
t.Parallel()
@@ -1990,3 +1995,148 @@ func TestServer_DisabledDERP(t *testing.T) {
err = c.Connect(ctx)
require.Error(t, err)
}
// runServerOpts configures a single server launch performed by the runServer
// helper in TestServer_TelemetryDisabled_FinalReport.
type runServerOpts struct {
	// waitForSnapshot blocks startup until "submitted snapshot" appears in the
	// server's log output.
	waitForSnapshot bool
	// telemetryDisabled starts the server with --telemetry=false.
	telemetryDisabled bool
	// waitForTelemetryDisabledCheck blocks startup until "finished telemetry
	// status check" appears in the server's log output.
	waitForTelemetryDisabledCheck bool
}
// TestServer_TelemetryDisabled_FinalReport verifies the telemetry shutdown
// behavior across four consecutive server runs against the same database:
//  1. telemetry disabled: nothing is reported;
//  2. telemetry enabled: a deployment+snapshot pair is sent on start and again
//     on shutdown;
//  3. telemetry disabled again (after having been enabled): a single final
//     snapshot containing only the "telemetry enabled = false" item is sent;
//  4. telemetry still disabled: nothing further is sent.
func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
	t.Parallel()
	if !dbtestutil.WillUsePostgres() {
		t.Skip("this test requires postgres")
	}
	telemetryServerURL, deployment, snapshot := mockTelemetryServer(t)
	dbConnURL, err := dbtestutil.Open(t)
	require.NoError(t, err)
	// The cache dir and database are shared across all runs so later runs can
	// observe telemetry state persisted by earlier ones.
	cacheDir := t.TempDir()
	// runServer starts a coder server with the given options and returns a
	// channel carrying the server's exit error plus a cancel func to stop it.
	// It blocks until the requested log lines have been observed (or, for the
	// default opts, until the server goroutine has been started).
	runServer := func(t *testing.T, opts runServerOpts) (chan error, context.CancelFunc) {
		ctx, cancelFunc := context.WithCancel(context.Background())
		inv, _ := clitest.New(t,
			"server",
			"--postgres-url", dbConnURL,
			"--http-address", ":0",
			"--access-url", "http://example.com",
			"--telemetry="+strconv.FormatBool(!opts.telemetryDisabled),
			"--telemetry-url", telemetryServerURL.String(),
			"--cache-dir", cacheDir,
			"--log-filter", ".*",
		)
		// Buffered so both goroutines below can signal without blocking.
		finished := make(chan bool, 2)
		errChan := make(chan error, 1)
		pty := ptytest.New(t).Attach(inv)
		go func() {
			errChan <- inv.WithContext(ctx).Run()
			finished <- true
		}()
		go func() {
			defer func() {
				finished <- true
			}()
			if opts.waitForSnapshot {
				pty.ExpectMatchContext(testutil.Context(t, testutil.WaitLong), "submitted snapshot")
			}
			if opts.waitForTelemetryDisabledCheck {
				pty.ExpectMatchContext(testutil.Context(t, testutil.WaitLong), "finished telemetry status check")
			}
		}()
		// Wait for whichever goroutine finishes first: either the expected log
		// lines were seen, or the server exited early.
		<-finished
		return errChan, cancelFunc
	}
	// waitForShutdown drains the server's exit error, failing the test if the
	// server does not stop within testutil.WaitMedium.
	waitForShutdown := func(t *testing.T, errChan chan error) error {
		t.Helper()
		select {
		case err := <-errChan:
			return err
		case <-time.After(testutil.WaitMedium):
			t.Fatalf("timed out waiting for server to shutdown")
		}
		return nil
	}
	// Run 1: telemetry disabled from the start.
	errChan, cancelFunc := runServer(t, runServerOpts{telemetryDisabled: true, waitForTelemetryDisabledCheck: true})
	cancelFunc()
	require.NoError(t, waitForShutdown(t, errChan))
	// Since telemetry was disabled, we expect no deployments or snapshots.
	require.Empty(t, deployment)
	require.Empty(t, snapshot)
	// Run 2: telemetry enabled.
	errChan, cancelFunc = runServer(t, runServerOpts{waitForSnapshot: true})
	cancelFunc()
	require.NoError(t, waitForShutdown(t, errChan))
	// we expect to see a deployment and a snapshot twice:
	// 1. the first pair is sent when the server starts
	// 2. the second pair is sent when the server shuts down
	for i := 0; i < 2; i++ {
		select {
		case <-snapshot:
		case <-time.After(testutil.WaitShort / 2):
			t.Fatalf("timed out waiting for snapshot")
		}
		select {
		case <-deployment:
		case <-time.After(testutil.WaitShort / 2):
			t.Fatalf("timed out waiting for deployment")
		}
	}
	// Run 3: telemetry disabled after previously being enabled.
	errChan, cancelFunc = runServer(t, runServerOpts{telemetryDisabled: true, waitForTelemetryDisabledCheck: true})
	cancelFunc()
	require.NoError(t, waitForShutdown(t, errChan))
	// Since telemetry is disabled, we expect no deployment. We expect a snapshot
	// with the telemetry disabled item.
	require.Empty(t, deployment)
	select {
	case ss := <-snapshot:
		require.Len(t, ss.TelemetryItems, 1)
		require.Equal(t, string(telemetry.TelemetryItemKeyTelemetryEnabled), ss.TelemetryItems[0].Key)
		require.Equal(t, "false", ss.TelemetryItems[0].Value)
	case <-time.After(testutil.WaitShort / 2):
		t.Fatalf("timed out waiting for snapshot")
	}
	// Run 4: telemetry still disabled; the final report was already sent.
	errChan, cancelFunc = runServer(t, runServerOpts{telemetryDisabled: true, waitForTelemetryDisabledCheck: true})
	cancelFunc()
	require.NoError(t, waitForShutdown(t, errChan))
	// Since telemetry is disabled and we've already sent a snapshot, we expect no
	// new deployments or snapshots.
	require.Empty(t, deployment)
	require.Empty(t, snapshot)
}
// mockTelemetryServer starts an in-process HTTP server that mimics the
// telemetry ingestion endpoints (/deployment and /snapshot). It returns the
// server's base URL together with buffered channels that receive each decoded
// deployment report and snapshot as they arrive. The server is shut down
// automatically via t.Cleanup.
func mockTelemetryServer(t *testing.T) (*url.URL, chan *telemetry.Deployment, chan *telemetry.Snapshot) {
	t.Helper()
	deployments := make(chan *telemetry.Deployment, 64)
	snapshots := make(chan *telemetry.Snapshot, 64)
	router := chi.NewRouter()
	router.Post("/deployment", func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, buildinfo.Version(), r.Header.Get(telemetry.VersionHeader))
		d := &telemetry.Deployment{}
		require.NoError(t, json.NewDecoder(r.Body).Decode(d))
		deployments <- d
		// Only acknowledge after the report has been handed off, so readers of
		// the channel never race the HTTP response.
		w.WriteHeader(http.StatusAccepted)
	})
	router.Post("/snapshot", func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, buildinfo.Version(), r.Header.Get(telemetry.VersionHeader))
		s := &telemetry.Snapshot{}
		require.NoError(t, json.NewDecoder(r.Body).Decode(s))
		snapshots <- s
		// Only acknowledge after the snapshot has been handed off.
		w.WriteHeader(http.StatusAccepted)
	})
	srv := httptest.NewServer(router)
	t.Cleanup(srv.Close)
	baseURL, err := url.Parse(srv.URL)
	require.NoError(t, err)
	return baseURL, deployments, snapshots
}
+3 -1
View File
@@ -1843,7 +1843,9 @@ Expire-Date: 0
tpty.WriteLine("gpg --list-keys && echo gpg-''-listkeys-command-done")
listKeysOutput := tpty.ExpectMatch("gpg--listkeys-command-done")
require.Contains(t, listKeysOutput, "[ultimate] Coder Test <test@coder.com>")
require.Contains(t, listKeysOutput, "[ultimate] Dean Sheather (work key) <dean@coder.com>")
// It's fine that this key is expired. We're just testing that the key trust
// gets synced properly.
require.Contains(t, listKeysOutput, "[ expired] Dean Sheather (work key) <dean@coder.com>")
// Try to sign something. This demonstrates that the forwarding is
// working as expected, since the workspace doesn't have access to the
+410
View File
@@ -3438,6 +3438,100 @@ const docTemplate = `{
}
}
},
"/organizations/{organization}/settings/idpsync/groups/config": {
"patch": {
"security": [
{
"CoderSessionToken": []
}
],
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Enterprise"
],
"summary": "Update group IdP Sync config",
"operationId": "update-group-idp-sync-config",
"parameters": [
{
"type": "string",
"format": "uuid",
"description": "Organization ID or name",
"name": "organization",
"in": "path",
"required": true
},
{
"description": "New config values",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/codersdk.PatchGroupIDPSyncConfigRequest"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/codersdk.GroupSyncSettings"
}
}
}
}
},
"/organizations/{organization}/settings/idpsync/groups/mapping": {
"patch": {
"security": [
{
"CoderSessionToken": []
}
],
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Enterprise"
],
"summary": "Update group IdP Sync mapping",
"operationId": "update-group-idp-sync-mapping",
"parameters": [
{
"type": "string",
"format": "uuid",
"description": "Organization ID or name",
"name": "organization",
"in": "path",
"required": true
},
{
"description": "Description of the mappings to add and remove",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/codersdk.PatchGroupIDPSyncMappingRequest"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/codersdk.GroupSyncSettings"
}
}
}
}
},
"/organizations/{organization}/settings/idpsync/roles": {
"get": {
"security": [
@@ -3518,6 +3612,100 @@ const docTemplate = `{
}
}
},
"/organizations/{organization}/settings/idpsync/roles/config": {
"patch": {
"security": [
{
"CoderSessionToken": []
}
],
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Enterprise"
],
"summary": "Update role IdP Sync config",
"operationId": "update-role-idp-sync-config",
"parameters": [
{
"type": "string",
"format": "uuid",
"description": "Organization ID or name",
"name": "organization",
"in": "path",
"required": true
},
{
"description": "New config values",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/codersdk.PatchRoleIDPSyncConfigRequest"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/codersdk.RoleSyncSettings"
}
}
}
}
},
"/organizations/{organization}/settings/idpsync/roles/mapping": {
"patch": {
"security": [
{
"CoderSessionToken": []
}
],
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Enterprise"
],
"summary": "Update role IdP Sync mapping",
"operationId": "update-role-idp-sync-mapping",
"parameters": [
{
"type": "string",
"format": "uuid",
"description": "Organization ID or name",
"name": "organization",
"in": "path",
"required": true
},
{
"description": "Description of the mappings to add and remove",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/codersdk.PatchRoleIDPSyncMappingRequest"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/codersdk.RoleSyncSettings"
}
}
}
}
},
"/organizations/{organization}/templates": {
"get": {
"security": [
@@ -4248,6 +4436,84 @@ const docTemplate = `{
}
}
},
"/settings/idpsync/organization/config": {
"patch": {
"security": [
{
"CoderSessionToken": []
}
],
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Enterprise"
],
"summary": "Update organization IdP Sync config",
"operationId": "update-organization-idp-sync-config",
"parameters": [
{
"description": "New config values",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/codersdk.PatchOrganizationIDPSyncConfigRequest"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/codersdk.OrganizationSyncSettings"
}
}
}
}
},
"/settings/idpsync/organization/mapping": {
"patch": {
"security": [
{
"CoderSessionToken": []
}
],
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Enterprise"
],
"summary": "Update organization IdP Sync mapping",
"operationId": "update-organization-idp-sync-mapping",
"parameters": [
{
"description": "Description of the mappings to add and remove",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/codersdk.PatchOrganizationIDPSyncMappingRequest"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/codersdk.OrganizationSyncSettings"
}
}
}
}
},
"/tailnet": {
"get": {
"security": [
@@ -12391,6 +12657,57 @@ const docTemplate = `{
}
}
},
"codersdk.PatchGroupIDPSyncConfigRequest": {
"type": "object",
"properties": {
"auto_create_missing_groups": {
"type": "boolean"
},
"field": {
"type": "string"
},
"regex_filter": {
"$ref": "#/definitions/regexp.Regexp"
}
}
},
"codersdk.PatchGroupIDPSyncMappingRequest": {
"type": "object",
"properties": {
"add": {
"type": "array",
"items": {
"type": "object",
"properties": {
"gets": {
"description": "The ID of the Coder resource the user should be added to",
"type": "string"
},
"given": {
"description": "The IdP claim the user has",
"type": "string"
}
}
}
},
"remove": {
"type": "array",
"items": {
"type": "object",
"properties": {
"gets": {
"description": "The ID of the Coder resource the user should be added to",
"type": "string"
},
"given": {
"description": "The IdP claim the user has",
"type": "string"
}
}
}
}
}
},
"codersdk.PatchGroupRequest": {
"type": "object",
"properties": {
@@ -12420,6 +12737,99 @@ const docTemplate = `{
}
}
},
"codersdk.PatchOrganizationIDPSyncConfigRequest": {
"type": "object",
"properties": {
"assign_default": {
"type": "boolean"
},
"field": {
"type": "string"
}
}
},
"codersdk.PatchOrganizationIDPSyncMappingRequest": {
"type": "object",
"properties": {
"add": {
"type": "array",
"items": {
"type": "object",
"properties": {
"gets": {
"description": "The ID of the Coder resource the user should be added to",
"type": "string"
},
"given": {
"description": "The IdP claim the user has",
"type": "string"
}
}
}
},
"remove": {
"type": "array",
"items": {
"type": "object",
"properties": {
"gets": {
"description": "The ID of the Coder resource the user should be added to",
"type": "string"
},
"given": {
"description": "The IdP claim the user has",
"type": "string"
}
}
}
}
}
},
"codersdk.PatchRoleIDPSyncConfigRequest": {
"type": "object",
"properties": {
"field": {
"type": "string"
}
}
},
"codersdk.PatchRoleIDPSyncMappingRequest": {
"type": "object",
"properties": {
"add": {
"type": "array",
"items": {
"type": "object",
"properties": {
"gets": {
"description": "The ID of the Coder resource the user should be added to",
"type": "string"
},
"given": {
"description": "The IdP claim the user has",
"type": "string"
}
}
}
},
"remove": {
"type": "array",
"items": {
"type": "object",
"properties": {
"gets": {
"description": "The ID of the Coder resource the user should be added to",
"type": "string"
},
"given": {
"description": "The IdP claim the user has",
"type": "string"
}
}
}
}
}
},
"codersdk.PatchTemplateVersionRequest": {
"type": "object",
"properties": {
+374
View File
@@ -3030,6 +3030,88 @@
}
}
},
"/organizations/{organization}/settings/idpsync/groups/config": {
"patch": {
"security": [
{
"CoderSessionToken": []
}
],
"consumes": ["application/json"],
"produces": ["application/json"],
"tags": ["Enterprise"],
"summary": "Update group IdP Sync config",
"operationId": "update-group-idp-sync-config",
"parameters": [
{
"type": "string",
"format": "uuid",
"description": "Organization ID or name",
"name": "organization",
"in": "path",
"required": true
},
{
"description": "New config values",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/codersdk.PatchGroupIDPSyncConfigRequest"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/codersdk.GroupSyncSettings"
}
}
}
}
},
"/organizations/{organization}/settings/idpsync/groups/mapping": {
"patch": {
"security": [
{
"CoderSessionToken": []
}
],
"consumes": ["application/json"],
"produces": ["application/json"],
"tags": ["Enterprise"],
"summary": "Update group IdP Sync mapping",
"operationId": "update-group-idp-sync-mapping",
"parameters": [
{
"type": "string",
"format": "uuid",
"description": "Organization ID or name",
"name": "organization",
"in": "path",
"required": true
},
{
"description": "Description of the mappings to add and remove",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/codersdk.PatchGroupIDPSyncMappingRequest"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/codersdk.GroupSyncSettings"
}
}
}
}
},
"/organizations/{organization}/settings/idpsync/roles": {
"get": {
"security": [
@@ -3100,6 +3182,88 @@
}
}
},
"/organizations/{organization}/settings/idpsync/roles/config": {
"patch": {
"security": [
{
"CoderSessionToken": []
}
],
"consumes": ["application/json"],
"produces": ["application/json"],
"tags": ["Enterprise"],
"summary": "Update role IdP Sync config",
"operationId": "update-role-idp-sync-config",
"parameters": [
{
"type": "string",
"format": "uuid",
"description": "Organization ID or name",
"name": "organization",
"in": "path",
"required": true
},
{
"description": "New config values",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/codersdk.PatchRoleIDPSyncConfigRequest"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/codersdk.RoleSyncSettings"
}
}
}
}
},
"/organizations/{organization}/settings/idpsync/roles/mapping": {
"patch": {
"security": [
{
"CoderSessionToken": []
}
],
"consumes": ["application/json"],
"produces": ["application/json"],
"tags": ["Enterprise"],
"summary": "Update role IdP Sync mapping",
"operationId": "update-role-idp-sync-mapping",
"parameters": [
{
"type": "string",
"format": "uuid",
"description": "Organization ID or name",
"name": "organization",
"in": "path",
"required": true
},
{
"description": "Description of the mappings to add and remove",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/codersdk.PatchRoleIDPSyncMappingRequest"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/codersdk.RoleSyncSettings"
}
}
}
}
},
"/organizations/{organization}/templates": {
"get": {
"security": [
@@ -3744,6 +3908,72 @@
}
}
},
"/settings/idpsync/organization/config": {
"patch": {
"security": [
{
"CoderSessionToken": []
}
],
"consumes": ["application/json"],
"produces": ["application/json"],
"tags": ["Enterprise"],
"summary": "Update organization IdP Sync config",
"operationId": "update-organization-idp-sync-config",
"parameters": [
{
"description": "New config values",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/codersdk.PatchOrganizationIDPSyncConfigRequest"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/codersdk.OrganizationSyncSettings"
}
}
}
}
},
"/settings/idpsync/organization/mapping": {
"patch": {
"security": [
{
"CoderSessionToken": []
}
],
"consumes": ["application/json"],
"produces": ["application/json"],
"tags": ["Enterprise"],
"summary": "Update organization IdP Sync mapping",
"operationId": "update-organization-idp-sync-mapping",
"parameters": [
{
"description": "Description of the mappings to add and remove",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/codersdk.PatchOrganizationIDPSyncMappingRequest"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/codersdk.OrganizationSyncSettings"
}
}
}
}
},
"/tailnet": {
"get": {
"security": [
@@ -11172,6 +11402,57 @@
}
}
},
"codersdk.PatchGroupIDPSyncConfigRequest": {
"type": "object",
"properties": {
"auto_create_missing_groups": {
"type": "boolean"
},
"field": {
"type": "string"
},
"regex_filter": {
"$ref": "#/definitions/regexp.Regexp"
}
}
},
"codersdk.PatchGroupIDPSyncMappingRequest": {
"type": "object",
"properties": {
"add": {
"type": "array",
"items": {
"type": "object",
"properties": {
"gets": {
"description": "The ID of the Coder resource the user should be added to",
"type": "string"
},
"given": {
"description": "The IdP claim the user has",
"type": "string"
}
}
}
},
"remove": {
"type": "array",
"items": {
"type": "object",
"properties": {
"gets": {
"description": "The ID of the Coder resource the user should be added to",
"type": "string"
},
"given": {
"description": "The IdP claim the user has",
"type": "string"
}
}
}
}
}
},
"codersdk.PatchGroupRequest": {
"type": "object",
"properties": {
@@ -11201,6 +11482,99 @@
}
}
},
"codersdk.PatchOrganizationIDPSyncConfigRequest": {
"type": "object",
"properties": {
"assign_default": {
"type": "boolean"
},
"field": {
"type": "string"
}
}
},
"codersdk.PatchOrganizationIDPSyncMappingRequest": {
"type": "object",
"properties": {
"add": {
"type": "array",
"items": {
"type": "object",
"properties": {
"gets": {
"description": "The ID of the Coder resource the user should be added to",
"type": "string"
},
"given": {
"description": "The IdP claim the user has",
"type": "string"
}
}
}
},
"remove": {
"type": "array",
"items": {
"type": "object",
"properties": {
"gets": {
"description": "The ID of the Coder resource the user should be added to",
"type": "string"
},
"given": {
"description": "The IdP claim the user has",
"type": "string"
}
}
}
}
}
},
"codersdk.PatchRoleIDPSyncConfigRequest": {
"type": "object",
"properties": {
"field": {
"type": "string"
}
}
},
"codersdk.PatchRoleIDPSyncMappingRequest": {
"type": "object",
"properties": {
"add": {
"type": "array",
"items": {
"type": "object",
"properties": {
"gets": {
"description": "The ID of the Coder resource the user should be added to",
"type": "string"
},
"given": {
"description": "The IdP claim the user has",
"type": "string"
}
}
}
},
"remove": {
"type": "array",
"items": {
"type": "object",
"properties": {
"gets": {
"description": "The ID of the Coder resource the user should be added to",
"type": "string"
},
"given": {
"description": "The IdP claim the user has",
"type": "string"
}
}
}
}
}
},
"codersdk.PatchTemplateVersionRequest": {
"type": "object",
"properties": {
+4 -1
View File
@@ -63,6 +63,7 @@ import (
"github.com/coder/coder/v2/coderd/healthcheck/derphealth"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/httpmw"
"github.com/coder/coder/v2/coderd/httpmw/loggermw"
"github.com/coder/coder/v2/coderd/metricscache"
"github.com/coder/coder/v2/coderd/notifications"
"github.com/coder/coder/v2/coderd/portsharing"
@@ -585,6 +586,8 @@ func New(options *Options) *API {
AppearanceFetcher: &api.AppearanceFetcher,
BuildInfo: buildInfo,
Entitlements: options.Entitlements,
Telemetry: options.Telemetry,
Logger: options.Logger.Named("site"),
})
api.SiteHandler.Experiments.Store(&experiments)
@@ -785,7 +788,7 @@ func New(options *Options) *API {
tracing.Middleware(api.TracerProvider),
httpmw.AttachRequestID,
httpmw.ExtractRealIP(api.RealIPConfig),
httpmw.Logger(api.Logger),
loggermw.Logger(api.Logger),
rolestore.CustomRoleMW,
prometheusMW,
// Build-Version is helpful for debugging.
+48 -8
View File
@@ -24,6 +24,7 @@ import (
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/httpapi/httpapiconstraints"
"github.com/coder/coder/v2/coderd/httpmw/loggermw"
"github.com/coder/coder/v2/coderd/rbac"
"github.com/coder/coder/v2/coderd/util/slice"
"github.com/coder/coder/v2/provisionersdk"
@@ -162,6 +163,7 @@ func ActorFromContext(ctx context.Context) (rbac.Subject, bool) {
var (
subjectProvisionerd = rbac.Subject{
Type: rbac.SubjectTypeProvisionerd,
FriendlyName: "Provisioner Daemon",
ID: uuid.Nil.String(),
Roles: rbac.Roles([]rbac.Role{
@@ -193,6 +195,7 @@ var (
}.WithCachedASTValue()
subjectAutostart = rbac.Subject{
Type: rbac.SubjectTypeAutostart,
FriendlyName: "Autostart",
ID: uuid.Nil.String(),
Roles: rbac.Roles([]rbac.Role{
@@ -216,6 +219,7 @@ var (
// See unhanger package.
subjectHangDetector = rbac.Subject{
Type: rbac.SubjectTypeHangDetector,
FriendlyName: "Hang Detector",
ID: uuid.Nil.String(),
Roles: rbac.Roles([]rbac.Role{
@@ -236,6 +240,7 @@ var (
// See cryptokeys package.
subjectCryptoKeyRotator = rbac.Subject{
Type: rbac.SubjectTypeCryptoKeyRotator,
FriendlyName: "Crypto Key Rotator",
ID: uuid.Nil.String(),
Roles: rbac.Roles([]rbac.Role{
@@ -254,6 +259,7 @@ var (
// See cryptokeys package.
subjectCryptoKeyReader = rbac.Subject{
Type: rbac.SubjectTypeCryptoKeyReader,
FriendlyName: "Crypto Key Reader",
ID: uuid.Nil.String(),
Roles: rbac.Roles([]rbac.Role{
@@ -271,6 +277,7 @@ var (
}.WithCachedASTValue()
subjectNotifier = rbac.Subject{
Type: rbac.SubjectTypeNotifier,
FriendlyName: "Notifier",
ID: uuid.Nil.String(),
Roles: rbac.Roles([]rbac.Role{
@@ -288,6 +295,7 @@ var (
}.WithCachedASTValue()
subjectSystemRestricted = rbac.Subject{
Type: rbac.SubjectTypeSystemRestricted,
FriendlyName: "System",
ID: uuid.Nil.String(),
Roles: rbac.Roles([]rbac.Role{
@@ -323,6 +331,7 @@ var (
}.WithCachedASTValue()
subjectSystemReadProvisionerDaemons = rbac.Subject{
Type: rbac.SubjectTypeSystemReadProvisionerDaemons,
FriendlyName: "Provisioner Daemons Reader",
ID: uuid.Nil.String(),
Roles: rbac.Roles([]rbac.Role{
@@ -343,47 +352,47 @@ var (
// AsProvisionerd returns a context with an actor that has permissions required
// for provisionerd to function.
func AsProvisionerd(ctx context.Context) context.Context {
return context.WithValue(ctx, authContextKey{}, subjectProvisionerd)
return As(ctx, subjectProvisionerd)
}
// AsAutostart returns a context with an actor that has permissions required
// for autostart to function.
func AsAutostart(ctx context.Context) context.Context {
return context.WithValue(ctx, authContextKey{}, subjectAutostart)
return As(ctx, subjectAutostart)
}
// AsHangDetector returns a context with an actor that has permissions required
// for unhanger.Detector to function.
func AsHangDetector(ctx context.Context) context.Context {
return context.WithValue(ctx, authContextKey{}, subjectHangDetector)
return As(ctx, subjectHangDetector)
}
// AsKeyRotator returns a context with an actor that has permissions required for rotating crypto keys.
func AsKeyRotator(ctx context.Context) context.Context {
return context.WithValue(ctx, authContextKey{}, subjectCryptoKeyRotator)
return As(ctx, subjectCryptoKeyRotator)
}
// AsKeyReader returns a context with an actor that has permissions required for reading crypto keys.
func AsKeyReader(ctx context.Context) context.Context {
return context.WithValue(ctx, authContextKey{}, subjectCryptoKeyReader)
return As(ctx, subjectCryptoKeyReader)
}
// AsNotifier returns a context with an actor that has permissions required for
// creating/reading/updating/deleting notifications.
func AsNotifier(ctx context.Context) context.Context {
return context.WithValue(ctx, authContextKey{}, subjectNotifier)
return As(ctx, subjectNotifier)
}
// AsSystemRestricted returns a context with an actor that has permissions
// required for various system operations (login, logout, metrics cache).
func AsSystemRestricted(ctx context.Context) context.Context {
return context.WithValue(ctx, authContextKey{}, subjectSystemRestricted)
return As(ctx, subjectSystemRestricted)
}
// AsSystemReadProvisionerDaemons returns a context with an actor that has permissions
// to read provisioner daemons.
func AsSystemReadProvisionerDaemons(ctx context.Context) context.Context {
return context.WithValue(ctx, authContextKey{}, subjectSystemReadProvisionerDaemons)
return As(ctx, subjectSystemReadProvisionerDaemons)
}
var AsRemoveActor = rbac.Subject{
@@ -401,6 +410,9 @@ func As(ctx context.Context, actor rbac.Subject) context.Context {
// should be removed from the context.
return context.WithValue(ctx, authContextKey{}, nil)
}
if rlogger := loggermw.RequestLoggerFromContext(ctx); rlogger != nil {
rlogger.WithAuthContext(actor)
}
return context.WithValue(ctx, authContextKey{}, actor)
}
@@ -2096,6 +2108,20 @@ func (q *querier) GetTailnetTunnelPeerIDs(ctx context.Context, srcID uuid.UUID)
return q.db.GetTailnetTunnelPeerIDs(ctx, srcID)
}
func (q *querier) GetTelemetryItem(ctx context.Context, key string) (database.TelemetryItem, error) {
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err != nil {
return database.TelemetryItem{}, err
}
return q.db.GetTelemetryItem(ctx, key)
}
func (q *querier) GetTelemetryItems(ctx context.Context) ([]database.TelemetryItem, error) {
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err != nil {
return nil, err
}
return q.db.GetTelemetryItems(ctx)
}
func (q *querier) GetTemplateAppInsights(ctx context.Context, arg database.GetTemplateAppInsightsParams) ([]database.GetTemplateAppInsightsRow, error) {
if err := q.authorizeTemplateInsights(ctx, arg.TemplateIDs); err != nil {
return nil, err
@@ -3085,6 +3111,13 @@ func (q *querier) InsertReplica(ctx context.Context, arg database.InsertReplicaP
return q.db.InsertReplica(ctx, arg)
}
func (q *querier) InsertTelemetryItemIfNotExists(ctx context.Context, arg database.InsertTelemetryItemIfNotExistsParams) error {
if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceSystem); err != nil {
return err
}
return q.db.InsertTelemetryItemIfNotExists(ctx, arg)
}
func (q *querier) InsertTemplate(ctx context.Context, arg database.InsertTemplateParams) error {
obj := rbac.ResourceTemplate.InOrg(arg.OrganizationID)
if err := q.authorizeContext(ctx, policy.ActionCreate, obj); err != nil {
@@ -4345,6 +4378,13 @@ func (q *querier) UpsertTailnetTunnel(ctx context.Context, arg database.UpsertTa
return q.db.UpsertTailnetTunnel(ctx, arg)
}
func (q *querier) UpsertTelemetryItem(ctx context.Context, arg database.UpsertTelemetryItemParams) error {
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceSystem); err != nil {
return err
}
return q.db.UpsertTelemetryItem(ctx, arg)
}
func (q *querier) UpsertTemplateUsageStats(ctx context.Context) error {
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceSystem); err != nil {
return err
+18
View File
@@ -4224,6 +4224,24 @@ func (s *MethodTestSuite) TestSystemFunctions() {
s.Run("GetWorkspaceModulesCreatedAfter", s.Subtest(func(db database.Store, check *expects) {
check.Args(dbtime.Now()).Asserts(rbac.ResourceSystem, policy.ActionRead)
}))
s.Run("GetTelemetryItem", s.Subtest(func(db database.Store, check *expects) {
check.Args("test").Asserts(rbac.ResourceSystem, policy.ActionRead).Errors(sql.ErrNoRows)
}))
s.Run("GetTelemetryItems", s.Subtest(func(db database.Store, check *expects) {
check.Args().Asserts(rbac.ResourceSystem, policy.ActionRead)
}))
s.Run("InsertTelemetryItemIfNotExists", s.Subtest(func(db database.Store, check *expects) {
check.Args(database.InsertTelemetryItemIfNotExistsParams{
Key: "test",
Value: "value",
}).Asserts(rbac.ResourceSystem, policy.ActionCreate)
}))
s.Run("UpsertTelemetryItem", s.Subtest(func(db database.Store, check *expects) {
check.Args(database.UpsertTelemetryItemParams{
Key: "test",
Value: "value",
}).Asserts(rbac.ResourceSystem, policy.ActionUpdate)
}))
}
func (s *MethodTestSuite) TestNotifications() {
+17
View File
@@ -1093,6 +1093,23 @@ func ProvisionerJobTimings(t testing.TB, db database.Store, build database.Works
return timings
}
// TelemetryItem upserts a telemetry item built from seed into db and
// returns the row as read back from the store.
//
// Empty seed fields are given defaults so callers only need to set what
// they care about: Key becomes a random test name and Value becomes the
// current time in RFC 3339 form.
func TelemetryItem(t testing.TB, db database.Store, seed database.TelemetryItem) database.TelemetryItem {
	if seed.Key == "" {
		seed.Key = testutil.GetRandomName(t)
	}
	if seed.Value == "" {
		seed.Value = time.Now().Format(time.RFC3339)
	}
	err := db.UpsertTelemetryItem(genCtx, database.UpsertTelemetryItemParams{
		Key:   seed.Key,
		Value: seed.Value,
	})
	require.NoError(t, err, "upsert telemetry item")
	// Read the row back so the caller receives store-populated
	// timestamps rather than the raw seed.
	item, err := db.GetTelemetryItem(genCtx, seed.Key)
	require.NoError(t, err, "get telemetry item")
	return item
}
func provisionerJobTiming(t testing.TB, db database.Store, seed database.ProvisionerJobTiming) database.ProvisionerJobTiming {
timing, err := db.InsertProvisionerJobTimings(genCtx, database.InsertProvisionerJobTimingsParams{
JobID: takeFirst(seed.JobID, uuid.New()),
+70
View File
@@ -89,6 +89,7 @@ func New() database.Store {
locks: map[int64]struct{}{},
runtimeConfig: map[string]string{},
userStatusChanges: make([]database.UserStatusChange, 0),
telemetryItems: make([]database.TelemetryItem, 0),
},
}
// Always start with a default org. Matching migration 198.
@@ -258,6 +259,7 @@ type data struct {
defaultProxyDisplayName string
defaultProxyIconURL string
userStatusChanges []database.UserStatusChange
telemetryItems []database.TelemetryItem
}
func tryPercentile(fs []float64, p float64) float64 {
@@ -4330,6 +4332,23 @@ func (*FakeQuerier) GetTailnetTunnelPeerIDs(context.Context, uuid.UUID) ([]datab
return nil, ErrUnimplemented
}
// GetTelemetryItem returns the telemetry item stored under key, or
// sql.ErrNoRows when no item with that key exists.
func (q *FakeQuerier) GetTelemetryItem(_ context.Context, key string) (database.TelemetryItem, error) {
	q.mutex.RLock()
	defer q.mutex.RUnlock()

	for i := range q.telemetryItems {
		if q.telemetryItems[i].Key == key {
			return q.telemetryItems[i], nil
		}
	}
	return database.TelemetryItem{}, sql.ErrNoRows
}
// GetTelemetryItems returns a snapshot of all stored telemetry items.
//
// Fix: the read is now performed under the read lock and the backing
// slice is copied before being returned. The previous version returned
// q.telemetryItems directly with no locking, which both raced with
// InsertTelemetryItemIfNotExists/UpsertTelemetryItem (they append and
// mutate under the write lock) and leaked the fake's internal slice to
// callers. Every sibling accessor (e.g. GetTelemetryItem) holds RLock.
func (q *FakeQuerier) GetTelemetryItems(_ context.Context) ([]database.TelemetryItem, error) {
	q.mutex.RLock()
	defer q.mutex.RUnlock()

	items := make([]database.TelemetryItem, len(q.telemetryItems))
	copy(items, q.telemetryItems)
	return items, nil
}
func (q *FakeQuerier) GetTemplateAppInsights(ctx context.Context, arg database.GetTemplateAppInsightsParams) ([]database.GetTemplateAppInsightsRow, error) {
err := validateDatabaseType(arg)
if err != nil {
@@ -8120,6 +8139,30 @@ func (q *FakeQuerier) InsertReplica(_ context.Context, arg database.InsertReplic
return replica, nil
}
// InsertTelemetryItemIfNotExists stores a new telemetry item unless one
// with the same key already exists, in which case it is a no-op. This
// mirrors the SQL INSERT ... ON CONFLICT (key) DO NOTHING query.
func (q *FakeQuerier) InsertTelemetryItemIfNotExists(_ context.Context, arg database.InsertTelemetryItemIfNotExistsParams) error {
	if err := validateDatabaseType(arg); err != nil {
		return err
	}

	q.mutex.Lock()
	defer q.mutex.Unlock()

	for i := range q.telemetryItems {
		if q.telemetryItems[i].Key == arg.Key {
			// Key already present; nothing to insert.
			return nil
		}
	}
	now := time.Now()
	q.telemetryItems = append(q.telemetryItems, database.TelemetryItem{
		Key:       arg.Key,
		Value:     arg.Value,
		CreatedAt: now,
		UpdatedAt: now,
	})
	return nil
}
func (q *FakeQuerier) InsertTemplate(_ context.Context, arg database.InsertTemplateParams) error {
if err := validateDatabaseType(arg); err != nil {
return err
@@ -10874,6 +10917,33 @@ func (*FakeQuerier) UpsertTailnetTunnel(_ context.Context, arg database.UpsertTa
return database.TailnetTunnel{}, ErrUnimplemented
}
// UpsertTelemetryItem inserts the telemetry item or, when the key is
// already present, overwrites its value and refreshes UpdatedAt. This
// mirrors the SQL INSERT ... ON CONFLICT (key) DO UPDATE query.
func (q *FakeQuerier) UpsertTelemetryItem(_ context.Context, arg database.UpsertTelemetryItemParams) error {
	if err := validateDatabaseType(arg); err != nil {
		return err
	}

	q.mutex.Lock()
	defer q.mutex.Unlock()

	for i := range q.telemetryItems {
		if q.telemetryItems[i].Key != arg.Key {
			continue
		}
		// Existing row: update in place, keeping CreatedAt intact.
		q.telemetryItems[i].Value = arg.Value
		q.telemetryItems[i].UpdatedAt = time.Now()
		return nil
	}
	now := time.Now()
	q.telemetryItems = append(q.telemetryItems, database.TelemetryItem{
		Key:       arg.Key,
		Value:     arg.Value,
		CreatedAt: now,
		UpdatedAt: now,
	})
	return nil
}
func (q *FakeQuerier) UpsertTemplateUsageStats(ctx context.Context) error {
q.mutex.Lock()
defer q.mutex.Unlock()
+28
View File
@@ -1134,6 +1134,20 @@ func (m queryMetricsStore) GetTailnetTunnelPeerIDs(ctx context.Context, srcID uu
return r0, r1
}
// GetTelemetryItem delegates to the wrapped store and records the query
// latency under the "GetTelemetryItem" label.
func (m queryMetricsStore) GetTelemetryItem(ctx context.Context, key string) (database.TelemetryItem, error) {
	begin := time.Now()
	item, err := m.s.GetTelemetryItem(ctx, key)
	m.queryLatencies.WithLabelValues("GetTelemetryItem").Observe(time.Since(begin).Seconds())
	return item, err
}
// GetTelemetryItems delegates to the wrapped store and records the
// query latency under the "GetTelemetryItems" label.
func (m queryMetricsStore) GetTelemetryItems(ctx context.Context) ([]database.TelemetryItem, error) {
	begin := time.Now()
	items, err := m.s.GetTelemetryItems(ctx)
	m.queryLatencies.WithLabelValues("GetTelemetryItems").Observe(time.Since(begin).Seconds())
	return items, err
}
func (m queryMetricsStore) GetTemplateAppInsights(ctx context.Context, arg database.GetTemplateAppInsightsParams) ([]database.GetTemplateAppInsightsRow, error) {
start := time.Now()
r0, r1 := m.s.GetTemplateAppInsights(ctx, arg)
@@ -1911,6 +1925,13 @@ func (m queryMetricsStore) InsertReplica(ctx context.Context, arg database.Inser
return replica, err
}
// InsertTelemetryItemIfNotExists delegates to the wrapped store and
// records the query latency under the matching label.
func (m queryMetricsStore) InsertTelemetryItemIfNotExists(ctx context.Context, arg database.InsertTelemetryItemIfNotExistsParams) error {
	begin := time.Now()
	err := m.s.InsertTelemetryItemIfNotExists(ctx, arg)
	m.queryLatencies.WithLabelValues("InsertTelemetryItemIfNotExists").Observe(time.Since(begin).Seconds())
	return err
}
func (m queryMetricsStore) InsertTemplate(ctx context.Context, arg database.InsertTemplateParams) error {
start := time.Now()
err := m.s.InsertTemplate(ctx, arg)
@@ -2772,6 +2793,13 @@ func (m queryMetricsStore) UpsertTailnetTunnel(ctx context.Context, arg database
return r0, r1
}
// UpsertTelemetryItem delegates to the wrapped store and records the
// query latency under the "UpsertTelemetryItem" label.
func (m queryMetricsStore) UpsertTelemetryItem(ctx context.Context, arg database.UpsertTelemetryItemParams) error {
	begin := time.Now()
	err := m.s.UpsertTelemetryItem(ctx, arg)
	m.queryLatencies.WithLabelValues("UpsertTelemetryItem").Observe(time.Since(begin).Seconds())
	return err
}
func (m queryMetricsStore) UpsertTemplateUsageStats(ctx context.Context) error {
start := time.Now()
r0 := m.s.UpsertTemplateUsageStats(ctx)
+58
View File
@@ -2346,6 +2346,36 @@ func (mr *MockStoreMockRecorder) GetTailnetTunnelPeerIDs(ctx, srcID any) *gomock
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTailnetTunnelPeerIDs", reflect.TypeOf((*MockStore)(nil).GetTailnetTunnelPeerIDs), ctx, srcID)
}
// GetTelemetryItem mocks base method.
func (m *MockStore) GetTelemetryItem(ctx context.Context, key string) (database.TelemetryItem, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetTelemetryItem", ctx, key)
ret0, _ := ret[0].(database.TelemetryItem)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetTelemetryItem indicates an expected call of GetTelemetryItem.
func (mr *MockStoreMockRecorder) GetTelemetryItem(ctx, key any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTelemetryItem", reflect.TypeOf((*MockStore)(nil).GetTelemetryItem), ctx, key)
}
// GetTelemetryItems mocks base method.
func (m *MockStore) GetTelemetryItems(ctx context.Context) ([]database.TelemetryItem, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetTelemetryItems", ctx)
ret0, _ := ret[0].([]database.TelemetryItem)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetTelemetryItems indicates an expected call of GetTelemetryItems.
func (mr *MockStoreMockRecorder) GetTelemetryItems(ctx any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTelemetryItems", reflect.TypeOf((*MockStore)(nil).GetTelemetryItems), ctx)
}
// GetTemplateAppInsights mocks base method.
func (m *MockStore) GetTemplateAppInsights(ctx context.Context, arg database.GetTemplateAppInsightsParams) ([]database.GetTemplateAppInsightsRow, error) {
m.ctrl.T.Helper()
@@ -4051,6 +4081,20 @@ func (mr *MockStoreMockRecorder) InsertReplica(ctx, arg any) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertReplica", reflect.TypeOf((*MockStore)(nil).InsertReplica), ctx, arg)
}
// InsertTelemetryItemIfNotExists mocks base method.
func (m *MockStore) InsertTelemetryItemIfNotExists(ctx context.Context, arg database.InsertTelemetryItemIfNotExistsParams) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InsertTelemetryItemIfNotExists", ctx, arg)
ret0, _ := ret[0].(error)
return ret0
}
// InsertTelemetryItemIfNotExists indicates an expected call of InsertTelemetryItemIfNotExists.
func (mr *MockStoreMockRecorder) InsertTelemetryItemIfNotExists(ctx, arg any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertTelemetryItemIfNotExists", reflect.TypeOf((*MockStore)(nil).InsertTelemetryItemIfNotExists), ctx, arg)
}
// InsertTemplate mocks base method.
func (m *MockStore) InsertTemplate(ctx context.Context, arg database.InsertTemplateParams) error {
m.ctrl.T.Helper()
@@ -5861,6 +5905,20 @@ func (mr *MockStoreMockRecorder) UpsertTailnetTunnel(ctx, arg any) *gomock.Call
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertTailnetTunnel", reflect.TypeOf((*MockStore)(nil).UpsertTailnetTunnel), ctx, arg)
}
// UpsertTelemetryItem mocks base method.
func (m *MockStore) UpsertTelemetryItem(ctx context.Context, arg database.UpsertTelemetryItemParams) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpsertTelemetryItem", ctx, arg)
ret0, _ := ret[0].(error)
return ret0
}
// UpsertTelemetryItem indicates an expected call of UpsertTelemetryItem.
func (mr *MockStoreMockRecorder) UpsertTelemetryItem(ctx, arg any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertTelemetryItem", reflect.TypeOf((*MockStore)(nil).UpsertTelemetryItem), ctx, arg)
}
// UpsertTemplateUsageStats mocks base method.
func (m *MockStore) UpsertTemplateUsageStats(ctx context.Context) error {
m.ctrl.T.Helper()
+10
View File
@@ -1164,6 +1164,13 @@ CREATE TABLE tailnet_tunnels (
updated_at timestamp with time zone NOT NULL
);
CREATE TABLE telemetry_items (
key text NOT NULL,
value text NOT NULL,
created_at timestamp with time zone DEFAULT now() NOT NULL,
updated_at timestamp with time zone DEFAULT now() NOT NULL
);
CREATE TABLE template_usage_stats (
start_time timestamp with time zone NOT NULL,
end_time timestamp with time zone NOT NULL,
@@ -2026,6 +2033,9 @@ ALTER TABLE ONLY tailnet_peers
ALTER TABLE ONLY tailnet_tunnels
ADD CONSTRAINT tailnet_tunnels_pkey PRIMARY KEY (coordinator_id, src_id, dst_id);
ALTER TABLE ONLY telemetry_items
ADD CONSTRAINT telemetry_items_pkey PRIMARY KEY (key);
ALTER TABLE ONLY template_usage_stats
ADD CONSTRAINT template_usage_stats_pkey PRIMARY KEY (start_time, template_id, user_id);
@@ -0,0 +1 @@
DROP TABLE telemetry_items;
@@ -0,0 +1,6 @@
CREATE TABLE telemetry_items (
key TEXT NOT NULL PRIMARY KEY,
value TEXT NOT NULL,
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
);
@@ -0,0 +1,4 @@
INSERT INTO
telemetry_items (key, value)
VALUES
('example_key', 'example_value');
+7
View File
@@ -2787,6 +2787,13 @@ type TailnetTunnel struct {
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
}
type TelemetryItem struct {
Key string `db:"key" json:"key"`
Value string `db:"value" json:"value"`
CreatedAt time.Time `db:"created_at" json:"created_at"`
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
}
// Joins in the display name information such as username, avatar, and organization name.
type Template struct {
ID uuid.UUID `db:"id" json:"id"`
+4
View File
@@ -224,6 +224,8 @@ type sqlcQuerier interface {
GetTailnetPeers(ctx context.Context, id uuid.UUID) ([]TailnetPeer, error)
GetTailnetTunnelPeerBindings(ctx context.Context, srcID uuid.UUID) ([]GetTailnetTunnelPeerBindingsRow, error)
GetTailnetTunnelPeerIDs(ctx context.Context, srcID uuid.UUID) ([]GetTailnetTunnelPeerIDsRow, error)
GetTelemetryItem(ctx context.Context, key string) (TelemetryItem, error)
GetTelemetryItems(ctx context.Context) ([]TelemetryItem, error)
// GetTemplateAppInsights returns the aggregate usage of each app in a given
// timeframe. The result can be filtered on template_ids, meaning only user data
// from workspaces based on those templates will be included.
@@ -404,6 +406,7 @@ type sqlcQuerier interface {
InsertProvisionerJobTimings(ctx context.Context, arg InsertProvisionerJobTimingsParams) ([]ProvisionerJobTiming, error)
InsertProvisionerKey(ctx context.Context, arg InsertProvisionerKeyParams) (ProvisionerKey, error)
InsertReplica(ctx context.Context, arg InsertReplicaParams) (Replica, error)
InsertTelemetryItemIfNotExists(ctx context.Context, arg InsertTelemetryItemIfNotExistsParams) error
InsertTemplate(ctx context.Context, arg InsertTemplateParams) error
InsertTemplateVersion(ctx context.Context, arg InsertTemplateVersionParams) error
InsertTemplateVersionParameter(ctx context.Context, arg InsertTemplateVersionParameterParams) (TemplateVersionParameter, error)
@@ -546,6 +549,7 @@ type sqlcQuerier interface {
UpsertTailnetCoordinator(ctx context.Context, id uuid.UUID) (TailnetCoordinator, error)
UpsertTailnetPeer(ctx context.Context, arg UpsertTailnetPeerParams) (TailnetPeer, error)
UpsertTailnetTunnel(ctx context.Context, arg UpsertTailnetTunnelParams) (TailnetTunnel, error)
UpsertTelemetryItem(ctx context.Context, arg UpsertTelemetryItemParams) error
// This query aggregates the workspace_agent_stats and workspace_app_stats data
// into a single table for efficient storage and querying. Half-hour buckets are
// used to store the data, and the minutes are summed for each user and template
+84 -2
View File
@@ -8702,6 +8702,86 @@ func (q *sqlQuerier) UpsertTailnetTunnel(ctx context.Context, arg UpsertTailnetT
return i, err
}
const getTelemetryItem = `-- name: GetTelemetryItem :one
SELECT key, value, created_at, updated_at FROM telemetry_items WHERE key = $1
`
func (q *sqlQuerier) GetTelemetryItem(ctx context.Context, key string) (TelemetryItem, error) {
row := q.db.QueryRowContext(ctx, getTelemetryItem, key)
var i TelemetryItem
err := row.Scan(
&i.Key,
&i.Value,
&i.CreatedAt,
&i.UpdatedAt,
)
return i, err
}
const getTelemetryItems = `-- name: GetTelemetryItems :many
SELECT key, value, created_at, updated_at FROM telemetry_items
`
func (q *sqlQuerier) GetTelemetryItems(ctx context.Context) ([]TelemetryItem, error) {
rows, err := q.db.QueryContext(ctx, getTelemetryItems)
if err != nil {
return nil, err
}
defer rows.Close()
var items []TelemetryItem
for rows.Next() {
var i TelemetryItem
if err := rows.Scan(
&i.Key,
&i.Value,
&i.CreatedAt,
&i.UpdatedAt,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Close(); err != nil {
return nil, err
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const insertTelemetryItemIfNotExists = `-- name: InsertTelemetryItemIfNotExists :exec
INSERT INTO telemetry_items (key, value)
VALUES ($1, $2)
ON CONFLICT (key) DO NOTHING
`
type InsertTelemetryItemIfNotExistsParams struct {
Key string `db:"key" json:"key"`
Value string `db:"value" json:"value"`
}
func (q *sqlQuerier) InsertTelemetryItemIfNotExists(ctx context.Context, arg InsertTelemetryItemIfNotExistsParams) error {
_, err := q.db.ExecContext(ctx, insertTelemetryItemIfNotExists, arg.Key, arg.Value)
return err
}
const upsertTelemetryItem = `-- name: UpsertTelemetryItem :exec
INSERT INTO telemetry_items (key, value)
VALUES ($1, $2)
ON CONFLICT (key) DO UPDATE SET value = $2, updated_at = NOW() WHERE telemetry_items.key = $1
`
type UpsertTelemetryItemParams struct {
Key string `db:"key" json:"key"`
Value string `db:"value" json:"value"`
}
func (q *sqlQuerier) UpsertTelemetryItem(ctx context.Context, arg UpsertTelemetryItemParams) error {
_, err := q.db.ExecContext(ctx, upsertTelemetryItem, arg.Key, arg.Value)
return err
}
const getTemplateAverageBuildTime = `-- name: GetTemplateAverageBuildTime :one
WITH build_times AS (
SELECT
@@ -10607,10 +10687,10 @@ func (q *sqlQuerier) GetActiveUserCount(ctx context.Context) (int64, error) {
const getAuthorizationUserRoles = `-- name: GetAuthorizationUserRoles :one
SELECT
-- username is returned just to help for logging purposes
-- username and email are returned just to help for logging purposes
-- status is used to enforce 'suspended' users, as all roles are ignored
-- when suspended.
id, username, status,
id, username, status, email,
-- All user roles, including their org roles.
array_cat(
-- All users are members
@@ -10651,6 +10731,7 @@ type GetAuthorizationUserRolesRow struct {
ID uuid.UUID `db:"id" json:"id"`
Username string `db:"username" json:"username"`
Status UserStatus `db:"status" json:"status"`
Email string `db:"email" json:"email"`
Roles []string `db:"roles" json:"roles"`
Groups []string `db:"groups" json:"groups"`
}
@@ -10664,6 +10745,7 @@ func (q *sqlQuerier) GetAuthorizationUserRoles(ctx context.Context, userID uuid.
&i.ID,
&i.Username,
&i.Status,
&i.Email,
pq.Array(&i.Roles),
pq.Array(&i.Groups),
)
@@ -0,0 +1,15 @@
-- name: InsertTelemetryItemIfNotExists :exec
INSERT INTO telemetry_items (key, value)
VALUES ($1, $2)
ON CONFLICT (key) DO NOTHING;
-- name: GetTelemetryItem :one
SELECT * FROM telemetry_items WHERE key = $1;
-- name: UpsertTelemetryItem :exec
INSERT INTO telemetry_items (key, value)
VALUES ($1, $2)
ON CONFLICT (key) DO UPDATE SET value = $2, updated_at = NOW() WHERE telemetry_items.key = $1;
-- name: GetTelemetryItems :many
SELECT * FROM telemetry_items;
+2 -2
View File
@@ -244,10 +244,10 @@ WHERE
-- This function returns roles for authorization purposes. Implied member roles
-- are included.
SELECT
-- username is returned just to help for logging purposes
-- username and email are returned just to help for logging purposes
-- status is used to enforce 'suspended' users, as all roles are ignored
-- when suspended.
id, username, status,
id, username, status, email,
-- All user roles, including their org roles.
array_cat(
-- All users are members
+1
View File
@@ -55,6 +55,7 @@ const (
UniqueTailnetCoordinatorsPkey UniqueConstraint = "tailnet_coordinators_pkey" // ALTER TABLE ONLY tailnet_coordinators ADD CONSTRAINT tailnet_coordinators_pkey PRIMARY KEY (id);
UniqueTailnetPeersPkey UniqueConstraint = "tailnet_peers_pkey" // ALTER TABLE ONLY tailnet_peers ADD CONSTRAINT tailnet_peers_pkey PRIMARY KEY (id, coordinator_id);
UniqueTailnetTunnelsPkey UniqueConstraint = "tailnet_tunnels_pkey" // ALTER TABLE ONLY tailnet_tunnels ADD CONSTRAINT tailnet_tunnels_pkey PRIMARY KEY (coordinator_id, src_id, dst_id);
UniqueTelemetryItemsPkey UniqueConstraint = "telemetry_items_pkey" // ALTER TABLE ONLY telemetry_items ADD CONSTRAINT telemetry_items_pkey PRIMARY KEY (key);
UniqueTemplateUsageStatsPkey UniqueConstraint = "template_usage_stats_pkey" // ALTER TABLE ONLY template_usage_stats ADD CONSTRAINT template_usage_stats_pkey PRIMARY KEY (start_time, template_id, user_id);
UniqueTemplateVersionParametersTemplateVersionIDNameKey UniqueConstraint = "template_version_parameters_template_version_id_name_key" // ALTER TABLE ONLY template_version_parameters ADD CONSTRAINT template_version_parameters_template_version_id_name_key UNIQUE (template_version_id, name);
UniqueTemplateVersionVariablesTemplateVersionIDNameKey UniqueConstraint = "template_version_variables_template_version_id_name_key" // ALTER TABLE ONLY template_version_variables ADD CONSTRAINT template_version_variables_template_version_id_name_key UNIQUE (template_version_id, name);
+2
View File
@@ -465,7 +465,9 @@ func UserRBACSubject(ctx context.Context, db database.Store, userID uuid.UUID, s
}
actor := rbac.Subject{
Type: rbac.SubjectTypeUser,
FriendlyName: roles.Username,
Email: roles.Email,
ID: userID.String(),
Roles: rbacRoles,
Groups: roles.Groups,
-76
View File
@@ -1,76 +0,0 @@
package httpmw
import (
"context"
"fmt"
"net/http"
"time"
"cdr.dev/slog"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/tracing"
)
func Logger(log slog.Logger) func(next http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
start := time.Now()
sw, ok := rw.(*tracing.StatusWriter)
if !ok {
panic(fmt.Sprintf("ResponseWriter not a *tracing.StatusWriter; got %T", rw))
}
httplog := log.With(
slog.F("host", httpapi.RequestHost(r)),
slog.F("path", r.URL.Path),
slog.F("proto", r.Proto),
slog.F("remote_addr", r.RemoteAddr),
// Include the start timestamp in the log so that we have the
// source of truth. There is at least a theoretical chance that
// there can be a delay between `next.ServeHTTP` ending and us
// actually logging the request. This can also be useful when
// filtering logs that started at a certain time (compared to
// trying to compute the value).
slog.F("start", start),
)
next.ServeHTTP(sw, r)
end := time.Now()
// Don't log successful health check requests.
if r.URL.Path == "/api/v2" && sw.Status == http.StatusOK {
return
}
httplog = httplog.With(
slog.F("took", end.Sub(start)),
slog.F("status_code", sw.Status),
slog.F("latency_ms", float64(end.Sub(start)/time.Millisecond)),
)
// For status codes 400 and higher we
// want to log the response body.
if sw.Status >= http.StatusInternalServerError {
httplog = httplog.With(
slog.F("response_body", string(sw.ResponseBody())),
)
}
// We should not log at level ERROR for 5xx status codes because 5xx
// includes proxy errors etc. It also causes slogtest to fail
// instantly without an error message by default.
logLevelFn := httplog.Debug
if sw.Status >= http.StatusInternalServerError {
logLevelFn = httplog.Warn
}
// We already capture most of this information in the span (minus
// the response body which we don't want to capture anyways).
tracing.RunWithoutSpan(r.Context(), func(ctx context.Context) {
logLevelFn(ctx, r.Method)
})
})
}
}
+203
View File
@@ -0,0 +1,203 @@
package loggermw
import (
"context"
"fmt"
"net/http"
"sync"
"time"
"github.com/go-chi/chi/v5"
"cdr.dev/slog"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/rbac"
"github.com/coder/coder/v2/coderd/tracing"
)
// Logger returns HTTP middleware that emits exactly one structured log
// line per request (host, path, proto, remote address, start time,
// latency, status code) via a RequestLogger that it stores in the
// request context, so downstream code (e.g. auth middleware) can attach
// additional fields before the line is written.
func Logger(log slog.Logger) func(next http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
			start := time.Now()

			// The writer must already be wrapped so we can observe the
			// status code and (on error) the response body afterwards.
			sw, ok := rw.(*tracing.StatusWriter)
			if !ok {
				panic(fmt.Sprintf("ResponseWriter not a *tracing.StatusWriter; got %T", rw))
			}

			httplog := log.With(
				slog.F("host", httpapi.RequestHost(r)),
				slog.F("path", r.URL.Path),
				slog.F("proto", r.Proto),
				slog.F("remote_addr", r.RemoteAddr),
				// Include the start timestamp in the log so that we have the
				// source of truth. There is at least a theoretical chance that
				// there can be a delay between `next.ServeHTTP` ending and us
				// actually logging the request. This can also be useful when
				// filtering logs that started at a certain time (compared to
				// trying to compute the value).
				slog.F("start", start),
			)

			// Expose the logger to handlers via the request context so
			// they can contribute fields (auth info, route params, ...).
			logContext := NewRequestLogger(httplog, r.Method, start)
			ctx := WithRequestLogger(r.Context(), logContext)

			next.ServeHTTP(sw, r.WithContext(ctx))

			// Don't log successful health check requests.
			if r.URL.Path == "/api/v2" && sw.Status == http.StatusOK {
				return
			}

			// For status codes 500 and higher we
			// want to log the response body.
			if sw.Status >= http.StatusInternalServerError {
				logContext.WithFields(
					slog.F("response_body", string(sw.ResponseBody())),
				)
			}

			// Write the log line after the handler has finished so the
			// status code and all accumulated fields are final.
			logContext.WriteLog(r.Context(), sw.Status)
		})
	}
}
// RequestLogger accumulates structured fields over the lifetime of a
// single HTTP request and emits one summary log line for it.
type RequestLogger interface {
	// WithFields attaches additional fields to the eventual log line.
	WithFields(fields ...slog.Field)
	// WriteLog emits the log line with the final response status.
	WriteLog(ctx context.Context, status int)
	// WithAuthContext records an RBAC subject that was authenticated
	// during the request so it can be included in the log line.
	WithAuthContext(actor rbac.Subject)
}

// SlogRequestLogger is the slog-backed RequestLogger implementation
// created by Logger for each request.
type SlogRequestLogger struct {
	log     slog.Logger
	written bool   // set once WriteLog has emitted the line
	message string // log message; the request's HTTP method
	start   time.Time

	// Protects actors map for concurrent writes.
	mu     sync.RWMutex
	actors map[rbac.SubjectType]rbac.Subject
}

// Compile-time interface conformance check.
var _ RequestLogger = &SlogRequestLogger{}
// NewRequestLogger returns a RequestLogger that, once WriteLog is
// called, logs a single line with the given message (the request
// method), measuring latency from start.
func NewRequestLogger(log slog.Logger, message string, start time.Time) RequestLogger {
	return &SlogRequestLogger{
		log:     log,
		written: false,
		message: message,
		start:   start,
		actors:  make(map[rbac.SubjectType]rbac.Subject),
	}
}
// WithFields appends fields to the eventual log entry.
//
// NOTE(review): this mutates c.log without holding c.mu; it appears to be
// intended for calls from the request goroutine only — confirm callers.
func (c *SlogRequestLogger) WithFields(fields ...slog.Field) {
	c.log = c.log.With(fields...)
}
// WithAuthContext records the authenticated actor, keyed by its subject
// type. Multiple actors may be recorded over the request's lifetime; the
// map is guarded by c.mu for concurrent writes.
func (c *SlogRequestLogger) WithAuthContext(actor rbac.Subject) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.actors[actor.Type] = actor
}
// addAuthContextFields copies requestor information from the recorded
// actors into the logger. A user actor is preferred; otherwise the first
// actor found in actorLogOrder has its friendly name logged.
//
// NOTE(review): this writes c.log while holding only the read lock; that
// is safe only because it runs once, from WriteLog, after the request is
// done — confirm before calling it from anywhere else.
func (c *SlogRequestLogger) addAuthContextFields() {
	c.mu.RLock()
	defer c.mu.RUnlock()

	usr, ok := c.actors[rbac.SubjectTypeUser]
	if ok {
		c.log = c.log.With(
			slog.F("requestor_id", usr.ID),
			slog.F("requestor_name", usr.FriendlyName),
			slog.F("requestor_email", usr.Email),
		)
	} else {
		// If there is no user, we log the requestor name for the first
		// actor in a defined order.
		for _, v := range actorLogOrder {
			subj, ok := c.actors[v]
			if !ok {
				continue
			}
			c.log = c.log.With(
				slog.F("requestor_name", subj.FriendlyName),
			)
			break
		}
	}
}
// actorLogOrder is the priority order of non-user subject types when
// choosing which actor's name to log (see addAuthContextFields). The list
// is alphabetical; only the first matching actor is logged.
var actorLogOrder = []rbac.SubjectType{
	rbac.SubjectTypeAutostart,
	rbac.SubjectTypeCryptoKeyReader,
	rbac.SubjectTypeCryptoKeyRotator,
	rbac.SubjectTypeHangDetector,
	rbac.SubjectTypeNotifier,
	rbac.SubjectTypePrebuildsOrchestrator,
	rbac.SubjectTypeProvisionerd,
	rbac.SubjectTypeResourceMonitor,
	rbac.SubjectTypeSystemReadProvisionerDaemons,
	rbac.SubjectTypeSystemRestricted,
}
// WriteLog emits the request's log entry exactly once; subsequent calls
// are no-ops. It adds timing, status, requestor, and (if chi routed the
// request) route-parameter fields before writing. 5xx statuses are logged
// at WARN, everything else at DEBUG.
func (c *SlogRequestLogger) WriteLog(ctx context.Context, status int) {
	// Guard the once-flag with the mutex: a handler goroutine may flush the
	// log early (e.g. for WebSockets) while the middleware also calls
	// WriteLog; the unguarded read/write was a data race.
	c.mu.Lock()
	if c.written {
		c.mu.Unlock()
		return
	}
	c.written = true
	c.mu.Unlock()

	end := time.Now()

	// Right before we write the log, we try to find the user in the actors
	// and add the fields to the log.
	c.addAuthContextFields()

	logger := c.log.With(
		slog.F("took", end.Sub(c.start)),
		slog.F("status_code", status),
		slog.F("latency_ms", float64(end.Sub(c.start)/time.Millisecond)),
	)

	// If the request is routed, add the route parameters to the log.
	if chiCtx := chi.RouteContext(ctx); chiCtx != nil {
		urlParams := chiCtx.URLParams
		routeParamsFields := make([]slog.Field, 0, len(urlParams.Keys))

		for k, v := range urlParams.Keys {
			// Skip parameters with empty values to keep the entry compact.
			if urlParams.Values[k] != "" {
				routeParamsFields = append(routeParamsFields, slog.F("params_"+v, urlParams.Values[k]))
			}
		}

		if len(routeParamsFields) > 0 {
			logger = logger.With(routeParamsFields...)
		}
	}

	// We already capture most of this information in the span (minus
	// the response body which we don't want to capture anyways).
	tracing.RunWithoutSpan(ctx, func(ctx context.Context) {
		// We should not log at level ERROR for 5xx status codes because 5xx
		// includes proxy errors etc. It also causes slogtest to fail
		// instantly without an error message by default.
		if status >= http.StatusInternalServerError {
			logger.Warn(ctx, c.message)
		} else {
			logger.Debug(ctx, c.message)
		}
	})
}
// logContextKey is the private context key under which the RequestLogger
// is stored.
type logContextKey struct{}

// WithRequestLogger returns a copy of ctx carrying rl so downstream
// handlers can retrieve it via RequestLoggerFromContext.
func WithRequestLogger(ctx context.Context, rl RequestLogger) context.Context {
	return context.WithValue(ctx, logContextKey{}, rl)
}
// RequestLoggerFromContext returns the RequestLogger stored in ctx by
// WithRequestLogger, or nil when none is present.
func RequestLoggerFromContext(ctx context.Context) RequestLogger {
	if rl, ok := ctx.Value(logContextKey{}).(RequestLogger); ok {
		return rl
	}
	return nil
}
@@ -0,0 +1,311 @@
package loggermw
import (
"context"
"net/http"
"net/http/httptest"
"slices"
"strings"
"sync"
"testing"
"time"
"github.com/go-chi/chi/v5"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"cdr.dev/slog"
"github.com/coder/coder/v2/coderd/tracing"
"github.com/coder/coder/v2/testutil"
"github.com/coder/websocket"
)
// TestRequestLogger_WriteLog verifies that WriteLog emits exactly one
// entry carrying the custom field, and that a second call is a no-op.
func TestRequestLogger_WriteLog(t *testing.T) {
	t.Parallel()
	ctx := context.Background()

	sink := &fakeSink{}
	logger := slog.Make(sink)
	logger = logger.Leveled(slog.LevelDebug)
	logCtx := NewRequestLogger(logger, "GET", time.Now())

	// Add custom fields
	logCtx.WithFields(
		slog.F("custom_field", "custom_value"),
	)

	// Write log for 200 status
	logCtx.WriteLog(ctx, http.StatusOK)

	require.Len(t, sink.entries, 1, "log was written twice")

	require.Equal(t, sink.entries[0].Message, "GET")
	require.Equal(t, sink.entries[0].Fields[0].Value, "custom_value")

	// Attempt to write again (should be skipped).
	logCtx.WriteLog(ctx, http.StatusInternalServerError)

	require.Len(t, sink.entries, 1, "log was written twice")
}
// TestLoggerMiddleware_SingleRequest serves one plain HTTP request through
// the Logger middleware and asserts that exactly one entry is written with
// exactly the expected field set and status code.
func TestLoggerMiddleware_SingleRequest(t *testing.T) {
	t.Parallel()

	sink := &fakeSink{}
	logger := slog.Make(sink)
	logger = logger.Leveled(slog.LevelDebug)

	ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
	defer cancel()

	// Create a test handler to simulate an HTTP request
	testHandler := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		rw.WriteHeader(http.StatusOK)
		_, _ = rw.Write([]byte("OK"))
	})

	// Wrap the test handler with the Logger middleware
	loggerMiddleware := Logger(logger)
	wrappedHandler := loggerMiddleware(testHandler)

	// Create a test HTTP request
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "/test-path", nil)
	require.NoError(t, err, "failed to create request")

	// The middleware panics unless the writer is a *tracing.StatusWriter.
	sw := &tracing.StatusWriter{ResponseWriter: httptest.NewRecorder()}

	// Serve the request
	wrappedHandler.ServeHTTP(sw, req)

	require.Len(t, sink.entries, 1, "log was written twice")

	require.Equal(t, sink.entries[0].Message, "GET")

	fieldsMap := make(map[string]any)
	for _, field := range sink.entries[0].Fields {
		fieldsMap[field.Name] = field.Value
	}

	// Check that the log contains the expected fields
	requiredFields := []string{"host", "path", "proto", "remote_addr", "start", "took", "status_code", "latency_ms"}
	for _, field := range requiredFields {
		_, exists := fieldsMap[field]
		require.True(t, exists, "field %q is missing in log fields", field)
	}

	require.Len(t, sink.entries[0].Fields, len(requiredFields), "log should contain only the required fields")

	// Check value of the status code
	require.Equal(t, fieldsMap["status_code"], http.StatusOK)
}
// TestLoggerMiddleware_WebSocket verifies that a handler can flush the
// request log early via RequestLoggerFromContext (before the connection
// ends) and that the middleware does not write a second entry when the
// request finally completes.
func TestLoggerMiddleware_WebSocket(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
	defer cancel()
	sink := &fakeSink{
		newEntries: make(chan slog.SinkEntry, 2),
	}
	logger := slog.Make(sink)
	logger = logger.Leveled(slog.LevelDebug)
	done := make(chan struct{})
	wg := sync.WaitGroup{}

	// Create a test handler to simulate a WebSocket connection
	testHandler := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		conn, err := websocket.Accept(rw, r, nil)
		if !assert.NoError(t, err, "failed to accept websocket") {
			return
		}
		defer conn.Close(websocket.StatusGoingAway, "")

		// Flush the request log while the connection is still open.
		requestLgr := RequestLoggerFromContext(r.Context())
		requestLgr.WriteLog(r.Context(), http.StatusSwitchingProtocols)
		// Block so we can be sure the end of the middleware isn't being called.
		wg.Wait()
	})

	// Wrap the test handler with the Logger middleware
	loggerMiddleware := Logger(logger)
	wrappedHandler := loggerMiddleware(testHandler)

	// RequestLogger expects the ResponseWriter to be *tracing.StatusWriter
	customHandler := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		defer close(done)
		sw := &tracing.StatusWriter{ResponseWriter: rw}
		wrappedHandler.ServeHTTP(sw, r)
	})

	srv := httptest.NewServer(customHandler)
	defer srv.Close()
	wg.Add(1)
	// nolint: bodyclose
	conn, _, err := websocket.Dial(ctx, srv.URL, nil)
	require.NoError(t, err, "failed to dial WebSocket")
	defer conn.Close(websocket.StatusNormalClosure, "")

	// Wait for the log from within the handler
	newEntry := testutil.RequireRecvCtx(ctx, t, sink.newEntries)
	require.Equal(t, newEntry.Message, "GET")

	// Signal the websocket handler to return (and read to handle the close frame)
	wg.Done()
	_, _, err = conn.Read(ctx)
	require.ErrorAs(t, err, &websocket.CloseError{}, "websocket read should fail with close error")

	// Wait for the request to finish completely and verify we only logged once
	_ = testutil.RequireRecvCtx(ctx, t, done)
	require.Len(t, sink.entries, 1, "log was written twice")
}
// TestRequestLogger_HTTPRouteParams verifies that chi route parameters
// present on the request context are logged with a "params_" prefix.
func TestRequestLogger_HTTPRouteParams(t *testing.T) {
	t.Parallel()

	sink := &fakeSink{}
	logger := slog.Make(sink)
	logger = logger.Leveled(slog.LevelDebug)

	ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
	defer cancel()

	// Inject a pre-populated chi route context directly; no router needed.
	chiCtx := chi.NewRouteContext()
	chiCtx.URLParams.Add("workspace", "test-workspace")
	chiCtx.URLParams.Add("agent", "test-agent")

	ctx = context.WithValue(ctx, chi.RouteCtxKey, chiCtx)

	// Create a test handler to simulate an HTTP request
	testHandler := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		rw.WriteHeader(http.StatusOK)
		_, _ = rw.Write([]byte("OK"))
	})

	// Wrap the test handler with the Logger middleware
	loggerMiddleware := Logger(logger)
	wrappedHandler := loggerMiddleware(testHandler)

	// Create a test HTTP request
	// NOTE(review): the trailing "}" in the path looks unintentional — it
	// does not affect the assertions, but confirm and clean up.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "/test-path/}", nil)
	require.NoError(t, err, "failed to create request")

	sw := &tracing.StatusWriter{ResponseWriter: httptest.NewRecorder()}

	// Serve the request
	wrappedHandler.ServeHTTP(sw, req)

	fieldsMap := make(map[string]any)
	for _, field := range sink.entries[0].Fields {
		fieldsMap[field.Name] = field.Value
	}

	// Check that the log contains the expected fields
	requiredFields := []string{"workspace", "agent"}
	for _, field := range requiredFields {
		_, exists := fieldsMap["params_"+field]
		require.True(t, exists, "field %q is missing in log fields", field)
	}
}
// TestRequestLogger_RouteParamsLogging table-tests which chi route
// parameters end up in the log: non-empty parameters are logged as
// "params_<name>", empty-valued parameters are omitted, and no unexpected
// parameter fields appear.
func TestRequestLogger_RouteParamsLogging(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name           string
		params         map[string]string
		expectedFields []string
	}{
		{
			name:           "EmptyParams",
			params:         map[string]string{},
			expectedFields: []string{},
		},
		{
			name: "SingleParam",
			params: map[string]string{
				"workspace": "test-workspace",
			},
			expectedFields: []string{"params_workspace"},
		},
		{
			name: "MultipleParams",
			params: map[string]string{
				"workspace": "test-workspace",
				"agent":     "test-agent",
				"user":      "test-user",
			},
			expectedFields: []string{"params_workspace", "params_agent", "params_user"},
		},
		{
			name: "EmptyValueParam",
			params: map[string]string{
				"workspace": "test-workspace",
				"agent":     "",
			},
			expectedFields: []string{"params_workspace"},
		},
	}

	for _, tt := range tests {
		tt := tt // loop-variable capture; needed before Go 1.22
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()

			sink := &fakeSink{}
			logger := slog.Make(sink)
			logger = logger.Leveled(slog.LevelDebug)

			// Create a route context with the test parameters
			chiCtx := chi.NewRouteContext()
			for key, value := range tt.params {
				chiCtx.URLParams.Add(key, value)
			}

			ctx := context.WithValue(context.Background(), chi.RouteCtxKey, chiCtx)
			logCtx := NewRequestLogger(logger, "GET", time.Now())

			// Write the log
			logCtx.WriteLog(ctx, http.StatusOK)

			require.Len(t, sink.entries, 1, "expected exactly one log entry")

			// Convert fields to map for easier checking
			fieldsMap := make(map[string]any)
			for _, field := range sink.entries[0].Fields {
				fieldsMap[field.Name] = field.Value
			}

			// Verify expected fields are present
			for _, field := range tt.expectedFields {
				value, exists := fieldsMap[field]
				require.True(t, exists, "field %q should be present in log", field)
				require.Equal(t, tt.params[strings.TrimPrefix(field, "params_")], value, "field %q has incorrect value", field)
			}

			// Verify no unexpected fields are present
			for field := range fieldsMap {
				if field == "took" || field == "status_code" || field == "latency_ms" {
					continue // Skip standard fields
				}
				require.True(t, slices.Contains(tt.expectedFields, field), "unexpected field %q in log", field)
			}
		})
	}
}
// fakeSink is an in-memory slog sink used by tests to capture log entries.
type fakeSink struct {
	// entries records every logged entry, in order.
	entries []slog.SinkEntry
	// newEntries, when non-nil, receives entries as they are logged so
	// tests can wait for asynchronous writes.
	newEntries chan slog.SinkEntry
}
// LogEntry records the entry and, when the newEntries channel is set,
// offers it there without blocking (entries are dropped if it is full).
//
// NOTE(review): entries is appended without synchronization — fine while
// each test logs from a single goroutine; confirm before logging
// concurrently.
func (s *fakeSink) LogEntry(_ context.Context, e slog.SinkEntry) {
	s.entries = append(s.entries, e)
	if s.newEntries != nil {
		select {
		case s.newEntries <- e:
		default:
		}
	}
}
func (*fakeSink) Sync() {}
@@ -0,0 +1,83 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/coder/coder/v2/coderd/httpmw/loggermw (interfaces: RequestLogger)
//
// Generated by this command:
//
// mockgen -destination=loggermock/loggermock.go -package=loggermock . RequestLogger
//
// Package loggermock is a generated GoMock package.
package loggermock
import (
context "context"
reflect "reflect"
slog "cdr.dev/slog"
rbac "github.com/coder/coder/v2/coderd/rbac"
gomock "go.uber.org/mock/gomock"
)
// MockRequestLogger is a mock of RequestLogger interface.
type MockRequestLogger struct {
ctrl *gomock.Controller
recorder *MockRequestLoggerMockRecorder
isgomock struct{}
}
// MockRequestLoggerMockRecorder is the mock recorder for MockRequestLogger.
type MockRequestLoggerMockRecorder struct {
mock *MockRequestLogger
}
// NewMockRequestLogger creates a new mock instance.
func NewMockRequestLogger(ctrl *gomock.Controller) *MockRequestLogger {
mock := &MockRequestLogger{ctrl: ctrl}
mock.recorder = &MockRequestLoggerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockRequestLogger) EXPECT() *MockRequestLoggerMockRecorder {
return m.recorder
}
// WithAuthContext mocks base method.
func (m *MockRequestLogger) WithAuthContext(actor rbac.Subject) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "WithAuthContext", actor)
}
// WithAuthContext indicates an expected call of WithAuthContext.
func (mr *MockRequestLoggerMockRecorder) WithAuthContext(actor any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithAuthContext", reflect.TypeOf((*MockRequestLogger)(nil).WithAuthContext), actor)
}
// WithFields mocks base method.
func (m *MockRequestLogger) WithFields(fields ...slog.Field) {
m.ctrl.T.Helper()
varargs := []any{}
for _, a := range fields {
varargs = append(varargs, a)
}
m.ctrl.Call(m, "WithFields", varargs...)
}
// WithFields indicates an expected call of WithFields.
func (mr *MockRequestLoggerMockRecorder) WithFields(fields ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithFields", reflect.TypeOf((*MockRequestLogger)(nil).WithFields), fields...)
}
// WriteLog mocks base method.
func (m *MockRequestLogger) WriteLog(ctx context.Context, status int) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "WriteLog", ctx, status)
}
// WriteLog indicates an expected call of WriteLog.
func (mr *MockRequestLoggerMockRecorder) WriteLog(ctx, status any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteLog", reflect.TypeOf((*MockRequestLogger)(nil).WriteLog), ctx, status)
}
+42 -10
View File
@@ -3,6 +3,7 @@ package httpmw
import (
"net/http"
"strconv"
"strings"
"time"
"github.com/go-chi/chi/v5"
@@ -22,18 +23,18 @@ func Prometheus(register prometheus.Registerer) func(http.Handler) http.Handler
Name: "requests_processed_total",
Help: "The total number of processed API requests",
}, []string{"code", "method", "path"})
requestsConcurrent := factory.NewGauge(prometheus.GaugeOpts{
requestsConcurrent := factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "coderd",
Subsystem: "api",
Name: "concurrent_requests",
Help: "The number of concurrent API requests.",
})
websocketsConcurrent := factory.NewGauge(prometheus.GaugeOpts{
}, []string{"method", "path"})
websocketsConcurrent := factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "coderd",
Subsystem: "api",
Name: "concurrent_websockets",
Help: "The total number of concurrent API websockets.",
})
}, []string{"path"})
websocketsDist := factory.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "coderd",
Subsystem: "api",
@@ -61,7 +62,6 @@ func Prometheus(register prometheus.Registerer) func(http.Handler) http.Handler
var (
start = time.Now()
method = r.Method
rctx = chi.RouteContext(r.Context())
)
sw, ok := w.(*tracing.StatusWriter)
@@ -72,16 +72,18 @@ func Prometheus(register prometheus.Registerer) func(http.Handler) http.Handler
var (
dist *prometheus.HistogramVec
distOpts []string
path = getRoutePattern(r)
)
// We want to count WebSockets separately.
if httpapi.IsWebsocketUpgrade(r) {
websocketsConcurrent.Inc()
defer websocketsConcurrent.Dec()
websocketsConcurrent.WithLabelValues(path).Inc()
defer websocketsConcurrent.WithLabelValues(path).Dec()
dist = websocketsDist
} else {
requestsConcurrent.Inc()
defer requestsConcurrent.Dec()
requestsConcurrent.WithLabelValues(method, path).Inc()
defer requestsConcurrent.WithLabelValues(method, path).Dec()
dist = requestsDist
distOpts = []string{method}
@@ -89,7 +91,6 @@ func Prometheus(register prometheus.Registerer) func(http.Handler) http.Handler
next.ServeHTTP(w, r)
path := rctx.RoutePattern()
distOpts = append(distOpts, path)
statusStr := strconv.Itoa(sw.Status)
@@ -98,3 +99,34 @@ func Prometheus(register prometheus.Registerer) func(http.Handler) http.Handler
})
}
}
// getRoutePattern returns the chi route pattern for the request, suitable
// as a bounded-cardinality metrics label. If chi has not resolved the
// pattern yet (the middleware may run before routing), the request is
// re-matched against the router. Unmatched /api/ paths collapse to
// "UNKNOWN" and all other unmatched paths to "STATIC" so arbitrary URLs
// cannot explode label cardinality.
func getRoutePattern(r *http.Request) string {
	rctx := chi.RouteContext(r.Context())
	if rctx == nil {
		return ""
	}

	if pattern := rctx.RoutePattern(); pattern != "" {
		// Pattern is already available
		return pattern
	}

	routePath := r.URL.Path
	if r.URL.RawPath != "" {
		// Prefer the raw (escaped) path when present so matching mirrors
		// what chi itself would see.
		routePath = r.URL.RawPath
	}

	tctx := chi.NewRouteContext()
	routes := rctx.Routes
	if routes != nil && !routes.Match(tctx, r.Method, routePath) {
		// No matching pattern. /api/* requests will be matched as "UNKNOWN"
		// All other ones will be matched as "STATIC".
		if strings.HasPrefix(routePath, "/api/") {
			return "UNKNOWN"
		}
		return "STATIC"
	}

	// tctx has the updated pattern, since Match mutates it
	return tctx.RoutePattern()
}
+91
View File
@@ -8,14 +8,19 @@ import (
"github.com/go-chi/chi/v5"
"github.com/prometheus/client_golang/prometheus"
cm "github.com/prometheus/client_model/go"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/coder/coder/v2/coderd/httpmw"
"github.com/coder/coder/v2/coderd/tracing"
"github.com/coder/coder/v2/testutil"
"github.com/coder/websocket"
)
func TestPrometheus(t *testing.T) {
t.Parallel()
t.Run("All", func(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("GET", "/", nil)
@@ -29,4 +34,90 @@ func TestPrometheus(t *testing.T) {
require.NoError(t, err)
require.Greater(t, len(metrics), 0)
})
t.Run("Concurrent", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
defer cancel()
reg := prometheus.NewRegistry()
promMW := httpmw.Prometheus(reg)
// Create a test handler to simulate a WebSocket connection
testHandler := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
conn, err := websocket.Accept(rw, r, nil)
if !assert.NoError(t, err, "failed to accept websocket") {
return
}
defer conn.Close(websocket.StatusGoingAway, "")
})
wrappedHandler := promMW(testHandler)
r := chi.NewRouter()
r.Use(tracing.StatusWriterMiddleware, promMW)
r.Get("/api/v2/build/{build}/logs", func(rw http.ResponseWriter, r *http.Request) {
wrappedHandler.ServeHTTP(rw, r)
})
srv := httptest.NewServer(r)
defer srv.Close()
// nolint: bodyclose
conn, _, err := websocket.Dial(ctx, srv.URL+"/api/v2/build/1/logs", nil)
require.NoError(t, err, "failed to dial WebSocket")
defer conn.Close(websocket.StatusNormalClosure, "")
metrics, err := reg.Gather()
require.NoError(t, err)
require.Greater(t, len(metrics), 0)
metricLabels := getMetricLabels(metrics)
concurrentWebsockets, ok := metricLabels["coderd_api_concurrent_websockets"]
require.True(t, ok, "coderd_api_concurrent_websockets metric not found")
require.Equal(t, "/api/v2/build/{build}/logs", concurrentWebsockets["path"])
})
t.Run("UserRoute", func(t *testing.T) {
t.Parallel()
reg := prometheus.NewRegistry()
promMW := httpmw.Prometheus(reg)
r := chi.NewRouter()
r.With(promMW).Get("/api/v2/users/{user}", func(w http.ResponseWriter, r *http.Request) {})
req := httptest.NewRequest("GET", "/api/v2/users/john", nil)
sw := &tracing.StatusWriter{ResponseWriter: httptest.NewRecorder()}
r.ServeHTTP(sw, req)
metrics, err := reg.Gather()
require.NoError(t, err)
require.Greater(t, len(metrics), 0)
metricLabels := getMetricLabels(metrics)
reqProcessed, ok := metricLabels["coderd_api_requests_processed_total"]
require.True(t, ok, "coderd_api_requests_processed_total metric not found")
require.Equal(t, "/api/v2/users/{user}", reqProcessed["path"])
require.Equal(t, "GET", reqProcessed["method"])
concurrentRequests, ok := metricLabels["coderd_api_concurrent_requests"]
require.True(t, ok, "coderd_api_concurrent_requests metric not found")
require.Equal(t, "/api/v2/users/{user}", concurrentRequests["path"])
require.Equal(t, "GET", concurrentRequests["method"])
})
}
// getMetricLabels flattens gathered metric families into a map from metric
// name to its label name/value pairs. If a family has several metrics,
// later ones overwrite earlier labels of the same name.
func getMetricLabels(metrics []*cm.MetricFamily) map[string]map[string]string {
	result := make(map[string]map[string]string, len(metrics))
	for _, family := range metrics {
		labels := map[string]string{}
		for _, m := range family.GetMetric() {
			for _, lp := range m.GetLabel() {
				labels[lp.GetName()] = lp.GetValue()
			}
		}
		result[family.GetName()] = labels
	}
	return result
}
+11
View File
@@ -6,8 +6,11 @@ import (
"github.com/go-chi/chi/v5"
"cdr.dev/slog"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/httpmw/loggermw"
"github.com/coder/coder/v2/codersdk"
)
@@ -81,6 +84,14 @@ func ExtractWorkspaceAgentParam(db database.Store) func(http.Handler) http.Handl
ctx = context.WithValue(ctx, workspaceAgentParamContextKey{}, agent)
chi.RouteContext(ctx).URLParams.Add("workspace", build.WorkspaceID.String())
if rlogger := loggermw.RequestLoggerFromContext(ctx); rlogger != nil {
rlogger.WithFields(
slog.F("workspace_name", resource.Name),
slog.F("agent_name", agent.Name),
)
}
next.ServeHTTP(rw, r.WithContext(ctx))
})
}
+15
View File
@@ -9,8 +9,11 @@ import (
"github.com/go-chi/chi/v5"
"github.com/google/uuid"
"cdr.dev/slog"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/httpmw/loggermw"
"github.com/coder/coder/v2/codersdk"
)
@@ -48,6 +51,11 @@ func ExtractWorkspaceParam(db database.Store) func(http.Handler) http.Handler {
}
ctx = context.WithValue(ctx, workspaceParamContextKey{}, workspace)
if rlogger := loggermw.RequestLoggerFromContext(ctx); rlogger != nil {
rlogger.WithFields(slog.F("workspace_name", workspace.Name))
}
next.ServeHTTP(rw, r.WithContext(ctx))
})
}
@@ -154,6 +162,13 @@ func ExtractWorkspaceAndAgentParam(db database.Store) func(http.Handler) http.Ha
ctx = context.WithValue(ctx, workspaceParamContextKey{}, workspace)
ctx = context.WithValue(ctx, workspaceAgentParamContextKey{}, agent)
if rlogger := loggermw.RequestLoggerFromContext(ctx); rlogger != nil {
rlogger.WithFields(
slog.F("workspace_name", workspace.Name),
slog.F("agent_name", agent.Name),
)
}
next.ServeHTTP(rw, r.WithContext(ctx))
})
}
+1 -1
View File
@@ -30,7 +30,7 @@ func (AGPLIDPSync) GroupSyncEntitled() bool {
return false
}
func (s AGPLIDPSync) UpdateGroupSettings(ctx context.Context, orgID uuid.UUID, db database.Store, settings GroupSyncSettings) error {
func (s AGPLIDPSync) UpdateGroupSyncSettings(ctx context.Context, orgID uuid.UUID, db database.Store, settings GroupSyncSettings) error {
orgResolver := s.Manager.OrganizationResolver(db, orgID)
err := s.SyncSettings.Group.SetRuntimeValue(ctx, orgResolver, &settings)
if err != nil {
+6 -3
View File
@@ -26,7 +26,7 @@ import (
type IDPSync interface {
OrganizationSyncEntitled() bool
OrganizationSyncSettings(ctx context.Context, db database.Store) (*OrganizationSyncSettings, error)
UpdateOrganizationSettings(ctx context.Context, db database.Store, settings OrganizationSyncSettings) error
UpdateOrganizationSyncSettings(ctx context.Context, db database.Store, settings OrganizationSyncSettings) error
// OrganizationSyncEnabled returns true if all OIDC users are assigned
// to organizations via org sync settings.
// This is used to know when to disable manual org membership assignment.
@@ -48,7 +48,7 @@ type IDPSync interface {
// on the settings used by IDPSync. This entry is thread safe and can be
// accessed concurrently. The settings are stored in the database.
GroupSyncSettings(ctx context.Context, orgID uuid.UUID, db database.Store) (*GroupSyncSettings, error)
UpdateGroupSettings(ctx context.Context, orgID uuid.UUID, db database.Store, settings GroupSyncSettings) error
UpdateGroupSyncSettings(ctx context.Context, orgID uuid.UUID, db database.Store, settings GroupSyncSettings) error
// RoleSyncEntitled returns true if the deployment is entitled to role syncing.
RoleSyncEntitled() bool
@@ -61,7 +61,7 @@ type IDPSync interface {
// RoleSyncSettings is similar to GroupSyncSettings. See GroupSyncSettings for
// rational.
RoleSyncSettings(ctx context.Context, orgID uuid.UUID, db database.Store) (*RoleSyncSettings, error)
UpdateRoleSettings(ctx context.Context, orgID uuid.UUID, db database.Store, settings RoleSyncSettings) error
UpdateRoleSyncSettings(ctx context.Context, orgID uuid.UUID, db database.Store, settings RoleSyncSettings) error
// ParseRoleClaims takes claims from an OIDC provider, and returns the params
// for role syncing. Most of the logic happens in SyncRoles.
ParseRoleClaims(ctx context.Context, mergedClaims jwt.MapClaims) (RoleParams, *HTTPError)
@@ -70,6 +70,9 @@ type IDPSync interface {
SyncRoles(ctx context.Context, db database.Store, user database.User, params RoleParams) error
}
// AGPLIDPSync implements the IDPSync interface
var _ IDPSync = AGPLIDPSync{}
// AGPLIDPSync is the configuration for syncing user information from an external
// IDP. All related code to syncing user information should be in this package.
type AGPLIDPSync struct {
+3 -1
View File
@@ -34,7 +34,7 @@ func (AGPLIDPSync) OrganizationSyncEnabled(_ context.Context, _ database.Store)
return false
}
func (s AGPLIDPSync) UpdateOrganizationSettings(ctx context.Context, db database.Store, settings OrganizationSyncSettings) error {
func (s AGPLIDPSync) UpdateOrganizationSyncSettings(ctx context.Context, db database.Store, settings OrganizationSyncSettings) error {
rlv := s.Manager.Resolver(db)
err := s.SyncSettings.Organization.SetRuntimeValue(ctx, rlv, &settings)
if err != nil {
@@ -45,6 +45,8 @@ func (s AGPLIDPSync) UpdateOrganizationSettings(ctx context.Context, db database
}
func (s AGPLIDPSync) OrganizationSyncSettings(ctx context.Context, db database.Store) (*OrganizationSyncSettings, error) {
// If this logic is ever updated, make sure to update the corresponding
// checkIDPOrgSync in coderd/telemetry/telemetry.go.
rlv := s.Manager.Resolver(db)
orgSettings, err := s.SyncSettings.Organization.Resolve(ctx, rlv)
if err != nil {
+1 -1
View File
@@ -42,7 +42,7 @@ func (AGPLIDPSync) SiteRoleSyncEnabled() bool {
return false
}
func (s AGPLIDPSync) UpdateRoleSettings(ctx context.Context, orgID uuid.UUID, db database.Store, settings RoleSyncSettings) error {
func (s AGPLIDPSync) UpdateRoleSyncSettings(ctx context.Context, orgID uuid.UUID, db database.Store, settings RoleSyncSettings) error {
orgResolver := s.Manager.OrganizationResolver(db, orgID)
err := s.SyncSettings.Role.SetRuntimeValue(ctx, orgResolver, &settings)
if err != nil {
+4
View File
@@ -20,6 +20,7 @@ import (
"github.com/coder/coder/v2/coderd/database/pubsub"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/httpmw"
"github.com/coder/coder/v2/coderd/httpmw/loggermw"
"github.com/coder/coder/v2/coderd/rbac"
"github.com/coder/coder/v2/coderd/rbac/policy"
"github.com/coder/coder/v2/coderd/util/slice"
@@ -536,6 +537,9 @@ func (f *logFollower) follow() {
return
}
// Log the request immediately instead of after it completes.
loggermw.RequestLoggerFromContext(f.ctx).WriteLog(f.ctx, http.StatusAccepted)
// no need to wait if the job is done
if f.complete {
return
+7
View File
@@ -19,6 +19,8 @@ import (
"github.com/coder/coder/v2/coderd/database/dbmock"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/database/pubsub"
"github.com/coder/coder/v2/coderd/httpmw/loggermw"
"github.com/coder/coder/v2/coderd/httpmw/loggermw/loggermock"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/provisionersdk"
"github.com/coder/coder/v2/testutil"
@@ -305,11 +307,16 @@ func Test_logFollower_EndOfLogs(t *testing.T) {
JobStatus: database.ProvisionerJobStatusRunning,
}
mockLogger := loggermock.NewMockRequestLogger(ctrl)
mockLogger.EXPECT().WriteLog(gomock.Any(), http.StatusAccepted).Times(1)
ctx = loggermw.WithRequestLogger(ctx, mockLogger)
// we need an HTTP server to get a websocket
srv := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
uut := newLogFollower(ctx, logger, mDB, ps, rw, r, job, 0)
uut.follow()
}))
defer srv.Close()
// job was incomplete when we create the logFollower, and still incomplete when it queries
+25
View File
@@ -57,6 +57,23 @@ func hashAuthorizeCall(actor Subject, action policy.Action, object Object) [32]b
return hashOut
}
// SubjectType represents the type of subject in the RBAC system.
type SubjectType string
const (
SubjectTypeUser SubjectType = "user"
SubjectTypeProvisionerd SubjectType = "provisionerd"
SubjectTypeAutostart SubjectType = "autostart"
SubjectTypeHangDetector SubjectType = "hang_detector"
SubjectTypeResourceMonitor SubjectType = "resource_monitor"
SubjectTypeCryptoKeyRotator SubjectType = "crypto_key_rotator"
SubjectTypeCryptoKeyReader SubjectType = "crypto_key_reader"
SubjectTypePrebuildsOrchestrator SubjectType = "prebuilds_orchestrator"
SubjectTypeSystemReadProvisionerDaemons SubjectType = "system_read_provisioner_daemons"
SubjectTypeSystemRestricted SubjectType = "system_restricted"
SubjectTypeNotifier SubjectType = "notifier"
)
// Subject is a struct that contains all the elements of a subject in an rbac
// authorize.
type Subject struct {
@@ -66,6 +83,14 @@ type Subject struct {
// external workspace proxy or other service type actor.
FriendlyName string
// Email is entirely optional and is used for logging and debugging
// It is not used in any functional way.
Email string
// Type indicates what kind of subject this is (user, system, provisioner, etc.)
// It is not used in any functional way, only for logging.
Type SubjectType
ID string
Roles ExpandableRoles
Groups []string
+6
View File
@@ -12,6 +12,9 @@ import (
"github.com/coder/coder/v2/coderd/database"
)
// NoopResolver implements the Resolver interface
var _ Resolver = &NoopResolver{}
// NoopResolver is a useful test device.
type NoopResolver struct{}
@@ -31,6 +34,9 @@ func (NoopResolver) DeleteRuntimeConfig(context.Context, string) error {
return ErrEntryNotFound
}
// StoreResolver implements the Resolver interface
var _ Resolver = &StoreResolver{}
// StoreResolver uses the database as the underlying store for runtime settings.
type StoreResolver struct {
db Store
+202 -9
View File
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"crypto/sha256"
"database/sql"
"encoding/json"
"errors"
"fmt"
@@ -14,6 +15,7 @@ import (
"regexp"
"runtime"
"slices"
"strconv"
"strings"
"sync"
"time"
@@ -41,6 +43,7 @@ const (
)
type Options struct {
Disabled bool
Database database.Store
Logger slog.Logger
// URL is an endpoint to direct telemetry towards!
@@ -115,8 +118,8 @@ type remoteReporter struct {
shutdownAt *time.Time
}
func (*remoteReporter) Enabled() bool {
return true
func (r *remoteReporter) Enabled() bool {
return !r.options.Disabled
}
func (r *remoteReporter) Report(snapshot *Snapshot) {
@@ -160,10 +163,12 @@ func (r *remoteReporter) Close() {
close(r.closed)
now := dbtime.Now()
r.shutdownAt = &now
// Report a final collection of telemetry prior to close!
// This could indicate final actions a user has taken, and
// the time the deployment was shutdown.
r.reportWithDeployment()
if r.Enabled() {
// Report a final collection of telemetry prior to close!
// This could indicate final actions a user has taken, and
// the time the deployment was shutdown.
r.reportWithDeployment()
}
r.closeFunc()
}
@@ -176,7 +181,74 @@ func (r *remoteReporter) isClosed() bool {
}
}
// ShouldReportTelemetryDisabled reports whether telemetry transitioned
// from enabled (per the recorded value) to disabled (per the current
// setting). A nil recorded value means no prior state exists, so nothing
// is reported. See the corresponding test in telemetry_test.go for a
// truth table.
func ShouldReportTelemetryDisabled(recordedTelemetryEnabled *bool, telemetryEnabled bool) bool {
	// No prior record: we cannot have observed an enabled->disabled change.
	if recordedTelemetryEnabled == nil {
		return false
	}
	// Report only on the enabled -> disabled transition.
	return *recordedTelemetryEnabled && !telemetryEnabled
}
// RecordTelemetryStatus records the telemetry status in the database.
// If the status changed from enabled to disabled, returns a snapshot to
// be sent to the telemetry server.
//
// It returns (nil, nil) when no enabled->disabled transition occurred.
func RecordTelemetryStatus( //nolint:revive
	ctx context.Context,
	logger slog.Logger,
	db database.Store,
	telemetryEnabled bool,
) (*Snapshot, error) {
	// Load the previously recorded status. sql.ErrNoRows simply means this
	// is the first run and nothing has been recorded yet.
	item, err := db.GetTelemetryItem(ctx, string(TelemetryItemKeyTelemetryEnabled))
	if err != nil && !errors.Is(err, sql.ErrNoRows) {
		return nil, xerrors.Errorf("get telemetry enabled: %w", err)
	}
	var recordedTelemetryEnabled *bool
	if !errors.Is(err, sql.ErrNoRows) {
		value, err := strconv.ParseBool(item.Value)
		if err != nil {
			logger.Debug(ctx, "parse telemetry enabled", slog.Error(err))
		}
		// If ParseBool fails, value will default to false.
		// This may happen if an admin manually edits the telemetry item
		// in the database.
		recordedTelemetryEnabled = &value
	}

	// Persist the current status regardless of whether it changed.
	if err := db.UpsertTelemetryItem(ctx, database.UpsertTelemetryItemParams{
		Key:   string(TelemetryItemKeyTelemetryEnabled),
		Value: strconv.FormatBool(telemetryEnabled),
	}); err != nil {
		return nil, xerrors.Errorf("upsert telemetry enabled: %w", err)
	}

	shouldReport := ShouldReportTelemetryDisabled(recordedTelemetryEnabled, telemetryEnabled)
	if !shouldReport {
		return nil, nil //nolint:nilnil
	}
	// If any of the following calls fail, we will never report that telemetry changed
	// from enabled to disabled. This is okay. We only want to ping the telemetry server
	// once, and never again. If that attempt fails, so be it.
	item, err = db.GetTelemetryItem(ctx, string(TelemetryItemKeyTelemetryEnabled))
	if err != nil {
		return nil, xerrors.Errorf("get telemetry enabled after upsert: %w", err)
	}
	return &Snapshot{
		TelemetryItems: []TelemetryItem{
			ConvertTelemetryItem(item),
		},
	}, nil
}
func (r *remoteReporter) runSnapshotter() {
telemetryDisabledSnapshot, err := RecordTelemetryStatus(r.ctx, r.options.Logger, r.options.Database, r.Enabled())
if err != nil {
r.options.Logger.Debug(r.ctx, "record and maybe report telemetry status", slog.Error(err))
}
if telemetryDisabledSnapshot != nil {
r.reportSync(telemetryDisabledSnapshot)
}
r.options.Logger.Debug(r.ctx, "finished telemetry status check")
if !r.Enabled() {
return
}
first := true
ticker := time.NewTicker(r.options.SnapshotFrequency)
defer ticker.Stop()
@@ -244,6 +316,11 @@ func (r *remoteReporter) deployment() error {
return xerrors.Errorf("install source must be <=64 chars: %s", installSource)
}
idpOrgSync, err := checkIDPOrgSync(r.ctx, r.options.Database, r.options.DeploymentConfig)
if err != nil {
r.options.Logger.Debug(r.ctx, "check IDP org sync", slog.Error(err))
}
data, err := json.Marshal(&Deployment{
ID: r.options.DeploymentID,
Architecture: sysInfo.Architecture,
@@ -263,6 +340,7 @@ func (r *remoteReporter) deployment() error {
MachineID: sysInfo.UniqueID,
StartedAt: r.startedAt,
ShutdownAt: r.shutdownAt,
IDPOrgSync: &idpOrgSync,
})
if err != nil {
return xerrors.Errorf("marshal deployment: %w", err)
@@ -284,6 +362,45 @@ func (r *remoteReporter) deployment() error {
return nil
}
// idpOrgSyncConfig is a subset of
// https://github.com/coder/coder/blob/5c6578d84e2940b9cfd04798c45e7c8042c3fe0e/coderd/idpsync/organization.go#L148
type idpOrgSyncConfig struct {
	Field string `json:"field"`
}

// checkIDPOrgSync reports whether IdP organization sync is configured,
// either via the runtime config stored in the database or, when no
// runtime config exists, via the deployment's OIDC organization-field
// server flag.
//
// It mirrors OrganizationSyncEnabled from
// enterprise/coderd/enidpsync/organizations.go with one deliberate
// difference: it does not check license entitlements, only whether the
// feature is configured. Reusing that function here is impractical due
// to coder/coder package structure and server initialization order, and
// the config check alone is sufficient for telemetry purposes.
//
// See https://github.com/coder/coder/pull/16323 for more details.
func checkIDPOrgSync(ctx context.Context, db database.Store, values *codersdk.DeploymentValues) (bool, error) {
	// Runtime-config key based on
	// https://github.com/coder/coder/blob/5c6578d84e2940b9cfd04798c45e7c8042c3fe0e/coderd/idpsync/idpsync.go#L168
	raw, err := db.GetRuntimeConfig(ctx, "organization-sync-settings")
	switch {
	case errors.Is(err, sql.ErrNoRows):
		// No runtime config stored; fall back to the server flag.
		return values != nil && values.OIDC.OrganizationField != "", nil
	case err != nil:
		return false, xerrors.Errorf("get runtime config: %w", err)
	}
	var cfg idpOrgSyncConfig
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		return false, xerrors.Errorf("unmarshal runtime config: %w", err)
	}
	return cfg.Field != "", nil
}
// createSnapshot collects a full snapshot from the database.
func (r *remoteReporter) createSnapshot() (*Snapshot, error) {
var (
@@ -518,6 +635,32 @@ func (r *remoteReporter) createSnapshot() (*Snapshot, error) {
}
return nil
})
eg.Go(func() error {
// Warning: When an organization is deleted, it's completely removed from
// the database. It will no longer be reported, and there will be no other
// indicator that it was deleted. This requires special handling when
// interpreting the telemetry data later.
orgs, err := r.options.Database.GetOrganizations(r.ctx, database.GetOrganizationsParams{})
if err != nil {
return xerrors.Errorf("get organizations: %w", err)
}
snapshot.Organizations = make([]Organization, 0, len(orgs))
for _, org := range orgs {
snapshot.Organizations = append(snapshot.Organizations, ConvertOrganization(org))
}
return nil
})
eg.Go(func() error {
items, err := r.options.Database.GetTelemetryItems(ctx)
if err != nil {
return xerrors.Errorf("get telemetry items: %w", err)
}
snapshot.TelemetryItems = make([]TelemetryItem, 0, len(items))
for _, item := range items {
snapshot.TelemetryItems = append(snapshot.TelemetryItems, ConvertTelemetryItem(item))
}
return nil
})
err := eg.Wait()
if err != nil {
@@ -916,6 +1059,23 @@ func ConvertExternalProvisioner(id uuid.UUID, tags map[string]string, provisione
}
}
// ConvertOrganization converts a database organization row to its
// telemetry representation. Only the ID, creation time, and default
// flag are included; the name is not reported.
func ConvertOrganization(org database.Organization) Organization {
	return Organization{
		ID:        org.ID,
		CreatedAt: org.CreatedAt,
		IsDefault: org.IsDefault,
	}
}
// ConvertTelemetryItem converts a telemetry_items database row to its
// telemetry representation. All fields are copied as-is.
func ConvertTelemetryItem(item database.TelemetryItem) TelemetryItem {
	return TelemetryItem{
		Key:       item.Key,
		Value:     item.Value,
		CreatedAt: item.CreatedAt,
		UpdatedAt: item.UpdatedAt,
	}
}
// Snapshot represents a point-in-time anonymized database dump.
// Data is aggregated by latest on the server-side, so partial data
// can be sent without issue.
@@ -942,6 +1102,8 @@ type Snapshot struct {
WorkspaceModules []WorkspaceModule `json:"workspace_modules"`
Workspaces []Workspace `json:"workspaces"`
NetworkEvents []NetworkEvent `json:"network_events"`
Organizations []Organization `json:"organizations"`
TelemetryItems []TelemetryItem `json:"telemetry_items"`
}
// Deployment contains information about the host running Coder.
@@ -964,6 +1126,9 @@ type Deployment struct {
MachineID string `json:"machine_id"`
StartedAt time.Time `json:"started_at"`
ShutdownAt *time.Time `json:"shutdown_at"`
// While IDPOrgSync will always be set, it's nullable to make
// the struct backwards compatible with older coder versions.
IDPOrgSync *bool `json:"idp_org_sync"`
}
type APIKey struct {
@@ -1457,8 +1622,36 @@ func NetworkEventFromProto(proto *tailnetproto.TelemetryEvent) (NetworkEvent, er
}, nil
}
// Organization is the anonymized telemetry representation of a Coder
// organization.
type Organization struct {
	ID        uuid.UUID `json:"id"`
	IsDefault bool      `json:"is_default"`
	CreatedAt time.Time `json:"created_at"`
}
// telemetryItemKey enumerates the well-known keys used in the
// telemetry_items table.
type telemetryItemKey string

// The comment below gets rid of the warning that the name "TelemetryItemKey" has
// the "Telemetry" prefix, and that stutters when you use it outside the package
// (telemetry.TelemetryItemKey...). "TelemetryItem" is the name of a database table,
// so it makes sense to use the "Telemetry" prefix.
//
//revive:disable:exported
const (
	TelemetryItemKeyHTMLFirstServedAt telemetryItemKey = "html_first_served_at"
	TelemetryItemKeyTelemetryEnabled  telemetryItemKey = "telemetry_enabled"
)

// TelemetryItem is the telemetry representation of a row in the
// telemetry_items table.
type TelemetryItem struct {
	Key       string    `json:"key"`
	Value     string    `json:"value"`
	CreatedAt time.Time `json:"created_at"`
	UpdatedAt time.Time `json:"updated_at"`
}
type noopReporter struct{}
func (*noopReporter) Report(_ *Snapshot) {}
func (*noopReporter) Enabled() bool { return false }
func (*noopReporter) Close() {}
func (*noopReporter) Report(_ *Snapshot) {}
func (*noopReporter) Enabled() bool { return false }
func (*noopReporter) Close() {}
func (*noopReporter) RunSnapshotter() {}
func (*noopReporter) ReportDisabledIfNeeded() error { return nil }
+200 -10
View File
@@ -22,7 +22,10 @@ import (
"github.com/coder/coder/v2/coderd/database/dbmem"
"github.com/coder/coder/v2/coderd/database/dbtestutil"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/idpsync"
"github.com/coder/coder/v2/coderd/runtimeconfig"
"github.com/coder/coder/v2/coderd/telemetry"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/testutil"
)
@@ -40,27 +43,42 @@ func TestTelemetry(t *testing.T) {
db := dbmem.New()
ctx := testutil.Context(t, testutil.WaitMedium)
org, err := db.GetDefaultOrganization(ctx)
require.NoError(t, err)
_, _ = dbgen.APIKey(t, db, database.APIKey{})
_ = dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
Provisioner: database.ProvisionerTypeTerraform,
StorageMethod: database.ProvisionerStorageMethodFile,
Type: database.ProvisionerJobTypeTemplateVersionDryRun,
Provisioner: database.ProvisionerTypeTerraform,
StorageMethod: database.ProvisionerStorageMethodFile,
Type: database.ProvisionerJobTypeTemplateVersionDryRun,
OrganizationID: org.ID,
})
_ = dbgen.Template(t, db, database.Template{
Provisioner: database.ProvisionerTypeTerraform,
Provisioner: database.ProvisionerTypeTerraform,
OrganizationID: org.ID,
})
sourceExampleID := uuid.NewString()
_ = dbgen.TemplateVersion(t, db, database.TemplateVersion{
SourceExampleID: sql.NullString{String: sourceExampleID, Valid: true},
OrganizationID: org.ID,
})
_ = dbgen.TemplateVersion(t, db, database.TemplateVersion{
OrganizationID: org.ID,
})
_ = dbgen.TemplateVersion(t, db, database.TemplateVersion{})
user := dbgen.User(t, db, database.User{})
_ = dbgen.Workspace(t, db, database.WorkspaceTable{})
_ = dbgen.Workspace(t, db, database.WorkspaceTable{
OrganizationID: org.ID,
})
_ = dbgen.WorkspaceApp(t, db, database.WorkspaceApp{
SharingLevel: database.AppSharingLevelOwner,
Health: database.WorkspaceAppHealthDisabled,
OpenIn: database.WorkspaceAppOpenInSlimWindow,
})
_ = dbgen.TelemetryItem(t, db, database.TelemetryItem{
Key: string(telemetry.TelemetryItemKeyHTMLFirstServedAt),
Value: time.Now().Format(time.RFC3339),
})
group := dbgen.Group(t, db, database.Group{})
_ = dbgen.GroupMember(t, db, database.GroupMemberTable{UserID: user.ID, GroupID: group.ID})
wsagent := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{})
@@ -112,7 +130,9 @@ func TestTelemetry(t *testing.T) {
require.Len(t, snapshot.WorkspaceAgentStats, 1)
require.Len(t, snapshot.WorkspaceProxies, 1)
require.Len(t, snapshot.WorkspaceModules, 1)
require.Len(t, snapshot.Organizations, 1)
// We create one item manually above. The other is TelemetryEnabled, created by the snapshotter.
require.Len(t, snapshot.TelemetryItems, 2)
wsa := snapshot.WorkspaceAgents[0]
require.Len(t, wsa.Subsystems, 2)
require.Equal(t, string(database.WorkspaceAgentSubsystemEnvbox), wsa.Subsystems[0])
@@ -128,6 +148,19 @@ func TestTelemetry(t *testing.T) {
})
require.Equal(t, tvs[0].SourceExampleID, &sourceExampleID)
require.Nil(t, tvs[1].SourceExampleID)
for _, entity := range snapshot.Workspaces {
require.Equal(t, entity.OrganizationID, org.ID)
}
for _, entity := range snapshot.ProvisionerJobs {
require.Equal(t, entity.OrganizationID, org.ID)
}
for _, entity := range snapshot.TemplateVersions {
require.Equal(t, entity.OrganizationID, org.ID)
}
for _, entity := range snapshot.Templates {
require.Equal(t, entity.OrganizationID, org.ID)
}
})
t.Run("HashedEmail", func(t *testing.T) {
t.Parallel()
@@ -243,6 +276,41 @@ func TestTelemetry(t *testing.T) {
require.Equal(t, c.want, telemetry.GetModuleSourceType(c.source))
}
})
t.Run("IDPOrgSync", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitMedium)
db, _ := dbtestutil.NewDB(t)
// 1. No org sync settings
deployment, _ := collectSnapshot(t, db, nil)
require.False(t, *deployment.IDPOrgSync)
// 2. Org sync settings set in server flags
deployment, _ = collectSnapshot(t, db, func(opts telemetry.Options) telemetry.Options {
opts.DeploymentConfig = &codersdk.DeploymentValues{
OIDC: codersdk.OIDCConfig{
OrganizationField: "organizations",
},
}
return opts
})
require.True(t, *deployment.IDPOrgSync)
// 3. Org sync settings set in runtime config
org, err := db.GetDefaultOrganization(ctx)
require.NoError(t, err)
sync := idpsync.NewAGPLSync(testutil.Logger(t), runtimeconfig.NewManager(), idpsync.DeploymentSyncSettings{})
err = sync.UpdateOrganizationSyncSettings(ctx, db, idpsync.OrganizationSyncSettings{
Field: "organizations",
Mapping: map[string][]uuid.UUID{
"first": {org.ID},
},
AssignDefault: true,
})
require.NoError(t, err)
deployment, _ = collectSnapshot(t, db, nil)
require.True(t, *deployment.IDPOrgSync)
})
}
// nolint:paralleltest
@@ -253,31 +321,153 @@ func TestTelemetryInstallSource(t *testing.T) {
require.Equal(t, "aws_marketplace", deployment.InstallSource)
}
func collectSnapshot(t *testing.T, db database.Store, addOptionsFn func(opts telemetry.Options) telemetry.Options) (*telemetry.Deployment, *telemetry.Snapshot) {
// TestTelemetryItem exercises the telemetry_items storage helpers:
// InsertTelemetryItemIfNotExists must not overwrite an existing value,
// while UpsertTelemetryItem must.
func TestTelemetryItem(t *testing.T) {
	t.Parallel()
	ctx := testutil.Context(t, testutil.WaitMedium)
	db, _ := dbtestutil.NewDB(t)
	// Random key avoids collisions across test runs sharing a database.
	key := testutil.GetRandomName(t)
	value := time.Now().Format(time.RFC3339)

	err := db.InsertTelemetryItemIfNotExists(ctx, database.InsertTelemetryItemIfNotExistsParams{
		Key:   key,
		Value: value,
	})
	require.NoError(t, err)

	item, err := db.GetTelemetryItem(ctx, key)
	require.NoError(t, err)
	require.Equal(t, item.Key, key)
	require.Equal(t, item.Value, value)

	// Inserting a new value should not update the existing value
	err = db.InsertTelemetryItemIfNotExists(ctx, database.InsertTelemetryItemIfNotExistsParams{
		Key:   key,
		Value: "new_value",
	})
	require.NoError(t, err)

	item, err = db.GetTelemetryItem(ctx, key)
	require.NoError(t, err)
	require.Equal(t, item.Value, value)

	// Upserting a new value should update the existing value
	err = db.UpsertTelemetryItem(ctx, database.UpsertTelemetryItemParams{
		Key:   key,
		Value: "new_value",
	})
	require.NoError(t, err)

	item, err = db.GetTelemetryItem(ctx, key)
	require.NoError(t, err)
	require.Equal(t, item.Value, "new_value")
}
// TestShouldReportTelemetryDisabled checks every combination of the
// recorded (database) state and the current telemetry-enabled state.
// The only combination that should report is: previously recorded as
// enabled, currently disabled.
func TestShouldReportTelemetryDisabled(t *testing.T) {
	t.Parallel()
	ptr := func(b bool) *bool { return &b }
	for _, tc := range []struct {
		name     string
		recorded *bool
		enabled  bool
		want     bool
	}{
		{name: "new deployment", recorded: nil, enabled: true, want: false},
		{name: "new deployment, telemetry disabled", recorded: nil, enabled: false, want: false},
		{name: "was enabled, still enabled", recorded: ptr(true), enabled: true, want: false},
		{name: "was enabled, now disabled", recorded: ptr(true), enabled: false, want: true},
		{name: "was disabled, now enabled", recorded: ptr(false), enabled: true, want: false},
		{name: "was disabled, still disabled", recorded: ptr(false), enabled: false, want: false},
	} {
		require.Equal(t, tc.want, telemetry.ShouldReportTelemetryDisabled(tc.recorded, tc.enabled), tc.name)
	}
}
// TestRecordTelemetryStatus verifies that RecordTelemetryStatus returns
// a snapshot only when telemetry transitions from enabled to disabled,
// and that repeated calls with an unchanged status never report again.
func TestRecordTelemetryStatus(t *testing.T) {
	t.Parallel()
	for _, testCase := range []struct {
		name string
		// "nil" means no value was previously recorded in the database.
		recordedTelemetryEnabled string
		telemetryEnabled         bool
		shouldReport             bool
	}{
		{name: "New deployment", recordedTelemetryEnabled: "nil", telemetryEnabled: true, shouldReport: false},
		{name: "Telemetry disabled", recordedTelemetryEnabled: "nil", telemetryEnabled: false, shouldReport: false},
		{name: "Telemetry was enabled and still is", recordedTelemetryEnabled: "true", telemetryEnabled: true, shouldReport: false},
		{name: "Telemetry was enabled but now disabled", recordedTelemetryEnabled: "true", telemetryEnabled: false, shouldReport: true},
		{name: "Telemetry was disabled now is enabled", recordedTelemetryEnabled: "false", telemetryEnabled: true, shouldReport: false},
		{name: "Telemetry was disabled still disabled", recordedTelemetryEnabled: "false", telemetryEnabled: false, shouldReport: false},
		{name: "Telemetry was disabled still disabled, invalid value", recordedTelemetryEnabled: "invalid", telemetryEnabled: false, shouldReport: false},
	} {
		testCase := testCase // capture loop variable (pre-Go 1.22 semantics)
		t.Run(testCase.name, func(t *testing.T) {
			t.Parallel()
			db, _ := dbtestutil.NewDB(t)
			ctx := testutil.Context(t, testutil.WaitMedium)
			logger := testutil.Logger(t)
			if testCase.recordedTelemetryEnabled != "nil" {
				// Seed the previously recorded status. Check the error so a
				// failed seed cannot silently invalidate the test.
				err := db.UpsertTelemetryItem(ctx, database.UpsertTelemetryItemParams{
					Key:   string(telemetry.TelemetryItemKeyTelemetryEnabled),
					Value: testCase.recordedTelemetryEnabled,
				})
				require.NoError(t, err)
			}
			snapshot1, err := telemetry.RecordTelemetryStatus(ctx, logger, db, testCase.telemetryEnabled)
			require.NoError(t, err)
			if testCase.shouldReport {
				require.NotNil(t, snapshot1)
				require.Equal(t, snapshot1.TelemetryItems[0].Key, string(telemetry.TelemetryItemKeyTelemetryEnabled))
				require.Equal(t, snapshot1.TelemetryItems[0].Value, "false")
			} else {
				require.Nil(t, snapshot1)
			}
			for i := 0; i < 3; i++ {
				// Whatever happens, subsequent calls should not report if telemetryEnabled didn't change
				snapshot2, err := telemetry.RecordTelemetryStatus(ctx, logger, db, testCase.telemetryEnabled)
				require.NoError(t, err)
				require.Nil(t, snapshot2)
			}
		})
	}
}
// mockTelemetryServer starts an in-process HTTP server that mimics the
// telemetry endpoints. Decoded payloads are delivered on the returned
// buffered channels. The server is shut down via t.Cleanup.
//
// Fix: the stripped diff left WriteHeader called both before decoding
// (old position) and after sending on the channel (new position); a
// second WriteHeader is superfluous. Only the post-send call is kept,
// matching the intent of the inline comments.
func mockTelemetryServer(t *testing.T) (*url.URL, chan *telemetry.Deployment, chan *telemetry.Snapshot) {
	t.Helper()
	deployment := make(chan *telemetry.Deployment, 64)
	snapshot := make(chan *telemetry.Snapshot, 64)
	r := chi.NewRouter()
	r.Post("/deployment", func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, buildinfo.Version(), r.Header.Get(telemetry.VersionHeader))
		dd := &telemetry.Deployment{}
		err := json.NewDecoder(r.Body).Decode(dd)
		require.NoError(t, err)
		deployment <- dd
		// Ensure the header is sent only after deployment is sent
		w.WriteHeader(http.StatusAccepted)
	})
	r.Post("/snapshot", func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, buildinfo.Version(), r.Header.Get(telemetry.VersionHeader))
		ss := &telemetry.Snapshot{}
		err := json.NewDecoder(r.Body).Decode(ss)
		require.NoError(t, err)
		snapshot <- ss
		// Ensure the header is sent only after snapshot is sent
		w.WriteHeader(http.StatusAccepted)
	})
	server := httptest.NewServer(r)
	t.Cleanup(server.Close)
	serverURL, err := url.Parse(server.URL)
	require.NoError(t, err)
	return serverURL, deployment, snapshot
}
func collectSnapshot(t *testing.T, db database.Store, addOptionsFn func(opts telemetry.Options) telemetry.Options) (*telemetry.Deployment, *telemetry.Snapshot) {
t.Helper()
serverURL, deployment, snapshot := mockTelemetryServer(t)
options := telemetry.Options{
Database: db,
Logger: testutil.Logger(t),
+21 -7
View File
@@ -918,6 +918,7 @@ func (api *API) putUserStatus(status database.UserStatus) func(rw http.ResponseW
func (api *API) notifyUserStatusChanged(ctx context.Context, actingUserName string, targetUser database.User, status database.UserStatus) error {
var labels map[string]string
var data map[string]any
var adminTemplateID, personalTemplateID uuid.UUID
switch status {
case database.UserStatusSuspended:
@@ -926,6 +927,9 @@ func (api *API) notifyUserStatusChanged(ctx context.Context, actingUserName stri
"suspended_account_user_name": targetUser.Name,
"initiator": actingUserName,
}
data = map[string]any{
"user": map[string]any{"id": targetUser.ID, "name": targetUser.Name, "email": targetUser.Email},
}
adminTemplateID = notifications.TemplateUserAccountSuspended
personalTemplateID = notifications.TemplateYourAccountSuspended
case database.UserStatusActive:
@@ -934,6 +938,9 @@ func (api *API) notifyUserStatusChanged(ctx context.Context, actingUserName stri
"activated_account_user_name": targetUser.Name,
"initiator": actingUserName,
}
data = map[string]any{
"user": map[string]any{"id": targetUser.ID, "name": targetUser.Name, "email": targetUser.Email},
}
adminTemplateID = notifications.TemplateUserAccountActivated
personalTemplateID = notifications.TemplateYourAccountActivated
default:
@@ -949,16 +956,16 @@ func (api *API) notifyUserStatusChanged(ctx context.Context, actingUserName stri
// Send notifications to user admins and affected user
for _, u := range userAdmins {
// nolint:gocritic // Need notifier actor to enqueue notifications
if _, err := api.NotificationsEnqueuer.Enqueue(dbauthz.AsNotifier(ctx), u.ID, adminTemplateID,
labels, "api-put-user-status",
if _, err := api.NotificationsEnqueuer.EnqueueWithData(dbauthz.AsNotifier(ctx), u.ID, adminTemplateID,
labels, data, "api-put-user-status",
targetUser.ID,
); err != nil {
api.Logger.Warn(ctx, "unable to notify about changed user's status", slog.F("affected_user", targetUser.Username), slog.Error(err))
}
}
// nolint:gocritic // Need notifier actor to enqueue notifications
if _, err := api.NotificationsEnqueuer.Enqueue(dbauthz.AsNotifier(ctx), targetUser.ID, personalTemplateID,
labels, "api-put-user-status",
if _, err := api.NotificationsEnqueuer.EnqueueWithData(dbauthz.AsNotifier(ctx), targetUser.ID, personalTemplateID,
labels, data, "api-put-user-status",
targetUser.ID,
); err != nil {
api.Logger.Warn(ctx, "unable to notify user about status change of their account", slog.F("affected_user", targetUser.Username), slog.Error(err))
@@ -1424,13 +1431,20 @@ func (api *API) CreateUser(ctx context.Context, store database.Store, req Create
}
for _, u := range userAdmins {
// nolint:gocritic // Need notifier actor to enqueue notifications
if _, err := api.NotificationsEnqueuer.Enqueue(dbauthz.AsNotifier(ctx), u.ID, notifications.TemplateUserAccountCreated,
if _, err := api.NotificationsEnqueuer.EnqueueWithData(
// nolint:gocritic // Need notifier actor to enqueue notifications
dbauthz.AsNotifier(ctx),
u.ID,
notifications.TemplateUserAccountCreated,
map[string]string{
"created_account_name": user.Username,
"created_account_user_name": user.Name,
"initiator": req.accountCreatorName,
}, "api-users-create",
},
map[string]any{
"user": map[string]any{"id": user.ID, "name": user.Name, "email": user.Email},
},
"api-users-create",
user.ID,
); err != nil {
api.Logger.Warn(ctx, "unable to notify about created user", slog.F("created_user", user.Username), slog.Error(err))
+20 -6
View File
@@ -392,12 +392,19 @@ func TestNotifyUserStatusChanged(t *testing.T) {
// Validate that each expected notification is present in notifyEnq.Sent()
for _, expected := range expectedNotifications {
found := false
for _, sent := range notifyEnq.Sent() {
for _, sent := range notifyEnq.Sent(notificationstest.WithTemplateID(expected.TemplateID)) {
if sent.TemplateID == expected.TemplateID &&
sent.UserID == expected.UserID &&
slices.Contains(sent.Targets, member.ID) &&
sent.Labels[label] == member.Username {
found = true
require.IsType(t, map[string]any{}, sent.Data["user"])
userData := sent.Data["user"].(map[string]any)
require.Equal(t, member.ID, userData["id"])
require.Equal(t, member.Name, userData["name"])
require.Equal(t, member.Email, userData["email"])
break
}
}
@@ -858,11 +865,18 @@ func TestNotifyCreatedUser(t *testing.T) {
require.NoError(t, err)
// then
require.Len(t, notifyEnq.Sent(), 1)
require.Equal(t, notifications.TemplateUserAccountCreated, notifyEnq.Sent()[0].TemplateID)
require.Equal(t, firstUser.UserID, notifyEnq.Sent()[0].UserID)
require.Contains(t, notifyEnq.Sent()[0].Targets, user.ID)
require.Equal(t, user.Username, notifyEnq.Sent()[0].Labels["created_account_name"])
sent := notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateUserAccountCreated))
require.Len(t, sent, 1)
require.Equal(t, notifications.TemplateUserAccountCreated, sent[0].TemplateID)
require.Equal(t, firstUser.UserID, sent[0].UserID)
require.Contains(t, sent[0].Targets, user.ID)
require.Equal(t, user.Username, sent[0].Labels["created_account_name"])
require.IsType(t, map[string]any{}, sent[0].Data["user"])
userData := sent[0].Data["user"].(map[string]any)
require.Equal(t, user.ID, userData["id"])
require.Equal(t, user.Name, userData["name"])
require.Equal(t, user.Email, userData["email"])
})
t.Run("UserAdminNotified", func(t *testing.T) {
+10
View File
@@ -31,6 +31,7 @@ import (
"github.com/coder/coder/v2/coderd/externalauth"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/httpmw"
"github.com/coder/coder/v2/coderd/httpmw/loggermw"
"github.com/coder/coder/v2/coderd/jwtutils"
"github.com/coder/coder/v2/coderd/rbac"
"github.com/coder/coder/v2/coderd/rbac/policy"
@@ -462,6 +463,9 @@ func (api *API) workspaceAgentLogs(rw http.ResponseWriter, r *http.Request) {
t := time.NewTicker(recheckInterval)
defer t.Stop()
// Log the request immediately instead of after it completes.
loggermw.RequestLoggerFromContext(ctx).WriteLog(ctx, http.StatusAccepted)
go func() {
defer func() {
logger.Debug(ctx, "end log streaming loop")
@@ -742,6 +746,9 @@ func (api *API) derpMapUpdates(rw http.ResponseWriter, r *http.Request) {
encoder := wsjson.NewEncoder[*tailcfg.DERPMap](ws, websocket.MessageBinary)
defer encoder.Close(websocket.StatusGoingAway)
// Log the request immediately instead of after it completes.
loggermw.RequestLoggerFromContext(ctx).WriteLog(ctx, http.StatusAccepted)
go func(ctx context.Context) {
// TODO(mafredri): Is this too frequent? Use separate ping disconnect timeout?
t := time.NewTicker(api.AgentConnectionUpdateFrequency)
@@ -1105,6 +1112,9 @@ func (api *API) watchWorkspaceAgentMetadata(rw http.ResponseWriter, r *http.Requ
sendTicker := time.NewTicker(sendInterval)
defer sendTicker.Stop()
// Log the request immediately instead of after it completes.
loggermw.RequestLoggerFromContext(ctx).WriteLog(ctx, http.StatusAccepted)
// Send initial metadata.
sendMetadata()
+1 -1
View File
@@ -527,7 +527,7 @@ func (api *API) notifyWorkspaceUpdated(
"workspace": map[string]any{"id": workspace.ID, "name": workspace.Name},
"template": map[string]any{"id": template.ID, "name": template.Name},
"template_version": map[string]any{"id": version.ID, "name": version.Name},
"owner": map[string]any{"id": owner.ID, "name": owner.Name},
"owner": map[string]any{"id": owner.ID, "name": owner.Name, "email": owner.Email},
"parameters": buildParameters,
},
"api-workspaces-updated",
+7 -1
View File
@@ -648,7 +648,7 @@ func TestWorkspaceBuildWithUpdatedTemplateVersionSendsNotification(t *testing.T)
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true, NotificationsEnqueuer: notify})
first := coderdtest.CreateFirstUser(t, client)
templateAdminClient, templateAdmin := coderdtest.CreateAnotherUser(t, client, first.OrganizationID, rbac.RoleTemplateAdmin())
userClient, _ := coderdtest.CreateAnotherUser(t, client, first.OrganizationID)
userClient, user := coderdtest.CreateAnotherUser(t, client, first.OrganizationID)
// Create a template with an initial version
version := coderdtest.CreateTemplateVersion(t, templateAdminClient, first.OrganizationID, nil)
@@ -684,6 +684,12 @@ func TestWorkspaceBuildWithUpdatedTemplateVersionSendsNotification(t *testing.T)
require.Contains(t, sent[0].Targets, workspace.ID)
require.Contains(t, sent[0].Targets, workspace.OrganizationID)
require.Contains(t, sent[0].Targets, workspace.OwnerID)
owner, ok := sent[0].Data["owner"].(map[string]any)
require.True(t, ok, "notification data should have owner")
require.Equal(t, user.ID, owner["id"])
require.Equal(t, user.Name, owner["name"])
require.Equal(t, user.Email, owner["email"])
})
}
+1 -1
View File
@@ -809,7 +809,7 @@ func (api *API) notifyWorkspaceCreated(
"workspace": map[string]any{"id": workspace.ID, "name": workspace.Name},
"template": map[string]any{"id": template.ID, "name": template.Name},
"template_version": map[string]any{"id": version.ID, "name": version.Name},
"owner": map[string]any{"id": owner.ID, "name": owner.Name},
"owner": map[string]any{"id": owner.ID, "name": owner.Name, "email": owner.Email},
"parameters": buildParameters,
},
"api-workspaces-create",
+6
View File
@@ -639,6 +639,12 @@ func TestPostWorkspacesByOrganization(t *testing.T) {
require.Contains(t, sent[0].Targets, workspace.ID)
require.Contains(t, sent[0].Targets, workspace.OrganizationID)
require.Contains(t, sent[0].Targets, workspace.OwnerID)
owner, ok := sent[0].Data["owner"].(map[string]any)
require.True(t, ok, "notification data should have owner")
require.Equal(t, memberUser.ID, owner["id"])
require.Equal(t, memberUser.Name, owner["name"])
require.Equal(t, memberUser.Email, owner["email"])
})
t.Run("CreateWithAuditLogs", func(t *testing.T) {
+124
View File
@@ -12,6 +12,13 @@ import (
"golang.org/x/xerrors"
)
// IDPSyncMapping describes one IdP claim-to-resource assignment: users
// presenting the Given claim are mapped to the Coder resource identified
// by Gets. The resource ID is a uuid.UUID for groups/organizations and a
// string for roles.
type IDPSyncMapping[ResourceIdType uuid.UUID | string] struct {
	// The IdP claim the user has
	Given string
	// The ID of the Coder resource the user should be added to
	Gets ResourceIdType
}
type GroupSyncSettings struct {
// Field is the name of the claim field that specifies what groups a user
// should be in. If empty, no groups will be synced.
@@ -61,6 +68,46 @@ func (c *Client) PatchGroupIDPSyncSettings(ctx context.Context, orgID string, re
return resp, json.NewDecoder(res.Body).Decode(&resp)
}
// PatchGroupIDPSyncConfigRequest is the request body for updating the
// group IdP sync configuration of an organization.
type PatchGroupIDPSyncConfigRequest struct {
	Field             string         `json:"field"`
	RegexFilter       *regexp.Regexp `json:"regex_filter"`
	AutoCreateMissing bool           `json:"auto_create_missing_groups"`
}

// PatchGroupIDPSyncConfig updates the group IdP sync configuration for
// the given organization and returns the settings as stored.
func (c *Client) PatchGroupIDPSyncConfig(ctx context.Context, orgID string, req PatchGroupIDPSyncConfigRequest) (GroupSyncSettings, error) {
	endpoint := fmt.Sprintf("/api/v2/organizations/%s/settings/idpsync/groups/config", orgID)
	res, err := c.Request(ctx, http.MethodPatch, endpoint, req)
	if err != nil {
		return GroupSyncSettings{}, xerrors.Errorf("make request: %w", err)
	}
	defer res.Body.Close()

	if res.StatusCode != http.StatusOK {
		return GroupSyncSettings{}, ReadBodyAsError(res)
	}
	var settings GroupSyncSettings
	decodeErr := json.NewDecoder(res.Body).Decode(&settings)
	return settings, decodeErr
}
// PatchGroupIDPSyncMappingRequest is the request body for incrementally
// editing the group IdP sync mapping of an organization.
// If the same mapping is present in both Add and Remove, Remove will take precedence.
type PatchGroupIDPSyncMappingRequest struct {
	Add    []IDPSyncMapping[uuid.UUID]
	Remove []IDPSyncMapping[uuid.UUID]
}

// PatchGroupIDPSyncMapping applies the requested mapping additions and
// removals for the given organization and returns the resulting
// settings.
func (c *Client) PatchGroupIDPSyncMapping(ctx context.Context, orgID string, req PatchGroupIDPSyncMappingRequest) (GroupSyncSettings, error) {
	res, err := c.Request(ctx, http.MethodPatch, fmt.Sprintf("/api/v2/organizations/%s/settings/idpsync/groups/mapping", orgID), req)
	if err != nil {
		return GroupSyncSettings{}, xerrors.Errorf("make request: %w", err)
	}
	defer res.Body.Close()

	if res.StatusCode != http.StatusOK {
		return GroupSyncSettings{}, ReadBodyAsError(res)
	}
	var resp GroupSyncSettings
	return resp, json.NewDecoder(res.Body).Decode(&resp)
}
type RoleSyncSettings struct {
// Field is the name of the claim field that specifies what organization roles
// a user should be given. If empty, no roles will be synced.
@@ -97,6 +144,44 @@ func (c *Client) PatchRoleIDPSyncSettings(ctx context.Context, orgID string, req
return resp, json.NewDecoder(res.Body).Decode(&resp)
}
// PatchRoleIDPSyncConfigRequest is the request body for updating the
// role IdP sync configuration of an organization.
type PatchRoleIDPSyncConfigRequest struct {
	Field string `json:"field"`
}

// PatchRoleIDPSyncConfig updates the role IdP sync configuration for the
// given organization and returns the resulting settings.
func (c *Client) PatchRoleIDPSyncConfig(ctx context.Context, orgID string, req PatchRoleIDPSyncConfigRequest) (RoleSyncSettings, error) {
	res, err := c.Request(ctx, http.MethodPatch, fmt.Sprintf("/api/v2/organizations/%s/settings/idpsync/roles/config", orgID), req)
	if err != nil {
		return RoleSyncSettings{}, xerrors.Errorf("make request: %w", err)
	}
	defer res.Body.Close()

	if res.StatusCode != http.StatusOK {
		return RoleSyncSettings{}, ReadBodyAsError(res)
	}
	var resp RoleSyncSettings
	return resp, json.NewDecoder(res.Body).Decode(&resp)
}
// If the same mapping is present in both Add and Remove, Remove will take presidence.
type PatchRoleIDPSyncMappingRequest struct {
Add []IDPSyncMapping[string]
Remove []IDPSyncMapping[string]
}
// PatchRoleIDPSyncMapping applies the additions and removals described in
// req to the role IdP sync mapping of the given organization and returns
// the updated role sync settings.
func (c *Client) PatchRoleIDPSyncMapping(ctx context.Context, orgID string, req PatchRoleIDPSyncMappingRequest) (RoleSyncSettings, error) {
	endpoint := fmt.Sprintf("/api/v2/organizations/%s/settings/idpsync/roles/mapping", orgID)
	res, err := c.Request(ctx, http.MethodPatch, endpoint, req)
	if err != nil {
		return RoleSyncSettings{}, xerrors.Errorf("make request: %w", err)
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return RoleSyncSettings{}, ReadBodyAsError(res)
	}
	var settings RoleSyncSettings
	err = json.NewDecoder(res.Body).Decode(&settings)
	return settings, err
}
type OrganizationSyncSettings struct {
// Field selects the claim field to be used as the created user's
// organizations. If the field is the empty string, then no organization
@@ -137,6 +222,45 @@ func (c *Client) PatchOrganizationIDPSyncSettings(ctx context.Context, req Organ
return resp, json.NewDecoder(res.Body).Decode(&resp)
}
// PatchOrganizationIDPSyncConfigRequest is the request body for updating the
// deployment-wide organization IdP sync configuration. Field selects the IdP
// claim field used for organization sync. AssignDefault presumably controls
// whether synced users are also placed in the default organization —
// NOTE(review): confirm against server behavior.
type PatchOrganizationIDPSyncConfigRequest struct {
	Field         string `json:"field"`
	AssignDefault bool   `json:"assign_default"`
}
// PatchOrganizationIDPSyncConfig updates the deployment-wide organization
// IdP sync configuration and returns the updated organization sync settings.
func (c *Client) PatchOrganizationIDPSyncConfig(ctx context.Context, req PatchOrganizationIDPSyncConfigRequest) (OrganizationSyncSettings, error) {
	const endpoint = "/api/v2/settings/idpsync/organization/config"
	res, err := c.Request(ctx, http.MethodPatch, endpoint, req)
	if err != nil {
		return OrganizationSyncSettings{}, xerrors.Errorf("make request: %w", err)
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return OrganizationSyncSettings{}, ReadBodyAsError(res)
	}
	var settings OrganizationSyncSettings
	err = json.NewDecoder(res.Body).Decode(&settings)
	return settings, err
}
// PatchOrganizationIDPSyncMappingRequest describes the organization IdP sync
// mappings to add and remove.
// If the same mapping is present in both Add and Remove, Remove will take precedence.
type PatchOrganizationIDPSyncMappingRequest struct {
	Add    []IDPSyncMapping[uuid.UUID]
	Remove []IDPSyncMapping[uuid.UUID]
}
// PatchOrganizationIDPSyncMapping applies the additions and removals
// described in req to the deployment-wide organization IdP sync mapping and
// returns the updated organization sync settings.
func (c *Client) PatchOrganizationIDPSyncMapping(ctx context.Context, req PatchOrganizationIDPSyncMappingRequest) (OrganizationSyncSettings, error) {
	const endpoint = "/api/v2/settings/idpsync/organization/mapping"
	res, err := c.Request(ctx, http.MethodPatch, endpoint, req)
	if err != nil {
		return OrganizationSyncSettings{}, xerrors.Errorf("make request: %w", err)
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return OrganizationSyncSettings{}, ReadBodyAsError(res)
	}
	var settings OrganizationSyncSettings
	err = json.NewDecoder(res.Body).Decode(&settings)
	return settings, err
}
func (c *Client) GetAvailableIDPSyncFields(ctx context.Context) ([]string, error) {
res, err := c.Request(ctx, http.MethodGet, "/api/v2/settings/idpsync/available-fields", nil)
if err != nil {
+193 -228
View File
@@ -1,13 +1,39 @@
<!-- markdownlint-disable MD024 -->
# IDP Sync
# IdP Sync
<blockquote class="info">
IDP sync is an Enterprise and Premium feature.
IdP sync is an Enterprise and Premium feature.
[Learn more](https://coder.com/pricing#compare-plans).
</blockquote>
IdP (Identity provider) sync allows you to use OpenID Connect (OIDC) to
synchronize Coder groups, roles, and organizations based on claims from your IdP.
## Prerequisites
### Confirm that OIDC provider sends claims
To confirm that your OIDC provider is sending claims, log in with OIDC and visit
the following URL with an `Owner` account:
```text
https://[coder.example.com]/api/v2/debug/[your-username]/debug-link
```
You should see a field in either `id_token_claims`, `user_info_claims` or
both followed by a list of the user's OIDC groups in the response.
This is the [claim](https://openid.net/specs/openid-connect-core-1_0.html#Claims)
sent by the OIDC provider.
Depending on the OIDC provider, this claim might be called something else.
Common names include `groups`, `memberOf`, and `roles`.
See the [troubleshooting section](#troubleshooting-grouproleorganization-sync)
for help troubleshooting common issues.
## Group Sync
If your OpenID Connect provider supports group claims, you can configure Coder
@@ -21,115 +47,36 @@ If group sync is enabled, the user's groups will be controlled by the OIDC
provider. This means manual group additions/removals will be overwritten on the
next user login.
There are two ways you can configure group sync:
For deployments with multiple [organizations](./organizations.md), configure
group sync for each organization.
<div class="tabs">
## Server Flags
### Dashboard
1. Confirm that your OIDC provider is sending claims.
Log in with OIDC and visit the following URL with an `Owner` account:
1. Fetch the corresponding group IDs using the following endpoint:
```text
https://[coder.example.com]/api/v2/debug/[your-username]/debug-link
https://[coder.example.com]/api/v2/groups
```
You should see a field in either `id_token_claims`, `user_info_claims` or
both followed by a list of the user's OIDC groups in the response. This is
the [claim](https://openid.net/specs/openid-connect-core-1_0.html#Claims)
sent by the OIDC provider.
1. As an Owner or Organization Admin, go to **Admin settings**, select
**Organizations**, then **IdP Sync**:
See [Troubleshooting](#troubleshooting-grouproleorganization-sync) to debug
this.
![IdP Sync - Group sync settings](../../images/admin/users/organizations/group-sync-empty.png)
Depending on the OIDC provider, this claim may be called something else.
Common names include `groups`, `memberOf`, and `roles`.
1. Enter the **Group sync field** and an optional **Regex filter**, then select
**Save**.
1. Configure the Coder server to read groups from the claim name with the
[OIDC group field](../../reference/cli/server.md#--oidc-group-field) server
flag:
1. Select **Auto create missing groups** to automatically create groups
returned by the OIDC provider if they do not exist in Coder.
- Environment variable:
1. Enter the **IdP group name** and **Coder group**, then **Add IdP group**.
```sh
CODER_OIDC_GROUP_FIELD=groups
```
- As a flag:
```sh
--oidc-group-field groups
```
On login, users will automatically be assigned to groups that have matching
names in Coder and removed from groups that the user no longer belongs to.
For cases when an OIDC provider only returns group IDs or you want to have
different group names in Coder than in your OIDC provider, you can configure
mapping between the two with the
[OIDC group mapping](../../reference/cli/server.md#--oidc-group-mapping) server
flag:
- Environment variable:
```sh
CODER_OIDC_GROUP_MAPPING='{"myOIDCGroupID": "myCoderGroupName"}'
```
- As a flag:
```sh
--oidc-group-mapping '{"myOIDCGroupID": "myCoderGroupName"}'
```
Below is an example mapping in the Coder Helm chart:
```yaml
coder:
env:
- name: CODER_OIDC_GROUP_MAPPING
value: >
{"myOIDCGroupID": "myCoderGroupName"}
```
From the example above, users that belong to the `myOIDCGroupID` group in your
OIDC provider will be added to the `myCoderGroupName` group in Coder.
## Runtime (Organizations)
<blockquote class="admonition note">
You must have a Premium license with Organizations enabled to use this.
[Contact your account team](https://coder.com/contact) for more details.
</blockquote>
For deployments with multiple [organizations](./organizations.md), you must
configure group sync at the organization level. In future Coder versions, you
will be able to configure this in the UI. For now, you must use CLI commands.
### CLI
1. Confirm you have the [Coder CLI](../../install/index.md) installed and are
logged in with a user who is an Owner or Organization Admin role.
1. Confirm that your OIDC provider is sending a groups claim.
Log in with OIDC and visit the following URL:
```text
https://[coder.example.com]/api/v2/debug/[your-username]/debug-link
```
You should see a field in either `id_token_claims`, `user_info_claims` or
both followed by a list of the user's OIDC groups in the response. This is
the [claim](https://openid.net/specs/openid-connect-core-1_0.html#Claims)
sent by the OIDC provider.
See [Troubleshooting](#troubleshooting-grouproleorganization-sync) to debug
this.
Depending on the OIDC provider, this claim may be called something else.
Common names include `groups`, `memberOf`, and `roles`.
logged in with a user who is an Owner or has an Organization Admin role.
1. To fetch the current group sync settings for an organization, run the
following:
@@ -165,7 +112,7 @@ Below is an example that uses the `groups` claim and maps all groups prefixed by
<blockquote class="admonition note">
You much specify Coder group IDs instead of group names. The fastest way to find
You must specify Coder group IDs instead of group names. The fastest way to find
the ID for a corresponding group is by visiting
`https://coder.example.com/api/v2/groups`.
@@ -200,7 +147,67 @@ coder organizations settings set group-sync \
Visit the Coder UI to confirm these changes:
![IDP Sync](../../images/admin/users/organizations/group-sync.png)
![IdP Sync](../../images/admin/users/organizations/group-sync.png)
### Server Flags
<blockquote class="admonition note">
Use server flags only with Coder deployments with a single organization.
You can use the dashboard to configure group sync instead.
</blockquote>
1. Configure the Coder server to read groups from the claim name with the
[OIDC group field](../../reference/cli/server.md#--oidc-group-field) server
flag:
- Environment variable:
```sh
CODER_OIDC_GROUP_FIELD=groups
```
- As a flag:
```sh
--oidc-group-field groups
```
1. On login, users will automatically be assigned to groups that have matching
names in Coder and removed from groups that the user no longer belongs to.
1. For cases when an OIDC provider only returns group IDs or you want to have
different group names in Coder than in your OIDC provider, you can configure
mapping between the two with the
[OIDC group mapping](../../reference/cli/server.md#--oidc-group-mapping) server
flag:
- Environment variable:
```sh
CODER_OIDC_GROUP_MAPPING='{"myOIDCGroupID": "myCoderGroupName"}'
```
- As a flag:
```sh
--oidc-group-mapping '{"myOIDCGroupID": "myCoderGroupName"}'
```
Below is an example mapping in the Coder Helm chart:
```yaml
coder:
env:
- name: CODER_OIDC_GROUP_MAPPING
value: >
{"myOIDCGroupID": "myCoderGroupName"}
```
From this example, users that belong to the `myOIDCGroupID` group in your
OIDC provider will be added to the `myCoderGroupName` group in Coder.
</div>
@@ -214,88 +221,36 @@ Users who are not in a matching group will see the following error:
## Role Sync
<blockquote class="info">
Role sync is an Enterprise and Premium feature.
[Learn more](https://coder.com/pricing#compare-plans).
</blockquote>
If your OpenID Connect provider supports roles claims, you can configure Coder
to synchronize roles in your auth provider to roles within Coder.
There are two ways to do role sync: server flags assign site-wide roles, and
runtime organization role sync assigns organization roles.
<blockquote class="admonition note">
You must have a Premium license with Organizations enabled to use this.
[Contact your account team](https://coder.com/contact) for more details.
</blockquote>
For deployments with multiple [organizations](./organizations.md), configure
role sync at the organization level.
<div class="tabs">
## Server Flags
### Dashboard
1. Confirm that your OIDC provider is sending a roles claim by logging in with
OIDC and visiting the following URL with an `Owner` account:
1. As an Owner or Organization Admin, go to **Admin settings**, select
**Organizations**, then **IdP Sync**.
```text
https://[coder.example.com]/api/v2/debug/[your-username]/debug-link
```
1. Select the **Role sync settings** tab:
You should see a field in either `id_token_claims`, `user_info_claims` or
both followed by a list of the user's OIDC roles in the response. This is the
[claim](https://openid.net/specs/openid-connect-core-1_0.html#Claims) sent by
the OIDC provider.
![IdP Sync - Role sync settings](../../images/admin/users/organizations/role-sync-empty.png)
See [Troubleshooting](#troubleshooting-grouproleorganization-sync) to debug
this.
1. Enter the **Role sync field**, then select **Save**.
Depending on the OIDC provider, this claim may be called something else.
1. Enter the **IdP role name** and **Coder role**, then **Add IdP role**.
1. Configure the Coder server to read groups from the claim name with the
[OIDC role field](../../reference/cli/server.md#--oidc-user-role-field)
server flag:
To add a new custom role, select **Roles** from the sidebar, then
**Create custom role**.
1. Set the following in your Coder server [configuration](../setup/index.md).
Visit the [groups and roles documentation](./groups-roles.md) for more information.
```env
# Depending on your identity provider configuration, you may need to explicitly request a "roles" scope
CODER_OIDC_SCOPES=openid,profile,email,roles
### CLI
# The following fields are required for role sync:
CODER_OIDC_USER_ROLE_FIELD=roles
CODER_OIDC_USER_ROLE_MAPPING='{"TemplateAuthor":["template-admin","user-admin"]}'
```
One role from your identity provider can be mapped to many roles in Coder. The
example above maps to two roles in Coder.
## Runtime (Organizations)
For deployments with multiple [organizations](./organizations.md), you can
configure role sync at the organization level. In future Coder versions, you
will be able to configure this in the UI. For now, you must use CLI commands.
1. Confirm that your OIDC provider is sending a roles claim.
Log in with OIDC and visit the following URL with an `Owner` account:
```text
https://[coder.example.com]/api/v2/debug/[your-username]/debug-link
```
You should see a field in either `id_token_claims`, `user_info_claims` or
both followed by a list of the user's OIDC roles in the response. This is the
[claim](https://openid.net/specs/openid-connect-core-1_0.html#Claims) sent by
the OIDC provider.
See [Troubleshooting](#troubleshooting-grouproleorganization-sync) to debug
this.
Depending on the OIDC provider, this claim may be called something else.
1. Confirm you have the [Coder CLI](../../install/index.md) installed and are
logged in with a user who is an Owner or has an Organization Admin role.
1. To fetch the current group sync settings for an organization, run the
following:
@@ -316,7 +271,7 @@ will be able to configure this in the UI. For now, you must use CLI commands.
```
Below is an example that uses the `roles` claim and maps `coder-admins` from the
IDP as an `Organization Admin` and also maps to a custom `provisioner-admin`
IdP as an `Organization Admin` and also maps to a custom `provisioner-admin`
role:
```json
@@ -332,7 +287,7 @@ role:
<blockquote class="admonition note">
Be sure to use the `name` field for each role, not the display name. Use
`coder organization roles show --org=<your-org>` to see roles for your
`coder organization roles show --org=<your-org>` to see roles for your
organization.
</blockquote>
@@ -347,19 +302,40 @@ coder organizations settings set role-sync \
Visit the Coder UI to confirm these changes:
![IDP Sync](../../images/admin/users/organizations/role-sync.png)
![IdP Sync](../../images/admin/users/organizations/role-sync.png)
### Server Flags
<blockquote class="admonition note">
Use server flags only with Coder deployments with a single organization.
You can use the dashboard to configure role sync instead.
</blockquote>
1. Configure the Coder server to read groups from the claim name with the
[OIDC role field](../../reference/cli/server.md#--oidc-user-role-field)
server flag:
1. Set the following in your Coder server [configuration](../setup/index.md).
```env
# Depending on your identity provider configuration, you may need to explicitly request a "roles" scope
CODER_OIDC_SCOPES=openid,profile,email,roles
# The following fields are required for role sync:
CODER_OIDC_USER_ROLE_FIELD=roles
CODER_OIDC_USER_ROLE_MAPPING='{"TemplateAuthor":["template-admin","user-admin"]}'
```
One role from your identity provider can be mapped to many roles in Coder. The
example above maps to two roles in Coder.
</div>
## Organization Sync
<blockquote class="info">
Organization sync is an Enterprise and Premium feature.
[Learn more](https://coder.com/pricing#compare-plans).
</blockquote>
If your OpenID Connect provider supports groups/role claims, you can configure
Coder to synchronize claims in your auth provider to organizations within Coder.
@@ -370,28 +346,11 @@ Organization sync works across all organizations. On user login, the sync will
add and remove the user from organizations based on their IdP claims. After the
sync, the user's state should match that of the IdP.
You can initiate an organization sync through the CLI or through the Coder
dashboard:
You can initiate an organization sync through the Coder dashboard or CLI:
<div class="tabs">
## Dashboard
1. Confirm that your OIDC provider is sending claims. Log in with OIDC and visit
the following URL with an `Owner` account:
```text
https://[coder.example.com]/api/v2/debug/[your-username]/debug-link
```
You should see a field in either `id_token_claims`, `user_info_claims` or
both followed by a list of the user's OIDC groups in the response. This is
the [claim](https://openid.net/specs/openid-connect-core-1_0.html#Claims)
sent by the OIDC provider. See
[Troubleshooting](#troubleshooting-grouproleorganization-sync) to debug this.
Depending on the OIDC provider, this claim may be called something else.
Common names include `groups`, `memberOf`, and `roles`.
### Dashboard
1. Fetch the corresponding organization IDs using the following endpoint:
@@ -400,7 +359,7 @@ dashboard:
```
1. As a Coder organization user admin or site-wide user admin, go to
**Settings** > **IdP organization sync**.
**Admin settings** > **Deployment** and select **IdP organization sync**.
1. In the **Organization sync field** text box, enter the organization claim,
then select **Save**.
@@ -415,7 +374,7 @@ dashboard:
![IdP organization sync](../../images/admin/users/organizations/idp-org-sync.png)
## CLI
### CLI
Use the Coder CLI to show and adjust the settings.
@@ -467,11 +426,11 @@ settings, a user's memberships will update when they log out and log back in.
## Troubleshooting group/role/organization sync
Some common issues when enabling group/role sync.
Some common issues when enabling group, role, or organization sync.
### General guidelines
If you are running into issues with group/role sync:
If you are running into issues with a sync:
1. View your Coder server logs and enable
[verbose mode](../../reference/cli/index.md#-v---verbose).
@@ -487,7 +446,7 @@ If you are running into issues with group/role sync:
1. Attempt to log in, preferably with a user who has the `Owner` role.
The logs for a successful group sync look like this (human-readable):
The logs for a successful sync look like this (human-readable):
```sh
[debu] coderd.userauth: got oidc claims request_id=49e86507-6842-4b0b-94d4-f245e62e49f3 source=id_token claim_fields="[aio aud email exp groups iat idp iss name nbf oid preferred_username rh sub tid uti ver]" blank=[]
@@ -552,7 +511,7 @@ The application '<oidc_application>' asked for scope 'groups' that doesn't exist
This can happen because the identity provider has a different name for the
scope. For example, Azure AD uses `GroupMember.Read.All` instead of `groups`.
You can find the correct scope name in the IDP's documentation. Some IDP's allow
You can find the correct scope name in the IdP's documentation. Some IdPs allow
configuring the name of this scope.
The solution is to update the value of `CODER_OIDC_SCOPES` to the correct value
@@ -562,15 +521,15 @@ for the identity provider.
Steps to troubleshoot.
1. Ensure the user is a part of a group in the IDP. If the user has 0 groups, no
1. Ensure the user is a part of a group in the IdP. If the user has 0 groups, no
`groups` claim will be sent.
2. Check if another claim appears to be the correct claim with a different name.
A common name is `memberOf` instead of `groups`. If this is present, update
`CODER_OIDC_GROUP_FIELD=memberOf`.
3. Make sure the number of groups being sent is under the limit of the IDP. Some
IDPs will return an error, while others will just omit the `groups` claim. A
3. Make sure the number of groups being sent is under the limit of the IdP. Some
IdPs will return an error, while others will just omit the `groups` claim. A
common solution is to create a filter on the identity provider that returns
less than the limit for your IDP.
less than the limit for your IdP.
- [Azure AD limit is 200, and omits groups if exceeded.](https://learn.microsoft.com/en-us/azure/active-directory/hybrid/connect/how-to-connect-fed-group-claims#options-for-applications-to-consume-group-information)
- [Okta limit is 100, and returns an error if exceeded.](https://developer.okta.com/docs/reference/api/oidc/#scope-dependent-claims-not-always-returned)
@@ -582,32 +541,37 @@ Below are some details specific to individual OIDC providers.
> **Note:** Tested on ADFS 4.0, Windows Server 2019
1. In your Federation Server, create a new application group for Coder. Follow
the steps as described
[here.](https://learn.microsoft.com/en-us/windows-server/identity/ad-fs/development/msal/adfs-msal-web-app-web-api#app-registration-in-ad-fs)
1. In your Federation Server, create a new application group for Coder.
Follow the steps as described in the [Windows Server documentation]
(https://learn.microsoft.com/en-us/windows-server/identity/ad-fs/development/msal/adfs-msal-web-app-web-api#app-registration-in-ad-fs).
- **Server Application**: Note the Client ID.
- **Configure Application Credentials**: Note the Client Secret.
- **Configure Web API**: Set the Client ID as the relying party identifier.
- **Application Permissions**: Allow access to the claims `openid`, `email`,
`profile`, and `allatclaims`.
1. Visit your ADFS server's `/.well-known/openid-configuration` URL and note the
value for `issuer`.
> **Note:** This is usually of the form
> `https://adfs.corp/adfs/.well-known/openid-configuration`
This will look something like
`https://adfs.corp/adfs/.well-known/openid-configuration`.
1. In Coder's configuration file (or Helm values as appropriate), set the
following environment variables or their corresponding CLI arguments:
- `CODER_OIDC_ISSUER_URL`: the `issuer` value from the previous step.
- `CODER_OIDC_CLIENT_ID`: the Client ID from step 1.
- `CODER_OIDC_CLIENT_SECRET`: the Client Secret from step 1.
- `CODER_OIDC_ISSUER_URL`: `issuer` value from the previous step.
- `CODER_OIDC_CLIENT_ID`: Client ID from step 1.
- `CODER_OIDC_CLIENT_SECRET`: Client Secret from step 1.
- `CODER_OIDC_AUTH_URL_PARAMS`: set to
```console
```json
{"resource":"$CLIENT_ID"}
```
where `$CLIENT_ID` is the Client ID from step 1
([see here](https://learn.microsoft.com/en-us/windows-server/identity/ad-fs/overview/ad-fs-openid-connect-oauth-flows-scenarios#:~:text=scope%E2%80%AFopenid.-,resource,-optional)).
Where `$CLIENT_ID` is the Client ID from step 1.
Consult the Microsoft [AD FS OpenID Connect/OAuth flows and Application Scenarios documentation](https://learn.microsoft.com/en-us/windows-server/identity/ad-fs/overview/ad-fs-openid-connect-oauth-flows-scenarios#:~:text=scope%E2%80%AFopenid.-,resource,-optional) for more information.
This is required for the upstream OIDC provider to return the requested
claims.
@@ -615,34 +579,35 @@ Below are some details specific to individual OIDC providers.
1. Configure
[Issuance Transform Rules](https://learn.microsoft.com/en-us/windows-server/identity/ad-fs/operations/create-a-rule-to-send-ldap-attributes-as-claims)
on your federation server to send the following claims:
on your Federation Server to send the following claims:
- `preferred_username`: You can use e.g. "Display Name" as required.
- `email`: You can use e.g. the LDAP attribute "E-Mail-Addresses" as
required.
- `email_verified`: Create a custom claim rule:
```console
```json
=> issue(Type = "email_verified", Value = "true")
```
- (Optional) If using Group Sync, send the required groups in the configured
groups claim field. See [here](https://stackoverflow.com/a/55570286) for an
example.
groups claim field.
Use [this answer from Stack Overflow](https://stackoverflow.com/a/55570286) for an example.
### Keycloak
The access_type parameter has two possible values: "online" and "offline." By
default, the value is set to "offline". This means that when a user
authenticates using OIDC, the application requests offline access to the user's
resources, including the ability to refresh access tokens without requiring the
user to reauthenticate.
The `access_type` parameter has two possible values: `online` and `offline`.
By default, the value is set to `offline`.
To enable the `offline_access` scope, which allows for the refresh token
This means that when a user authenticates using OIDC, the application requests
offline access to the user's resources, including the ability to refresh access
tokens without requiring the user to reauthenticate.
To enable the `offline_access` scope which allows for the refresh token
functionality, you need to add it to the list of requested scopes during the
authentication flow. Including the `offline_access` scope in the requested
scopes ensures that the user is granted the necessary permissions to obtain
refresh tokens.
authentication flow.
Including the `offline_access` scope in the requested scopes ensures that the
user is granted the necessary permissions to obtain refresh tokens.
By combining the `{"access_type":"offline"}` parameter in the OIDC Auth URL with
the `offline_access` scope, you can achieve the desired behavior of obtaining
Binary file not shown.

After

Width:  |  Height:  |  Size: 54 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 83 KiB

After

Width:  |  Height:  |  Size: 64 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 80 KiB

After

Width:  |  Height:  |  Size: 59 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 262 KiB

After

Width:  |  Height:  |  Size: 91 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 237 KiB

After

Width:  |  Height:  |  Size: 97 KiB

+1 -1
View File
@@ -54,7 +54,7 @@ RUN mkdir -p /opt/terraform
# The below step is optional if you wish to keep the existing version.
# See https://github.com/coder/coder/blob/main/provisioner/terraform/install.go#L23-L24
# for supported Terraform versions.
ARG TERRAFORM_VERSION=1.9.8
ARG TERRAFORM_VERSION=1.10.5
RUN apk update && \
apk del terraform && \
curl -LOs https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip \
+1 -1
View File
@@ -284,7 +284,7 @@
"state": ["enterprise", "premium"]
},
{
"title": "IDP Sync",
"title": "IdP Sync",
"path": "./admin/users/idp-sync.md",
"state": ["enterprise", "premium"]
},
+378
View File
@@ -1953,6 +1953,141 @@ curl -X PATCH http://coder-server:8080/api/v2/organizations/{organization}/setti
To perform this operation, you must be authenticated. [Learn more](authentication.md).
## Update group IdP Sync config
### Code samples
```shell
# Example request using curl
curl -X PATCH http://coder-server:8080/api/v2/organizations/{organization}/settings/idpsync/groups/config \
-H 'Content-Type: application/json' \
-H 'Accept: application/json' \
-H 'Coder-Session-Token: API_KEY'
```
`PATCH /organizations/{organization}/settings/idpsync/groups/config`
> Body parameter
```json
{
"auto_create_missing_groups": true,
"field": "string",
"regex_filter": {}
}
```
### Parameters
| Name | In | Type | Required | Description |
|----------------|------|----------------------------------------------------------------------------------------------|----------|-------------------------|
| `organization` | path | string(uuid) | true | Organization ID or name |
| `body` | body | [codersdk.PatchGroupIDPSyncConfigRequest](schemas.md#codersdkpatchgroupidpsyncconfigrequest) | true | New config values |
### Example responses
> 200 Response
```json
{
"auto_create_missing_groups": true,
"field": "string",
"legacy_group_name_mapping": {
"property1": "string",
"property2": "string"
},
"mapping": {
"property1": [
"string"
],
"property2": [
"string"
]
},
"regex_filter": {}
}
```
### Responses
| Status | Meaning | Description | Schema |
|--------|---------------------------------------------------------|-------------|--------------------------------------------------------------------|
| 200 | [OK](https://tools.ietf.org/html/rfc7231#section-6.3.1) | OK | [codersdk.GroupSyncSettings](schemas.md#codersdkgroupsyncsettings) |
To perform this operation, you must be authenticated. [Learn more](authentication.md).
## Update group IdP Sync mapping
### Code samples
```shell
# Example request using curl
curl -X PATCH http://coder-server:8080/api/v2/organizations/{organization}/settings/idpsync/groups/mapping \
-H 'Content-Type: application/json' \
-H 'Accept: application/json' \
-H 'Coder-Session-Token: API_KEY'
```
`PATCH /organizations/{organization}/settings/idpsync/groups/mapping`
> Body parameter
```json
{
"add": [
{
"gets": "string",
"given": "string"
}
],
"remove": [
{
"gets": "string",
"given": "string"
}
]
}
```
### Parameters
| Name | In | Type | Required | Description |
|----------------|------|------------------------------------------------------------------------------------------------|----------|-----------------------------------------------|
| `organization` | path | string(uuid) | true | Organization ID or name |
| `body` | body | [codersdk.PatchGroupIDPSyncMappingRequest](schemas.md#codersdkpatchgroupidpsyncmappingrequest) | true | Description of the mappings to add and remove |
### Example responses
> 200 Response
```json
{
"auto_create_missing_groups": true,
"field": "string",
"legacy_group_name_mapping": {
"property1": "string",
"property2": "string"
},
"mapping": {
"property1": [
"string"
],
"property2": [
"string"
]
},
"regex_filter": {}
}
```
### Responses
| Status | Meaning | Description | Schema |
|--------|---------------------------------------------------------|-------------|--------------------------------------------------------------------|
| 200 | [OK](https://tools.ietf.org/html/rfc7231#section-6.3.1) | OK | [codersdk.GroupSyncSettings](schemas.md#codersdkgroupsyncsettings) |
To perform this operation, you must be authenticated. [Learn more](authentication.md).
## Get role IdP Sync settings by organization
### Code samples
@@ -2061,6 +2196,127 @@ curl -X PATCH http://coder-server:8080/api/v2/organizations/{organization}/setti
To perform this operation, you must be authenticated. [Learn more](authentication.md).
## Update role IdP Sync config
### Code samples
```shell
# Example request using curl
curl -X PATCH http://coder-server:8080/api/v2/organizations/{organization}/settings/idpsync/roles/config \
-H 'Content-Type: application/json' \
-H 'Accept: application/json' \
-H 'Coder-Session-Token: API_KEY'
```
`PATCH /organizations/{organization}/settings/idpsync/roles/config`
> Body parameter
```json
{
"field": "string"
}
```
### Parameters
| Name | In | Type | Required | Description |
|----------------|------|--------------------------------------------------------------------------------------------|----------|-------------------------|
| `organization` | path | string(uuid) | true | Organization ID or name |
| `body` | body | [codersdk.PatchRoleIDPSyncConfigRequest](schemas.md#codersdkpatchroleidpsyncconfigrequest) | true | New config values |
### Example responses
> 200 Response
```json
{
"field": "string",
"mapping": {
"property1": [
"string"
],
"property2": [
"string"
]
}
}
```
### Responses
| Status | Meaning | Description | Schema |
|--------|---------------------------------------------------------|-------------|------------------------------------------------------------------|
| 200 | [OK](https://tools.ietf.org/html/rfc7231#section-6.3.1) | OK | [codersdk.RoleSyncSettings](schemas.md#codersdkrolesyncsettings) |
To perform this operation, you must be authenticated. [Learn more](authentication.md).
## Update role IdP Sync mapping
### Code samples
```shell
# Example request using curl
curl -X PATCH http://coder-server:8080/api/v2/organizations/{organization}/settings/idpsync/roles/mapping \
-H 'Content-Type: application/json' \
-H 'Accept: application/json' \
-H 'Coder-Session-Token: API_KEY'
```
`PATCH /organizations/{organization}/settings/idpsync/roles/mapping`
> Body parameter
```json
{
"add": [
{
"gets": "string",
"given": "string"
}
],
"remove": [
{
"gets": "string",
"given": "string"
}
]
}
```
### Parameters
| Name | In | Type | Required | Description |
|----------------|------|----------------------------------------------------------------------------------------------|----------|-----------------------------------------------|
| `organization` | path | string(uuid) | true | Organization ID or name |
| `body` | body | [codersdk.PatchRoleIDPSyncMappingRequest](schemas.md#codersdkpatchroleidpsyncmappingrequest) | true | Description of the mappings to add and remove |
### Example responses
> 200 Response
```json
{
"field": "string",
"mapping": {
"property1": [
"string"
],
"property2": [
"string"
]
}
}
```
### Responses
| Status | Meaning | Description | Schema |
|--------|---------------------------------------------------------|-------------|------------------------------------------------------------------|
| 200 | [OK](https://tools.ietf.org/html/rfc7231#section-6.3.1) | OK | [codersdk.RoleSyncSettings](schemas.md#codersdkrolesyncsettings) |
To perform this operation, you must be authenticated. [Learn more](authentication.md).
## Fetch provisioner key details
### Code samples
@@ -2677,6 +2933,128 @@ curl -X PATCH http://coder-server:8080/api/v2/settings/idpsync/organization \
To perform this operation, you must be authenticated. [Learn more](authentication.md).
## Update organization IdP Sync config
### Code samples
```shell
# Example request using curl
curl -X PATCH http://coder-server:8080/api/v2/settings/idpsync/organization/config \
-H 'Content-Type: application/json' \
-H 'Accept: application/json' \
-H 'Coder-Session-Token: API_KEY'
```
`PATCH /settings/idpsync/organization/config`
> Body parameter
```json
{
"assign_default": true,
"field": "string"
}
```
### Parameters
| Name | In | Type | Required | Description |
|--------|------|------------------------------------------------------------------------------------------------------------|----------|-------------------|
| `body` | body | [codersdk.PatchOrganizationIDPSyncConfigRequest](schemas.md#codersdkpatchorganizationidpsyncconfigrequest) | true | New config values |
### Example responses
> 200 Response
```json
{
"field": "string",
"mapping": {
"property1": [
"string"
],
"property2": [
"string"
]
},
"organization_assign_default": true
}
```
### Responses
| Status | Meaning | Description | Schema |
|--------|---------------------------------------------------------|-------------|----------------------------------------------------------------------------------|
| 200 | [OK](https://tools.ietf.org/html/rfc7231#section-6.3.1) | OK | [codersdk.OrganizationSyncSettings](schemas.md#codersdkorganizationsyncsettings) |
To perform this operation, you must be authenticated. [Learn more](authentication.md).
## Update organization IdP Sync mapping
### Code samples
```shell
# Example request using curl
curl -X PATCH http://coder-server:8080/api/v2/settings/idpsync/organization/mapping \
-H 'Content-Type: application/json' \
-H 'Accept: application/json' \
-H 'Coder-Session-Token: API_KEY'
```
`PATCH /settings/idpsync/organization/mapping`
> Body parameter
```json
{
"add": [
{
"gets": "string",
"given": "string"
}
],
"remove": [
{
"gets": "string",
"given": "string"
}
]
}
```
### Parameters
| Name | In | Type | Required | Description |
|--------|------|--------------------------------------------------------------------------------------------------------------|----------|-----------------------------------------------|
| `body` | body | [codersdk.PatchOrganizationIDPSyncMappingRequest](schemas.md#codersdkpatchorganizationidpsyncmappingrequest) | true | Description of the mappings to add and remove |
### Example responses
> 200 Response
```json
{
"field": "string",
"mapping": {
"property1": [
"string"
],
"property2": [
"string"
]
},
"organization_assign_default": true
}
```
### Responses
| Status | Meaning | Description | Schema |
|--------|---------------------------------------------------------|-------------|----------------------------------------------------------------------------------|
| 200 | [OK](https://tools.ietf.org/html/rfc7231#section-6.3.1) | OK | [codersdk.OrganizationSyncSettings](schemas.md#codersdkorganizationsyncsettings) |
To perform this operation, you must be authenticated. [Learn more](authentication.md).
## Get template ACLs
### Code samples
+138
View File
@@ -4152,6 +4152,54 @@ Git clone makes use of this by parsing the URL from: 'Username for "https://gith
| » `[any property]` | array of string | false | | |
| `organization_assign_default` | boolean | false | | Organization assign default will ensure the default org is always included for every user, regardless of their claims. This preserves legacy behavior. |
## codersdk.PatchGroupIDPSyncConfigRequest
```json
{
"auto_create_missing_groups": true,
"field": "string",
"regex_filter": {}
}
```
### Properties
| Name | Type | Required | Restrictions | Description |
|------------------------------|--------------------------------|----------|--------------|-------------|
| `auto_create_missing_groups` | boolean | false | | |
| `field` | string | false | | |
| `regex_filter` | [regexp.Regexp](#regexpregexp) | false | | |
## codersdk.PatchGroupIDPSyncMappingRequest
```json
{
"add": [
{
"gets": "string",
"given": "string"
}
],
"remove": [
{
"gets": "string",
"given": "string"
}
]
}
```
### Properties
| Name | Type | Required | Restrictions | Description |
|-----------|-----------------|----------|--------------|----------------------------------------------------------|
| `add` | array of object | false | | |
| `» gets` | string | false | | The ID of the Coder resource the user should be added to |
| `» given` | string | false | | The IdP claim the user has |
| `remove` | array of object | false | | |
| `» gets` | string | false | | The ID of the Coder resource the user should be added to |
| `» given` | string | false | | The IdP claim the user has |
## codersdk.PatchGroupRequest
```json
@@ -4180,6 +4228,96 @@ Git clone makes use of this by parsing the URL from: 'Username for "https://gith
| `quota_allowance` | integer | false | | |
| `remove_users` | array of string | false | | |
## codersdk.PatchOrganizationIDPSyncConfigRequest
```json
{
"assign_default": true,
"field": "string"
}
```
### Properties
| Name | Type | Required | Restrictions | Description |
|------------------|---------|----------|--------------|-------------|
| `assign_default` | boolean | false | | |
| `field` | string | false | | |
## codersdk.PatchOrganizationIDPSyncMappingRequest
```json
{
"add": [
{
"gets": "string",
"given": "string"
}
],
"remove": [
{
"gets": "string",
"given": "string"
}
]
}
```
### Properties
| Name | Type | Required | Restrictions | Description |
|-----------|-----------------|----------|--------------|----------------------------------------------------------|
| `add` | array of object | false | | |
| `» gets` | string | false | | The ID of the Coder resource the user should be added to |
| `» given` | string | false | | The IdP claim the user has |
| `remove` | array of object | false | | |
| `» gets` | string | false | | The ID of the Coder resource the user should be added to |
| `» given` | string | false | | The IdP claim the user has |
## codersdk.PatchRoleIDPSyncConfigRequest
```json
{
"field": "string"
}
```
### Properties
| Name | Type | Required | Restrictions | Description |
|---------|--------|----------|--------------|-------------|
| `field` | string | false | | |
## codersdk.PatchRoleIDPSyncMappingRequest
```json
{
"add": [
{
"gets": "string",
"given": "string"
}
],
"remove": [
{
"gets": "string",
"given": "string"
}
]
}
```
### Properties
| Name | Type | Required | Restrictions | Description |
|-----------|-----------------|----------|--------------|----------------------------------------------------------|
| `add` | array of object | false | | |
| `» gets` | string | false | | The ID of the Coder resource the user should be added to |
| `» given` | string | false | | The IdP claim the user has |
| `remove` | array of object | false | | |
| `» gets` | string | false | | The ID of the Coder resource the user should be added to |
| `» given` | string | false | | The IdP claim the user has |
## codersdk.PatchTemplateVersionRequest
```json
@@ -94,17 +94,6 @@ provider such as Okta. A single claim from the identity provider (like
`memberOf`) can be used to sync site-wide roles, organizations, groups, and
organization roles.
### Planned enhancements
Site-wide role sync is managed via server flags. We plan on changing this to
runtime configuration so Coder does not need a re-deploy:
- Issue [coder/internal#86](https://github.com/coder/internal/issues/86)
Make all sync configurable via the dashboard UI:
- [coder/coder#15290](https://github.com/coder/coder/issues/15290)
Regex filters and mapping can be configured to ensure the proper resources are
allocated in Coder. Learn more about [IDP sync](../../admin/users/idp-sync.md).
+2 -2
View File
@@ -195,9 +195,9 @@ RUN apt-get update --quiet && apt-get install --yes \
# Configure FIPS-compliant policies
update-crypto-policies --set FIPS
# NOTE: In scripts/Dockerfile.base we specifically install Terraform version 1.9.8.
# NOTE: In scripts/Dockerfile.base we specifically install Terraform version 1.10.5.
# Installing the same version here to match.
RUN wget -O /tmp/terraform.zip "https://releases.hashicorp.com/terraform/1.9.8/terraform_1.9.8_linux_amd64.zip" && \
RUN wget -O /tmp/terraform.zip "https://releases.hashicorp.com/terraform/1.10.5/terraform_1.10.5_linux_amd64.zip" && \
unzip /tmp/terraform.zip -d /usr/local/bin && \
rm -f /tmp/terraform.zip && \
chmod +x /usr/local/bin/terraform && \
+10 -1
View File
@@ -295,7 +295,10 @@ func New(ctx context.Context, options *Options) (_ *API, err error) {
r.Route("/organization", func(r chi.Router) {
r.Get("/", api.organizationIDPSyncSettings)
r.Patch("/", api.patchOrganizationIDPSyncSettings)
r.Patch("/config", api.patchOrganizationIDPSyncConfig)
r.Patch("/mapping", api.patchOrganizationIDPSyncMapping)
})
r.Get("/available-fields", api.deploymentIDPSyncClaimFields)
r.Get("/field-values", api.deploymentIDPSyncClaimFieldValues)
})
@@ -307,11 +310,17 @@ func New(ctx context.Context, options *Options) (_ *API, err error) {
httpmw.ExtractOrganizationParam(api.Database),
)
r.Route("/organizations/{organization}/settings", func(r chi.Router) {
r.Get("/idpsync/available-fields", api.organizationIDPSyncClaimFields)
r.Get("/idpsync/groups", api.groupIDPSyncSettings)
r.Patch("/idpsync/groups", api.patchGroupIDPSyncSettings)
r.Patch("/idpsync/groups/config", api.patchGroupIDPSyncConfig)
r.Patch("/idpsync/groups/mapping", api.patchGroupIDPSyncMapping)
r.Get("/idpsync/roles", api.roleIDPSyncSettings)
r.Patch("/idpsync/roles", api.patchRoleIDPSyncSettings)
r.Patch("/idpsync/roles/config", api.patchRoleIDPSyncConfig)
r.Patch("/idpsync/roles/mapping", api.patchRoleIDPSyncMapping)
r.Get("/idpsync/available-fields", api.organizationIDPSyncClaimFields)
r.Get("/idpsync/field-values", api.organizationIDPSyncClaimFieldValues)
})
})
+2
View File
@@ -7,6 +7,8 @@ import (
"github.com/coder/coder/v2/coderd/runtimeconfig"
)
var _ idpsync.IDPSync = &EnterpriseIDPSync{}
// EnterpriseIDPSync enabled syncing user information from an external IDP.
// The sync is an enterprise feature, so this struct wraps the AGPL implementation
// and extends it with enterprise capabilities. These capabilities can entirely
@@ -19,6 +19,8 @@ func (e EnterpriseIDPSync) OrganizationSyncEnabled(ctx context.Context, db datab
return false
}
// If this logic is ever updated, make sure to update the corresponding
// checkIDPOrgSync in coderd/telemetry/telemetry.go.
settings, err := e.OrganizationSyncSettings(ctx, db)
if err == nil && settings.Field != "" {
return true
@@ -300,7 +300,7 @@ func TestOrganizationSync(t *testing.T) {
// Create a new sync object
sync := enidpsync.NewSync(logger, runtimeconfig.NewManager(), caseData.Entitlements, caseData.Settings)
if caseData.RuntimeSettings != nil {
err := sync.UpdateOrganizationSettings(ctx, rdb, *caseData.RuntimeSettings)
err := sync.UpdateOrganizationSyncSettings(ctx, rdb, *caseData.RuntimeSettings)
require.NoError(t, err)
}
+448 -4
View File
@@ -3,6 +3,7 @@ package coderd
import (
"fmt"
"net/http"
"slices"
"github.com/google/uuid"
@@ -14,6 +15,7 @@ import (
"github.com/coder/coder/v2/coderd/idpsync"
"github.com/coder/coder/v2/coderd/rbac"
"github.com/coder/coder/v2/coderd/rbac/policy"
"github.com/coder/coder/v2/coderd/util/slice"
"github.com/coder/coder/v2/codersdk"
)
@@ -59,7 +61,6 @@ func (api *API) patchGroupIDPSyncSettings(rw http.ResponseWriter, r *http.Reques
ctx := r.Context()
org := httpmw.OrganizationParam(r)
auditor := *api.AGPL.Auditor.Load()
aReq, commitAudit := audit.InitRequest[idpsync.GroupSyncSettings](rw, &audit.RequestParams{
Audit: auditor,
Log: api.Logger,
@@ -102,7 +103,7 @@ func (api *API) patchGroupIDPSyncSettings(rw http.ResponseWriter, r *http.Reques
}
aReq.Old = *existing
err = api.IDPSync.UpdateGroupSettings(sysCtx, org.ID, api.Database, idpsync.GroupSyncSettings{
err = api.IDPSync.UpdateGroupSyncSettings(sysCtx, org.ID, api.Database, idpsync.GroupSyncSettings{
Field: req.Field,
Mapping: req.Mapping,
RegexFilter: req.RegexFilter,
@@ -130,6 +131,153 @@ func (api *API) patchGroupIDPSyncSettings(rw http.ResponseWriter, r *http.Reques
})
}
// @Summary Update group IdP Sync config
// @ID update-group-idp-sync-config
// @Security CoderSessionToken
// @Produce json
// @Accept json
// @Tags Enterprise
// @Success 200 {object} codersdk.GroupSyncSettings
// @Param organization path string true "Organization ID or name" format(uuid)
// @Param request body codersdk.PatchGroupIDPSyncConfigRequest true "New config values"
// @Router /organizations/{organization}/settings/idpsync/groups/config [patch]
//
// patchGroupIDPSyncConfig updates only the config portion (field, regex
// filter, auto-create flag) of an organization's group sync settings while
// preserving the existing mapping. The write is audited.
func (api *API) patchGroupIDPSyncConfig(rw http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	org := httpmw.OrganizationParam(r)
	auditor := *api.AGPL.Auditor.Load()
	// Audit entry for the settings write; Old/New are populated below.
	aReq, commitAudit := audit.InitRequest[idpsync.GroupSyncSettings](rw, &audit.RequestParams{
		Audit:          auditor,
		Log:            api.Logger,
		Request:        r,
		Action:         database.AuditActionWrite,
		OrganizationID: org.ID,
	})
	defer commitAudit()

	if !api.Authorize(r, policy.ActionUpdate, rbac.ResourceIdpsyncSettings.InOrg(org.ID)) {
		httpapi.Forbidden(rw)
		return
	}

	var req codersdk.PatchGroupIDPSyncConfigRequest
	if !httpapi.Read(ctx, rw, r, &req) {
		return
	}

	var settings idpsync.GroupSyncSettings
	//nolint:gocritic // Requires system context to update runtime config
	sysCtx := dbauthz.AsSystemRestricted(ctx)
	// Read-modify-update so concurrent patches do not clobber each other.
	err := database.ReadModifyUpdate(api.Database, func(tx database.Store) error {
		existing, err := api.IDPSync.GroupSyncSettings(sysCtx, org.ID, tx)
		if err != nil {
			return err
		}
		aReq.Old = *existing

		// Only the config fields come from the request; Mapping and the
		// legacy name mapping are carried over from the existing settings.
		settings = idpsync.GroupSyncSettings{
			Field:             req.Field,
			RegexFilter:       req.RegexFilter,
			AutoCreateMissing: req.AutoCreateMissing,
			LegacyNameMapping: existing.LegacyNameMapping,
			Mapping:           existing.Mapping,
		}

		err = api.IDPSync.UpdateGroupSyncSettings(sysCtx, org.ID, tx, settings)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		httpapi.InternalServerError(rw, err)
		return
	}

	aReq.New = settings
	// Echo the stored settings back to the client.
	httpapi.Write(ctx, rw, http.StatusOK, codersdk.GroupSyncSettings{
		Field:             settings.Field,
		RegexFilter:       settings.RegexFilter,
		AutoCreateMissing: settings.AutoCreateMissing,
		LegacyNameMapping: settings.LegacyNameMapping,
		Mapping:           settings.Mapping,
	})
}
// @Summary Update group IdP Sync mapping
// @ID update-group-idp-sync-mapping
// @Security CoderSessionToken
// @Produce json
// @Accept json
// @Tags Enterprise
// @Success 200 {object} codersdk.GroupSyncSettings
// @Param organization path string true "Organization ID or name" format(uuid)
// @Param request body codersdk.PatchGroupIDPSyncMappingRequest true "Description of the mappings to add and remove"
// @Router /organizations/{organization}/settings/idpsync/groups/mapping [patch]
//
// patchGroupIDPSyncMapping applies an add/remove diff to the organization's
// group sync mapping, leaving every other setting unchanged. The write is
// audited.
func (api *API) patchGroupIDPSyncMapping(rw http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	org := httpmw.OrganizationParam(r)
	auditor := *api.AGPL.Auditor.Load()
	// Audit entry for the settings write; Old/New are populated below.
	aReq, commitAudit := audit.InitRequest[idpsync.GroupSyncSettings](rw, &audit.RequestParams{
		Audit:          auditor,
		Log:            api.Logger,
		Request:        r,
		Action:         database.AuditActionWrite,
		OrganizationID: org.ID,
	})
	defer commitAudit()

	if !api.Authorize(r, policy.ActionUpdate, rbac.ResourceIdpsyncSettings.InOrg(org.ID)) {
		httpapi.Forbidden(rw)
		return
	}

	var req codersdk.PatchGroupIDPSyncMappingRequest
	if !httpapi.Read(ctx, rw, r, &req) {
		return
	}

	var settings idpsync.GroupSyncSettings
	//nolint:gocritic // Requires system context to update runtime config
	sysCtx := dbauthz.AsSystemRestricted(ctx)
	// Read-modify-update so concurrent patches do not clobber each other.
	err := database.ReadModifyUpdate(api.Database, func(tx database.Store) error {
		existing, err := api.IDPSync.GroupSyncSettings(sysCtx, org.ID, tx)
		if err != nil {
			return err
		}
		aReq.Old = *existing

		// Only the mapping changes; all config fields are carried over.
		newMapping := applyIDPSyncMappingDiff(existing.Mapping, req.Add, req.Remove)
		settings = idpsync.GroupSyncSettings{
			Field:             existing.Field,
			RegexFilter:       existing.RegexFilter,
			AutoCreateMissing: existing.AutoCreateMissing,
			LegacyNameMapping: existing.LegacyNameMapping,
			Mapping:           newMapping,
		}

		err = api.IDPSync.UpdateGroupSyncSettings(sysCtx, org.ID, tx, settings)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		httpapi.InternalServerError(rw, err)
		return
	}

	aReq.New = settings
	// Echo the stored settings back to the client.
	httpapi.Write(ctx, rw, http.StatusOK, codersdk.GroupSyncSettings{
		Field:             settings.Field,
		RegexFilter:       settings.RegexFilter,
		AutoCreateMissing: settings.AutoCreateMissing,
		LegacyNameMapping: settings.LegacyNameMapping,
		Mapping:           settings.Mapping,
	})
}
// @Summary Get role IdP Sync settings by organization
// @ID get-role-idp-sync-settings-by-organization
// @Security CoderSessionToken
@@ -201,7 +349,7 @@ func (api *API) patchRoleIDPSyncSettings(rw http.ResponseWriter, r *http.Request
}
aReq.Old = *existing
err = api.IDPSync.UpdateRoleSettings(sysCtx, org.ID, api.Database, idpsync.RoleSyncSettings{
err = api.IDPSync.UpdateRoleSyncSettings(sysCtx, org.ID, api.Database, idpsync.RoleSyncSettings{
Field: req.Field,
Mapping: req.Mapping,
})
@@ -223,6 +371,141 @@ func (api *API) patchRoleIDPSyncSettings(rw http.ResponseWriter, r *http.Request
})
}
// @Summary Update role IdP Sync config
// @ID update-role-idp-sync-config
// @Security CoderSessionToken
// @Produce json
// @Accept json
// @Tags Enterprise
// @Success 200 {object} codersdk.RoleSyncSettings
// @Param organization path string true "Organization ID or name" format(uuid)
// @Param request body codersdk.PatchRoleIDPSyncConfigRequest true "New config values"
// @Router /organizations/{organization}/settings/idpsync/roles/config [patch]
//
// patchRoleIDPSyncConfig updates only the sync field of an organization's
// role sync settings while preserving the existing mapping. The write is
// audited.
func (api *API) patchRoleIDPSyncConfig(rw http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	org := httpmw.OrganizationParam(r)
	auditor := *api.AGPL.Auditor.Load()
	// Audit entry for the settings write; Old/New are populated below.
	aReq, commitAudit := audit.InitRequest[idpsync.RoleSyncSettings](rw, &audit.RequestParams{
		Audit:          auditor,
		Log:            api.Logger,
		Request:        r,
		Action:         database.AuditActionWrite,
		OrganizationID: org.ID,
	})
	defer commitAudit()

	if !api.Authorize(r, policy.ActionUpdate, rbac.ResourceIdpsyncSettings.InOrg(org.ID)) {
		httpapi.Forbidden(rw)
		return
	}

	var req codersdk.PatchRoleIDPSyncConfigRequest
	if !httpapi.Read(ctx, rw, r, &req) {
		return
	}

	var settings idpsync.RoleSyncSettings
	//nolint:gocritic // Requires system context to update runtime config
	sysCtx := dbauthz.AsSystemRestricted(ctx)
	// Read-modify-update so concurrent patches do not clobber each other.
	err := database.ReadModifyUpdate(api.Database, func(tx database.Store) error {
		existing, err := api.IDPSync.RoleSyncSettings(sysCtx, org.ID, tx)
		if err != nil {
			return err
		}
		aReq.Old = *existing

		// Only Field comes from the request; Mapping is carried over.
		settings = idpsync.RoleSyncSettings{
			Field:   req.Field,
			Mapping: existing.Mapping,
		}

		err = api.IDPSync.UpdateRoleSyncSettings(sysCtx, org.ID, tx, settings)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		httpapi.InternalServerError(rw, err)
		return
	}

	aReq.New = settings
	// Echo the stored settings back to the client.
	httpapi.Write(ctx, rw, http.StatusOK, codersdk.RoleSyncSettings{
		Field:   settings.Field,
		Mapping: settings.Mapping,
	})
}
// @Summary Update role IdP Sync mapping
// @ID update-role-idp-sync-mapping
// @Security CoderSessionToken
// @Produce json
// @Accept json
// @Tags Enterprise
// @Success 200 {object} codersdk.RoleSyncSettings
// @Param organization path string true "Organization ID or name" format(uuid)
// @Param request body codersdk.PatchRoleIDPSyncMappingRequest true "Description of the mappings to add and remove"
// @Router /organizations/{organization}/settings/idpsync/roles/mapping [patch]
//
// patchRoleIDPSyncMapping applies an add/remove diff to the organization's
// role sync mapping, leaving the sync field unchanged. The write is audited.
func (api *API) patchRoleIDPSyncMapping(rw http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	org := httpmw.OrganizationParam(r)
	auditor := *api.AGPL.Auditor.Load()
	// Audit entry for the settings write; Old/New are populated below.
	aReq, commitAudit := audit.InitRequest[idpsync.RoleSyncSettings](rw, &audit.RequestParams{
		Audit:          auditor,
		Log:            api.Logger,
		Request:        r,
		Action:         database.AuditActionWrite,
		OrganizationID: org.ID,
	})
	defer commitAudit()

	if !api.Authorize(r, policy.ActionUpdate, rbac.ResourceIdpsyncSettings.InOrg(org.ID)) {
		httpapi.Forbidden(rw)
		return
	}

	var req codersdk.PatchRoleIDPSyncMappingRequest
	if !httpapi.Read(ctx, rw, r, &req) {
		return
	}

	var settings idpsync.RoleSyncSettings
	//nolint:gocritic // Requires system context to update runtime config
	sysCtx := dbauthz.AsSystemRestricted(ctx)
	// Read-modify-update so concurrent patches do not clobber each other.
	err := database.ReadModifyUpdate(api.Database, func(tx database.Store) error {
		existing, err := api.IDPSync.RoleSyncSettings(sysCtx, org.ID, tx)
		if err != nil {
			return err
		}
		aReq.Old = *existing

		// Only the mapping changes; Field is carried over.
		newMapping := applyIDPSyncMappingDiff(existing.Mapping, req.Add, req.Remove)
		settings = idpsync.RoleSyncSettings{
			Field:   existing.Field,
			Mapping: newMapping,
		}

		err = api.IDPSync.UpdateRoleSyncSettings(sysCtx, org.ID, tx, settings)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		httpapi.InternalServerError(rw, err)
		return
	}

	aReq.New = settings
	// Echo the stored settings back to the client.
	httpapi.Write(ctx, rw, http.StatusOK, codersdk.RoleSyncSettings{
		Field:   settings.Field,
		Mapping: settings.Mapping,
	})
}
// @Summary Get organization IdP Sync settings
// @ID get-organization-idp-sync-settings
// @Security CoderSessionToken
@@ -292,7 +575,7 @@ func (api *API) patchOrganizationIDPSyncSettings(rw http.ResponseWriter, r *http
}
aReq.Old = *existing
err = api.IDPSync.UpdateOrganizationSettings(sysCtx, api.Database, idpsync.OrganizationSyncSettings{
err = api.IDPSync.UpdateOrganizationSyncSettings(sysCtx, api.Database, idpsync.OrganizationSyncSettings{
Field: req.Field,
// We do not check if the mappings point to actual organizations.
Mapping: req.Mapping,
@@ -317,6 +600,139 @@ func (api *API) patchOrganizationIDPSyncSettings(rw http.ResponseWriter, r *http
})
}
// @Summary Update organization IdP Sync config
// @ID update-organization-idp-sync-config
// @Security CoderSessionToken
// @Produce json
// @Accept json
// @Tags Enterprise
// @Success 200 {object} codersdk.OrganizationSyncSettings
// @Param request body codersdk.PatchOrganizationIDPSyncConfigRequest true "New config values"
// @Router /settings/idpsync/organization/config [patch]
//
// patchOrganizationIDPSyncConfig updates the deployment-wide organization
// sync field and assign-default flag while preserving the existing mapping.
// The write is audited.
func (api *API) patchOrganizationIDPSyncConfig(rw http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	auditor := *api.AGPL.Auditor.Load()
	// Audit entry for the settings write; Old/New are populated below.
	aReq, commitAudit := audit.InitRequest[idpsync.OrganizationSyncSettings](rw, &audit.RequestParams{
		Audit:   auditor,
		Log:     api.Logger,
		Request: r,
		Action:  database.AuditActionWrite,
	})
	defer commitAudit()

	// Deployment-level settings: authorization is not scoped to an org.
	if !api.Authorize(r, policy.ActionUpdate, rbac.ResourceIdpsyncSettings) {
		httpapi.Forbidden(rw)
		return
	}

	var req codersdk.PatchOrganizationIDPSyncConfigRequest
	if !httpapi.Read(ctx, rw, r, &req) {
		return
	}

	var settings idpsync.OrganizationSyncSettings
	//nolint:gocritic // Requires system context to update runtime config
	sysCtx := dbauthz.AsSystemRestricted(ctx)
	// Read-modify-update so concurrent patches do not clobber each other.
	err := database.ReadModifyUpdate(api.Database, func(tx database.Store) error {
		existing, err := api.IDPSync.OrganizationSyncSettings(sysCtx, tx)
		if err != nil {
			return err
		}
		aReq.Old = *existing

		// Only the config fields come from the request; Mapping is carried
		// over from the existing settings.
		settings = idpsync.OrganizationSyncSettings{
			Field:         req.Field,
			AssignDefault: req.AssignDefault,
			Mapping:       existing.Mapping,
		}

		err = api.IDPSync.UpdateOrganizationSyncSettings(sysCtx, tx, settings)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		httpapi.InternalServerError(rw, err)
		return
	}

	aReq.New = settings
	// Echo the stored settings back to the client.
	httpapi.Write(ctx, rw, http.StatusOK, codersdk.OrganizationSyncSettings{
		Field:         settings.Field,
		Mapping:       settings.Mapping,
		AssignDefault: settings.AssignDefault,
	})
}
// @Summary Update organization IdP Sync mapping
// @ID update-organization-idp-sync-mapping
// @Security CoderSessionToken
// @Produce json
// @Accept json
// @Tags Enterprise
// @Success 200 {object} codersdk.OrganizationSyncSettings
// @Param request body codersdk.PatchOrganizationIDPSyncMappingRequest true "Description of the mappings to add and remove"
// @Router /settings/idpsync/organization/mapping [patch]
//
// patchOrganizationIDPSyncMapping applies an add/remove diff to the
// deployment-wide organization sync mapping, leaving the sync field and
// assign-default flag unchanged. The write is audited.
func (api *API) patchOrganizationIDPSyncMapping(rw http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	auditor := *api.AGPL.Auditor.Load()
	// Audit entry for the settings write; Old/New are populated below.
	aReq, commitAudit := audit.InitRequest[idpsync.OrganizationSyncSettings](rw, &audit.RequestParams{
		Audit:   auditor,
		Log:     api.Logger,
		Request: r,
		Action:  database.AuditActionWrite,
	})
	defer commitAudit()

	// Deployment-level settings: authorization is not scoped to an org.
	if !api.Authorize(r, policy.ActionUpdate, rbac.ResourceIdpsyncSettings) {
		httpapi.Forbidden(rw)
		return
	}

	var req codersdk.PatchOrganizationIDPSyncMappingRequest
	if !httpapi.Read(ctx, rw, r, &req) {
		return
	}

	var settings idpsync.OrganizationSyncSettings
	//nolint:gocritic // Requires system context to update runtime config
	sysCtx := dbauthz.AsSystemRestricted(ctx)
	// Read-modify-update so concurrent patches do not clobber each other.
	err := database.ReadModifyUpdate(api.Database, func(tx database.Store) error {
		existing, err := api.IDPSync.OrganizationSyncSettings(sysCtx, tx)
		if err != nil {
			return err
		}
		aReq.Old = *existing

		// Only the mapping changes; Field and AssignDefault are carried over.
		newMapping := applyIDPSyncMappingDiff(existing.Mapping, req.Add, req.Remove)
		settings = idpsync.OrganizationSyncSettings{
			Field:         existing.Field,
			Mapping:       newMapping,
			AssignDefault: existing.AssignDefault,
		}

		err = api.IDPSync.UpdateOrganizationSyncSettings(sysCtx, tx, settings)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		httpapi.InternalServerError(rw, err)
		return
	}

	aReq.New = settings
	// Echo the stored settings back to the client.
	httpapi.Write(ctx, rw, http.StatusOK, codersdk.OrganizationSyncSettings{
		Field:         settings.Field,
		Mapping:       settings.Mapping,
		AssignDefault: settings.AssignDefault,
	})
}
// @Summary Get the available organization idp sync claim fields
// @ID get-the-available-organization-idp-sync-claim-fields
// @Security CoderSessionToken
@@ -423,3 +839,31 @@ func (api *API) idpSyncClaimFieldValues(orgID uuid.UUID, rw http.ResponseWriter,
httpapi.Write(ctx, rw, http.StatusOK, fieldValues)
}
// applyIDPSyncMappingDiff computes a new mapping from an existing one plus a
// set of additions and removals. Duplicate additions are ignored, and removals
// take priority over additions: an entry present in both lists ends up
// removed. The input map is never mutated; a fresh map is returned.
func applyIDPSyncMappingDiff[IDType uuid.UUID | string](
	previous map[string][]IDType,
	add, remove []codersdk.IDPSyncMapping[IDType],
) map[string][]IDType {
	// Pre-size to the existing key count; additions rarely add many new keys.
	next := make(map[string][]IDType, len(previous))

	// Copy the existing mapping so the caller's map is left untouched.
	for key, ids := range previous {
		next[key] = append(next[key], ids...)
	}

	// Add entries, skipping any that are already present. Use the stdlib
	// slices package for consistency with slices.DeleteFunc below.
	for _, mapping := range add {
		if !slices.Contains(next[mapping.Given], mapping.Gets) {
			next[mapping.Given] = append(next[mapping.Given], mapping.Gets)
		}
	}

	// Remove entries last so that removals win over additions.
	for _, mapping := range remove {
		next[mapping.Given] = slices.DeleteFunc(next[mapping.Given], func(u IDType) bool {
			return u == mapping.Gets
		})
	}

	return next
}
+117
View File
@@ -0,0 +1,117 @@
package coderd
import (
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"github.com/coder/coder/v2/codersdk"
)
// TestApplyIDPSyncMappingDiff exercises applyIDPSyncMappingDiff for both of
// its supported ID types (uuid.UUID and string), covering: deduplicated adds,
// remove-beats-add priority, and applying a second diff on top of a previous
// result.
func TestApplyIDPSyncMappingDiff(t *testing.T) {
	t.Parallel()

	t.Run("with UUIDs", func(t *testing.T) {
		t.Parallel()

		// Fixed UUIDs with distinct leading bytes are easier to visually
		// diff in failure output than random ones.
		id := []uuid.UUID{
			uuid.MustParse("00000000-b8bd-46bb-bb6c-6c2b2c0dd2ea"),
			uuid.MustParse("01000000-fbe8-464c-9429-fe01a03f3644"),
			uuid.MustParse("02000000-0926-407b-9998-39af62e3d0c5"),
			uuid.MustParse("03000000-92f6-4bfd-bba6-0f54667b131c"),
		}

		// First diff starts from an empty mapping.
		mapping := applyIDPSyncMappingDiff(map[string][]uuid.UUID{},
			[]codersdk.IDPSyncMapping[uuid.UUID]{
				{Given: "wibble", Gets: id[0]},
				{Given: "wibble", Gets: id[1]},
				{Given: "wobble", Gets: id[0]},
				{Given: "wobble", Gets: id[1]},
				{Given: "wobble", Gets: id[2]},
				{Given: "wobble", Gets: id[3]},
				{Given: "wooble", Gets: id[0]},
			},
			// Remove takes priority over Add, so `3` should not actually be added.
			[]codersdk.IDPSyncMapping[uuid.UUID]{
				{Given: "wobble", Gets: id[3]},
			},
		)

		expected := map[string][]uuid.UUID{
			"wibble": {id[0], id[1]},
			"wobble": {id[0], id[1], id[2]},
			"wooble": {id[0]},
		}

		require.Equal(t, expected, mapping)

		// Second diff is applied on top of the previous result.
		mapping = applyIDPSyncMappingDiff(mapping,
			[]codersdk.IDPSyncMapping[uuid.UUID]{
				{Given: "wibble", Gets: id[2]},
				{Given: "wobble", Gets: id[3]},
				{Given: "wooble", Gets: id[0]},
			},
			[]codersdk.IDPSyncMapping[uuid.UUID]{
				{Given: "wibble", Gets: id[0]},
				{Given: "wobble", Gets: id[1]},
			},
		)

		expected = map[string][]uuid.UUID{
			"wibble": {id[1], id[2]},
			"wobble": {id[0], id[2], id[3]},
			"wooble": {id[0]},
		}

		require.Equal(t, expected, mapping)
	})

	t.Run("with strings", func(t *testing.T) {
		t.Parallel()

		// Same scenario as above, instantiated with string IDs.
		mapping := applyIDPSyncMappingDiff(map[string][]string{},
			[]codersdk.IDPSyncMapping[string]{
				{Given: "wibble", Gets: "group-00"},
				{Given: "wibble", Gets: "group-01"},
				{Given: "wobble", Gets: "group-00"},
				{Given: "wobble", Gets: "group-01"},
				{Given: "wobble", Gets: "group-02"},
				{Given: "wobble", Gets: "group-03"},
				{Given: "wooble", Gets: "group-00"},
			},
			// Remove takes priority over Add, so `3` should not actually be added.
			[]codersdk.IDPSyncMapping[string]{
				{Given: "wobble", Gets: "group-03"},
			},
		)

		expected := map[string][]string{
			"wibble": {"group-00", "group-01"},
			"wobble": {"group-00", "group-01", "group-02"},
			"wooble": {"group-00"},
		}

		require.Equal(t, expected, mapping)

		// Second diff is applied on top of the previous result.
		mapping = applyIDPSyncMappingDiff(mapping,
			[]codersdk.IDPSyncMapping[string]{
				{Given: "wibble", Gets: "group-02"},
				{Given: "wobble", Gets: "group-03"},
				{Given: "wooble", Gets: "group-00"},
			},
			[]codersdk.IDPSyncMapping[string]{
				{Given: "wibble", Gets: "group-00"},
				{Given: "wobble", Gets: "group-01"},
			},
		)

		expected = map[string][]string{
			"wibble": {"group-01", "group-02"},
			"wobble": {"group-00", "group-02", "group-03"},
			"wooble": {"group-00"},
		}

		require.Equal(t, expected, mapping)
	})
}
+548 -4
View File
@@ -5,6 +5,7 @@ import (
"regexp"
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"github.com/coder/coder/v2/coderd/coderdtest"
@@ -19,7 +20,7 @@ import (
"github.com/coder/serpent"
)
func TestGetGroupSyncConfig(t *testing.T) {
func TestGetGroupSyncSettings(t *testing.T) {
t.Parallel()
t.Run("OK", func(t *testing.T) {
@@ -82,7 +83,7 @@ func TestGetGroupSyncConfig(t *testing.T) {
})
}
func TestPostGroupSyncConfig(t *testing.T) {
func TestPatchGroupSyncSettings(t *testing.T) {
t.Parallel()
t.Run("OK", func(t *testing.T) {
@@ -140,7 +141,172 @@ func TestPostGroupSyncConfig(t *testing.T) {
})
}
func TestGetRoleSyncConfig(t *testing.T) {
// TestPatchGroupSyncConfig verifies the PATCH group IdP sync config endpoint:
// an org admin can replace config fields while the mapping is preserved, and
// a plain member is rejected with 403.
func TestPatchGroupSyncConfig(t *testing.T) {
	t.Parallel()

	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		owner, user := coderdenttest.New(t, &coderdenttest.Options{
			LicenseOptions: &coderdenttest.LicenseOptions{
				Features: license.Features{
					codersdk.FeatureCustomRoles:           1,
					codersdk.FeatureMultipleOrganizations: 1,
				},
			},
		})

		orgID := user.OrganizationID
		orgAdmin, _ := coderdtest.CreateAnotherUser(t, owner, orgID, rbac.ScopedRoleOrgAdmin(user.OrganizationID))
		mapping := map[string][]uuid.UUID{"wibble": {uuid.New()}}

		// Seed full settings first so we can verify the config patch keeps
		// the mapping intact.
		ctx := testutil.Context(t, testutil.WaitShort)
		_, err := orgAdmin.PatchGroupIDPSyncSettings(ctx, orgID.String(), codersdk.GroupSyncSettings{
			Field:             "wibble",
			RegexFilter:       regexp.MustCompile("wib{2,}le"),
			AutoCreateMissing: false,
			Mapping:           mapping,
		})
		require.NoError(t, err)

		fetchedSettings, err := orgAdmin.GroupIDPSyncSettings(ctx, orgID.String())
		require.NoError(t, err)
		require.Equal(t, "wibble", fetchedSettings.Field)
		require.Equal(t, "wib{2,}le", fetchedSettings.RegexFilter.String())
		require.Equal(t, false, fetchedSettings.AutoCreateMissing)
		require.Equal(t, mapping, fetchedSettings.Mapping)

		// Patch only the config; the mapping must survive unchanged.
		ctx = testutil.Context(t, testutil.WaitShort)
		settings, err := orgAdmin.PatchGroupIDPSyncConfig(ctx, orgID.String(), codersdk.PatchGroupIDPSyncConfigRequest{
			Field:             "wobble",
			RegexFilter:       regexp.MustCompile("wob{2,}le"),
			AutoCreateMissing: true,
		})
		require.NoError(t, err)
		require.Equal(t, "wobble", settings.Field)
		require.Equal(t, "wob{2,}le", settings.RegexFilter.String())
		require.Equal(t, true, settings.AutoCreateMissing)
		require.Equal(t, mapping, settings.Mapping)

		// Re-fetch to confirm the patched values were persisted.
		fetchedSettings, err = orgAdmin.GroupIDPSyncSettings(ctx, orgID.String())
		require.NoError(t, err)
		require.Equal(t, "wobble", fetchedSettings.Field)
		require.Equal(t, "wob{2,}le", fetchedSettings.RegexFilter.String())
		require.Equal(t, true, fetchedSettings.AutoCreateMissing)
		require.Equal(t, mapping, fetchedSettings.Mapping)
	})

	t.Run("NotAuthorized", func(t *testing.T) {
		t.Parallel()

		owner, user := coderdenttest.New(t, &coderdenttest.Options{
			LicenseOptions: &coderdenttest.LicenseOptions{
				Features: license.Features{
					codersdk.FeatureCustomRoles:           1,
					codersdk.FeatureMultipleOrganizations: 1,
				},
			},
		})

		// A regular org member lacks ActionUpdate on IdP sync settings.
		member, _ := coderdtest.CreateAnotherUser(t, owner, user.OrganizationID)

		ctx := testutil.Context(t, testutil.WaitShort)
		_, err := member.PatchGroupIDPSyncConfig(ctx, user.OrganizationID.String(), codersdk.PatchGroupIDPSyncConfigRequest{})
		var apiError *codersdk.Error
		require.ErrorAs(t, err, &apiError)
		require.Equal(t, http.StatusForbidden, apiError.StatusCode())
	})
}
// TestPatchGroupSyncMapping verifies that an org admin can incrementally
// add/remove group IdP sync mapping entries, and that a plain member is
// rejected with 403 Forbidden.
func TestPatchGroupSyncMapping(t *testing.T) {
	t.Parallel()

	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		owner, user := coderdenttest.New(t, &coderdenttest.Options{
			LicenseOptions: &coderdenttest.LicenseOptions{
				Features: license.Features{
					codersdk.FeatureCustomRoles:           1,
					codersdk.FeatureMultipleOrganizations: 1,
				},
			},
		})
		orgID := user.OrganizationID
		orgAdmin, _ := coderdtest.CreateAnotherUser(t, owner, orgID, rbac.ScopedRoleOrgAdmin(user.OrganizationID))

		// Deterministic IDs produce readable diffs when an assertion fails,
		// unlike truly random ones.
		orgs := []uuid.UUID{
			uuid.MustParse("00000000-b8bd-46bb-bb6c-6c2b2c0dd2ea"),
			uuid.MustParse("01000000-fbe8-464c-9429-fe01a03f3644"),
			uuid.MustParse("02000000-0926-407b-9998-39af62e3d0c5"),
		}

		// Seed the settings with one existing mapping entry.
		ctx := testutil.Context(t, testutil.WaitShort)
		_, err := orgAdmin.PatchGroupIDPSyncSettings(ctx, orgID.String(), codersdk.GroupSyncSettings{
			Field:             "wibble",
			RegexFilter:       regexp.MustCompile("wib{2,}le"),
			AutoCreateMissing: true,
			Mapping:           map[string][]uuid.UUID{"wobble": {orgs[0]}},
		})
		require.NoError(t, err)

		ctx = testutil.Context(t, testutil.WaitShort)
		patched, err := orgAdmin.PatchGroupIDPSyncMapping(ctx, orgID.String(), codersdk.PatchGroupIDPSyncMappingRequest{
			Add: []codersdk.IDPSyncMapping[uuid.UUID]{
				{Given: "wibble", Gets: orgs[0]},
				{Given: "wobble", Gets: orgs[1]},
				{Given: "wobble", Gets: orgs[2]},
			},
			// Remove takes priority over Add, so orgs[1] must not end up
			// mapped under "wobble".
			Remove: []codersdk.IDPSyncMapping[uuid.UUID]{
				{Given: "wobble", Gets: orgs[1]},
			},
		})
		require.NoError(t, err)

		want := map[string][]uuid.UUID{
			"wibble": {orgs[0]},
			"wobble": {orgs[0], orgs[2]},
		}
		require.Equal(t, want, patched.Mapping)

		// A fresh GET must reflect the patched mapping and leave the other
		// fields untouched.
		fetched, err := orgAdmin.GroupIDPSyncSettings(ctx, orgID.String())
		require.NoError(t, err)
		require.Equal(t, "wibble", fetched.Field)
		require.Equal(t, "wib{2,}le", fetched.RegexFilter.String())
		require.Equal(t, true, fetched.AutoCreateMissing)
		require.Equal(t, want, fetched.Mapping)
	})

	t.Run("NotAuthorized", func(t *testing.T) {
		t.Parallel()

		owner, user := coderdenttest.New(t, &coderdenttest.Options{
			LicenseOptions: &coderdenttest.LicenseOptions{
				Features: license.Features{
					codersdk.FeatureCustomRoles:           1,
					codersdk.FeatureMultipleOrganizations: 1,
				},
			},
		})
		member, _ := coderdtest.CreateAnotherUser(t, owner, user.OrganizationID)

		ctx := testutil.Context(t, testutil.WaitShort)
		_, err := member.PatchGroupIDPSyncMapping(ctx, user.OrganizationID.String(), codersdk.PatchGroupIDPSyncMappingRequest{})
		var apiError *codersdk.Error
		require.ErrorAs(t, err, &apiError)
		require.Equal(t, http.StatusForbidden, apiError.StatusCode())
	})
}
func TestGetRoleSyncSettings(t *testing.T) {
t.Parallel()
t.Run("OK", func(t *testing.T) {
@@ -174,7 +340,7 @@ func TestGetRoleSyncConfig(t *testing.T) {
})
}
func TestPostRoleSyncConfig(t *testing.T) {
func TestPatchRoleSyncSettings(t *testing.T) {
t.Parallel()
t.Run("OK", func(t *testing.T) {
@@ -231,3 +397,381 @@ func TestPostRoleSyncConfig(t *testing.T) {
require.Equal(t, http.StatusForbidden, apiError.StatusCode())
})
}
// TestPatchRoleSyncConfig verifies that an org admin can update the Field of
// the role IdP sync settings through the config endpoint without clobbering
// the existing Mapping, and that a plain member receives 403 Forbidden.
func TestPatchRoleSyncConfig(t *testing.T) {
	t.Parallel()

	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		owner, user := coderdenttest.New(t, &coderdenttest.Options{
			LicenseOptions: &coderdenttest.LicenseOptions{
				Features: license.Features{
					codersdk.FeatureCustomRoles:           1,
					codersdk.FeatureMultipleOrganizations: 1,
				},
			},
		})
		orgID := user.OrganizationID
		orgAdmin, _ := coderdtest.CreateAnotherUser(t, owner, orgID, rbac.ScopedRoleOrgAdmin(user.OrganizationID))

		mapping := map[string][]string{"wibble": {"group-01"}}

		// Seed full settings first so we can observe that the config endpoint
		// changes Field but leaves Mapping intact.
		ctx := testutil.Context(t, testutil.WaitShort)
		_, err := orgAdmin.PatchRoleIDPSyncSettings(ctx, orgID.String(), codersdk.RoleSyncSettings{
			Field:   "wibble",
			Mapping: mapping,
		})
		require.NoError(t, err)

		fetchedSettings, err := orgAdmin.RoleIDPSyncSettings(ctx, orgID.String())
		require.NoError(t, err)
		require.Equal(t, "wibble", fetchedSettings.Field)
		require.Equal(t, mapping, fetchedSettings.Mapping)

		ctx = testutil.Context(t, testutil.WaitShort)
		settings, err := orgAdmin.PatchRoleIDPSyncConfig(ctx, orgID.String(), codersdk.PatchRoleIDPSyncConfigRequest{
			Field: "wobble",
		})
		require.NoError(t, err)
		require.Equal(t, "wobble", settings.Field)
		require.Equal(t, mapping, settings.Mapping)

		fetchedSettings, err = orgAdmin.RoleIDPSyncSettings(ctx, orgID.String())
		require.NoError(t, err)
		require.Equal(t, "wobble", fetchedSettings.Field)
		require.Equal(t, mapping, fetchedSettings.Mapping)
	})

	t.Run("NotAuthorized", func(t *testing.T) {
		t.Parallel()

		owner, user := coderdenttest.New(t, &coderdenttest.Options{
			LicenseOptions: &coderdenttest.LicenseOptions{
				Features: license.Features{
					codersdk.FeatureCustomRoles:           1,
					codersdk.FeatureMultipleOrganizations: 1,
				},
			},
		})
		member, _ := coderdtest.CreateAnotherUser(t, owner, user.OrganizationID)

		ctx := testutil.Context(t, testutil.WaitShort)
		// Previously this called PatchGroupIDPSyncConfig (copy-paste from the
		// group test), so the role config endpoint's authorization was never
		// exercised. It must hit the role endpoint.
		_, err := member.PatchRoleIDPSyncConfig(ctx, user.OrganizationID.String(), codersdk.PatchRoleIDPSyncConfigRequest{})
		var apiError *codersdk.Error
		require.ErrorAs(t, err, &apiError)
		require.Equal(t, http.StatusForbidden, apiError.StatusCode())
	})
}
// TestPatchRoleSyncMapping verifies that an org admin can incrementally
// add/remove role IdP sync mapping entries, and that a plain member receives
// 403 Forbidden from the role mapping endpoint.
func TestPatchRoleSyncMapping(t *testing.T) {
	t.Parallel()

	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		owner, user := coderdenttest.New(t, &coderdenttest.Options{
			LicenseOptions: &coderdenttest.LicenseOptions{
				Features: license.Features{
					codersdk.FeatureCustomRoles:           1,
					codersdk.FeatureMultipleOrganizations: 1,
				},
			},
		})
		orgID := user.OrganizationID
		orgAdmin, _ := coderdtest.CreateAnotherUser(t, owner, orgID, rbac.ScopedRoleOrgAdmin(user.OrganizationID))

		// Seed the settings with one existing mapping entry.
		ctx := testutil.Context(t, testutil.WaitShort)
		_, err := orgAdmin.PatchRoleIDPSyncSettings(ctx, orgID.String(), codersdk.RoleSyncSettings{
			Field:   "wibble",
			Mapping: map[string][]string{"wobble": {"group-00"}},
		})
		require.NoError(t, err)

		ctx = testutil.Context(t, testutil.WaitShort)
		settings, err := orgAdmin.PatchRoleIDPSyncMapping(ctx, orgID.String(), codersdk.PatchRoleIDPSyncMappingRequest{
			Add: []codersdk.IDPSyncMapping[string]{
				{Given: "wibble", Gets: "group-00"},
				{Given: "wobble", Gets: "group-01"},
				{Given: "wobble", Gets: "group-02"},
			},
			// Remove takes priority over Add, so "group-01" must not end up
			// mapped under "wobble".
			Remove: []codersdk.IDPSyncMapping[string]{
				{Given: "wobble", Gets: "group-01"},
			},
		})
		expected := map[string][]string{
			"wibble": {"group-00"},
			"wobble": {"group-00", "group-02"},
		}
		require.NoError(t, err)
		require.Equal(t, expected, settings.Mapping)

		fetchedSettings, err := orgAdmin.RoleIDPSyncSettings(ctx, orgID.String())
		require.NoError(t, err)
		require.Equal(t, "wibble", fetchedSettings.Field)
		require.Equal(t, expected, fetchedSettings.Mapping)
	})

	t.Run("NotAuthorized", func(t *testing.T) {
		t.Parallel()

		owner, user := coderdenttest.New(t, &coderdenttest.Options{
			LicenseOptions: &coderdenttest.LicenseOptions{
				Features: license.Features{
					codersdk.FeatureCustomRoles:           1,
					codersdk.FeatureMultipleOrganizations: 1,
				},
			},
		})
		member, _ := coderdtest.CreateAnotherUser(t, owner, user.OrganizationID)

		ctx := testutil.Context(t, testutil.WaitShort)
		// Previously this called PatchGroupIDPSyncMapping (copy-paste from the
		// group test), so the role mapping endpoint's authorization was never
		// exercised. It must hit the role endpoint.
		_, err := member.PatchRoleIDPSyncMapping(ctx, user.OrganizationID.String(), codersdk.PatchRoleIDPSyncMappingRequest{})
		var apiError *codersdk.Error
		require.ErrorAs(t, err, &apiError)
		require.Equal(t, http.StatusForbidden, apiError.StatusCode())
	})
}
// TestGetOrganizationSyncSettings verifies that organization IdP sync
// settings written by the owner are returned unchanged by the GET endpoint.
func TestGetOrganizationSyncSettings(t *testing.T) {
	t.Parallel()

	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		owner, _, _, user := coderdenttest.NewWithAPI(t, &coderdenttest.Options{
			LicenseOptions: &coderdenttest.LicenseOptions{
				Features: license.Features{
					codersdk.FeatureCustomRoles:           1,
					codersdk.FeatureMultipleOrganizations: 1,
				},
			},
		})

		wantMapping := map[string][]uuid.UUID{"foo": {user.OrganizationID}}

		// Write the settings, checking the PATCH response echoes them back.
		ctx := testutil.Context(t, testutil.WaitShort)
		patched, err := owner.PatchOrganizationIDPSyncSettings(ctx, codersdk.OrganizationSyncSettings{
			Field:   "august",
			Mapping: wantMapping,
		})
		require.NoError(t, err)
		require.Equal(t, "august", patched.Field)
		require.Equal(t, wantMapping, patched.Mapping)

		// The GET endpoint must return exactly what was written.
		fetched, err := owner.OrganizationIDPSyncSettings(ctx)
		require.NoError(t, err)
		require.Equal(t, "august", fetched.Field)
		require.Equal(t, wantMapping, fetched.Mapping)
	})
}
// TestPatchOrganizationSyncSettings verifies that the owner can update
// organization IdP sync settings and that a plain member is rejected with
// 403 Forbidden from both the PATCH and GET endpoints.
func TestPatchOrganizationSyncSettings(t *testing.T) {
	t.Parallel()

	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		owner, _ := coderdenttest.New(t, &coderdenttest.Options{
			LicenseOptions: &coderdenttest.LicenseOptions{
				Features: license.Features{
					codersdk.FeatureCustomRoles:           1,
					codersdk.FeatureMultipleOrganizations: 1,
				},
			},
		})

		ctx := testutil.Context(t, testutil.WaitShort)
		//nolint:gocritic // Only owners can change Organization IdP sync settings
		settings, err := owner.PatchOrganizationIDPSyncSettings(ctx, codersdk.OrganizationSyncSettings{
			Field: "august",
		})
		require.NoError(t, err)
		require.Equal(t, "august", settings.Field)

		fetchedSettings, err := owner.OrganizationIDPSyncSettings(ctx)
		require.NoError(t, err)
		require.Equal(t, "august", fetchedSettings.Field)
	})

	t.Run("NotAuthorized", func(t *testing.T) {
		t.Parallel()

		owner, user := coderdenttest.New(t, &coderdenttest.Options{
			LicenseOptions: &coderdenttest.LicenseOptions{
				Features: license.Features{
					codersdk.FeatureCustomRoles:           1,
					codersdk.FeatureMultipleOrganizations: 1,
				},
			},
		})
		member, _ := coderdtest.CreateAnotherUser(t, owner, user.OrganizationID)

		ctx := testutil.Context(t, testutil.WaitShort)
		// Previously this subtest exercised the *role* sync endpoints
		// (PatchRoleIDPSyncSettings / RoleIDPSyncSettings), a copy-paste
		// artifact; it must target the organization sync endpoints that this
		// test is named for.
		_, err := member.PatchOrganizationIDPSyncSettings(ctx, codersdk.OrganizationSyncSettings{
			Field: "august",
		})
		var apiError *codersdk.Error
		require.ErrorAs(t, err, &apiError)
		require.Equal(t, http.StatusForbidden, apiError.StatusCode())

		_, err = member.OrganizationIDPSyncSettings(ctx)
		require.ErrorAs(t, err, &apiError)
		require.Equal(t, http.StatusForbidden, apiError.StatusCode())
	})
}
// TestPatchOrganizationSyncConfig verifies that the config endpoint replaces
// Field and AssignDefault while leaving the Mapping untouched, and that a
// plain member is rejected with 403 Forbidden.
func TestPatchOrganizationSyncConfig(t *testing.T) {
	t.Parallel()

	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		owner, user := coderdenttest.New(t, &coderdenttest.Options{
			LicenseOptions: &coderdenttest.LicenseOptions{
				Features: license.Features{
					codersdk.FeatureCustomRoles:           1,
					codersdk.FeatureMultipleOrganizations: 1,
				},
			},
		})

		wantMapping := map[string][]uuid.UUID{"wibble": {user.OrganizationID}}

		// Seed full settings so we can observe which fields the config
		// endpoint overwrites.
		ctx := testutil.Context(t, testutil.WaitShort)
		//nolint:gocritic // Only owners can change Organization IdP sync settings
		_, err := owner.PatchOrganizationIDPSyncSettings(ctx, codersdk.OrganizationSyncSettings{
			Field:         "wibble",
			AssignDefault: true,
			Mapping:       wantMapping,
		})
		require.NoError(t, err)

		fetched, err := owner.OrganizationIDPSyncSettings(ctx)
		require.NoError(t, err)
		require.Equal(t, "wibble", fetched.Field)
		require.Equal(t, true, fetched.AssignDefault)
		require.Equal(t, wantMapping, fetched.Mapping)

		// PATCH the config: Field changes, AssignDefault resets to its zero
		// value (not sent in the request), Mapping is preserved.
		ctx = testutil.Context(t, testutil.WaitShort)
		patched, err := owner.PatchOrganizationIDPSyncConfig(ctx, codersdk.PatchOrganizationIDPSyncConfigRequest{
			Field: "wobble",
		})
		require.NoError(t, err)
		require.Equal(t, "wobble", patched.Field)
		require.Equal(t, false, patched.AssignDefault)
		require.Equal(t, wantMapping, patched.Mapping)

		fetched, err = owner.OrganizationIDPSyncSettings(ctx)
		require.NoError(t, err)
		require.Equal(t, "wobble", fetched.Field)
		require.Equal(t, false, fetched.AssignDefault)
		require.Equal(t, wantMapping, fetched.Mapping)
	})

	t.Run("NotAuthorized", func(t *testing.T) {
		t.Parallel()

		owner, user := coderdenttest.New(t, &coderdenttest.Options{
			LicenseOptions: &coderdenttest.LicenseOptions{
				Features: license.Features{
					codersdk.FeatureCustomRoles:           1,
					codersdk.FeatureMultipleOrganizations: 1,
				},
			},
		})
		member, _ := coderdtest.CreateAnotherUser(t, owner, user.OrganizationID)

		ctx := testutil.Context(t, testutil.WaitShort)
		_, err := member.PatchOrganizationIDPSyncConfig(ctx, codersdk.PatchOrganizationIDPSyncConfigRequest{})
		var apiError *codersdk.Error
		require.ErrorAs(t, err, &apiError)
		require.Equal(t, http.StatusForbidden, apiError.StatusCode())
	})
}
// TestPatchOrganizationSyncMapping verifies incremental add/remove of
// organization IdP sync mapping entries by the owner, and that a plain
// member is rejected with 403 Forbidden.
func TestPatchOrganizationSyncMapping(t *testing.T) {
	t.Parallel()

	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		owner, _ := coderdenttest.New(t, &coderdenttest.Options{
			LicenseOptions: &coderdenttest.LicenseOptions{
				Features: license.Features{
					codersdk.FeatureCustomRoles:           1,
					codersdk.FeatureMultipleOrganizations: 1,
				},
			},
		})

		// Deterministic IDs produce readable diffs when an assertion fails,
		// unlike truly random ones.
		orgs := []uuid.UUID{
			uuid.MustParse("00000000-b8bd-46bb-bb6c-6c2b2c0dd2ea"),
			uuid.MustParse("01000000-fbe8-464c-9429-fe01a03f3644"),
			uuid.MustParse("02000000-0926-407b-9998-39af62e3d0c5"),
		}

		ctx := testutil.Context(t, testutil.WaitShort)
		//nolint:gocritic // Only owners can change Organization IdP sync settings
		patched, err := owner.PatchOrganizationIDPSyncMapping(ctx, codersdk.PatchOrganizationIDPSyncMappingRequest{
			Add: []codersdk.IDPSyncMapping[uuid.UUID]{
				{Given: "wibble", Gets: orgs[0]},
				{Given: "wobble", Gets: orgs[0]},
				{Given: "wobble", Gets: orgs[1]},
				{Given: "wobble", Gets: orgs[2]},
			},
			// orgs[1] appears in both Add and Remove; Remove wins.
			Remove: []codersdk.IDPSyncMapping[uuid.UUID]{
				{Given: "wobble", Gets: orgs[1]},
			},
		})
		require.NoError(t, err)

		want := map[string][]uuid.UUID{
			"wibble": {orgs[0]},
			"wobble": {orgs[0], orgs[2]},
		}
		require.Equal(t, want, patched.Mapping)

		// A fresh GET must reflect the patched mapping.
		fetched, err := owner.OrganizationIDPSyncSettings(ctx)
		require.NoError(t, err)
		require.Equal(t, want, fetched.Mapping)
	})

	t.Run("NotAuthorized", func(t *testing.T) {
		t.Parallel()

		owner, user := coderdenttest.New(t, &coderdenttest.Options{
			LicenseOptions: &coderdenttest.LicenseOptions{
				Features: license.Features{
					codersdk.FeatureCustomRoles:           1,
					codersdk.FeatureMultipleOrganizations: 1,
				},
			},
		})
		member, _ := coderdtest.CreateAnotherUser(t, owner, user.OrganizationID)

		ctx := testutil.Context(t, testutil.WaitShort)
		_, err := member.PatchOrganizationIDPSyncMapping(ctx, codersdk.PatchOrganizationIDPSyncMappingRequest{})
		var apiError *codersdk.Error
		require.ErrorAs(t, err, &apiError)
		require.Equal(t, http.StatusForbidden, apiError.StatusCode())
	})
}
+5
View File
@@ -24,6 +24,7 @@ import (
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/httpmw"
"github.com/coder/coder/v2/coderd/httpmw/loggermw"
"github.com/coder/coder/v2/coderd/provisionerdserver"
"github.com/coder/coder/v2/coderd/rbac"
"github.com/coder/coder/v2/coderd/rbac/policy"
@@ -381,6 +382,10 @@ func (api *API) provisionerDaemonServe(rw http.ResponseWriter, r *http.Request)
logger.Debug(ctx, "drpc server error", slog.Error(err))
},
})
// Log the request immediately instead of after it completes.
loggermw.RequestLoggerFromContext(ctx).WriteLog(ctx, http.StatusAccepted)
err = server.Serve(ctx, session)
srvCancel()
logger.Info(ctx, "provisioner daemon disconnected", slog.Error(err))
+2 -1
View File
@@ -32,6 +32,7 @@ import (
"github.com/coder/coder/v2/coderd/cryptokeys"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/coderd/httpmw"
"github.com/coder/coder/v2/coderd/httpmw/loggermw"
"github.com/coder/coder/v2/coderd/tracing"
"github.com/coder/coder/v2/coderd/workspaceapps"
"github.com/coder/coder/v2/codersdk"
@@ -336,7 +337,7 @@ func New(ctx context.Context, opts *Options) (*Server, error) {
tracing.Middleware(s.TracerProvider),
httpmw.AttachRequestID,
httpmw.ExtractRealIP(s.Options.RealIPConfig),
httpmw.Logger(s.Logger),
loggermw.Logger(s.Logger),
prometheusMW,
corsMW,
+1 -1
View File
@@ -88,7 +88,7 @@ require (
github.com/chromedp/chromedp v0.11.0
github.com/cli/safeexec v1.0.1
github.com/coder/flog v1.1.0
github.com/coder/guts v1.0.0
github.com/coder/guts v1.0.1
github.com/coder/pretty v0.0.0-20230908205945-e89ba86370e0
github.com/coder/quartz v0.1.2
github.com/coder/retry v1.5.1
+2 -2
View File
@@ -226,8 +226,8 @@ github.com/coder/go-httpstat v0.0.0-20230801153223-321c88088322 h1:m0lPZjlQ7vdVp
github.com/coder/go-httpstat v0.0.0-20230801153223-321c88088322/go.mod h1:rOLFDDVKVFiDqZFXoteXc97YXx7kFi9kYqR+2ETPkLQ=
github.com/coder/go-scim/pkg/v2 v2.0.0-20230221055123-1d63c1222136 h1:0RgB61LcNs24WOxc3PBvygSNTQurm0PYPujJjLLOzs0=
github.com/coder/go-scim/pkg/v2 v2.0.0-20230221055123-1d63c1222136/go.mod h1:VkD1P761nykiq75dz+4iFqIQIZka189tx1BQLOp0Skc=
github.com/coder/guts v1.0.0 h1:Ba6TBOeED+96Dv8IdISjbGhCzHKicqSc4SEYVV+4zeE=
github.com/coder/guts v1.0.0/go.mod h1:SfmxjDaSfPjzKJ9mGU4sA/1OHU+u66uRfhFF+y4BARQ=
github.com/coder/guts v1.0.1 h1:tU9pW+1jftCSX1eBxnNHiouQBSBJIej3I+kqfjIyeJU=
github.com/coder/guts v1.0.1/go.mod h1:z8LHbF6vwDOXQOReDvay7Rpwp/jHwCZiZwjd6wfLcJg=
github.com/coder/pq v1.10.5-0.20240813183442-0c420cb5a048 h1:3jzYUlGH7ZELIH4XggXhnTnP05FCYiAFeQpoN+gNR5I=
github.com/coder/pq v1.10.5-0.20240813183442-0c420cb5a048/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/coder/pretty v0.0.0-20230908205945-e89ba86370e0 h1:3A0ES21Ke+FxEM8CXx9n47SZOKOpgSE1bbJzlE4qPVs=
+1 -1
View File
@@ -273,7 +273,7 @@ EOF
main() {
MAINLINE=1
STABLE=0
TERRAFORM_VERSION="1.9.8"
TERRAFORM_VERSION="1.10.5"
if [ "${TRACE-}" ]; then
set -x
+2 -2
View File
@@ -20,10 +20,10 @@ var (
// when Terraform is not available on the system.
// NOTE: Keep this in sync with the version in scripts/Dockerfile.base.
// NOTE: Keep this in sync with the version in install.sh.
TerraformVersion = version.Must(version.NewVersion("1.9.8"))
TerraformVersion = version.Must(version.NewVersion("1.10.5"))
minTerraformVersion = version.Must(version.NewVersion("1.1.0"))
maxTerraformVersion = version.Must(version.NewVersion("1.9.9")) // use .9 to automatically allow patch releases
maxTerraformVersion = version.Must(version.NewVersion("1.10.9")) // use .9 to automatically allow patch releases
terraformMinorVersionMismatch = xerrors.New("Terraform binary minor version mismatch.")
)
@@ -2,7 +2,7 @@ terraform {
required_providers {
coder = {
source = "coder/coder"
version = "0.22.0"
version = ">=2.0.0"
}
}
}
+7 -13
View File
@@ -1,6 +1,6 @@
{
"format_version": "1.2",
"terraform_version": "1.9.8",
"terraform_version": "1.10.5",
"planned_values": {
"root_module": {
"resources": [
@@ -10,23 +10,20 @@
"type": "coder_agent",
"name": "main",
"provider_name": "registry.terraform.io/coder/coder",
"schema_version": 0,
"schema_version": 1,
"values": {
"arch": "amd64",
"auth": "token",
"connection_timeout": 120,
"dir": null,
"env": null,
"login_before_ready": true,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"shutdown_script": null,
"shutdown_script_timeout": 300,
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"startup_script_behavior": "non-blocking",
"troubleshooting_url": null
},
"sensitive_values": {
@@ -90,16 +87,13 @@
"connection_timeout": 120,
"dir": null,
"env": null,
"login_before_ready": true,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"shutdown_script": null,
"shutdown_script_timeout": 300,
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"startup_script_behavior": "non-blocking",
"troubleshooting_url": null
},
"after_unknown": {
@@ -177,7 +171,7 @@
"coder": {
"name": "coder",
"full_name": "registry.terraform.io/coder/coder",
"version_constraint": "0.22.0"
"version_constraint": ">= 2.0.0"
},
"module.module:null": {
"name": "null",
@@ -201,7 +195,7 @@
"constant_value": "linux"
}
},
"schema_version": 0
"schema_version": 1
}
],
"module_calls": {
@@ -260,7 +254,7 @@
]
}
],
"timestamp": "2024-10-28T20:07:49Z",
"timestamp": "2025-03-04T19:25:00Z",
"applyable": true,
"complete": true,
"errored": false
+7 -10
View File
@@ -1,6 +1,6 @@
{
"format_version": "1.0",
"terraform_version": "1.9.8",
"terraform_version": "1.10.5",
"values": {
"root_module": {
"resources": [
@@ -10,7 +10,7 @@
"type": "coder_agent",
"name": "main",
"provider_name": "registry.terraform.io/coder/coder",
"schema_version": 0,
"schema_version": 1,
"values": {
"arch": "amd64",
"auth": "token",
@@ -26,19 +26,16 @@
}
],
"env": null,
"id": "04d66dc4-e25a-4f65-af6f-a9af6b907430",
"id": "8632f695-0881-4df5-999c-ff105e2a62a4",
"init_script": "",
"login_before_ready": true,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"shutdown_script": null,
"shutdown_script_timeout": 300,
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"token": "10fbd765-b0cc-4d6f-b5de-e5a036b2cb4b",
"startup_script_behavior": "non-blocking",
"token": "18782be6-3080-42a8-bec4-b2e0cb4caf93",
"troubleshooting_url": null
},
"sensitive_values": {
@@ -69,7 +66,7 @@
"outputs": {
"script": ""
},
"random": "7917595776755902204"
"random": "735568859568633344"
},
"sensitive_values": {
"inputs": {},
@@ -84,7 +81,7 @@
"provider_name": "registry.terraform.io/hashicorp/null",
"schema_version": 0,
"values": {
"id": "2669991968036854745",
"id": "280446487996139212",
"triggers": null
},
"sensitive_values": {},
@@ -2,7 +2,7 @@ terraform {
required_providers {
coder = {
source = "coder/coder"
version = "0.22.0"
version = ">=2.0.0"
}
}
}
@@ -1,6 +1,6 @@
{
"format_version": "1.2",
"terraform_version": "1.9.8",
"terraform_version": "1.10.5",
"planned_values": {
"root_module": {
"resources": [
@@ -10,23 +10,20 @@
"type": "coder_agent",
"name": "main",
"provider_name": "registry.terraform.io/coder/coder",
"schema_version": 0,
"schema_version": 1,
"values": {
"arch": "amd64",
"auth": "token",
"connection_timeout": 120,
"dir": null,
"env": null,
"login_before_ready": true,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"shutdown_script": null,
"shutdown_script_timeout": 300,
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"startup_script_behavior": "non-blocking",
"troubleshooting_url": null
},
"sensitive_values": {
@@ -80,16 +77,13 @@
"connection_timeout": 120,
"dir": null,
"env": null,
"login_before_ready": true,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"shutdown_script": null,
"shutdown_script_timeout": 300,
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"startup_script_behavior": "non-blocking",
"troubleshooting_url": null
},
"after_unknown": {
@@ -155,7 +149,7 @@
"coder": {
"name": "coder",
"full_name": "registry.terraform.io/coder/coder",
"version_constraint": "0.22.0"
"version_constraint": ">= 2.0.0"
},
"null": {
"name": "null",
@@ -178,7 +172,7 @@
"constant_value": "linux"
}
},
"schema_version": 0
"schema_version": 1
},
{
"address": "null_resource.a",
@@ -205,7 +199,7 @@
]
}
},
"timestamp": "2024-10-28T20:07:50Z",
"timestamp": "2025-03-04T19:25:02Z",
"applyable": true,
"complete": true,
"errored": false
@@ -1,6 +1,6 @@
{
"format_version": "1.0",
"terraform_version": "1.9.8",
"terraform_version": "1.10.5",
"values": {
"root_module": {
"resources": [
@@ -10,7 +10,7 @@
"type": "coder_agent",
"name": "main",
"provider_name": "registry.terraform.io/coder/coder",
"schema_version": 0,
"schema_version": 1,
"values": {
"arch": "amd64",
"auth": "token",
@@ -26,19 +26,16 @@
}
],
"env": null,
"id": "bcf4bae1-0870-48e9-8bb4-af2f652c4d54",
"id": "c58b518a-428d-44b2-bfd7-1ac17188c528",
"init_script": "",
"login_before_ready": true,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"shutdown_script": null,
"shutdown_script_timeout": 300,
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"token": "afe98f25-25a2-4892-b921-be04bcd71efc",
"startup_script_behavior": "non-blocking",
"token": "085fd3ad-9462-4f9c-8f0f-05941d6cbc90",
"troubleshooting_url": null
},
"sensitive_values": {
@@ -57,7 +54,7 @@
"provider_name": "registry.terraform.io/hashicorp/null",
"schema_version": 0,
"values": {
"id": "6598177855275264799",
"id": "2593580341963886034",
"triggers": null
},
"sensitive_values": {},
@@ -74,7 +71,7 @@
"provider_name": "registry.terraform.io/hashicorp/null",
"schema_version": 0,
"values": {
"id": "4663187895457986148",
"id": "8775084967398626100",
"triggers": null
},
"sensitive_values": {},
@@ -2,7 +2,7 @@ terraform {
required_providers {
coder = {
source = "coder/coder"
version = "0.22.0"
version = ">=2.0.0"
}
}
}
@@ -1,6 +1,6 @@
{
"format_version": "1.2",
"terraform_version": "1.9.8",
"terraform_version": "1.10.5",
"planned_values": {
"root_module": {
"resources": [
@@ -10,23 +10,20 @@
"type": "coder_agent",
"name": "main",
"provider_name": "registry.terraform.io/coder/coder",
"schema_version": 0,
"schema_version": 1,
"values": {
"arch": "amd64",
"auth": "token",
"connection_timeout": 120,
"dir": null,
"env": null,
"login_before_ready": true,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"shutdown_script": null,
"shutdown_script_timeout": 300,
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"startup_script_behavior": "non-blocking",
"troubleshooting_url": null
},
"sensitive_values": {
@@ -80,16 +77,13 @@
"connection_timeout": 120,
"dir": null,
"env": null,
"login_before_ready": true,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"shutdown_script": null,
"shutdown_script_timeout": 300,
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"startup_script_behavior": "non-blocking",
"troubleshooting_url": null
},
"after_unknown": {
@@ -155,7 +149,7 @@
"coder": {
"name": "coder",
"full_name": "registry.terraform.io/coder/coder",
"version_constraint": "0.22.0"
"version_constraint": ">= 2.0.0"
},
"null": {
"name": "null",
@@ -178,7 +172,7 @@
"constant_value": "linux"
}
},
"schema_version": 0
"schema_version": 1
},
{
"address": "null_resource.first",
@@ -205,7 +199,7 @@
]
}
},
"timestamp": "2024-10-28T20:07:52Z",
"timestamp": "2025-03-04T19:25:04Z",
"applyable": true,
"complete": true,
"errored": false
@@ -1,6 +1,6 @@
{
"format_version": "1.0",
"terraform_version": "1.9.8",
"terraform_version": "1.10.5",
"values": {
"root_module": {
"resources": [
@@ -10,7 +10,7 @@
"type": "coder_agent",
"name": "main",
"provider_name": "registry.terraform.io/coder/coder",
"schema_version": 0,
"schema_version": 1,
"values": {
"arch": "amd64",
"auth": "token",
@@ -26,19 +26,16 @@
}
],
"env": null,
"id": "d047c7b6-b69e-4029-ab82-67468a0364f7",
"id": "bdf2fe69-0a3d-4aac-80c9-0896b0362d81",
"init_script": "",
"login_before_ready": true,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"shutdown_script": null,
"shutdown_script_timeout": 300,
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"token": "ceff37e3-52b9-4c80-af1b-1f9f99184590",
"startup_script_behavior": "non-blocking",
"token": "1428ac88-6dd9-4520-9c5c-0946fec8466b",
"troubleshooting_url": null
},
"sensitive_values": {
@@ -57,7 +54,7 @@
"provider_name": "registry.terraform.io/hashicorp/null",
"schema_version": 0,
"values": {
"id": "3120105803817695206",
"id": "3464200430566318947",
"triggers": null
},
"sensitive_values": {},
@@ -73,7 +70,7 @@
"provider_name": "registry.terraform.io/hashicorp/null",
"schema_version": 0,
"values": {
"id": "2942451035046396496",
"id": "4854441548409483963",
"triggers": null
},
"sensitive_values": {},
@@ -2,7 +2,7 @@ terraform {
required_providers {
coder = {
source = "coder/coder"
version = "0.22.0"
version = ">=2.0.0"
}
}
}
@@ -1,6 +1,6 @@
{
"format_version": "1.2",
"terraform_version": "1.9.8",
"terraform_version": "1.10.5",
"planned_values": {
"root_module": {
"resources": [
@@ -10,7 +10,7 @@
"type": "coder_agent",
"name": "main",
"provider_name": "registry.terraform.io/coder/coder",
"schema_version": 0,
"schema_version": 1,
"values": {
"arch": "amd64",
"auth": "token",
@@ -26,16 +26,13 @@
}
],
"env": null,
"login_before_ready": true,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"shutdown_script": null,
"shutdown_script_timeout": 300,
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"startup_script_behavior": "non-blocking",
"troubleshooting_url": null
},
"sensitive_values": {
@@ -88,16 +85,13 @@
}
],
"env": null,
"login_before_ready": true,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"shutdown_script": null,
"shutdown_script_timeout": 300,
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"startup_script_behavior": "non-blocking",
"troubleshooting_url": null
},
"after_unknown": {
@@ -146,7 +140,7 @@
"coder": {
"name": "coder",
"full_name": "registry.terraform.io/coder/coder",
"version_constraint": "0.22.0"
"version_constraint": ">= 2.0.0"
},
"null": {
"name": "null",
@@ -188,7 +182,7 @@
"constant_value": "linux"
}
},
"schema_version": 0
"schema_version": 1
},
{
"address": "null_resource.dev",
@@ -204,7 +198,7 @@
]
}
},
"timestamp": "2024-10-28T20:07:55Z",
"timestamp": "2025-03-04T19:25:06Z",
"applyable": true,
"complete": true,
"errored": false
@@ -1,6 +1,6 @@
{
"format_version": "1.0",
"terraform_version": "1.9.8",
"terraform_version": "1.10.5",
"values": {
"root_module": {
"resources": [
@@ -10,7 +10,7 @@
"type": "coder_agent",
"name": "main",
"provider_name": "registry.terraform.io/coder/coder",
"schema_version": 0,
"schema_version": 1,
"values": {
"arch": "amd64",
"auth": "token",
@@ -26,19 +26,16 @@
}
],
"env": null,
"id": "6ba13739-4a9c-456f-90cf-feba8f194853",
"id": "a443e5e2-d59e-456d-95ff-b15685a37ebd",
"init_script": "",
"login_before_ready": true,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"shutdown_script": null,
"shutdown_script_timeout": 300,
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"token": "6e348a4c-ef00-40ab-9732-817fb828045c",
"startup_script_behavior": "non-blocking",
"token": "5407b786-d16b-4e64-abfa-75bc641fa6c3",
"troubleshooting_url": null
},
"sensitive_values": {
@@ -57,7 +54,7 @@
"provider_name": "registry.terraform.io/hashicorp/null",
"schema_version": 0,
"values": {
"id": "3123606937441446452",
"id": "1848001870879012103",
"triggers": null
},
"sensitive_values": {},
@@ -2,7 +2,7 @@ terraform {
required_providers {
coder = {
source = "coder/coder"
version = "0.22.0"
version = ">=2.0.0"
}
}
}
+7 -13
View File
@@ -1,6 +1,6 @@
{
"format_version": "1.2",
"terraform_version": "1.9.8",
"terraform_version": "1.10.5",
"planned_values": {
"root_module": {
"resources": [
@@ -10,7 +10,7 @@
"type": "coder_agent",
"name": "main",
"provider_name": "registry.terraform.io/coder/coder",
"schema_version": 0,
"schema_version": 1,
"values": {
"arch": "amd64",
"auth": "token",
@@ -26,16 +26,13 @@
}
],
"env": null,
"login_before_ready": true,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"shutdown_script": null,
"shutdown_script_timeout": 300,
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"startup_script_behavior": "non-blocking",
"troubleshooting_url": null
},
"sensitive_values": {
@@ -88,16 +85,13 @@
}
],
"env": null,
"login_before_ready": true,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"shutdown_script": null,
"shutdown_script_timeout": 300,
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"startup_script_behavior": "non-blocking",
"troubleshooting_url": null
},
"after_unknown": {
@@ -146,7 +140,7 @@
"coder": {
"name": "coder",
"full_name": "registry.terraform.io/coder/coder",
"version_constraint": "0.22.0"
"version_constraint": ">= 2.0.0"
},
"null": {
"name": "null",
@@ -188,7 +182,7 @@
"constant_value": "linux"
}
},
"schema_version": 0
"schema_version": 1
},
{
"address": "null_resource.dev",
@@ -204,7 +198,7 @@
]
}
},
"timestamp": "2024-10-28T20:07:54Z",
"timestamp": "2025-03-04T19:25:08Z",
"applyable": true,
"complete": true,
"errored": false
+6 -9
View File
@@ -1,6 +1,6 @@
{
"format_version": "1.0",
"terraform_version": "1.9.8",
"terraform_version": "1.10.5",
"values": {
"root_module": {
"resources": [
@@ -10,7 +10,7 @@
"type": "coder_agent",
"name": "main",
"provider_name": "registry.terraform.io/coder/coder",
"schema_version": 0,
"schema_version": 1,
"values": {
"arch": "amd64",
"auth": "token",
@@ -26,19 +26,16 @@
}
],
"env": null,
"id": "b7e8dd7a-34aa-41e2-977e-e38577ab2476",
"id": "7399a566-8666-4a7b-a916-5043ea8b5a39",
"init_script": "",
"login_before_ready": true,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"shutdown_script": null,
"shutdown_script_timeout": 300,
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"token": "c6aeeb35-2766-4524-9818-687f7687831d",
"startup_script_behavior": "non-blocking",
"token": "47650163-1c1b-431b-81d6-42991663f53b",
"troubleshooting_url": null
},
"sensitive_values": {
@@ -57,7 +54,7 @@
"provider_name": "registry.terraform.io/hashicorp/null",
"schema_version": 0,
"values": {
"id": "2407243137316459395",
"id": "1536600762010500828",
"triggers": null
},
"sensitive_values": {},
@@ -2,7 +2,7 @@ terraform {
required_providers {
coder = {
source = "coder/coder"
version = "0.22.0"
version = ">=2.0.0"
}
}
}
@@ -1,6 +1,6 @@
{
"format_version": "1.2",
"terraform_version": "1.9.8",
"terraform_version": "1.10.5",
"planned_values": {
"root_module": {
"resources": [
@@ -10,23 +10,20 @@
"type": "coder_agent",
"name": "main",
"provider_name": "registry.terraform.io/coder/coder",
"schema_version": 0,
"schema_version": 1,
"values": {
"arch": "amd64",
"auth": "token",
"connection_timeout": 120,
"dir": null,
"env": null,
"login_before_ready": true,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"shutdown_script": null,
"shutdown_script_timeout": 300,
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"startup_script_behavior": "non-blocking",
"troubleshooting_url": null
},
"sensitive_values": {
@@ -68,16 +65,13 @@
"connection_timeout": 120,
"dir": null,
"env": null,
"login_before_ready": true,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"shutdown_script": null,
"shutdown_script_timeout": 300,
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"startup_script_behavior": "non-blocking",
"troubleshooting_url": null
},
"after_unknown": {
@@ -119,7 +113,7 @@
],
"prior_state": {
"format_version": "1.0",
"terraform_version": "1.9.8",
"terraform_version": "1.10.5",
"values": {
"root_module": {
"resources": [
@@ -160,7 +154,7 @@
"coder": {
"name": "coder",
"full_name": "registry.terraform.io/coder/coder",
"version_constraint": "0.22.0"
"version_constraint": ">= 2.0.0"
},
"null": {
"name": "null",
@@ -183,7 +177,7 @@
"constant_value": "linux"
}
},
"schema_version": 0
"schema_version": 1
},
{
"address": "null_resource.dev",
@@ -228,7 +222,7 @@
]
}
},
"timestamp": "2024-10-28T20:07:57Z",
"timestamp": "2025-03-04T19:25:10Z",
"applyable": true,
"complete": true,
"errored": false
@@ -1,6 +1,6 @@
{
"format_version": "1.0",
"terraform_version": "1.9.8",
"terraform_version": "1.10.5",
"values": {
"root_module": {
"resources": [
@@ -38,7 +38,7 @@
"type": "coder_agent",
"name": "main",
"provider_name": "registry.terraform.io/coder/coder",
"schema_version": 0,
"schema_version": 1,
"values": {
"arch": "amd64",
"auth": "token",
@@ -54,19 +54,16 @@
}
],
"env": null,
"id": "ec5d36c9-8690-4246-8ab3-2d85a3eacee6",
"id": "3af7f86b-3674-4e9d-b92d-d2e8890e5c38",
"init_script": "",
"login_before_ready": true,
"metadata": [],
"motd_file": null,
"order": null,
"os": "linux",
"shutdown_script": null,
"shutdown_script_timeout": 300,
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"token": "78c55fa2-8e3c-4564-950d-e022c76cf05a",
"startup_script_behavior": "non-blocking",
"token": "cf973ff9-17e3-4e08-abc5-7a37d3f74d0f",
"troubleshooting_url": null
},
"sensitive_values": {
@@ -85,7 +82,7 @@
"provider_name": "registry.terraform.io/hashicorp/null",
"schema_version": 0,
"values": {
"id": "455343782636271645",
"id": "7283290279914631370",
"triggers": null
},
"sensitive_values": {},
@@ -1,6 +1,6 @@
{
"format_version": "1.2",
"terraform_version": "1.9.8",
"terraform_version": "1.10.5",
"planned_values": {
"root_module": {
"resources": [
@@ -119,7 +119,7 @@
],
"prior_state": {
"format_version": "1.0",
"terraform_version": "1.9.8",
"terraform_version": "1.10.5",
"values": {
"root_module": {
"resources": [
@@ -223,7 +223,7 @@
]
}
},
"timestamp": "2024-10-28T20:07:58Z",
"timestamp": "2025-03-04T19:25:12Z",
"applyable": true,
"complete": true,
"errored": false
@@ -1,6 +1,6 @@
{
"format_version": "1.0",
"terraform_version": "1.9.8",
"terraform_version": "1.10.5",
"values": {
"root_module": {
"resources": [
@@ -52,7 +52,7 @@
}
],
"env": null,
"id": "ffa1f524-0350-4891-868d-93cad369318a",
"id": "1023a3a5-f8c1-45f6-a0cc-bf3a1e1f3c63",
"init_script": "",
"login_before_ready": true,
"metadata": [],
@@ -64,7 +64,7 @@
"startup_script": null,
"startup_script_behavior": null,
"startup_script_timeout": 300,
"token": "8ba649af-b498-4f20-8055-b6a0b995837e",
"token": "a59f2270-ac62-47bc-9c68-7bbfe2981c2e",
"troubleshooting_url": null
},
"sensitive_values": {
@@ -83,7 +83,7 @@
"provider_name": "registry.terraform.io/hashicorp/null",
"schema_version": 0,
"values": {
"id": "7420557451345159984",
"id": "3284204005710492153",
"triggers": null
},
"sensitive_values": {},

Some files were not shown because too many files have changed in this diff Show More