Compare commits
7 Commits
v2.28.0
...
release/2.17
| Author | SHA1 | Date | |
|---|---|---|---|
| 2ce3dae5b4 | |||
| d1a02c05e4 | |||
| 7011e4bfcf | |||
| dbfadf2b73 | |||
| 0598aecf90 | |||
| 5a6d23a4a3 | |||
| 9a444b3af2 |
+7
-21
@@ -61,7 +61,6 @@ import (
|
||||
"github.com/coder/serpent"
|
||||
"github.com/coder/wgtunnel/tunnelsdk"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/cryptokeys"
|
||||
"github.com/coder/coder/v2/coderd/entitlements"
|
||||
"github.com/coder/coder/v2/coderd/notifications/reports"
|
||||
"github.com/coder/coder/v2/coderd/runtimeconfig"
|
||||
@@ -212,10 +211,16 @@ func enablePrometheus(
|
||||
options.PrometheusRegistry.MustRegister(collectors.NewGoCollector())
|
||||
options.PrometheusRegistry.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))
|
||||
|
||||
closeUsersFunc, err := prometheusmetrics.ActiveUsers(ctx, options.PrometheusRegistry, options.Database, 0)
|
||||
closeActiveUsersFunc, err := prometheusmetrics.ActiveUsers(ctx, options.Logger.Named("active_user_metrics"), options.PrometheusRegistry, options.Database, 0)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("register active users prometheus metric: %w", err)
|
||||
}
|
||||
afterCtx(ctx, closeActiveUsersFunc)
|
||||
|
||||
closeUsersFunc, err := prometheusmetrics.Users(ctx, options.Logger.Named("user_metrics"), quartz.NewReal(), options.PrometheusRegistry, options.Database, 0)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("register users prometheus metric: %w", err)
|
||||
}
|
||||
afterCtx(ctx, closeUsersFunc)
|
||||
|
||||
closeWorkspacesFunc, err := prometheusmetrics.Workspaces(ctx, options.Logger.Named("workspaces_metrics"), options.PrometheusRegistry, options.Database, 0)
|
||||
@@ -748,25 +753,6 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
|
||||
return xerrors.Errorf("set deployment id: %w", err)
|
||||
}
|
||||
|
||||
fetcher := &cryptokeys.DBFetcher{
|
||||
DB: options.Database,
|
||||
}
|
||||
|
||||
resumeKeycache, err := cryptokeys.NewSigningCache(ctx,
|
||||
logger,
|
||||
fetcher,
|
||||
codersdk.CryptoKeyFeatureTailnetResume,
|
||||
)
|
||||
if err != nil {
|
||||
logger.Critical(ctx, "failed to properly instantiate tailnet resume signing cache", slog.Error(err))
|
||||
}
|
||||
|
||||
options.CoordinatorResumeTokenProvider = tailnet.NewResumeTokenKeyProvider(
|
||||
resumeKeycache,
|
||||
quartz.NewReal(),
|
||||
tailnet.DefaultResumeTokenExpiry,
|
||||
)
|
||||
|
||||
options.RuntimeConfig = runtimeconfig.NewManager()
|
||||
|
||||
// This should be output before the logs start streaming.
|
||||
|
||||
@@ -197,6 +197,7 @@ func (r *RootCmd) newCreateAdminUserCommand() *serpent.Command {
|
||||
UpdatedAt: dbtime.Now(),
|
||||
RBACRoles: []string{rbac.RoleOwner().String()},
|
||||
LoginType: database.LoginTypePassword,
|
||||
Status: "",
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("insert user: %w", err)
|
||||
|
||||
+69
-3
@@ -106,6 +106,58 @@ Use a YAML configuration file when your server launch become unwieldy.
|
||||
|
||||
Write out the current server config as YAML to stdout.
|
||||
|
||||
EMAIL OPTIONS:
|
||||
Configure how emails are sent.
|
||||
|
||||
--email-force-tls bool, $CODER_EMAIL_FORCE_TLS (default: false)
|
||||
Force a TLS connection to the configured SMTP smarthost.
|
||||
|
||||
--email-from string, $CODER_EMAIL_FROM
|
||||
The sender's address to use.
|
||||
|
||||
--email-hello string, $CODER_EMAIL_HELLO (default: localhost)
|
||||
The hostname identifying the SMTP server.
|
||||
|
||||
--email-smarthost host:port, $CODER_EMAIL_SMARTHOST (default: localhost:587)
|
||||
The intermediary SMTP host through which emails are sent.
|
||||
|
||||
EMAIL / EMAIL AUTHENTICATION OPTIONS:
|
||||
Configure SMTP authentication options.
|
||||
|
||||
--email-auth-identity string, $CODER_EMAIL_AUTH_IDENTITY
|
||||
Identity to use with PLAIN authentication.
|
||||
|
||||
--email-auth-password string, $CODER_EMAIL_AUTH_PASSWORD
|
||||
Password to use with PLAIN/LOGIN authentication.
|
||||
|
||||
--email-auth-password-file string, $CODER_EMAIL_AUTH_PASSWORD_FILE
|
||||
File from which to load password for use with PLAIN/LOGIN
|
||||
authentication.
|
||||
|
||||
--email-auth-username string, $CODER_EMAIL_AUTH_USERNAME
|
||||
Username to use with PLAIN/LOGIN authentication.
|
||||
|
||||
EMAIL / EMAIL TLS OPTIONS:
|
||||
Configure TLS for your SMTP server target.
|
||||
|
||||
--email-tls-ca-cert-file string, $CODER_EMAIL_TLS_CACERTFILE
|
||||
CA certificate file to use.
|
||||
|
||||
--email-tls-cert-file string, $CODER_EMAIL_TLS_CERTFILE
|
||||
Certificate file to use.
|
||||
|
||||
--email-tls-cert-key-file string, $CODER_EMAIL_TLS_CERTKEYFILE
|
||||
Certificate key file to use.
|
||||
|
||||
--email-tls-server-name string, $CODER_EMAIL_TLS_SERVERNAME
|
||||
Server name to verify against the target certificate.
|
||||
|
||||
--email-tls-skip-verify bool, $CODER_EMAIL_TLS_SKIPVERIFY
|
||||
Skip verification of the target server's certificate (insecure).
|
||||
|
||||
--email-tls-starttls bool, $CODER_EMAIL_TLS_STARTTLS
|
||||
Enable STARTTLS to upgrade insecure SMTP connections using TLS.
|
||||
|
||||
INTROSPECTION / HEALTH CHECK OPTIONS:
|
||||
--health-check-refresh duration, $CODER_HEALTH_CHECK_REFRESH (default: 10m0s)
|
||||
Refresh interval for healthchecks.
|
||||
@@ -349,54 +401,68 @@ Configure how notifications are processed and delivered.
|
||||
NOTIFICATIONS / EMAIL OPTIONS:
|
||||
Configure how email notifications are sent.
|
||||
|
||||
--notifications-email-force-tls bool, $CODER_NOTIFICATIONS_EMAIL_FORCE_TLS (default: false)
|
||||
--notifications-email-force-tls bool, $CODER_NOTIFICATIONS_EMAIL_FORCE_TLS
|
||||
Force a TLS connection to the configured SMTP smarthost.
|
||||
DEPRECATED: Use --email-force-tls instead.
|
||||
|
||||
--notifications-email-from string, $CODER_NOTIFICATIONS_EMAIL_FROM
|
||||
The sender's address to use.
|
||||
DEPRECATED: Use --email-from instead.
|
||||
|
||||
--notifications-email-hello string, $CODER_NOTIFICATIONS_EMAIL_HELLO (default: localhost)
|
||||
--notifications-email-hello string, $CODER_NOTIFICATIONS_EMAIL_HELLO
|
||||
The hostname identifying the SMTP server.
|
||||
DEPRECATED: Use --email-hello instead.
|
||||
|
||||
--notifications-email-smarthost host:port, $CODER_NOTIFICATIONS_EMAIL_SMARTHOST (default: localhost:587)
|
||||
--notifications-email-smarthost host:port, $CODER_NOTIFICATIONS_EMAIL_SMARTHOST
|
||||
The intermediary SMTP host through which emails are sent.
|
||||
DEPRECATED: Use --email-smarthost instead.
|
||||
|
||||
NOTIFICATIONS / EMAIL / EMAIL AUTHENTICATION OPTIONS:
|
||||
Configure SMTP authentication options.
|
||||
|
||||
--notifications-email-auth-identity string, $CODER_NOTIFICATIONS_EMAIL_AUTH_IDENTITY
|
||||
Identity to use with PLAIN authentication.
|
||||
DEPRECATED: Use --email-auth-identity instead.
|
||||
|
||||
--notifications-email-auth-password string, $CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD
|
||||
Password to use with PLAIN/LOGIN authentication.
|
||||
DEPRECATED: Use --email-auth-password instead.
|
||||
|
||||
--notifications-email-auth-password-file string, $CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD_FILE
|
||||
File from which to load password for use with PLAIN/LOGIN
|
||||
authentication.
|
||||
DEPRECATED: Use --email-auth-password-file instead.
|
||||
|
||||
--notifications-email-auth-username string, $CODER_NOTIFICATIONS_EMAIL_AUTH_USERNAME
|
||||
Username to use with PLAIN/LOGIN authentication.
|
||||
DEPRECATED: Use --email-auth-username instead.
|
||||
|
||||
NOTIFICATIONS / EMAIL / EMAIL TLS OPTIONS:
|
||||
Configure TLS for your SMTP server target.
|
||||
|
||||
--notifications-email-tls-ca-cert-file string, $CODER_NOTIFICATIONS_EMAIL_TLS_CACERTFILE
|
||||
CA certificate file to use.
|
||||
DEPRECATED: Use --email-tls-ca-cert-file instead.
|
||||
|
||||
--notifications-email-tls-cert-file string, $CODER_NOTIFICATIONS_EMAIL_TLS_CERTFILE
|
||||
Certificate file to use.
|
||||
DEPRECATED: Use --email-tls-cert-file instead.
|
||||
|
||||
--notifications-email-tls-cert-key-file string, $CODER_NOTIFICATIONS_EMAIL_TLS_CERTKEYFILE
|
||||
Certificate key file to use.
|
||||
DEPRECATED: Use --email-tls-cert-key-file instead.
|
||||
|
||||
--notifications-email-tls-server-name string, $CODER_NOTIFICATIONS_EMAIL_TLS_SERVERNAME
|
||||
Server name to verify against the target certificate.
|
||||
DEPRECATED: Use --email-tls-server-name instead.
|
||||
|
||||
--notifications-email-tls-skip-verify bool, $CODER_NOTIFICATIONS_EMAIL_TLS_SKIPVERIFY
|
||||
Skip verification of the target server's certificate (insecure).
|
||||
DEPRECATED: Use --email-tls-skip-verify instead.
|
||||
|
||||
--notifications-email-tls-starttls bool, $CODER_NOTIFICATIONS_EMAIL_TLS_STARTTLS
|
||||
Enable STARTTLS to upgrade insecure SMTP connections using TLS.
|
||||
DEPRECATED: Use --email-tls-starttls instead.
|
||||
|
||||
NOTIFICATIONS / WEBHOOK OPTIONS:
|
||||
--notifications-webhook-endpoint url, $CODER_NOTIFICATIONS_WEBHOOK_ENDPOINT
|
||||
|
||||
+48
-3
@@ -518,6 +518,51 @@ userQuietHoursSchedule:
|
||||
# compatibility reasons, this will be removed in a future release.
|
||||
# (default: false, type: bool)
|
||||
allowWorkspaceRenames: false
|
||||
# Configure how emails are sent.
|
||||
email:
|
||||
# The sender's address to use.
|
||||
# (default: <unset>, type: string)
|
||||
from: ""
|
||||
# The intermediary SMTP host through which emails are sent.
|
||||
# (default: localhost:587, type: host:port)
|
||||
smarthost: localhost:587
|
||||
# The hostname identifying the SMTP server.
|
||||
# (default: localhost, type: string)
|
||||
hello: localhost
|
||||
# Force a TLS connection to the configured SMTP smarthost.
|
||||
# (default: false, type: bool)
|
||||
forceTLS: false
|
||||
# Configure SMTP authentication options.
|
||||
emailAuth:
|
||||
# Identity to use with PLAIN authentication.
|
||||
# (default: <unset>, type: string)
|
||||
identity: ""
|
||||
# Username to use with PLAIN/LOGIN authentication.
|
||||
# (default: <unset>, type: string)
|
||||
username: ""
|
||||
# File from which to load password for use with PLAIN/LOGIN authentication.
|
||||
# (default: <unset>, type: string)
|
||||
passwordFile: ""
|
||||
# Configure TLS for your SMTP server target.
|
||||
emailTLS:
|
||||
# Enable STARTTLS to upgrade insecure SMTP connections using TLS.
|
||||
# (default: <unset>, type: bool)
|
||||
startTLS: false
|
||||
# Server name to verify against the target certificate.
|
||||
# (default: <unset>, type: string)
|
||||
serverName: ""
|
||||
# Skip verification of the target server's certificate (insecure).
|
||||
# (default: <unset>, type: bool)
|
||||
insecureSkipVerify: false
|
||||
# CA certificate file to use.
|
||||
# (default: <unset>, type: string)
|
||||
caCertFile: ""
|
||||
# Certificate file to use.
|
||||
# (default: <unset>, type: string)
|
||||
certFile: ""
|
||||
# Certificate key file to use.
|
||||
# (default: <unset>, type: string)
|
||||
certKeyFile: ""
|
||||
# Configure how notifications are processed and delivered.
|
||||
notifications:
|
||||
# Which delivery method to use (available options: 'smtp', 'webhook').
|
||||
@@ -532,13 +577,13 @@ notifications:
|
||||
# (default: <unset>, type: string)
|
||||
from: ""
|
||||
# The intermediary SMTP host through which emails are sent.
|
||||
# (default: localhost:587, type: host:port)
|
||||
# (default: <unset>, type: host:port)
|
||||
smarthost: localhost:587
|
||||
# The hostname identifying the SMTP server.
|
||||
# (default: localhost, type: string)
|
||||
# (default: <unset>, type: string)
|
||||
hello: localhost
|
||||
# Force a TLS connection to the configured SMTP smarthost.
|
||||
# (default: false, type: bool)
|
||||
# (default: <unset>, type: bool)
|
||||
forceTLS: false
|
||||
# Configure SMTP authentication options.
|
||||
emailAuth:
|
||||
|
||||
Generated
+8
@@ -9896,6 +9896,14 @@ const docTemplate = `{
|
||||
"password": {
|
||||
"type": "string"
|
||||
},
|
||||
"user_status": {
|
||||
"description": "UserStatus defaults to UserStatusDormant.",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/codersdk.UserStatus"
|
||||
}
|
||||
]
|
||||
},
|
||||
"username": {
|
||||
"type": "string"
|
||||
}
|
||||
|
||||
Generated
+8
@@ -8809,6 +8809,14 @@
|
||||
"password": {
|
||||
"type": "string"
|
||||
},
|
||||
"user_status": {
|
||||
"description": "UserStatus defaults to UserStatusDormant.",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/codersdk.UserStatus"
|
||||
}
|
||||
]
|
||||
},
|
||||
"username": {
|
||||
"type": "string"
|
||||
}
|
||||
|
||||
@@ -0,0 +1,33 @@
|
||||
package audit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"cdr.dev/slog"
|
||||
)
|
||||
|
||||
type BackgroundSubsystem string
|
||||
|
||||
const (
|
||||
BackgroundSubsystemDormancy BackgroundSubsystem = "dormancy"
|
||||
)
|
||||
|
||||
func BackgroundTaskFields(subsystem BackgroundSubsystem) map[string]string {
|
||||
return map[string]string{
|
||||
"automatic_actor": "coder",
|
||||
"automatic_subsystem": string(subsystem),
|
||||
}
|
||||
}
|
||||
|
||||
func BackgroundTaskFieldsBytes(ctx context.Context, logger slog.Logger, subsystem BackgroundSubsystem) []byte {
|
||||
af := BackgroundTaskFields(subsystem)
|
||||
|
||||
wriBytes, err := json.Marshal(af)
|
||||
if err != nil {
|
||||
logger.Error(ctx, "marshal additional fields for dormancy audit", slog.Error(err))
|
||||
return []byte("{}")
|
||||
}
|
||||
|
||||
return wriBytes
|
||||
}
|
||||
@@ -62,12 +62,13 @@ type BackgroundAuditParams[T Auditable] struct {
|
||||
Audit Auditor
|
||||
Log slog.Logger
|
||||
|
||||
UserID uuid.UUID
|
||||
RequestID uuid.UUID
|
||||
Status int
|
||||
Action database.AuditAction
|
||||
OrganizationID uuid.UUID
|
||||
IP string
|
||||
UserID uuid.UUID
|
||||
RequestID uuid.UUID
|
||||
Status int
|
||||
Action database.AuditAction
|
||||
OrganizationID uuid.UUID
|
||||
IP string
|
||||
// todo: this should automatically marshal an interface{} instead of accepting a raw message.
|
||||
AdditionalFields json.RawMessage
|
||||
|
||||
New T
|
||||
|
||||
+24
-3
@@ -467,7 +467,7 @@ func New(options *Options) *API {
|
||||
codersdk.CryptoKeyFeatureOIDCConvert,
|
||||
)
|
||||
if err != nil {
|
||||
options.Logger.Critical(ctx, "failed to properly instantiate oidc convert signing cache", slog.Error(err))
|
||||
options.Logger.Fatal(ctx, "failed to properly instantiate oidc convert signing cache", slog.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -478,7 +478,7 @@ func New(options *Options) *API {
|
||||
codersdk.CryptoKeyFeatureWorkspaceAppsToken,
|
||||
)
|
||||
if err != nil {
|
||||
options.Logger.Critical(ctx, "failed to properly instantiate app signing key cache", slog.Error(err))
|
||||
options.Logger.Fatal(ctx, "failed to properly instantiate app signing key cache", slog.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -489,10 +489,30 @@ func New(options *Options) *API {
|
||||
codersdk.CryptoKeyFeatureWorkspaceAppsAPIKey,
|
||||
)
|
||||
if err != nil {
|
||||
options.Logger.Critical(ctx, "failed to properly instantiate app encryption key cache", slog.Error(err))
|
||||
options.Logger.Fatal(ctx, "failed to properly instantiate app encryption key cache", slog.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
if options.CoordinatorResumeTokenProvider == nil {
|
||||
fetcher := &cryptokeys.DBFetcher{
|
||||
DB: options.Database,
|
||||
}
|
||||
|
||||
resumeKeycache, err := cryptokeys.NewSigningCache(ctx,
|
||||
options.Logger,
|
||||
fetcher,
|
||||
codersdk.CryptoKeyFeatureTailnetResume,
|
||||
)
|
||||
if err != nil {
|
||||
options.Logger.Fatal(ctx, "failed to properly instantiate tailnet resume signing cache", slog.Error(err))
|
||||
}
|
||||
options.CoordinatorResumeTokenProvider = tailnet.NewResumeTokenKeyProvider(
|
||||
resumeKeycache,
|
||||
options.Clock,
|
||||
tailnet.DefaultResumeTokenExpiry,
|
||||
)
|
||||
}
|
||||
|
||||
// Start a background process that rotates keys. We intentionally start this after the caches
|
||||
// are created to force initial requests for a key to populate the caches. This helps catch
|
||||
// bugs that may only occur when a key isn't precached in tests and the latency cost is minimal.
|
||||
@@ -702,6 +722,7 @@ func New(options *Options) *API {
|
||||
|
||||
apiKeyMiddleware := httpmw.ExtractAPIKeyMW(httpmw.ExtractAPIKeyConfig{
|
||||
DB: options.Database,
|
||||
ActivateDormantUser: ActivateDormantUser(options.Logger, &api.Auditor, options.Database),
|
||||
OAuth2Configs: oauthConfigs,
|
||||
RedirectToLogin: false,
|
||||
DisableSessionExpiryRefresh: options.DeploymentValues.Sessions.DisableExpiryRefresh.Value(),
|
||||
|
||||
@@ -718,6 +718,9 @@ func createAnotherUserRetry(t testing.TB, client *codersdk.Client, organizationI
|
||||
Name: RandomName(t),
|
||||
Password: "SomeSecurePassword!",
|
||||
OrganizationIDs: organizationIDs,
|
||||
// Always create users as active in tests to ignore an extra audit log
|
||||
// when logging in.
|
||||
UserStatus: ptr.Ref(codersdk.UserStatusActive),
|
||||
}
|
||||
for _, m := range mutators {
|
||||
m(&req)
|
||||
|
||||
+28
-5
@@ -28,6 +28,7 @@ type Store interface {
|
||||
wrapper
|
||||
|
||||
Ping(ctx context.Context) (time.Duration, error)
|
||||
PGLocks(ctx context.Context) (PGLocks, error)
|
||||
InTx(func(Store) error, *TxOptions) error
|
||||
}
|
||||
|
||||
@@ -48,13 +49,26 @@ type DBTX interface {
|
||||
GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error
|
||||
}
|
||||
|
||||
func WithSerialRetryCount(count int) func(*sqlQuerier) {
|
||||
return func(q *sqlQuerier) {
|
||||
q.serialRetryCount = count
|
||||
}
|
||||
}
|
||||
|
||||
// New creates a new database store using a SQL database connection.
|
||||
func New(sdb *sql.DB) Store {
|
||||
func New(sdb *sql.DB, opts ...func(*sqlQuerier)) Store {
|
||||
dbx := sqlx.NewDb(sdb, "postgres")
|
||||
return &sqlQuerier{
|
||||
q := &sqlQuerier{
|
||||
db: dbx,
|
||||
sdb: dbx,
|
||||
// This is an arbitrary number.
|
||||
serialRetryCount: 3,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(q)
|
||||
}
|
||||
return q
|
||||
}
|
||||
|
||||
// TxOptions is used to pass some execution metadata to the callers.
|
||||
@@ -104,6 +118,10 @@ type querier interface {
|
||||
type sqlQuerier struct {
|
||||
sdb *sqlx.DB
|
||||
db DBTX
|
||||
|
||||
// serialRetryCount is the number of times to retry a transaction
|
||||
// if it fails with a serialization error.
|
||||
serialRetryCount int
|
||||
}
|
||||
|
||||
func (*sqlQuerier) Wrappers() []string {
|
||||
@@ -143,11 +161,9 @@ func (q *sqlQuerier) InTx(function func(Store) error, txOpts *TxOptions) error {
|
||||
// If we are in a transaction already, the parent InTx call will handle the retry.
|
||||
// We do not want to duplicate those retries.
|
||||
if !inTx && sqlOpts.Isolation == sql.LevelSerializable {
|
||||
// This is an arbitrarily chosen number.
|
||||
const retryAmount = 3
|
||||
var err error
|
||||
attempts := 0
|
||||
for attempts = 0; attempts < retryAmount; attempts++ {
|
||||
for attempts = 0; attempts < q.serialRetryCount; attempts++ {
|
||||
txOpts.executionCount++
|
||||
err = q.runTx(function, sqlOpts)
|
||||
if err == nil {
|
||||
@@ -203,3 +219,10 @@ func (q *sqlQuerier) runTx(function func(Store) error, txOpts *sql.TxOptions) er
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func safeString(s *string) string {
|
||||
if s == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return *s
|
||||
}
|
||||
|
||||
@@ -603,6 +603,10 @@ func (q *querier) Ping(ctx context.Context) (time.Duration, error) {
|
||||
return q.db.Ping(ctx)
|
||||
}
|
||||
|
||||
func (q *querier) PGLocks(ctx context.Context) (database.PGLocks, error) {
|
||||
return q.db.PGLocks(ctx)
|
||||
}
|
||||
|
||||
// InTx runs the given function in a transaction.
|
||||
func (q *querier) InTx(function func(querier database.Store) error, txOpts *database.TxOptions) error {
|
||||
return q.db.InTx(func(tx database.Store) error {
|
||||
|
||||
@@ -152,7 +152,10 @@ func TestDBAuthzRecursive(t *testing.T) {
|
||||
for i := 2; i < method.Type.NumIn(); i++ {
|
||||
ins = append(ins, reflect.New(method.Type.In(i)).Elem())
|
||||
}
|
||||
if method.Name == "InTx" || method.Name == "Ping" || method.Name == "Wrappers" {
|
||||
if method.Name == "InTx" ||
|
||||
method.Name == "Ping" ||
|
||||
method.Name == "Wrappers" ||
|
||||
method.Name == "PGLocks" {
|
||||
continue
|
||||
}
|
||||
// Log the name of the last method, so if there is a panic, it is
|
||||
|
||||
@@ -34,6 +34,7 @@ var errMatchAny = xerrors.New("match any error")
|
||||
var skipMethods = map[string]string{
|
||||
"InTx": "Not relevant",
|
||||
"Ping": "Not relevant",
|
||||
"PGLocks": "Not relevant",
|
||||
"Wrappers": "Not relevant",
|
||||
"AcquireLock": "Not relevant",
|
||||
"TryAcquireLock": "Not relevant",
|
||||
|
||||
@@ -0,0 +1,127 @@
|
||||
package dbfake
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
type OrganizationBuilder struct {
|
||||
t *testing.T
|
||||
db database.Store
|
||||
seed database.Organization
|
||||
allUsersAllowance int32
|
||||
members []uuid.UUID
|
||||
groups map[database.Group][]uuid.UUID
|
||||
}
|
||||
|
||||
func Organization(t *testing.T, db database.Store) OrganizationBuilder {
|
||||
return OrganizationBuilder{
|
||||
t: t,
|
||||
db: db,
|
||||
members: []uuid.UUID{},
|
||||
groups: make(map[database.Group][]uuid.UUID),
|
||||
}
|
||||
}
|
||||
|
||||
type OrganizationResponse struct {
|
||||
Org database.Organization
|
||||
AllUsersGroup database.Group
|
||||
Members []database.OrganizationMember
|
||||
Groups []database.Group
|
||||
}
|
||||
|
||||
func (b OrganizationBuilder) EveryoneAllowance(allowance int) OrganizationBuilder {
|
||||
//nolint: revive // returns modified struct
|
||||
b.allUsersAllowance = int32(allowance)
|
||||
return b
|
||||
}
|
||||
|
||||
func (b OrganizationBuilder) Seed(seed database.Organization) OrganizationBuilder {
|
||||
//nolint: revive // returns modified struct
|
||||
b.seed = seed
|
||||
return b
|
||||
}
|
||||
|
||||
func (b OrganizationBuilder) Members(users ...database.User) OrganizationBuilder {
|
||||
for _, u := range users {
|
||||
//nolint: revive // returns modified struct
|
||||
b.members = append(b.members, u.ID)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (b OrganizationBuilder) Group(seed database.Group, members ...database.User) OrganizationBuilder {
|
||||
//nolint: revive // returns modified struct
|
||||
b.groups[seed] = []uuid.UUID{}
|
||||
for _, u := range members {
|
||||
//nolint: revive // returns modified struct
|
||||
b.groups[seed] = append(b.groups[seed], u.ID)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (b OrganizationBuilder) Do() OrganizationResponse {
|
||||
org := dbgen.Organization(b.t, b.db, b.seed)
|
||||
|
||||
ctx := testutil.Context(b.t, testutil.WaitShort)
|
||||
//nolint:gocritic // builder code needs perms
|
||||
ctx = dbauthz.AsSystemRestricted(ctx)
|
||||
everyone, err := b.db.InsertAllUsersGroup(ctx, org.ID)
|
||||
require.NoError(b.t, err)
|
||||
|
||||
if b.allUsersAllowance > 0 {
|
||||
everyone, err = b.db.UpdateGroupByID(ctx, database.UpdateGroupByIDParams{
|
||||
Name: everyone.Name,
|
||||
DisplayName: everyone.DisplayName,
|
||||
AvatarURL: everyone.AvatarURL,
|
||||
QuotaAllowance: b.allUsersAllowance,
|
||||
ID: everyone.ID,
|
||||
})
|
||||
require.NoError(b.t, err)
|
||||
}
|
||||
|
||||
members := make([]database.OrganizationMember, 0)
|
||||
if len(b.members) > 0 {
|
||||
for _, u := range b.members {
|
||||
newMem := dbgen.OrganizationMember(b.t, b.db, database.OrganizationMember{
|
||||
UserID: u,
|
||||
OrganizationID: org.ID,
|
||||
CreatedAt: dbtime.Now(),
|
||||
UpdatedAt: dbtime.Now(),
|
||||
Roles: nil,
|
||||
})
|
||||
members = append(members, newMem)
|
||||
}
|
||||
}
|
||||
|
||||
groups := make([]database.Group, 0)
|
||||
if len(b.groups) > 0 {
|
||||
for g, users := range b.groups {
|
||||
g.OrganizationID = org.ID
|
||||
group := dbgen.Group(b.t, b.db, g)
|
||||
groups = append(groups, group)
|
||||
|
||||
for _, u := range users {
|
||||
dbgen.GroupMember(b.t, b.db, database.GroupMemberTable{
|
||||
UserID: u,
|
||||
GroupID: group.ID,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return OrganizationResponse{
|
||||
Org: org,
|
||||
AllUsersGroup: everyone,
|
||||
Members: members,
|
||||
Groups: groups,
|
||||
}
|
||||
}
|
||||
@@ -342,6 +342,7 @@ func User(t testing.TB, db database.Store, orig database.User) database.User {
|
||||
UpdatedAt: takeFirst(orig.UpdatedAt, dbtime.Now()),
|
||||
RBACRoles: takeFirstSlice(orig.RBACRoles, []string{}),
|
||||
LoginType: takeFirst(orig.LoginType, database.LoginTypePassword),
|
||||
Status: string(takeFirst(orig.Status, database.UserStatusDormant)),
|
||||
})
|
||||
require.NoError(t, err, "insert user")
|
||||
|
||||
@@ -407,6 +408,8 @@ func OrganizationMember(t testing.TB, db database.Store, orig database.Organizat
|
||||
}
|
||||
|
||||
func Group(t testing.TB, db database.Store, orig database.Group) database.Group {
|
||||
t.Helper()
|
||||
|
||||
name := takeFirst(orig.Name, testutil.GetRandomName(t))
|
||||
group, err := db.InsertGroup(genCtx, database.InsertGroupParams{
|
||||
ID: takeFirst(orig.ID, uuid.New()),
|
||||
|
||||
@@ -339,6 +339,10 @@ func (*FakeQuerier) Ping(_ context.Context) (time.Duration, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (*FakeQuerier) PGLocks(_ context.Context) (database.PGLocks, error) {
|
||||
return []database.PGLock{}, nil
|
||||
}
|
||||
|
||||
func (tx *fakeTx) AcquireLock(_ context.Context, id int64) error {
|
||||
if _, ok := tx.FakeQuerier.locks[id]; ok {
|
||||
return xerrors.Errorf("cannot acquire lock %d: already held", id)
|
||||
@@ -7709,6 +7713,11 @@ func (q *FakeQuerier) InsertUser(_ context.Context, arg database.InsertUserParam
|
||||
}
|
||||
}
|
||||
|
||||
status := database.UserStatusDormant
|
||||
if arg.Status != "" {
|
||||
status = database.UserStatus(arg.Status)
|
||||
}
|
||||
|
||||
user := database.User{
|
||||
ID: arg.ID,
|
||||
Email: arg.Email,
|
||||
@@ -7717,7 +7726,7 @@ func (q *FakeQuerier) InsertUser(_ context.Context, arg database.InsertUserParam
|
||||
UpdatedAt: arg.UpdatedAt,
|
||||
Username: arg.Username,
|
||||
Name: arg.Name,
|
||||
Status: database.UserStatusDormant,
|
||||
Status: status,
|
||||
RBACRoles: arg.RBACRoles,
|
||||
LoginType: arg.LoginType,
|
||||
}
|
||||
@@ -8640,6 +8649,7 @@ func (q *FakeQuerier) UpdateInactiveUsersToDormant(_ context.Context, params dat
|
||||
updated = append(updated, database.UpdateInactiveUsersToDormantRow{
|
||||
ID: user.ID,
|
||||
Email: user.Email,
|
||||
Username: user.Username,
|
||||
LastSeenAt: user.LastSeenAt,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -66,6 +66,13 @@ func (m queryMetricsStore) Ping(ctx context.Context) (time.Duration, error) {
|
||||
return duration, err
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) PGLocks(ctx context.Context) (database.PGLocks, error) {
|
||||
start := time.Now()
|
||||
locks, err := m.s.PGLocks(ctx)
|
||||
m.queryLatencies.WithLabelValues("PGLocks").Observe(time.Since(start).Seconds())
|
||||
return locks, err
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) InTx(f func(database.Store) error, options *database.TxOptions) error {
|
||||
return m.dbMetrics.InTx(f, options)
|
||||
}
|
||||
|
||||
@@ -4299,6 +4299,21 @@ func (mr *MockStoreMockRecorder) OrganizationMembers(arg0, arg1 any) *gomock.Cal
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OrganizationMembers", reflect.TypeOf((*MockStore)(nil).OrganizationMembers), arg0, arg1)
|
||||
}
|
||||
|
||||
// PGLocks mocks base method.
|
||||
func (m *MockStore) PGLocks(arg0 context.Context) (database.PGLocks, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "PGLocks", arg0)
|
||||
ret0, _ := ret[0].(database.PGLocks)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// PGLocks indicates an expected call of PGLocks.
|
||||
func (mr *MockStoreMockRecorder) PGLocks(arg0 any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PGLocks", reflect.TypeOf((*MockStore)(nil).PGLocks), arg0)
|
||||
}
|
||||
|
||||
// Ping mocks base method.
|
||||
func (m *MockStore) Ping(arg0 context.Context) (time.Duration, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
||||
@@ -135,7 +135,8 @@ func NewDB(t testing.TB, opts ...Option) (database.Store, pubsub.Pubsub) {
|
||||
if o.dumpOnFailure {
|
||||
t.Cleanup(func() { DumpOnFailure(t, connectionURL) })
|
||||
}
|
||||
db = database.New(sqlDB)
|
||||
// Unit tests should not retry serial transaction failures.
|
||||
db = database.New(sqlDB, database.WithSerialRetryCount(1))
|
||||
|
||||
ps, err = pubsub.New(context.Background(), o.logger, sqlDB, connectionURL)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -0,0 +1,73 @@
|
||||
package dbtestutil
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
)
|
||||
|
||||
type DBTx struct {
|
||||
database.Store
|
||||
mu sync.Mutex
|
||||
done chan error
|
||||
finalErr chan error
|
||||
}
|
||||
|
||||
// StartTx starts a transaction and returns a DBTx object. This allows running
|
||||
// 2 transactions concurrently in a test more easily.
|
||||
// Example:
|
||||
//
|
||||
// a := StartTx(t, db, opts)
|
||||
// b := StartTx(t, db, opts)
|
||||
//
|
||||
// a.GetUsers(...)
|
||||
// b.GetUsers(...)
|
||||
//
|
||||
// require.NoError(t, a.Done()
|
||||
func StartTx(t *testing.T, db database.Store, opts *database.TxOptions) *DBTx {
|
||||
done := make(chan error)
|
||||
finalErr := make(chan error)
|
||||
txC := make(chan database.Store)
|
||||
|
||||
go func() {
|
||||
t.Helper()
|
||||
once := sync.Once{}
|
||||
count := 0
|
||||
|
||||
err := db.InTx(func(store database.Store) error {
|
||||
// InTx can be retried
|
||||
once.Do(func() {
|
||||
txC <- store
|
||||
})
|
||||
count++
|
||||
if count > 1 {
|
||||
// If you recursively call InTx, then don't use this.
|
||||
t.Logf("InTx called more than once: %d", count)
|
||||
assert.NoError(t, xerrors.New("InTx called more than once, this is not allowed with the StartTx helper"))
|
||||
}
|
||||
|
||||
<-done
|
||||
// Just return nil. The caller should be checking their own errors.
|
||||
return nil
|
||||
}, opts)
|
||||
finalErr <- err
|
||||
}()
|
||||
|
||||
txStore := <-txC
|
||||
close(txC)
|
||||
|
||||
return &DBTx{Store: txStore, done: done, finalErr: finalErr}
|
||||
}
|
||||
|
||||
// Done can only be called once. If you call it twice, it will panic.
|
||||
func (tx *DBTx) Done() error {
|
||||
tx.mu.Lock()
|
||||
defer tx.mu.Unlock()
|
||||
|
||||
close(tx.done)
|
||||
return <-tx.finalErr
|
||||
}
|
||||
@@ -0,0 +1,119 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/util/slice"
|
||||
)
|
||||
|
||||
// PGLock docs see: https://www.postgresql.org/docs/current/view-pg-locks.html#VIEW-PG-LOCKS
|
||||
type PGLock struct {
|
||||
// LockType see: https://www.postgresql.org/docs/current/monitoring-stats.html#WAIT-EVENT-LOCK-TABLE
|
||||
LockType *string `db:"locktype"`
|
||||
Database *string `db:"database"` // oid
|
||||
Relation *string `db:"relation"` // oid
|
||||
RelationName *string `db:"relation_name"`
|
||||
Page *int `db:"page"`
|
||||
Tuple *int `db:"tuple"`
|
||||
VirtualXID *string `db:"virtualxid"`
|
||||
TransactionID *string `db:"transactionid"` // xid
|
||||
ClassID *string `db:"classid"` // oid
|
||||
ObjID *string `db:"objid"` // oid
|
||||
ObjSubID *int `db:"objsubid"`
|
||||
VirtualTransaction *string `db:"virtualtransaction"`
|
||||
PID int `db:"pid"`
|
||||
Mode *string `db:"mode"`
|
||||
Granted bool `db:"granted"`
|
||||
FastPath *bool `db:"fastpath"`
|
||||
WaitStart *time.Time `db:"waitstart"`
|
||||
}
|
||||
|
||||
func (l PGLock) Equal(b PGLock) bool {
|
||||
// Lazy, but hope this works
|
||||
return reflect.DeepEqual(l, b)
|
||||
}
|
||||
|
||||
func (l PGLock) String() string {
|
||||
granted := "granted"
|
||||
if !l.Granted {
|
||||
granted = "waiting"
|
||||
}
|
||||
var details string
|
||||
switch safeString(l.LockType) {
|
||||
case "relation":
|
||||
details = ""
|
||||
case "page":
|
||||
details = fmt.Sprintf("page=%d", *l.Page)
|
||||
case "tuple":
|
||||
details = fmt.Sprintf("page=%d tuple=%d", *l.Page, *l.Tuple)
|
||||
case "virtualxid":
|
||||
details = "waiting to acquire virtual tx id lock"
|
||||
default:
|
||||
details = "???"
|
||||
}
|
||||
return fmt.Sprintf("%d-%5s [%s] %s/%s/%s: %s",
|
||||
l.PID,
|
||||
safeString(l.TransactionID),
|
||||
granted,
|
||||
safeString(l.RelationName),
|
||||
safeString(l.LockType),
|
||||
safeString(l.Mode),
|
||||
details,
|
||||
)
|
||||
}
|
||||
|
||||
// PGLocks returns a list of all locks in the database currently in use.
|
||||
func (q *sqlQuerier) PGLocks(ctx context.Context) (PGLocks, error) {
|
||||
rows, err := q.sdb.QueryContext(ctx, `
|
||||
SELECT
|
||||
relation::regclass AS relation_name,
|
||||
*
|
||||
FROM pg_locks;
|
||||
`)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer rows.Close()
|
||||
|
||||
var locks []PGLock
|
||||
err = sqlx.StructScan(rows, &locks)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return locks, err
|
||||
}
|
||||
|
||||
type PGLocks []PGLock
|
||||
|
||||
func (l PGLocks) String() string {
|
||||
// Try to group things together by relation name.
|
||||
sort.Slice(l, func(i, j int) bool {
|
||||
return safeString(l[i].RelationName) < safeString(l[j].RelationName)
|
||||
})
|
||||
|
||||
var out strings.Builder
|
||||
for i, lock := range l {
|
||||
if i != 0 {
|
||||
_, _ = out.WriteString("\n")
|
||||
}
|
||||
_, _ = out.WriteString(lock.String())
|
||||
}
|
||||
return out.String()
|
||||
}
|
||||
|
||||
// Difference returns the difference between two sets of locks.
|
||||
// This is helpful to determine what changed between the two sets.
|
||||
func (l PGLocks) Difference(to PGLocks) (new PGLocks, removed PGLocks) {
|
||||
return slice.SymmetricDifferenceFunc(l, to, func(a, b PGLock) bool {
|
||||
return a.Equal(b)
|
||||
})
|
||||
}
|
||||
@@ -6736,23 +6736,33 @@ const getQuotaConsumedForUser = `-- name: GetQuotaConsumedForUser :one
|
||||
WITH latest_builds AS (
|
||||
SELECT
|
||||
DISTINCT ON
|
||||
(workspace_id) id,
|
||||
workspace_id,
|
||||
daily_cost
|
||||
(wb.workspace_id) wb.workspace_id,
|
||||
wb.daily_cost
|
||||
FROM
|
||||
workspace_builds wb
|
||||
-- This INNER JOIN prevents a seq scan of the workspace_builds table.
|
||||
-- Limit the rows to the absolute minimum required, which is all workspaces
|
||||
-- in a given organization for a given user.
|
||||
INNER JOIN
|
||||
workspaces on wb.workspace_id = workspaces.id
|
||||
WHERE
|
||||
workspaces.owner_id = $1 AND
|
||||
workspaces.organization_id = $2
|
||||
ORDER BY
|
||||
workspace_id,
|
||||
created_at DESC
|
||||
wb.workspace_id,
|
||||
wb.created_at DESC
|
||||
)
|
||||
SELECT
|
||||
coalesce(SUM(daily_cost), 0)::BIGINT
|
||||
FROM
|
||||
workspaces
|
||||
JOIN latest_builds ON
|
||||
INNER JOIN latest_builds ON
|
||||
latest_builds.workspace_id = workspaces.id
|
||||
WHERE NOT
|
||||
deleted AND
|
||||
WHERE
|
||||
NOT deleted AND
|
||||
-- We can likely remove these conditions since we check above.
|
||||
-- But it does not hurt to be defensive and make sure future query changes
|
||||
-- do not break anything.
|
||||
workspaces.owner_id = $1 AND
|
||||
workspaces.organization_id = $2
|
||||
`
|
||||
@@ -10345,10 +10355,15 @@ INSERT INTO
|
||||
created_at,
|
||||
updated_at,
|
||||
rbac_roles,
|
||||
login_type
|
||||
login_type,
|
||||
status
|
||||
)
|
||||
VALUES
|
||||
($1, $2, $3, $4, $5, $6, $7, $8, $9) RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, theme_preference, name, github_com_user_id, hashed_one_time_passcode, one_time_passcode_expires_at
|
||||
($1, $2, $3, $4, $5, $6, $7, $8, $9,
|
||||
-- if the status passed in is empty, fallback to dormant, which is what
|
||||
-- we were doing before.
|
||||
COALESCE(NULLIF($10::text, '')::user_status, 'dormant'::user_status)
|
||||
) RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, theme_preference, name, github_com_user_id, hashed_one_time_passcode, one_time_passcode_expires_at
|
||||
`
|
||||
|
||||
type InsertUserParams struct {
|
||||
@@ -10361,6 +10376,7 @@ type InsertUserParams struct {
|
||||
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
||||
RBACRoles pq.StringArray `db:"rbac_roles" json:"rbac_roles"`
|
||||
LoginType LoginType `db:"login_type" json:"login_type"`
|
||||
Status string `db:"status" json:"status"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) InsertUser(ctx context.Context, arg InsertUserParams) (User, error) {
|
||||
@@ -10374,6 +10390,7 @@ func (q *sqlQuerier) InsertUser(ctx context.Context, arg InsertUserParams) (User
|
||||
arg.UpdatedAt,
|
||||
arg.RBACRoles,
|
||||
arg.LoginType,
|
||||
arg.Status,
|
||||
)
|
||||
var i User
|
||||
err := row.Scan(
|
||||
@@ -10408,7 +10425,7 @@ SET
|
||||
WHERE
|
||||
last_seen_at < $2 :: timestamp
|
||||
AND status = 'active'::user_status
|
||||
RETURNING id, email, last_seen_at
|
||||
RETURNING id, email, username, last_seen_at
|
||||
`
|
||||
|
||||
type UpdateInactiveUsersToDormantParams struct {
|
||||
@@ -10419,6 +10436,7 @@ type UpdateInactiveUsersToDormantParams struct {
|
||||
type UpdateInactiveUsersToDormantRow struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
Email string `db:"email" json:"email"`
|
||||
Username string `db:"username" json:"username"`
|
||||
LastSeenAt time.Time `db:"last_seen_at" json:"last_seen_at"`
|
||||
}
|
||||
|
||||
@@ -10431,7 +10449,12 @@ func (q *sqlQuerier) UpdateInactiveUsersToDormant(ctx context.Context, arg Updat
|
||||
var items []UpdateInactiveUsersToDormantRow
|
||||
for rows.Next() {
|
||||
var i UpdateInactiveUsersToDormantRow
|
||||
if err := rows.Scan(&i.ID, &i.Email, &i.LastSeenAt); err != nil {
|
||||
if err := rows.Scan(
|
||||
&i.ID,
|
||||
&i.Email,
|
||||
&i.Username,
|
||||
&i.LastSeenAt,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
@@ -14947,7 +14970,7 @@ WHERE
|
||||
-- Filter by owner_name
|
||||
AND CASE
|
||||
WHEN $8 :: text != '' THEN
|
||||
workspaces.owner_id = (SELECT id FROM users WHERE lower(owner_username) = lower($8) AND deleted = false)
|
||||
workspaces.owner_id = (SELECT id FROM users WHERE lower(users.username) = lower($8) AND deleted = false)
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by template_name
|
||||
|
||||
@@ -18,23 +18,33 @@ INNER JOIN groups ON
|
||||
WITH latest_builds AS (
|
||||
SELECT
|
||||
DISTINCT ON
|
||||
(workspace_id) id,
|
||||
workspace_id,
|
||||
daily_cost
|
||||
(wb.workspace_id) wb.workspace_id,
|
||||
wb.daily_cost
|
||||
FROM
|
||||
workspace_builds wb
|
||||
-- This INNER JOIN prevents a seq scan of the workspace_builds table.
|
||||
-- Limit the rows to the absolute minimum required, which is all workspaces
|
||||
-- in a given organization for a given user.
|
||||
INNER JOIN
|
||||
workspaces on wb.workspace_id = workspaces.id
|
||||
WHERE
|
||||
workspaces.owner_id = @owner_id AND
|
||||
workspaces.organization_id = @organization_id
|
||||
ORDER BY
|
||||
workspace_id,
|
||||
created_at DESC
|
||||
wb.workspace_id,
|
||||
wb.created_at DESC
|
||||
)
|
||||
SELECT
|
||||
coalesce(SUM(daily_cost), 0)::BIGINT
|
||||
FROM
|
||||
workspaces
|
||||
JOIN latest_builds ON
|
||||
INNER JOIN latest_builds ON
|
||||
latest_builds.workspace_id = workspaces.id
|
||||
WHERE NOT
|
||||
deleted AND
|
||||
WHERE
|
||||
NOT deleted AND
|
||||
-- We can likely remove these conditions since we check above.
|
||||
-- But it does not hurt to be defensive and make sure future query changes
|
||||
-- do not break anything.
|
||||
workspaces.owner_id = @owner_id AND
|
||||
workspaces.organization_id = @organization_id
|
||||
;
|
||||
|
||||
@@ -67,10 +67,15 @@ INSERT INTO
|
||||
created_at,
|
||||
updated_at,
|
||||
rbac_roles,
|
||||
login_type
|
||||
login_type,
|
||||
status
|
||||
)
|
||||
VALUES
|
||||
($1, $2, $3, $4, $5, $6, $7, $8, $9) RETURNING *;
|
||||
($1, $2, $3, $4, $5, $6, $7, $8, $9,
|
||||
-- if the status passed in is empty, fallback to dormant, which is what
|
||||
-- we were doing before.
|
||||
COALESCE(NULLIF(@status::text, '')::user_status, 'dormant'::user_status)
|
||||
) RETURNING *;
|
||||
|
||||
-- name: UpdateUserProfile :one
|
||||
UPDATE
|
||||
@@ -286,7 +291,7 @@ SET
|
||||
WHERE
|
||||
last_seen_at < @last_seen_after :: timestamp
|
||||
AND status = 'active'::user_status
|
||||
RETURNING id, email, last_seen_at;
|
||||
RETURNING id, email, username, last_seen_at;
|
||||
|
||||
-- AllUserIDs returns all UserIDs regardless of user status or deletion.
|
||||
-- name: AllUserIDs :many
|
||||
|
||||
@@ -233,7 +233,7 @@ WHERE
|
||||
-- Filter by owner_name
|
||||
AND CASE
|
||||
WHEN @owner_username :: text != '' THEN
|
||||
workspaces.owner_id = (SELECT id FROM users WHERE lower(owner_username) = lower(@owner_username) AND deleted = false)
|
||||
workspaces.owner_id = (SELECT id FROM users WHERE lower(users.username) = lower(@owner_username) AND deleted = false)
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by template_name
|
||||
|
||||
+5
-4
@@ -25,8 +25,9 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
tarMimeType = "application/x-tar"
|
||||
zipMimeType = "application/zip"
|
||||
tarMimeType = "application/x-tar"
|
||||
zipMimeType = "application/zip"
|
||||
windowsZipMimeType = "application/x-zip-compressed"
|
||||
|
||||
HTTPFileMaxBytes = 10 * (10 << 20)
|
||||
)
|
||||
@@ -48,7 +49,7 @@ func (api *API) postFile(rw http.ResponseWriter, r *http.Request) {
|
||||
|
||||
contentType := r.Header.Get("Content-Type")
|
||||
switch contentType {
|
||||
case tarMimeType, zipMimeType:
|
||||
case tarMimeType, zipMimeType, windowsZipMimeType:
|
||||
default:
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: fmt.Sprintf("Unsupported content type header %q.", contentType),
|
||||
@@ -66,7 +67,7 @@ func (api *API) postFile(rw http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if contentType == zipMimeType {
|
||||
if contentType == zipMimeType || contentType == windowsZipMimeType {
|
||||
zipReader, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
|
||||
@@ -43,6 +43,18 @@ func TestPostFiles(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("InsertWindowsZip", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, nil)
|
||||
_ = coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
|
||||
_, err := client.Upload(ctx, "application/x-zip-compressed", bytes.NewReader(archivetest.TestZipFileBytes()))
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("InsertAlreadyExists", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, nil)
|
||||
|
||||
@@ -82,6 +82,7 @@ const (
|
||||
|
||||
type ExtractAPIKeyConfig struct {
|
||||
DB database.Store
|
||||
ActivateDormantUser func(ctx context.Context, u database.User) (database.User, error)
|
||||
OAuth2Configs *OAuth2Configs
|
||||
RedirectToLogin bool
|
||||
DisableSessionExpiryRefresh bool
|
||||
@@ -414,21 +415,20 @@ func ExtractAPIKey(rw http.ResponseWriter, r *http.Request, cfg ExtractAPIKeyCon
|
||||
})
|
||||
}
|
||||
|
||||
if userStatus == database.UserStatusDormant {
|
||||
// If coder confirms that the dormant user is valid, it can switch their account to active.
|
||||
// nolint:gocritic
|
||||
u, err := cfg.DB.UpdateUserStatus(dbauthz.AsSystemRestricted(ctx), database.UpdateUserStatusParams{
|
||||
ID: key.UserID,
|
||||
Status: database.UserStatusActive,
|
||||
UpdatedAt: dbtime.Now(),
|
||||
if userStatus == database.UserStatusDormant && cfg.ActivateDormantUser != nil {
|
||||
id, _ := uuid.Parse(actor.ID)
|
||||
user, err := cfg.ActivateDormantUser(ctx, database.User{
|
||||
ID: id,
|
||||
Username: actor.FriendlyName,
|
||||
Status: userStatus,
|
||||
})
|
||||
if err != nil {
|
||||
return write(http.StatusInternalServerError, codersdk.Response{
|
||||
Message: internalErrorMessage,
|
||||
Detail: fmt.Sprintf("can't activate a dormant user: %s", err.Error()),
|
||||
Detail: fmt.Sprintf("update user status: %s", err.Error()),
|
||||
})
|
||||
}
|
||||
userStatus = u.Status
|
||||
userStatus = user.Status
|
||||
}
|
||||
|
||||
if userStatus != database.UserStatusActive {
|
||||
|
||||
@@ -453,7 +453,7 @@ func (s *SMTPHandler) auth(ctx context.Context, mechs string) (sasl.Client, erro
|
||||
continue
|
||||
}
|
||||
if password == "" {
|
||||
errs = multierror.Append(errs, xerrors.New("cannot use PLAIN auth, password not defined (see CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD)"))
|
||||
errs = multierror.Append(errs, xerrors.New("cannot use PLAIN auth, password not defined (see CODER_EMAIL_AUTH_PASSWORD)"))
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -475,7 +475,7 @@ func (s *SMTPHandler) auth(ctx context.Context, mechs string) (sasl.Client, erro
|
||||
continue
|
||||
}
|
||||
if password == "" {
|
||||
errs = multierror.Append(errs, xerrors.New("cannot use LOGIN auth, password not defined (see CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD)"))
|
||||
errs = multierror.Append(errs, xerrors.New("cannot use LOGIN auth, password not defined (see CODER_EMAIL_AUTH_PASSWORD)"))
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"golang.org/x/xerrors"
|
||||
"tailscale.com/tailcfg"
|
||||
|
||||
"cdr.dev/slog"
|
||||
@@ -22,12 +23,13 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/tailnet"
|
||||
"github.com/coder/quartz"
|
||||
)
|
||||
|
||||
const defaultRefreshRate = time.Minute
|
||||
|
||||
// ActiveUsers tracks the number of users that have authenticated within the past hour.
|
||||
func ActiveUsers(ctx context.Context, registerer prometheus.Registerer, db database.Store, duration time.Duration) (func(), error) {
|
||||
func ActiveUsers(ctx context.Context, logger slog.Logger, registerer prometheus.Registerer, db database.Store, duration time.Duration) (func(), error) {
|
||||
if duration == 0 {
|
||||
duration = defaultRefreshRate
|
||||
}
|
||||
@@ -58,6 +60,7 @@ func ActiveUsers(ctx context.Context, registerer prometheus.Registerer, db datab
|
||||
|
||||
apiKeys, err := db.GetAPIKeysLastUsedAfter(ctx, dbtime.Now().Add(-1*time.Hour))
|
||||
if err != nil {
|
||||
logger.Error(ctx, "get api keys for active users prometheus metric", slog.Error(err))
|
||||
continue
|
||||
}
|
||||
distinctUsers := map[uuid.UUID]struct{}{}
|
||||
@@ -73,6 +76,57 @@ func ActiveUsers(ctx context.Context, registerer prometheus.Registerer, db datab
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Users tracks the total number of registered users, partitioned by status.
|
||||
func Users(ctx context.Context, logger slog.Logger, clk quartz.Clock, registerer prometheus.Registerer, db database.Store, duration time.Duration) (func(), error) {
|
||||
if duration == 0 {
|
||||
// It's not super important this tracks real-time.
|
||||
duration = defaultRefreshRate * 5
|
||||
}
|
||||
|
||||
gauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Namespace: "coderd",
|
||||
Subsystem: "api",
|
||||
Name: "total_user_count",
|
||||
Help: "The total number of registered users, partitioned by status.",
|
||||
}, []string{"status"})
|
||||
err := registerer.Register(gauge)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("register total_user_count gauge: %w", err)
|
||||
}
|
||||
|
||||
ctx, cancelFunc := context.WithCancel(ctx)
|
||||
done := make(chan struct{})
|
||||
ticker := clk.NewTicker(duration)
|
||||
go func() {
|
||||
defer close(done)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
}
|
||||
|
||||
gauge.Reset()
|
||||
//nolint:gocritic // This is a system service that needs full access
|
||||
//to the users table.
|
||||
users, err := db.GetUsers(dbauthz.AsSystemRestricted(ctx), database.GetUsersParams{})
|
||||
if err != nil {
|
||||
logger.Error(ctx, "get all users for prometheus metrics", slog.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
for _, user := range users {
|
||||
gauge.WithLabelValues(string(user.Status)).Inc()
|
||||
}
|
||||
}
|
||||
}()
|
||||
return func() {
|
||||
cancelFunc()
|
||||
<-done
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Workspaces tracks the total number of workspaces with labels on status.
|
||||
func Workspaces(ctx context.Context, logger slog.Logger, registerer prometheus.Registerer, db database.Store, duration time.Duration) (func(), error) {
|
||||
if duration == 0 {
|
||||
|
||||
@@ -38,6 +38,7 @@ import (
|
||||
"github.com/coder/coder/v2/tailnet"
|
||||
"github.com/coder/coder/v2/tailnet/tailnettest"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
"github.com/coder/quartz"
|
||||
)
|
||||
|
||||
func TestActiveUsers(t *testing.T) {
|
||||
@@ -98,7 +99,7 @@ func TestActiveUsers(t *testing.T) {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
registry := prometheus.NewRegistry()
|
||||
closeFunc, err := prometheusmetrics.ActiveUsers(context.Background(), registry, tc.Database(t), time.Millisecond)
|
||||
closeFunc, err := prometheusmetrics.ActiveUsers(context.Background(), slogtest.Make(t, nil), registry, tc.Database(t), time.Millisecond)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(closeFunc)
|
||||
|
||||
@@ -112,6 +113,100 @@ func TestActiveUsers(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestUsers(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, tc := range []struct {
|
||||
Name string
|
||||
Database func(t *testing.T) database.Store
|
||||
Count map[database.UserStatus]int
|
||||
}{{
|
||||
Name: "None",
|
||||
Database: func(t *testing.T) database.Store {
|
||||
return dbmem.New()
|
||||
},
|
||||
Count: map[database.UserStatus]int{},
|
||||
}, {
|
||||
Name: "One",
|
||||
Database: func(t *testing.T) database.Store {
|
||||
db := dbmem.New()
|
||||
dbgen.User(t, db, database.User{Status: database.UserStatusActive})
|
||||
return db
|
||||
},
|
||||
Count: map[database.UserStatus]int{database.UserStatusActive: 1},
|
||||
}, {
|
||||
Name: "MultipleStatuses",
|
||||
Database: func(t *testing.T) database.Store {
|
||||
db := dbmem.New()
|
||||
|
||||
dbgen.User(t, db, database.User{Status: database.UserStatusActive})
|
||||
dbgen.User(t, db, database.User{Status: database.UserStatusDormant})
|
||||
|
||||
return db
|
||||
},
|
||||
Count: map[database.UserStatus]int{database.UserStatusActive: 1, database.UserStatusDormant: 1},
|
||||
}, {
|
||||
Name: "MultipleActive",
|
||||
Database: func(t *testing.T) database.Store {
|
||||
db := dbmem.New()
|
||||
dbgen.User(t, db, database.User{Status: database.UserStatusActive})
|
||||
dbgen.User(t, db, database.User{Status: database.UserStatusActive})
|
||||
dbgen.User(t, db, database.User{Status: database.UserStatusActive})
|
||||
return db
|
||||
},
|
||||
Count: map[database.UserStatus]int{database.UserStatusActive: 3},
|
||||
}} {
|
||||
tc := tc
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
registry := prometheus.NewRegistry()
|
||||
mClock := quartz.NewMock(t)
|
||||
db := tc.Database(t)
|
||||
closeFunc, err := prometheusmetrics.Users(context.Background(), slogtest.Make(t, nil), mClock, registry, db, time.Millisecond)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(closeFunc)
|
||||
|
||||
_, w := mClock.AdvanceNext()
|
||||
w.MustWait(ctx)
|
||||
|
||||
checkFn := func() bool {
|
||||
metrics, err := registry.Gather()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// If we get no metrics and we know none should exist, bail
|
||||
// early. If we get no metrics but we expect some, retry.
|
||||
if len(metrics) == 0 {
|
||||
return len(tc.Count) == 0
|
||||
}
|
||||
|
||||
for _, metric := range metrics[0].Metric {
|
||||
if tc.Count[database.UserStatus(*metric.Label[0].Value)] != int(metric.Gauge.GetValue()) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
require.Eventually(t, checkFn, testutil.WaitShort, testutil.IntervalFast)
|
||||
|
||||
// Add another dormant user and ensure it updates
|
||||
dbgen.User(t, db, database.User{Status: database.UserStatusDormant})
|
||||
tc.Count[database.UserStatusDormant]++
|
||||
|
||||
_, w = mClock.AdvanceNext()
|
||||
w.MustWait(ctx)
|
||||
|
||||
require.Eventually(t, checkFn, testutil.WaitShort, testutil.IntervalFast)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestWorkspaceLatestBuildTotals(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
||||
@@ -523,8 +523,8 @@ func TestAcquirer_MatchTags(t *testing.T) {
|
||||
// Generate a table that can be copy-pasted into docs/admin/provisioners.md
|
||||
lines := []string{
|
||||
"\n",
|
||||
"| Provisioner Tags | Job Tags | Can Run Job? |",
|
||||
"|------------------|----------|--------------|",
|
||||
"| Provisioner Tags | Job Tags | Same Org | Can Run Job? |",
|
||||
"|------------------|----------|----------|--------------|",
|
||||
}
|
||||
// turn the JSON map into k=v for readability
|
||||
kvs := func(m map[string]string) string {
|
||||
@@ -539,10 +539,14 @@ func TestAcquirer_MatchTags(t *testing.T) {
|
||||
}
|
||||
for _, tt := range testCases {
|
||||
acquire := "✅"
|
||||
sameOrg := "✅"
|
||||
if !tt.expectAcquire {
|
||||
acquire = "❌"
|
||||
}
|
||||
s := fmt.Sprintf("| %s | %s | %s |", kvs(tt.acquireJobTags), kvs(tt.provisionerJobTags), acquire)
|
||||
if tt.unmatchedOrg {
|
||||
sameOrg = "❌"
|
||||
}
|
||||
s := fmt.Sprintf("| %s | %s | %s | %s |", kvs(tt.acquireJobTags), kvs(tt.provisionerJobTags), sameOrg, acquire)
|
||||
lines = append(lines, s)
|
||||
}
|
||||
t.Logf("You can paste this into docs/admin/provisioners.md")
|
||||
|
||||
@@ -1063,6 +1063,7 @@ func (s *server) FailJob(ctx context.Context, failJob *proto.FailedJob) (*proto.
|
||||
wriBytes, err := json.Marshal(buildResourceInfo)
|
||||
if err != nil {
|
||||
s.Logger.Error(ctx, "marshal workspace resource info for failed job", slog.Error(err))
|
||||
wriBytes = []byte("{}")
|
||||
}
|
||||
|
||||
bag := audit.BaggageFromContext(ctx)
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"nhooyr.io/websocket"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"github.com/coder/coder/v2/codersdk/wsjson"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/db2sdk"
|
||||
@@ -312,6 +313,7 @@ type logFollower struct {
|
||||
r *http.Request
|
||||
rw http.ResponseWriter
|
||||
conn *websocket.Conn
|
||||
enc *wsjson.Encoder[codersdk.ProvisionerJobLog]
|
||||
|
||||
jobID uuid.UUID
|
||||
after int64
|
||||
@@ -391,6 +393,7 @@ func (f *logFollower) follow() {
|
||||
}
|
||||
defer f.conn.Close(websocket.StatusNormalClosure, "done")
|
||||
go httpapi.Heartbeat(f.ctx, f.conn)
|
||||
f.enc = wsjson.NewEncoder[codersdk.ProvisionerJobLog](f.conn, websocket.MessageText)
|
||||
|
||||
// query for logs once right away, so we can get historical data from before
|
||||
// subscription
|
||||
@@ -488,11 +491,7 @@ func (f *logFollower) query() error {
|
||||
return xerrors.Errorf("error fetching logs: %w", err)
|
||||
}
|
||||
for _, log := range logs {
|
||||
logB, err := json.Marshal(convertProvisionerJobLog(log))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error marshaling log: %w", err)
|
||||
}
|
||||
err = f.conn.Write(f.ctx, websocket.MessageText, logB)
|
||||
err := f.enc.Encode(convertProvisionerJobLog(log))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error writing to websocket: %w", err)
|
||||
}
|
||||
|
||||
+67
-17
@@ -12,6 +12,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/go-oidc/v3/oidc"
|
||||
@@ -27,6 +28,7 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/cryptokeys"
|
||||
"github.com/coder/coder/v2/coderd/idpsync"
|
||||
"github.com/coder/coder/v2/coderd/jwtutils"
|
||||
"github.com/coder/coder/v2/coderd/util/ptr"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/apikey"
|
||||
"github.com/coder/coder/v2/coderd/audit"
|
||||
@@ -565,20 +567,13 @@ func (api *API) loginRequest(ctx context.Context, rw http.ResponseWriter, req co
|
||||
return user, rbac.Subject{}, false
|
||||
}
|
||||
|
||||
if user.Status == database.UserStatusDormant {
|
||||
//nolint:gocritic // System needs to update status of the user account (dormant -> active).
|
||||
user, err = api.Database.UpdateUserStatus(dbauthz.AsSystemRestricted(ctx), database.UpdateUserStatusParams{
|
||||
ID: user.ID,
|
||||
Status: database.UserStatusActive,
|
||||
UpdatedAt: dbtime.Now(),
|
||||
user, err = ActivateDormantUser(api.Logger, &api.Auditor, api.Database)(ctx, user)
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error(ctx, "unable to update user status to active", slog.Error(err))
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error occurred. Try again later, or contact an admin for assistance.",
|
||||
})
|
||||
return user, rbac.Subject{}, false
|
||||
}
|
||||
return user, rbac.Subject{}, false
|
||||
}
|
||||
|
||||
subject, userStatus, err := httpmw.UserRBACSubject(ctx, api.Database, user.ID, rbac.ScopeAll)
|
||||
@@ -601,6 +596,42 @@ func (api *API) loginRequest(ctx context.Context, rw http.ResponseWriter, req co
|
||||
return user, subject, true
|
||||
}
|
||||
|
||||
func ActivateDormantUser(logger slog.Logger, auditor *atomic.Pointer[audit.Auditor], db database.Store) func(ctx context.Context, user database.User) (database.User, error) {
|
||||
return func(ctx context.Context, user database.User) (database.User, error) {
|
||||
if user.ID == uuid.Nil || user.Status != database.UserStatusDormant {
|
||||
return user, nil
|
||||
}
|
||||
|
||||
//nolint:gocritic // System needs to update status of the user account (dormant -> active).
|
||||
newUser, err := db.UpdateUserStatus(dbauthz.AsSystemRestricted(ctx), database.UpdateUserStatusParams{
|
||||
ID: user.ID,
|
||||
Status: database.UserStatusActive,
|
||||
UpdatedAt: dbtime.Now(),
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error(ctx, "unable to update user status to active", slog.Error(err))
|
||||
return user, xerrors.Errorf("update user status: %w", err)
|
||||
}
|
||||
|
||||
oldAuditUser := user
|
||||
newAuditUser := user
|
||||
newAuditUser.Status = database.UserStatusActive
|
||||
|
||||
audit.BackgroundAudit(ctx, &audit.BackgroundAuditParams[database.User]{
|
||||
Audit: *auditor.Load(),
|
||||
Log: logger,
|
||||
UserID: user.ID,
|
||||
Action: database.AuditActionWrite,
|
||||
Old: oldAuditUser,
|
||||
New: newAuditUser,
|
||||
Status: http.StatusOK,
|
||||
AdditionalFields: audit.BackgroundTaskFieldsBytes(ctx, logger, audit.BackgroundSubsystemDormancy),
|
||||
})
|
||||
|
||||
return newUser, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Clear the user's session cookie.
|
||||
//
|
||||
// @Summary Log out user
|
||||
@@ -1385,10 +1416,22 @@ func (p *oauthLoginParams) CommitAuditLogs() {
|
||||
|
||||
func (api *API) oauthLogin(r *http.Request, params *oauthLoginParams) ([]*http.Cookie, database.User, database.APIKey, error) {
|
||||
var (
|
||||
ctx = r.Context()
|
||||
user database.User
|
||||
cookies []*http.Cookie
|
||||
logger = api.Logger.Named(userAuthLoggerName)
|
||||
ctx = r.Context()
|
||||
user database.User
|
||||
cookies []*http.Cookie
|
||||
logger = api.Logger.Named(userAuthLoggerName)
|
||||
auditor = *api.Auditor.Load()
|
||||
dormantConvertAudit *audit.Request[database.User]
|
||||
initDormantAuditOnce = sync.OnceFunc(func() {
|
||||
dormantConvertAudit = params.initAuditRequest(&audit.RequestParams{
|
||||
Audit: auditor,
|
||||
Log: api.Logger,
|
||||
Request: r,
|
||||
Action: database.AuditActionWrite,
|
||||
OrganizationID: uuid.Nil,
|
||||
AdditionalFields: audit.BackgroundTaskFields(audit.BackgroundSubsystemDormancy),
|
||||
})
|
||||
})
|
||||
)
|
||||
|
||||
var isConvertLoginType bool
|
||||
@@ -1490,6 +1533,7 @@ func (api *API) oauthLogin(r *http.Request, params *oauthLoginParams) ([]*http.C
|
||||
Email: params.Email,
|
||||
Username: params.Username,
|
||||
OrganizationIDs: orgIDs,
|
||||
UserStatus: ptr.Ref(codersdk.UserStatusActive),
|
||||
},
|
||||
LoginType: params.LoginType,
|
||||
accountCreatorName: "oauth",
|
||||
@@ -1501,6 +1545,11 @@ func (api *API) oauthLogin(r *http.Request, params *oauthLoginParams) ([]*http.C
|
||||
|
||||
// Activate dormant user on sign-in
|
||||
if user.Status == database.UserStatusDormant {
|
||||
// This is necessary because transactions can be retried, and we
|
||||
// only want to add the audit log a single time.
|
||||
initDormantAuditOnce()
|
||||
dormantConvertAudit.UserID = user.ID
|
||||
dormantConvertAudit.Old = user
|
||||
//nolint:gocritic // System needs to update status of the user account (dormant -> active).
|
||||
user, err = tx.UpdateUserStatus(dbauthz.AsSystemRestricted(ctx), database.UpdateUserStatusParams{
|
||||
ID: user.ID,
|
||||
@@ -1511,6 +1560,7 @@ func (api *API) oauthLogin(r *http.Request, params *oauthLoginParams) ([]*http.C
|
||||
logger.Error(ctx, "unable to update user status to active", slog.Error(err))
|
||||
return xerrors.Errorf("update user status: %w", err)
|
||||
}
|
||||
dormantConvertAudit.New = user
|
||||
}
|
||||
|
||||
debugContext, err := json.Marshal(params.DebugContext)
|
||||
|
||||
+44
-1
@@ -1285,7 +1285,7 @@ func TestUserOIDC(t *testing.T) {
|
||||
tc.AssertResponse(t, resp)
|
||||
}
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
if tc.AssertUser != nil {
|
||||
user, err := client.User(ctx, "me")
|
||||
@@ -1300,6 +1300,49 @@ func TestUserOIDC(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("OIDCDormancy", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
|
||||
auditor := audit.NewMock()
|
||||
fake := oidctest.NewFakeIDP(t,
|
||||
oidctest.WithRefresh(func(_ string) error {
|
||||
return xerrors.New("refreshing token should never occur")
|
||||
}),
|
||||
oidctest.WithServing(),
|
||||
)
|
||||
cfg := fake.OIDCConfig(t, nil, func(cfg *coderd.OIDCConfig) {
|
||||
cfg.AllowSignups = true
|
||||
})
|
||||
|
||||
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
|
||||
owner, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
Auditor: auditor,
|
||||
OIDCConfig: cfg,
|
||||
Logger: &logger,
|
||||
})
|
||||
|
||||
user := dbgen.User(t, db, database.User{
|
||||
LoginType: database.LoginTypeOIDC,
|
||||
Status: database.UserStatusDormant,
|
||||
})
|
||||
auditor.ResetLogs()
|
||||
|
||||
client, resp := fake.AttemptLogin(t, owner, jwt.MapClaims{
|
||||
"email": user.Email,
|
||||
})
|
||||
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||
|
||||
auditor.Contains(t, database.AuditLog{
|
||||
ResourceType: database.ResourceTypeUser,
|
||||
AdditionalFields: json.RawMessage(`{"automatic_actor":"coder","automatic_subsystem":"dormancy"}`),
|
||||
})
|
||||
me, err := client.User(ctx, "me")
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, codersdk.UserStatusActive, me.Status)
|
||||
})
|
||||
|
||||
t.Run("OIDCConvert", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
||||
+13
-4
@@ -28,6 +28,7 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/searchquery"
|
||||
"github.com/coder/coder/v2/coderd/telemetry"
|
||||
"github.com/coder/coder/v2/coderd/userpassword"
|
||||
"github.com/coder/coder/v2/coderd/util/ptr"
|
||||
"github.com/coder/coder/v2/coderd/util/slice"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
@@ -188,10 +189,13 @@ func (api *API) postFirstUser(rw http.ResponseWriter, r *http.Request) {
|
||||
//nolint:gocritic // needed to create first user
|
||||
user, err := api.CreateUser(dbauthz.AsSystemRestricted(ctx), api.Database, CreateUserRequest{
|
||||
CreateUserRequestWithOrgs: codersdk.CreateUserRequestWithOrgs{
|
||||
Email: createUser.Email,
|
||||
Username: createUser.Username,
|
||||
Name: createUser.Name,
|
||||
Password: createUser.Password,
|
||||
Email: createUser.Email,
|
||||
Username: createUser.Username,
|
||||
Name: createUser.Name,
|
||||
Password: createUser.Password,
|
||||
// There's no reason to create the first user as dormant, since you have
|
||||
// to login immediately anyways.
|
||||
UserStatus: ptr.Ref(codersdk.UserStatusActive),
|
||||
OrganizationIDs: []uuid.UUID{defaultOrg.ID},
|
||||
},
|
||||
LoginType: database.LoginTypePassword,
|
||||
@@ -1343,6 +1347,10 @@ func (api *API) CreateUser(ctx context.Context, store database.Store, req Create
|
||||
err := store.InTx(func(tx database.Store) error {
|
||||
orgRoles := make([]string, 0)
|
||||
|
||||
status := ""
|
||||
if req.UserStatus != nil {
|
||||
status = string(*req.UserStatus)
|
||||
}
|
||||
params := database.InsertUserParams{
|
||||
ID: uuid.New(),
|
||||
Email: req.Email,
|
||||
@@ -1354,6 +1362,7 @@ func (api *API) CreateUser(ctx context.Context, store database.Store, req Create
|
||||
// All new users are defaulted to members of the site.
|
||||
RBACRoles: []string{},
|
||||
LoginType: req.LoginType,
|
||||
Status: status,
|
||||
}
|
||||
// If a user signs up with OAuth, they can have no password!
|
||||
if req.Password != "" {
|
||||
|
||||
@@ -30,6 +30,7 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/coderd/rbac"
|
||||
"github.com/coder/coder/v2/coderd/util/ptr"
|
||||
"github.com/coder/coder/v2/coderd/util/slice"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
@@ -695,6 +696,41 @@ func TestPostUsers(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// User should default to dormant.
|
||||
require.Equal(t, codersdk.UserStatusDormant, user.Status)
|
||||
|
||||
require.Len(t, auditor.AuditLogs(), numLogs)
|
||||
require.Equal(t, database.AuditActionCreate, auditor.AuditLogs()[numLogs-1].Action)
|
||||
require.Equal(t, database.AuditActionLogin, auditor.AuditLogs()[numLogs-2].Action)
|
||||
|
||||
require.Len(t, user.OrganizationIDs, 1)
|
||||
assert.Equal(t, firstUser.OrganizationID, user.OrganizationIDs[0])
|
||||
})
|
||||
|
||||
t.Run("CreateWithStatus", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
auditor := audit.NewMock()
|
||||
client := coderdtest.New(t, &coderdtest.Options{Auditor: auditor})
|
||||
numLogs := len(auditor.AuditLogs())
|
||||
|
||||
firstUser := coderdtest.CreateFirstUser(t, client)
|
||||
numLogs++ // add an audit log for user create
|
||||
numLogs++ // add an audit log for login
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
|
||||
user, err := client.CreateUserWithOrgs(ctx, codersdk.CreateUserRequestWithOrgs{
|
||||
OrganizationIDs: []uuid.UUID{firstUser.OrganizationID},
|
||||
Email: "another@user.org",
|
||||
Username: "someone-else",
|
||||
Password: "SomeSecurePassword!",
|
||||
UserStatus: ptr.Ref(codersdk.UserStatusActive),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, codersdk.UserStatusActive, user.Status)
|
||||
|
||||
require.Len(t, auditor.AuditLogs(), numLogs)
|
||||
require.Equal(t, database.AuditActionCreate, auditor.AuditLogs()[numLogs-1].Action)
|
||||
require.Equal(t, database.AuditActionLogin, auditor.AuditLogs()[numLogs-2].Action)
|
||||
|
||||
@@ -37,6 +37,7 @@ import (
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
"github.com/coder/coder/v2/codersdk/workspacesdk"
|
||||
"github.com/coder/coder/v2/codersdk/wsjson"
|
||||
"github.com/coder/coder/v2/tailnet"
|
||||
"github.com/coder/coder/v2/tailnet/proto"
|
||||
)
|
||||
@@ -404,11 +405,9 @@ func (api *API) workspaceAgentLogs(rw http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
go httpapi.Heartbeat(ctx, conn)
|
||||
|
||||
ctx, wsNetConn := codersdk.WebsocketNetConn(ctx, conn, websocket.MessageText)
|
||||
defer wsNetConn.Close() // Also closes conn.
|
||||
encoder := wsjson.NewEncoder[[]codersdk.WorkspaceAgentLog](conn, websocket.MessageText)
|
||||
defer encoder.Close(websocket.StatusNormalClosure)
|
||||
|
||||
// The Go stdlib JSON encoder appends a newline character after message write.
|
||||
encoder := json.NewEncoder(wsNetConn)
|
||||
err = encoder.Encode(convertWorkspaceAgentLogs(logs))
|
||||
if err != nil {
|
||||
return
|
||||
@@ -741,16 +740,8 @@ func (api *API) derpMapUpdates(rw http.ResponseWriter, r *http.Request) {
|
||||
})
|
||||
return
|
||||
}
|
||||
ctx, nconn := codersdk.WebsocketNetConn(ctx, ws, websocket.MessageBinary)
|
||||
defer nconn.Close()
|
||||
|
||||
// Slurp all packets from the connection into io.Discard so pongs get sent
|
||||
// by the websocket package. We don't do any reads ourselves so this is
|
||||
// necessary.
|
||||
go func() {
|
||||
_, _ = io.Copy(io.Discard, nconn)
|
||||
_ = nconn.Close()
|
||||
}()
|
||||
encoder := wsjson.NewEncoder[*tailcfg.DERPMap](ws, websocket.MessageBinary)
|
||||
defer encoder.Close(websocket.StatusGoingAway)
|
||||
|
||||
go func(ctx context.Context) {
|
||||
// TODO(mafredri): Is this too frequent? Use separate ping disconnect timeout?
|
||||
@@ -768,7 +759,7 @@ func (api *API) derpMapUpdates(rw http.ResponseWriter, r *http.Request) {
|
||||
err := ws.Ping(ctx)
|
||||
cancel()
|
||||
if err != nil {
|
||||
_ = nconn.Close()
|
||||
_ = ws.Close(websocket.StatusGoingAway, "ping failed")
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -781,9 +772,8 @@ func (api *API) derpMapUpdates(rw http.ResponseWriter, r *http.Request) {
|
||||
for {
|
||||
derpMap := api.DERPMap()
|
||||
if lastDERPMap == nil || !tailnet.CompareDERPMaps(lastDERPMap, derpMap) {
|
||||
err := json.NewEncoder(nconn).Encode(derpMap)
|
||||
err := encoder.Encode(derpMap)
|
||||
if err != nil {
|
||||
_ = nconn.Close()
|
||||
return
|
||||
}
|
||||
lastDERPMap = derpMap
|
||||
|
||||
@@ -1313,6 +1313,39 @@ func TestWorkspaceFilterManual(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Len(t, res.Workspaces, 0)
|
||||
})
|
||||
t.Run("Owner", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
otherUser, _ := coderdtest.CreateAnotherUser(t, client, user.OrganizationID, rbac.RoleOwner())
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
|
||||
// Add a non-matching workspace
|
||||
coderdtest.CreateWorkspace(t, otherUser, template.ID)
|
||||
|
||||
workspaces := []codersdk.Workspace{
|
||||
coderdtest.CreateWorkspace(t, client, template.ID),
|
||||
coderdtest.CreateWorkspace(t, client, template.ID),
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
|
||||
sdkUser, err := client.User(ctx, codersdk.Me)
|
||||
require.NoError(t, err)
|
||||
|
||||
// match owner name
|
||||
res, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{
|
||||
FilterQuery: fmt.Sprintf("owner:%s", sdkUser.Username),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, res.Workspaces, len(workspaces))
|
||||
for _, found := range res.Workspaces {
|
||||
require.Equal(t, found.OwnerName, sdkUser.Username)
|
||||
}
|
||||
})
|
||||
t.Run("IDs", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
|
||||
+175
-3
@@ -926,6 +926,23 @@ when required by your organization's security policy.`,
|
||||
Name: "Config",
|
||||
Description: `Use a YAML configuration file when your server launch become unwieldy.`,
|
||||
}
|
||||
deploymentGroupEmail = serpent.Group{
|
||||
Name: "Email",
|
||||
Description: "Configure how emails are sent.",
|
||||
YAML: "email",
|
||||
}
|
||||
deploymentGroupEmailAuth = serpent.Group{
|
||||
Name: "Email Authentication",
|
||||
Parent: &deploymentGroupEmail,
|
||||
Description: "Configure SMTP authentication options.",
|
||||
YAML: "emailAuth",
|
||||
}
|
||||
deploymentGroupEmailTLS = serpent.Group{
|
||||
Name: "Email TLS",
|
||||
Parent: &deploymentGroupEmail,
|
||||
Description: "Configure TLS for your SMTP server target.",
|
||||
YAML: "emailTLS",
|
||||
}
|
||||
deploymentGroupNotifications = serpent.Group{
|
||||
Name: "Notifications",
|
||||
YAML: "notifications",
|
||||
@@ -997,6 +1014,135 @@ when required by your organization's security policy.`,
|
||||
Group: &deploymentGroupIntrospectionLogging,
|
||||
YAML: "filter",
|
||||
}
|
||||
emailFrom := serpent.Option{
|
||||
Name: "Email: From Address",
|
||||
Description: "The sender's address to use.",
|
||||
Flag: "email-from",
|
||||
Env: "CODER_EMAIL_FROM",
|
||||
Value: &c.Notifications.SMTP.From,
|
||||
Group: &deploymentGroupEmail,
|
||||
YAML: "from",
|
||||
}
|
||||
emailSmarthost := serpent.Option{
|
||||
Name: "Email: Smarthost",
|
||||
Description: "The intermediary SMTP host through which emails are sent.",
|
||||
Flag: "email-smarthost",
|
||||
Env: "CODER_EMAIL_SMARTHOST",
|
||||
Default: "localhost:587", // To pass validation.
|
||||
Value: &c.Notifications.SMTP.Smarthost,
|
||||
Group: &deploymentGroupEmail,
|
||||
YAML: "smarthost",
|
||||
}
|
||||
emailHello := serpent.Option{
|
||||
Name: "Email: Hello",
|
||||
Description: "The hostname identifying the SMTP server.",
|
||||
Flag: "email-hello",
|
||||
Env: "CODER_EMAIL_HELLO",
|
||||
Default: "localhost",
|
||||
Value: &c.Notifications.SMTP.Hello,
|
||||
Group: &deploymentGroupEmail,
|
||||
YAML: "hello",
|
||||
}
|
||||
emailForceTLS := serpent.Option{
|
||||
Name: "Email: Force TLS",
|
||||
Description: "Force a TLS connection to the configured SMTP smarthost.",
|
||||
Flag: "email-force-tls",
|
||||
Env: "CODER_EMAIL_FORCE_TLS",
|
||||
Default: "false",
|
||||
Value: &c.Notifications.SMTP.ForceTLS,
|
||||
Group: &deploymentGroupEmail,
|
||||
YAML: "forceTLS",
|
||||
}
|
||||
emailAuthIdentity := serpent.Option{
|
||||
Name: "Email Auth: Identity",
|
||||
Description: "Identity to use with PLAIN authentication.",
|
||||
Flag: "email-auth-identity",
|
||||
Env: "CODER_EMAIL_AUTH_IDENTITY",
|
||||
Value: &c.Notifications.SMTP.Auth.Identity,
|
||||
Group: &deploymentGroupEmailAuth,
|
||||
YAML: "identity",
|
||||
}
|
||||
emailAuthUsername := serpent.Option{
|
||||
Name: "Email Auth: Username",
|
||||
Description: "Username to use with PLAIN/LOGIN authentication.",
|
||||
Flag: "email-auth-username",
|
||||
Env: "CODER_EMAIL_AUTH_USERNAME",
|
||||
Value: &c.Notifications.SMTP.Auth.Username,
|
||||
Group: &deploymentGroupEmailAuth,
|
||||
YAML: "username",
|
||||
}
|
||||
emailAuthPassword := serpent.Option{
|
||||
Name: "Email Auth: Password",
|
||||
Description: "Password to use with PLAIN/LOGIN authentication.",
|
||||
Flag: "email-auth-password",
|
||||
Env: "CODER_EMAIL_AUTH_PASSWORD",
|
||||
Annotations: serpent.Annotations{}.Mark(annotationSecretKey, "true"),
|
||||
Value: &c.Notifications.SMTP.Auth.Password,
|
||||
Group: &deploymentGroupEmailAuth,
|
||||
}
|
||||
emailAuthPasswordFile := serpent.Option{
|
||||
Name: "Email Auth: Password File",
|
||||
Description: "File from which to load password for use with PLAIN/LOGIN authentication.",
|
||||
Flag: "email-auth-password-file",
|
||||
Env: "CODER_EMAIL_AUTH_PASSWORD_FILE",
|
||||
Value: &c.Notifications.SMTP.Auth.PasswordFile,
|
||||
Group: &deploymentGroupEmailAuth,
|
||||
YAML: "passwordFile",
|
||||
}
|
||||
emailTLSStartTLS := serpent.Option{
|
||||
Name: "Email TLS: StartTLS",
|
||||
Description: "Enable STARTTLS to upgrade insecure SMTP connections using TLS.",
|
||||
Flag: "email-tls-starttls",
|
||||
Env: "CODER_EMAIL_TLS_STARTTLS",
|
||||
Value: &c.Notifications.SMTP.TLS.StartTLS,
|
||||
Group: &deploymentGroupEmailTLS,
|
||||
YAML: "startTLS",
|
||||
}
|
||||
emailTLSServerName := serpent.Option{
|
||||
Name: "Email TLS: Server Name",
|
||||
Description: "Server name to verify against the target certificate.",
|
||||
Flag: "email-tls-server-name",
|
||||
Env: "CODER_EMAIL_TLS_SERVERNAME",
|
||||
Value: &c.Notifications.SMTP.TLS.ServerName,
|
||||
Group: &deploymentGroupEmailTLS,
|
||||
YAML: "serverName",
|
||||
}
|
||||
emailTLSSkipCertVerify := serpent.Option{
|
||||
Name: "Email TLS: Skip Certificate Verification (Insecure)",
|
||||
Description: "Skip verification of the target server's certificate (insecure).",
|
||||
Flag: "email-tls-skip-verify",
|
||||
Env: "CODER_EMAIL_TLS_SKIPVERIFY",
|
||||
Value: &c.Notifications.SMTP.TLS.InsecureSkipVerify,
|
||||
Group: &deploymentGroupEmailTLS,
|
||||
YAML: "insecureSkipVerify",
|
||||
}
|
||||
emailTLSCertAuthorityFile := serpent.Option{
|
||||
Name: "Email TLS: Certificate Authority File",
|
||||
Description: "CA certificate file to use.",
|
||||
Flag: "email-tls-ca-cert-file",
|
||||
Env: "CODER_EMAIL_TLS_CACERTFILE",
|
||||
Value: &c.Notifications.SMTP.TLS.CAFile,
|
||||
Group: &deploymentGroupEmailTLS,
|
||||
YAML: "caCertFile",
|
||||
}
|
||||
emailTLSCertFile := serpent.Option{
|
||||
Name: "Email TLS: Certificate File",
|
||||
Description: "Certificate file to use.",
|
||||
Flag: "email-tls-cert-file",
|
||||
Env: "CODER_EMAIL_TLS_CERTFILE",
|
||||
Value: &c.Notifications.SMTP.TLS.CertFile,
|
||||
Group: &deploymentGroupEmailTLS,
|
||||
YAML: "certFile",
|
||||
}
|
||||
emailTLSCertKeyFile := serpent.Option{
|
||||
Name: "Email TLS: Certificate Key File",
|
||||
Description: "Certificate key file to use.",
|
||||
Flag: "email-tls-cert-key-file",
|
||||
Env: "CODER_EMAIL_TLS_CERTKEYFILE",
|
||||
Value: &c.Notifications.SMTP.TLS.KeyFile,
|
||||
Group: &deploymentGroupEmailTLS,
|
||||
YAML: "certKeyFile",
|
||||
}
|
||||
opts := serpent.OptionSet{
|
||||
{
|
||||
Name: "Access URL",
|
||||
@@ -2432,6 +2578,21 @@ Write out the current server config as YAML to stdout.`,
|
||||
YAML: "thresholdDatabase",
|
||||
Annotations: serpent.Annotations{}.Mark(annotationFormatDuration, "true"),
|
||||
},
|
||||
// Email options
|
||||
emailFrom,
|
||||
emailSmarthost,
|
||||
emailHello,
|
||||
emailForceTLS,
|
||||
emailAuthIdentity,
|
||||
emailAuthUsername,
|
||||
emailAuthPassword,
|
||||
emailAuthPasswordFile,
|
||||
emailTLSStartTLS,
|
||||
emailTLSServerName,
|
||||
emailTLSSkipCertVerify,
|
||||
emailTLSCertAuthorityFile,
|
||||
emailTLSCertFile,
|
||||
emailTLSCertKeyFile,
|
||||
// Notifications Options
|
||||
{
|
||||
Name: "Notifications: Method",
|
||||
@@ -2462,36 +2623,37 @@ Write out the current server config as YAML to stdout.`,
|
||||
Value: &c.Notifications.SMTP.From,
|
||||
Group: &deploymentGroupNotificationsEmail,
|
||||
YAML: "from",
|
||||
UseInstead: serpent.OptionSet{emailFrom},
|
||||
},
|
||||
{
|
||||
Name: "Notifications: Email: Smarthost",
|
||||
Description: "The intermediary SMTP host through which emails are sent.",
|
||||
Flag: "notifications-email-smarthost",
|
||||
Env: "CODER_NOTIFICATIONS_EMAIL_SMARTHOST",
|
||||
Default: "localhost:587", // To pass validation.
|
||||
Value: &c.Notifications.SMTP.Smarthost,
|
||||
Group: &deploymentGroupNotificationsEmail,
|
||||
YAML: "smarthost",
|
||||
UseInstead: serpent.OptionSet{emailSmarthost},
|
||||
},
|
||||
{
|
||||
Name: "Notifications: Email: Hello",
|
||||
Description: "The hostname identifying the SMTP server.",
|
||||
Flag: "notifications-email-hello",
|
||||
Env: "CODER_NOTIFICATIONS_EMAIL_HELLO",
|
||||
Default: "localhost",
|
||||
Value: &c.Notifications.SMTP.Hello,
|
||||
Group: &deploymentGroupNotificationsEmail,
|
||||
YAML: "hello",
|
||||
UseInstead: serpent.OptionSet{emailHello},
|
||||
},
|
||||
{
|
||||
Name: "Notifications: Email: Force TLS",
|
||||
Description: "Force a TLS connection to the configured SMTP smarthost.",
|
||||
Flag: "notifications-email-force-tls",
|
||||
Env: "CODER_NOTIFICATIONS_EMAIL_FORCE_TLS",
|
||||
Default: "false",
|
||||
Value: &c.Notifications.SMTP.ForceTLS,
|
||||
Group: &deploymentGroupNotificationsEmail,
|
||||
YAML: "forceTLS",
|
||||
UseInstead: serpent.OptionSet{emailForceTLS},
|
||||
},
|
||||
{
|
||||
Name: "Notifications: Email Auth: Identity",
|
||||
@@ -2501,6 +2663,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Value: &c.Notifications.SMTP.Auth.Identity,
|
||||
Group: &deploymentGroupNotificationsEmailAuth,
|
||||
YAML: "identity",
|
||||
UseInstead: serpent.OptionSet{emailAuthIdentity},
|
||||
},
|
||||
{
|
||||
Name: "Notifications: Email Auth: Username",
|
||||
@@ -2510,6 +2673,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Value: &c.Notifications.SMTP.Auth.Username,
|
||||
Group: &deploymentGroupNotificationsEmailAuth,
|
||||
YAML: "username",
|
||||
UseInstead: serpent.OptionSet{emailAuthUsername},
|
||||
},
|
||||
{
|
||||
Name: "Notifications: Email Auth: Password",
|
||||
@@ -2519,6 +2683,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Annotations: serpent.Annotations{}.Mark(annotationSecretKey, "true"),
|
||||
Value: &c.Notifications.SMTP.Auth.Password,
|
||||
Group: &deploymentGroupNotificationsEmailAuth,
|
||||
UseInstead: serpent.OptionSet{emailAuthPassword},
|
||||
},
|
||||
{
|
||||
Name: "Notifications: Email Auth: Password File",
|
||||
@@ -2528,6 +2693,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Value: &c.Notifications.SMTP.Auth.PasswordFile,
|
||||
Group: &deploymentGroupNotificationsEmailAuth,
|
||||
YAML: "passwordFile",
|
||||
UseInstead: serpent.OptionSet{emailAuthPasswordFile},
|
||||
},
|
||||
{
|
||||
Name: "Notifications: Email TLS: StartTLS",
|
||||
@@ -2537,6 +2703,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Value: &c.Notifications.SMTP.TLS.StartTLS,
|
||||
Group: &deploymentGroupNotificationsEmailTLS,
|
||||
YAML: "startTLS",
|
||||
UseInstead: serpent.OptionSet{emailTLSStartTLS},
|
||||
},
|
||||
{
|
||||
Name: "Notifications: Email TLS: Server Name",
|
||||
@@ -2546,6 +2713,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Value: &c.Notifications.SMTP.TLS.ServerName,
|
||||
Group: &deploymentGroupNotificationsEmailTLS,
|
||||
YAML: "serverName",
|
||||
UseInstead: serpent.OptionSet{emailTLSServerName},
|
||||
},
|
||||
{
|
||||
Name: "Notifications: Email TLS: Skip Certificate Verification (Insecure)",
|
||||
@@ -2555,6 +2723,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Value: &c.Notifications.SMTP.TLS.InsecureSkipVerify,
|
||||
Group: &deploymentGroupNotificationsEmailTLS,
|
||||
YAML: "insecureSkipVerify",
|
||||
UseInstead: serpent.OptionSet{emailTLSSkipCertVerify},
|
||||
},
|
||||
{
|
||||
Name: "Notifications: Email TLS: Certificate Authority File",
|
||||
@@ -2564,6 +2733,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Value: &c.Notifications.SMTP.TLS.CAFile,
|
||||
Group: &deploymentGroupNotificationsEmailTLS,
|
||||
YAML: "caCertFile",
|
||||
UseInstead: serpent.OptionSet{emailTLSCertAuthorityFile},
|
||||
},
|
||||
{
|
||||
Name: "Notifications: Email TLS: Certificate File",
|
||||
@@ -2573,6 +2743,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Value: &c.Notifications.SMTP.TLS.CertFile,
|
||||
Group: &deploymentGroupNotificationsEmailTLS,
|
||||
YAML: "certFile",
|
||||
UseInstead: serpent.OptionSet{emailTLSCertFile},
|
||||
},
|
||||
{
|
||||
Name: "Notifications: Email TLS: Certificate Key File",
|
||||
@@ -2582,6 +2753,7 @@ Write out the current server config as YAML to stdout.`,
|
||||
Value: &c.Notifications.SMTP.TLS.KeyFile,
|
||||
Group: &deploymentGroupNotificationsEmailTLS,
|
||||
YAML: "certKeyFile",
|
||||
UseInstead: serpent.OptionSet{emailTLSCertKeyFile},
|
||||
},
|
||||
{
|
||||
Name: "Notifications: Webhook: Endpoint",
|
||||
|
||||
@@ -78,6 +78,9 @@ func TestDeploymentValues_HighlyConfigurable(t *testing.T) {
|
||||
"Provisioner Daemon Pre-shared Key (PSK)": {
|
||||
yaml: true,
|
||||
},
|
||||
"Email Auth: Password": {
|
||||
yaml: true,
|
||||
},
|
||||
"Notifications: Email Auth: Password": {
|
||||
yaml: true,
|
||||
},
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
|
||||
"github.com/coder/coder/v2/buildinfo"
|
||||
"github.com/coder/coder/v2/codersdk/drpc"
|
||||
"github.com/coder/coder/v2/codersdk/wsjson"
|
||||
"github.com/coder/coder/v2/provisionerd/proto"
|
||||
"github.com/coder/coder/v2/provisionerd/runner"
|
||||
)
|
||||
@@ -145,36 +146,8 @@ func (c *Client) provisionerJobLogsAfter(ctx context.Context, path string, after
|
||||
}
|
||||
return nil, nil, ReadBodyAsError(res)
|
||||
}
|
||||
logs := make(chan ProvisionerJobLog)
|
||||
closed := make(chan struct{})
|
||||
go func() {
|
||||
defer close(closed)
|
||||
defer close(logs)
|
||||
defer conn.Close(websocket.StatusGoingAway, "")
|
||||
var log ProvisionerJobLog
|
||||
for {
|
||||
msgType, msg, err := conn.Read(ctx)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if msgType != websocket.MessageText {
|
||||
return
|
||||
}
|
||||
err = json.Unmarshal(msg, &log)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case logs <- log:
|
||||
}
|
||||
}
|
||||
}()
|
||||
return logs, closeFunc(func() error {
|
||||
<-closed
|
||||
return nil
|
||||
}), nil
|
||||
d := wsjson.NewDecoder[ProvisionerJobLog](conn, websocket.MessageText, c.logger)
|
||||
return d.Chan(), d, nil
|
||||
}
|
||||
|
||||
// ServeProvisionerDaemonRequest are the parameters to call ServeProvisionerDaemon with
|
||||
|
||||
@@ -139,6 +139,8 @@ type CreateUserRequestWithOrgs struct {
|
||||
Password string `json:"password"`
|
||||
// UserLoginType defaults to LoginTypePassword.
|
||||
UserLoginType LoginType `json:"login_type"`
|
||||
// UserStatus defaults to UserStatusDormant.
|
||||
UserStatus *UserStatus `json:"user_status"`
|
||||
// OrganizationIDs is a list of organization IDs that the user should be a member of.
|
||||
OrganizationIDs []uuid.UUID `json:"organization_ids" validate:"" format:"uuid"`
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"nhooyr.io/websocket"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/tracing"
|
||||
"github.com/coder/coder/v2/codersdk/wsjson"
|
||||
)
|
||||
|
||||
type WorkspaceAgentStatus string
|
||||
@@ -454,30 +455,6 @@ func (c *Client) WorkspaceAgentLogsAfter(ctx context.Context, agentID uuid.UUID,
|
||||
}
|
||||
return nil, nil, ReadBodyAsError(res)
|
||||
}
|
||||
logChunks := make(chan []WorkspaceAgentLog, 1)
|
||||
closed := make(chan struct{})
|
||||
ctx, wsNetConn := WebsocketNetConn(ctx, conn, websocket.MessageText)
|
||||
decoder := json.NewDecoder(wsNetConn)
|
||||
go func() {
|
||||
defer close(closed)
|
||||
defer close(logChunks)
|
||||
defer conn.Close(websocket.StatusGoingAway, "")
|
||||
for {
|
||||
var logs []WorkspaceAgentLog
|
||||
err = decoder.Decode(&logs)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case logChunks <- logs:
|
||||
}
|
||||
}
|
||||
}()
|
||||
return logChunks, closeFunc(func() error {
|
||||
_ = wsNetConn.Close()
|
||||
<-closed
|
||||
return nil
|
||||
}), nil
|
||||
d := wsjson.NewDecoder[[]WorkspaceAgentLog](conn, websocket.MessageText, c.logger)
|
||||
return d.Chan(), d, nil
|
||||
}
|
||||
|
||||
@@ -0,0 +1,75 @@
|
||||
package wsjson
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"sync/atomic"
|
||||
|
||||
"nhooyr.io/websocket"
|
||||
|
||||
"cdr.dev/slog"
|
||||
)
|
||||
|
||||
type Decoder[T any] struct {
|
||||
conn *websocket.Conn
|
||||
typ websocket.MessageType
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
chanCalled atomic.Bool
|
||||
logger slog.Logger
|
||||
}
|
||||
|
||||
// Chan starts the decoder reading from the websocket and returns a channel for reading the
|
||||
// resulting values. The chan T is closed if the underlying websocket is closed, or we encounter an
|
||||
// error. We also close the underlying websocket if we encounter an error reading or decoding.
|
||||
func (d *Decoder[T]) Chan() <-chan T {
|
||||
if !d.chanCalled.CompareAndSwap(false, true) {
|
||||
panic("chan called more than once")
|
||||
}
|
||||
values := make(chan T, 1)
|
||||
go func() {
|
||||
defer close(values)
|
||||
defer d.conn.Close(websocket.StatusGoingAway, "")
|
||||
for {
|
||||
// we don't use d.ctx here because it only gets canceled after closing the connection
|
||||
// and a "connection closed" type error is more clear than context canceled.
|
||||
typ, b, err := d.conn.Read(context.Background())
|
||||
if err != nil {
|
||||
// might be benign like EOF, so just log at debug
|
||||
d.logger.Debug(d.ctx, "error reading from websocket", slog.Error(err))
|
||||
return
|
||||
}
|
||||
if typ != d.typ {
|
||||
d.logger.Error(d.ctx, "websocket type mismatch while decoding")
|
||||
return
|
||||
}
|
||||
var value T
|
||||
err = json.Unmarshal(b, &value)
|
||||
if err != nil {
|
||||
d.logger.Error(d.ctx, "error unmarshalling", slog.Error(err))
|
||||
return
|
||||
}
|
||||
select {
|
||||
case values <- value:
|
||||
// OK
|
||||
case <-d.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return values
|
||||
}
|
||||
|
||||
// nolint: revive // complains that Encoder has the same function name
|
||||
func (d *Decoder[T]) Close() error {
|
||||
err := d.conn.Close(websocket.StatusNormalClosure, "")
|
||||
d.cancel()
|
||||
return err
|
||||
}
|
||||
|
||||
// NewDecoder creates a JSON-over-websocket decoder for type T, which must be deserializable from
|
||||
// JSON.
|
||||
func NewDecoder[T any](conn *websocket.Conn, typ websocket.MessageType, logger slog.Logger) *Decoder[T] {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
return &Decoder[T]{conn: conn, ctx: ctx, cancel: cancel, typ: typ, logger: logger}
|
||||
}
|
||||
@@ -0,0 +1,42 @@
|
||||
package wsjson
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
"nhooyr.io/websocket"
|
||||
)
|
||||
|
||||
type Encoder[T any] struct {
|
||||
conn *websocket.Conn
|
||||
typ websocket.MessageType
|
||||
}
|
||||
|
||||
func (e *Encoder[T]) Encode(v T) error {
|
||||
w, err := e.conn.Writer(context.Background(), e.typ)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get websocket writer: %w", err)
|
||||
}
|
||||
defer w.Close()
|
||||
j := json.NewEncoder(w)
|
||||
err = j.Encode(v)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("encode json: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Encoder[T]) Close(c websocket.StatusCode) error {
|
||||
return e.conn.Close(c, "")
|
||||
}
|
||||
|
||||
// NewEncoder creates a JSON-over websocket encoder for the type T, which must be JSON-serializable.
|
||||
// You may then call Encode() to send objects over the websocket. Creating an Encoder closes the
|
||||
// websocket for reading, turning it into a unidirectional write stream of JSON-encoded objects.
|
||||
func NewEncoder[T any](conn *websocket.Conn, typ websocket.MessageType) *Encoder[T] {
|
||||
// Here we close the websocket for reading, so that the websocket library will handle pings and
|
||||
// close frames.
|
||||
_ = conn.CloseRead(context.Background())
|
||||
return &Encoder[T]{conn: conn, typ: typ}
|
||||
}
|
||||
@@ -89,34 +89,34 @@ existing one.
|
||||
|
||||
**Server Settings:**
|
||||
|
||||
| Required | CLI | Env | Type | Description | Default |
|
||||
| :------: | --------------------------------- | ------------------------------------- | ----------- | ----------------------------------------- | ------------- |
|
||||
| ✔️ | `--notifications-email-from` | `CODER_NOTIFICATIONS_EMAIL_FROM` | `string` | The sender's address to use. | |
|
||||
| ✔️ | `--notifications-email-smarthost` | `CODER_NOTIFICATIONS_EMAIL_SMARTHOST` | `host:port` | The SMTP relay to send messages through. | localhost:587 |
|
||||
| ✔️ | `--notifications-email-hello` | `CODER_NOTIFICATIONS_EMAIL_HELLO` | `string` | The hostname identifying the SMTP server. | localhost |
|
||||
| Required | CLI | Env | Type | Description | Default |
|
||||
| :------: | ------------------- | ----------------------- | ----------- | ----------------------------------------- | ------------- |
|
||||
| ✔️ | `--email-from` | `CODER_EMAIL_FROM` | `string` | The sender's address to use. | |
|
||||
| ✔️ | `--email-smarthost` | `CODER_EMAIL_SMARTHOST` | `host:port` | The SMTP relay to send messages through. | localhost:587 |
|
||||
| ✔️ | `--email-hello` | `CODER_EMAIL_HELLO` | `string` | The hostname identifying the SMTP server. | localhost |
|
||||
|
||||
**Authentication Settings:**
|
||||
|
||||
| Required | CLI | Env | Type | Description |
|
||||
| :------: | ------------------------------------------ | ---------------------------------------------- | -------- | ------------------------------------------------------------------------- |
|
||||
| - | `--notifications-email-auth-username` | `CODER_NOTIFICATIONS_EMAIL_AUTH_USERNAME` | `string` | Username to use with PLAIN/LOGIN authentication. |
|
||||
| - | `--notifications-email-auth-password` | `CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD` | `string` | Password to use with PLAIN/LOGIN authentication. |
|
||||
| - | `--notifications-email-auth-password-file` | `CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD_FILE` | `string` | File from which to load password for use with PLAIN/LOGIN authentication. |
|
||||
| - | `--notifications-email-auth-identity` | `CODER_NOTIFICATIONS_EMAIL_AUTH_IDENTITY` | `string` | Identity to use with PLAIN authentication. |
|
||||
| Required | CLI | Env | Type | Description |
|
||||
| :------: | ---------------------------- | -------------------------------- | -------- | ------------------------------------------------------------------------- |
|
||||
| - | `--email-auth-username` | `CODER_EMAIL_AUTH_USERNAME` | `string` | Username to use with PLAIN/LOGIN authentication. |
|
||||
| - | `--email-auth-password` | `CODER_EMAIL_AUTH_PASSWORD` | `string` | Password to use with PLAIN/LOGIN authentication. |
|
||||
| - | `--email-auth-password-file` | `CODER_EMAIL_AUTH_PASSWORD_FILE` | `string` | File from which to load password for use with PLAIN/LOGIN authentication. |
|
||||
| - | `--email-auth-identity` | `CODER_EMAIL_AUTH_IDENTITY` | `string` | Identity to use with PLAIN authentication. |
|
||||
|
||||
**TLS Settings:**
|
||||
|
||||
| Required | CLI | Env | Type | Description | Default |
|
||||
| :------: | ----------------------------------------- | ------------------------------------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
|
||||
| - | `--notifications-email-force-tls` | `CODER_NOTIFICATIONS_EMAIL_FORCE_TLS` | `bool` | Force a TLS connection to the configured SMTP smarthost. If port 465 is used, TLS will be forced. See https://datatracker.ietf.org/doc/html/rfc8314#section-3.3. | false |
|
||||
| - | `--notifications-email-tls-starttls` | `CODER_NOTIFICATIONS_EMAIL_TLS_STARTTLS` | `bool` | Enable STARTTLS to upgrade insecure SMTP connections using TLS. Ignored if `CODER_NOTIFICATIONS_EMAIL_FORCE_TLS` is set. | false |
|
||||
| - | `--notifications-email-tls-skip-verify` | `CODER_NOTIFICATIONS_EMAIL_TLS_SKIPVERIFY` | `bool` | Skip verification of the target server's certificate (**insecure**). | false |
|
||||
| - | `--notifications-email-tls-server-name` | `CODER_NOTIFICATIONS_EMAIL_TLS_SERVERNAME` | `string` | Server name to verify against the target certificate. | |
|
||||
| - | `--notifications-email-tls-cert-file` | `CODER_NOTIFICATIONS_EMAIL_TLS_CERTFILE` | `string` | Certificate file to use. | |
|
||||
| - | `--notifications-email-tls-cert-key-file` | `CODER_NOTIFICATIONS_EMAIL_TLS_CERTKEYFILE` | `string` | Certificate key file to use. | |
|
||||
| Required | CLI | Env | Type | Description | Default |
|
||||
| :------: | --------------------------- | ----------------------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
|
||||
| - | `--email-force-tls` | `CODER_EMAIL_FORCE_TLS` | `bool` | Force a TLS connection to the configured SMTP smarthost. If port 465 is used, TLS will be forced. See https://datatracker.ietf.org/doc/html/rfc8314#section-3.3. | false |
|
||||
| -        | `--email-tls-starttls`      | `CODER_EMAIL_TLS_STARTTLS`    | `bool`   | Enable STARTTLS to upgrade insecure SMTP connections using TLS. Ignored if `CODER_EMAIL_FORCE_TLS` is set.                                                         | false   |
|
||||
| - | `--email-tls-skip-verify` | `CODER_EMAIL_TLS_SKIPVERIFY` | `bool` | Skip verification of the target server's certificate (**insecure**). | false |
|
||||
| - | `--email-tls-server-name` | `CODER_EMAIL_TLS_SERVERNAME` | `string` | Server name to verify against the target certificate. | |
|
||||
| - | `--email-tls-cert-file` | `CODER_EMAIL_TLS_CERTFILE` | `string` | Certificate file to use. | |
|
||||
| - | `--email-tls-cert-key-file` | `CODER_EMAIL_TLS_CERTKEYFILE` | `string` | Certificate key file to use. | |
|
||||
|
||||
**NOTE:** you _MUST_ use `CODER_NOTIFICATIONS_EMAIL_FORCE_TLS` if your smarthost
|
||||
supports TLS on a port other than `465`.
|
||||
**NOTE:** you _MUST_ use `CODER_EMAIL_FORCE_TLS` if your smarthost supports TLS
|
||||
on a port other than `465`.
|
||||
|
||||
### Send emails using G-Suite
|
||||
|
||||
@@ -126,9 +126,9 @@ After setting the required fields above:
|
||||
account you wish to send from
|
||||
2. Set the following configuration options:
|
||||
```
|
||||
CODER_NOTIFICATIONS_EMAIL_SMARTHOST=smtp.gmail.com:465
|
||||
CODER_NOTIFICATIONS_EMAIL_AUTH_USERNAME=<user>@<domain>
|
||||
CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD="<app password created above>"
|
||||
CODER_EMAIL_SMARTHOST=smtp.gmail.com:465
|
||||
CODER_EMAIL_AUTH_USERNAME=<user>@<domain>
|
||||
CODER_EMAIL_AUTH_PASSWORD="<app password created above>"
|
||||
```
|
||||
|
||||
See
|
||||
@@ -142,10 +142,10 @@ After setting the required fields above:
|
||||
1. Setup an account on Microsoft 365 or outlook.com
|
||||
2. Set the following configuration options:
|
||||
```
|
||||
CODER_NOTIFICATIONS_EMAIL_SMARTHOST=smtp-mail.outlook.com:587
|
||||
CODER_NOTIFICATIONS_EMAIL_TLS_STARTTLS=true
|
||||
CODER_NOTIFICATIONS_EMAIL_AUTH_USERNAME=<user>@<domain>
|
||||
CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD="<account password>"
|
||||
CODER_EMAIL_SMARTHOST=smtp-mail.outlook.com:587
|
||||
CODER_EMAIL_TLS_STARTTLS=true
|
||||
CODER_EMAIL_AUTH_USERNAME=<user>@<domain>
|
||||
CODER_EMAIL_AUTH_PASSWORD="<account password>"
|
||||
```
|
||||
|
||||
See
|
||||
|
||||
+37
-49
@@ -178,7 +178,8 @@ A provisioner can run a given build job if one of the below is true:
|
||||
1. If a job has any explicit tags, it can only run on a provisioner with those
|
||||
explicit tags (the provisioner could have additional tags).
|
||||
|
||||
The external provisioner in the above example can run build jobs with tags:
|
||||
The external provisioner in the above example can run build jobs in the same
|
||||
organization with tags:
|
||||
|
||||
- `environment=on_prem`
|
||||
- `datacenter=chicago`
|
||||
@@ -186,7 +187,8 @@ The external provisioner in the above example can run build jobs with tags:
|
||||
|
||||
However, it will not pick up any build jobs that do not have either of the
|
||||
`environment` or `datacenter` tags set. It will also not pick up any build jobs
|
||||
from templates with the tag `scope=user` set.
|
||||
from templates with the tag `scope=user` set, or build jobs from templates in
|
||||
different organizations.
|
||||
|
||||
> [!NOTE] If you only run tagged provisioners, you will need to specify a set of
|
||||
> tags that matches at least one provisioner for _all_ template import jobs and
|
||||
@@ -198,34 +200,35 @@ from templates with the tag `scope=user` set.
|
||||
|
||||
This is illustrated in the below table:
|
||||
|
||||
| Provisioner Tags | Job Tags | Can Run Job? |
|
||||
| ----------------------------------------------------------------- | ---------------------------------------------------------------- | ------------ |
|
||||
| scope=organization owner= | scope=organization owner= | ✅ |
|
||||
| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem | ✅ |
|
||||
| scope=organization owner= environment=on-prem datacenter=chicago | scope=organization owner= environment=on-prem | ✅ |
|
||||
| scope=organization owner= environment=on-prem datacenter=chicago | scope=organization owner= environment=on-prem datacenter=chicago | ✅ |
|
||||
| scope=user owner=aaa | scope=user owner=aaa | ✅ |
|
||||
| scope=user owner=aaa environment=on-prem | scope=user owner=aaa | ✅ |
|
||||
| scope=user owner=aaa environment=on-prem | scope=user owner=aaa environment=on-prem | ✅ |
|
||||
| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem | ✅ |
|
||||
| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem datacenter=chicago | ✅ |
|
||||
| scope=organization owner= | scope=organization owner= environment=on-prem | ❌ |
|
||||
| scope=organization owner= environment=on-prem | scope=organization owner= | ❌ |
|
||||
| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem datacenter=chicago | ❌ |
|
||||
| scope=organization owner= environment=on-prem datacenter=new_york | scope=organization owner= environment=on-prem datacenter=chicago | ❌ |
|
||||
| scope=user owner=aaa | scope=organization owner= | ❌ |
|
||||
| scope=user owner=aaa | scope=user owner=bbb | ❌ |
|
||||
| scope=organization owner= | scope=user owner=aaa | ❌ |
|
||||
| scope=organization owner= | scope=user owner=aaa environment=on-prem | ❌ |
|
||||
| scope=user owner=aaa | scope=user owner=aaa environment=on-prem | ❌ |
|
||||
| scope=user owner=aaa environment=on-prem | scope=user owner=aaa environment=on-prem datacenter=chicago | ❌ |
|
||||
| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem datacenter=new_york | ❌ |
|
||||
| Provisioner Tags | Job Tags | Same Org | Can Run Job? |
|
||||
| ----------------------------------------------------------------- | ---------------------------------------------------------------- | -------- | ------------ |
|
||||
| scope=organization owner= | scope=organization owner= | ✅ | ✅ |
|
||||
| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem | ✅ | ✅ |
|
||||
| scope=organization owner= environment=on-prem datacenter=chicago | scope=organization owner= environment=on-prem | ✅ | ✅ |
|
||||
| scope=organization owner= environment=on-prem datacenter=chicago | scope=organization owner= environment=on-prem datacenter=chicago | ✅ | ✅ |
|
||||
| scope=user owner=aaa | scope=user owner=aaa | ✅ | ✅ |
|
||||
| scope=user owner=aaa environment=on-prem | scope=user owner=aaa | ✅ | ✅ |
|
||||
| scope=user owner=aaa environment=on-prem | scope=user owner=aaa environment=on-prem | ✅ | ✅ |
|
||||
| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem | ✅ | ✅ |
|
||||
| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem datacenter=chicago | ✅ | ✅ |
|
||||
| scope=organization owner= | scope=organization owner= environment=on-prem | ✅ | ❌ |
|
||||
| scope=organization owner= environment=on-prem | scope=organization owner= | ✅ | ❌ |
|
||||
| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem datacenter=chicago | ✅ | ❌ |
|
||||
| scope=organization owner= environment=on-prem datacenter=new_york | scope=organization owner= environment=on-prem datacenter=chicago | ✅ | ❌ |
|
||||
| scope=user owner=aaa | scope=organization owner= | ✅ | ❌ |
|
||||
| scope=user owner=aaa | scope=user owner=bbb | ✅ | ❌ |
|
||||
| scope=organization owner= | scope=user owner=aaa | ✅ | ❌ |
|
||||
| scope=organization owner= | scope=user owner=aaa environment=on-prem | ✅ | ❌ |
|
||||
| scope=user owner=aaa | scope=user owner=aaa environment=on-prem | ✅ | ❌ |
|
||||
| scope=user owner=aaa environment=on-prem | scope=user owner=aaa environment=on-prem datacenter=chicago | ✅ | ❌ |
|
||||
| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem datacenter=new_york | ✅ | ❌ |
|
||||
| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem | ❌ | ❌ |
|
||||
|
||||
> **Note to maintainers:** to generate this table, run the following command and
|
||||
> copy the output:
|
||||
>
|
||||
> ```
|
||||
> go test -v -count=1 ./coderd/provisionerserver/ -test.run='^TestAcquirer_MatchTags/GenTable$'
|
||||
> go test -v -count=1 ./coderd/provisionerdserver/ -test.run='^TestAcquirer_MatchTags/GenTable$'
|
||||
> ```
|
||||
|
||||
## Types of provisioners
|
||||
@@ -288,8 +291,7 @@ will use in concert with the Helm chart for deploying the Coder server.
|
||||
```sh
|
||||
coder provisioner keys create my-cool-key --org default
|
||||
# Optionally, you can specify tags for the provisioner key:
|
||||
# coder provisioner keys create my-cool-key --org default --tags location=auh kind=k8s
|
||||
```
|
||||
# coder provisioner keys create my-cool-key --org default --tag location=auh --tag kind=k8s
|
||||
|
||||
Successfully created provisioner key kubernetes-key! Save this authentication
|
||||
token, it will not be shown again.
|
||||
@@ -300,25 +302,7 @@ will use in concert with the Helm chart for deploying the Coder server.
|
||||
1. Store the key in a kubernetes secret:
|
||||
|
||||
```sh
|
||||
kubectl create secret generic coder-provisioner-psk --from-literal=key1=`<key omitted>`
|
||||
```
|
||||
|
||||
1. Modify your Coder `values.yaml` to include
|
||||
|
||||
```yaml
|
||||
provisionerDaemon:
|
||||
keySecretName: "coder-provisioner-keys"
|
||||
keySecretKey: "key1"
|
||||
```
|
||||
|
||||
1. Redeploy Coder with the new `values.yaml` to roll out the PSK. You can omit
|
||||
`--version <your version>` to also upgrade Coder to the latest version.
|
||||
|
||||
```sh
|
||||
helm upgrade coder coder-v2/coder \
|
||||
--namespace coder \
|
||||
--version <your version> \
|
||||
--values values.yaml
|
||||
kubectl create secret generic coder-provisioner-psk --from-literal=my-cool-key=`<key omitted>`
|
||||
```
|
||||
|
||||
1. Create a `provisioner-values.yaml` file for the provisioner daemons Helm
|
||||
@@ -331,13 +315,17 @@ will use in concert with the Helm chart for deploying the Coder server.
|
||||
value: "https://coder.example.com"
|
||||
replicaCount: 10
|
||||
provisionerDaemon:
|
||||
# NOTE: in older versions of the Helm chart (2.17.0 and below), it is required to set this to an empty string.
|
||||
pskSecretName: ""
|
||||
keySecretName: "coder-provisioner-keys"
|
||||
keySecretKey: "key1"
|
||||
keySecretKey: "my-cool-key"
|
||||
```
|
||||
|
||||
This example creates a deployment of 10 provisioner daemons (for 10
|
||||
concurrent builds) with the listed tags. For generic provisioners, remove the
|
||||
tags.
|
||||
concurrent builds) authenticating using the above key. The daemons will
|
||||
authenticate using the provisioner key created in the previous step and
|
||||
acquire jobs matching the tags specified when the provisioner key was
|
||||
created. The set of tags is inferred automatically from the provisioner key.
|
||||
|
||||
> Refer to the
|
||||
> [values.yaml](https://github.com/coder/coder/blob/main/helm/provisioner/values.yaml)
|
||||
|
||||
@@ -154,3 +154,17 @@ the top of the script to exit on error.
|
||||
|
||||
> **Note:** If you aren't seeing any logs, check that the `dir` directive points
|
||||
> to a valid directory in the file system.
|
||||
|
||||
## Slow workspace startup times
|
||||
|
||||
If your workspaces are taking longer to start than expected, or longer than
|
||||
desired, you can diagnose which steps have the highest impact in the workspace
|
||||
build timings UI (available in v2.17 and beyond). Admins can
|
||||
programmatically pull startup times for individual workspace builds using our
|
||||
[build timings API endpoint](../../reference/api/builds.md#get-workspace-build-timings-by-id).
|
||||
|
||||
See our
|
||||
[guide on optimizing workspace build times](../../tutorials/best-practices/speed-up-templates.md)
|
||||
to optimize your templates based on this data.
|
||||
|
||||

|
||||
|
||||
@@ -31,6 +31,49 @@ Roles determine which actions users can take within the platform.
|
||||
A user may have one or more roles. All users have an implicit Member role that
|
||||
may use personal workspaces.
|
||||
|
||||
## Custom Roles (Premium) (Beta)
|
||||
|
||||
Starting in v2.16.0, Premium Coder deployments can configure custom roles on the
|
||||
[Organization](./organizations.md) level. You can create and assign custom roles
|
||||
in the dashboard under **Organizations** -> **My Organization** -> **Roles**.
|
||||
|
||||
> Note: This requires a Premium license.
|
||||
> [Contact your account team](https://coder.com/contact) for more details.
|
||||
|
||||

|
||||
|
||||
### Example roles
|
||||
|
||||
- The `Banking Compliance Auditor` custom role cannot create workspaces, but can
|
||||
read template source code and view audit logs
|
||||
- The `Organization Lead` role can access user workspaces for troubleshooting
|
||||
purposes, but cannot edit templates
|
||||
- The `Platform Member` role cannot edit or create workspaces as they are
|
||||
created via a third-party system
|
||||
|
||||
Custom roles can also be applied to
|
||||
[headless user accounts](./headless-auth.md):
|
||||
|
||||
- A `Health Check` role can view deployment status but cannot create workspaces,
|
||||
manage templates, or view users
|
||||
- A `CI` role can update templates but cannot create workspaces or view
|
||||
users
|
||||
|
||||
### Creating custom roles
|
||||
|
||||
Clicking "Create custom role" opens a UI to select the desired permissions for a
|
||||
given persona.
|
||||
|
||||

|
||||
|
||||
From there, you can assign the custom role to any user in the organization under
|
||||
the **Users** settings in the dashboard.
|
||||
|
||||

|
||||
|
||||
Note that these permissions only apply to the scope of an
|
||||
[organization](./organizations.md), not across the deployment.
|
||||
|
||||
### Security notes
|
||||
|
||||
A malicious Template Admin could write a template that executes commands on the
|
||||
|
||||
@@ -143,7 +143,12 @@ Confirm the user activation by typing **yes** and pressing **enter**.
|
||||
|
||||
## Reset a password
|
||||
|
||||
To reset a user's via the web UI:
|
||||
As of 2.17.0, users can reset their password independently on the login screen
|
||||
by clicking "Forgot Password." This feature requires
|
||||
[email notifications](../monitoring/notifications/index.md#smtp-email) to be
|
||||
configured on the deployment.
|
||||
|
||||
To reset a user's password as an administrator via the web UI:
|
||||
|
||||
1. Go to **Users**.
|
||||
2. Find the user whose password you want to reset, click the vertical ellipsis
|
||||
|
||||
Binary file not shown.
|
After Width: | Height: | Size: 141 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 62 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 82 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 105 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 110 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 3.3 MiB |
+29
-17
@@ -1,6 +1,6 @@
|
||||
# Install Coder on Kubernetes
|
||||
|
||||
You can install Coder on Kubernetes using Helm. We run on most Kubernetes
|
||||
You can install Coder on Kubernetes (K8s) using Helm. We run on most Kubernetes
|
||||
distributions, including [OpenShift](./openshift.md).
|
||||
|
||||
## Requirements
|
||||
@@ -121,27 +121,27 @@ coder:
|
||||
We support two release channels: mainline and stable - read the
|
||||
[Releases](./releases.md) page to learn more about which best suits your team.
|
||||
|
||||
For the **mainline** Coder release:
|
||||
- **Mainline** Coder release:
|
||||
|
||||
<!-- autoversion(mainline): "--version [version]" -->
|
||||
<!-- autoversion(mainline): "--version [version]" -->
|
||||
|
||||
```shell
|
||||
helm install coder coder-v2/coder \
|
||||
--namespace coder \
|
||||
--values values.yaml \
|
||||
--version 2.15.0
|
||||
```
|
||||
```shell
|
||||
helm install coder coder-v2/coder \
|
||||
--namespace coder \
|
||||
--values values.yaml \
|
||||
--version 2.15.0
|
||||
```
|
||||
|
||||
For the **stable** Coder release:
|
||||
- **Stable** Coder release:
|
||||
|
||||
<!-- autoversion(stable): "--version [version]" -->
|
||||
<!-- autoversion(stable): "--version [version]" -->
|
||||
|
||||
```shell
|
||||
helm install coder coder-v2/coder \
|
||||
--namespace coder \
|
||||
--values values.yaml \
|
||||
--version 2.15.1
|
||||
```
|
||||
```shell
|
||||
helm install coder coder-v2/coder \
|
||||
--namespace coder \
|
||||
--values values.yaml \
|
||||
--version 2.15.1
|
||||
```
|
||||
|
||||
You can watch Coder start up by running `kubectl get pods -n coder`. Once Coder
|
||||
has started, the `coder-*` pods should enter the `Running` state.
|
||||
@@ -167,6 +167,18 @@ helm upgrade coder coder-v2/coder \
|
||||
-f values.yaml
|
||||
```
|
||||
|
||||
## Coder Observability Chart
|
||||
|
||||
Use the [Observability Helm chart](https://github.com/coder/observability) for a
|
||||
pre-built set of dashboards to monitor your control plane over time. It includes
|
||||
Grafana, Prometheus, Loki, and Alert Manager out-of-the-box, and can be deployed
|
||||
on your existing Grafana instance.
|
||||
|
||||
We recommend that all administrators deploying on Kubernetes set the
|
||||
observability bundle up with the control plane from the start. For installation
|
||||
instructions, visit the
|
||||
[observability repository](https://github.com/coder/observability?tab=readme-ov-file#installation).
|
||||
|
||||
## Kubernetes Security Reference
|
||||
|
||||
Below are common requirements we see from our enterprise customers when
|
||||
|
||||
@@ -723,6 +723,18 @@
|
||||
"title": "FAQs",
|
||||
"description": "Miscellaneous FAQs from our community",
|
||||
"path": "./tutorials/faqs.md"
|
||||
},
|
||||
{
|
||||
"title": "Best practices",
|
||||
"description": "Guides to help you make the most of your Coder experience",
|
||||
"path": "./tutorials/best-practices/index.md",
|
||||
"children": [
|
||||
{
|
||||
"title": "Speed up your workspaces",
|
||||
"description": "Speed up your Coder templates and workspaces",
|
||||
"path": "./tutorials/best-practices/speed-up-templates.md"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
Generated
+10
-8
@@ -1342,20 +1342,22 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in
|
||||
"name": "string",
|
||||
"organization_ids": ["497f6eca-6276-4993-bfeb-53cbbbba6f08"],
|
||||
"password": "string",
|
||||
"user_status": "active",
|
||||
"username": "string"
|
||||
}
|
||||
```
|
||||
|
||||
### Properties
|
||||
|
||||
| Name | Type | Required | Restrictions | Description |
|
||||
| ------------------ | ---------------------------------------- | -------- | ------------ | ----------------------------------------------------------------------------------- |
|
||||
| `email` | string | true | | |
|
||||
| `login_type` | [codersdk.LoginType](#codersdklogintype) | false | | Login type defaults to LoginTypePassword. |
|
||||
| `name` | string | false | | |
|
||||
| `organization_ids` | array of string | false | | Organization ids is a list of organization IDs that the user should be a member of. |
|
||||
| `password` | string | false | | |
|
||||
| `username` | string | true | | |
|
||||
| Name | Type | Required | Restrictions | Description |
|
||||
| ------------------ | ------------------------------------------ | -------- | ------------ | ----------------------------------------------------------------------------------- |
|
||||
| `email` | string | true | | |
|
||||
| `login_type` | [codersdk.LoginType](#codersdklogintype) | false | | Login type defaults to LoginTypePassword. |
|
||||
| `name` | string | false | | |
|
||||
| `organization_ids` | array of string | false | | Organization ids is a list of organization IDs that the user should be a member of. |
|
||||
| `password` | string | false | | |
|
||||
| `user_status` | [codersdk.UserStatus](#codersdkuserstatus) | false | | User status defaults to UserStatusDormant. |
|
||||
| `username` | string | true | | |
|
||||
|
||||
## codersdk.CreateWorkspaceBuildRequest
|
||||
|
||||
|
||||
Generated
+1
@@ -86,6 +86,7 @@ curl -X POST http://coder-server:8080/api/v2/users \
|
||||
"name": "string",
|
||||
"organization_ids": ["497f6eca-6276-4993-bfeb-53cbbbba6f08"],
|
||||
"password": "string",
|
||||
"user_status": "active",
|
||||
"username": "string"
|
||||
}
|
||||
```
|
||||
|
||||
Generated
+142
-3
@@ -1249,6 +1249,148 @@ Refresh interval for healthchecks.
|
||||
|
||||
The threshold for the database health check. If the median latency of the database exceeds this threshold over 5 attempts, the database is considered unhealthy. The default value is 15ms.
|
||||
|
||||
### --email-from
|
||||
|
||||
| | |
|
||||
| ----------- | ------------------------------ |
|
||||
| Type | <code>string</code> |
|
||||
| Environment | <code>$CODER_EMAIL_FROM</code> |
|
||||
| YAML | <code>email.from</code> |
|
||||
|
||||
The sender's address to use.
|
||||
|
||||
### --email-smarthost
|
||||
|
||||
| | |
|
||||
| ----------- | ----------------------------------- |
|
||||
| Type | <code>host:port</code> |
|
||||
| Environment | <code>$CODER_EMAIL_SMARTHOST</code> |
|
||||
| YAML | <code>email.smarthost</code> |
|
||||
| Default | <code>localhost:587</code> |
|
||||
|
||||
The intermediary SMTP host through which emails are sent.
|
||||
|
||||
### --email-hello
|
||||
|
||||
| | |
|
||||
| ----------- | ------------------------------- |
|
||||
| Type | <code>string</code> |
|
||||
| Environment | <code>$CODER_EMAIL_HELLO</code> |
|
||||
| YAML | <code>email.hello</code> |
|
||||
| Default | <code>localhost</code> |
|
||||
|
||||
The hostname identifying the SMTP server.
|
||||
|
||||
### --email-force-tls
|
||||
|
||||
| | |
|
||||
| ----------- | ----------------------------------- |
|
||||
| Type | <code>bool</code> |
|
||||
| Environment | <code>$CODER_EMAIL_FORCE_TLS</code> |
|
||||
| YAML | <code>email.forceTLS</code> |
|
||||
| Default | <code>false</code> |
|
||||
|
||||
Force a TLS connection to the configured SMTP smarthost.
|
||||
|
||||
### --email-auth-identity
|
||||
|
||||
| | |
|
||||
| ----------- | --------------------------------------- |
|
||||
| Type | <code>string</code> |
|
||||
| Environment | <code>$CODER_EMAIL_AUTH_IDENTITY</code> |
|
||||
| YAML | <code>email.emailAuth.identity</code> |
|
||||
|
||||
Identity to use with PLAIN authentication.
|
||||
|
||||
### --email-auth-username
|
||||
|
||||
| | |
|
||||
| ----------- | --------------------------------------- |
|
||||
| Type | <code>string</code> |
|
||||
| Environment | <code>$CODER_EMAIL_AUTH_USERNAME</code> |
|
||||
| YAML | <code>email.emailAuth.username</code> |
|
||||
|
||||
Username to use with PLAIN/LOGIN authentication.
|
||||
|
||||
### --email-auth-password
|
||||
|
||||
| | |
|
||||
| ----------- | --------------------------------------- |
|
||||
| Type | <code>string</code> |
|
||||
| Environment | <code>$CODER_EMAIL_AUTH_PASSWORD</code> |
|
||||
|
||||
Password to use with PLAIN/LOGIN authentication.
|
||||
|
||||
### --email-auth-password-file
|
||||
|
||||
| | |
|
||||
| ----------- | -------------------------------------------- |
|
||||
| Type | <code>string</code> |
|
||||
| Environment | <code>$CODER_EMAIL_AUTH_PASSWORD_FILE</code> |
|
||||
| YAML | <code>email.emailAuth.passwordFile</code> |
|
||||
|
||||
File from which to load password for use with PLAIN/LOGIN authentication.
|
||||
|
||||
### --email-tls-starttls
|
||||
|
||||
| | |
|
||||
| ----------- | -------------------------------------- |
|
||||
| Type | <code>bool</code> |
|
||||
| Environment | <code>$CODER_EMAIL_TLS_STARTTLS</code> |
|
||||
| YAML | <code>email.emailTLS.startTLS</code> |
|
||||
|
||||
Enable STARTTLS to upgrade insecure SMTP connections using TLS.
|
||||
|
||||
### --email-tls-server-name
|
||||
|
||||
| | |
|
||||
| ----------- | ---------------------------------------- |
|
||||
| Type | <code>string</code> |
|
||||
| Environment | <code>$CODER_EMAIL_TLS_SERVERNAME</code> |
|
||||
| YAML | <code>email.emailTLS.serverName</code> |
|
||||
|
||||
Server name to verify against the target certificate.
|
||||
|
||||
### --email-tls-skip-verify
|
||||
|
||||
| | |
|
||||
| ----------- | ---------------------------------------------- |
|
||||
| Type | <code>bool</code> |
|
||||
| Environment | <code>$CODER_EMAIL_TLS_SKIPVERIFY</code> |
|
||||
| YAML | <code>email.emailTLS.insecureSkipVerify</code> |
|
||||
|
||||
Skip verification of the target server's certificate (insecure).
|
||||
|
||||
### --email-tls-ca-cert-file
|
||||
|
||||
| | |
|
||||
| ----------- | ---------------------------------------- |
|
||||
| Type | <code>string</code> |
|
||||
| Environment | <code>$CODER_EMAIL_TLS_CACERTFILE</code> |
|
||||
| YAML | <code>email.emailTLS.caCertFile</code> |
|
||||
|
||||
CA certificate file to use.
|
||||
|
||||
### --email-tls-cert-file
|
||||
|
||||
| | |
|
||||
| ----------- | -------------------------------------- |
|
||||
| Type | <code>string</code> |
|
||||
| Environment | <code>$CODER_EMAIL_TLS_CERTFILE</code> |
|
||||
| YAML | <code>email.emailTLS.certFile</code> |
|
||||
|
||||
Certificate file to use.
|
||||
|
||||
### --email-tls-cert-key-file
|
||||
|
||||
| | |
|
||||
| ----------- | ----------------------------------------- |
|
||||
| Type | <code>string</code> |
|
||||
| Environment | <code>$CODER_EMAIL_TLS_CERTKEYFILE</code> |
|
||||
| YAML | <code>email.emailTLS.certKeyFile</code> |
|
||||
|
||||
Certificate key file to use.
|
||||
|
||||
### --notifications-method
|
||||
|
||||
| | |
|
||||
@@ -1288,7 +1430,6 @@ The sender's address to use.
|
||||
| Type | <code>host:port</code> |
|
||||
| Environment | <code>$CODER_NOTIFICATIONS_EMAIL_SMARTHOST</code> |
|
||||
| YAML | <code>notifications.email.smarthost</code> |
|
||||
| Default | <code>localhost:587</code> |
|
||||
|
||||
The intermediary SMTP host through which emails are sent.
|
||||
|
||||
@@ -1299,7 +1440,6 @@ The intermediary SMTP host through which emails are sent.
|
||||
| Type | <code>string</code> |
|
||||
| Environment | <code>$CODER_NOTIFICATIONS_EMAIL_HELLO</code> |
|
||||
| YAML | <code>notifications.email.hello</code> |
|
||||
| Default | <code>localhost</code> |
|
||||
|
||||
The hostname identifying the SMTP server.
|
||||
|
||||
@@ -1310,7 +1450,6 @@ The hostname identifying the SMTP server.
|
||||
| Type | <code>bool</code> |
|
||||
| Environment | <code>$CODER_NOTIFICATIONS_EMAIL_FORCE_TLS</code> |
|
||||
| YAML | <code>notifications.email.forceTLS</code> |
|
||||
| Default | <code>false</code> |
|
||||
|
||||
Force a TLS connection to the configured SMTP smarthost.
|
||||
|
||||
|
||||
@@ -0,0 +1,5 @@
|
||||
# Best practices
|
||||
|
||||
Guides to help you make the most of your Coder experience.
|
||||
|
||||
<children></children>
|
||||
@@ -0,0 +1,143 @@
|
||||
# Speed up your Coder templates and workspaces
|
||||
|
||||
October 31, 2024
|
||||
|
||||
---
|
||||
|
||||
If it takes your workspace a long time to start, find out why and make some
|
||||
changes to your Coder templates to help speed things up.
|
||||
|
||||
## Monitoring
|
||||
|
||||
You can monitor [Coder logs](../../admin/monitoring/logs.md) through the
|
||||
system-native tools on your deployment platform, or stream logs to tools like
|
||||
Splunk, Datadog, Grafana Loki, and others.
|
||||
|
||||
### Workspace build timeline
|
||||
|
||||
Use the **Build timeline** to monitor the time it takes to start specific
|
||||
workspaces. Identify long scripts, resources, and other things you can
|
||||
potentially optimize within the template.
|
||||
|
||||

|
||||
|
||||
Adjust this request to match your Coder access URL and workspace:
|
||||
|
||||
```shell
|
||||
curl -X GET https://coder.example.com/api/v2/workspacebuilds/{workspacebuild}/timings \
|
||||
-H 'Accept: application/json' \
|
||||
-H 'Coder-Session-Token: API_KEY'
|
||||
```
|
||||
|
||||
Visit the
|
||||
[API documentation](../../reference/api/builds.md#get-workspace-build-timings-by-id)
|
||||
for more information.
|
||||
|
||||
### Coder Observability Chart
|
||||
|
||||
Use the [Observability Helm chart](https://github.com/coder/observability) for a
|
||||
pre-built set of dashboards to monitor your control plane over time. It includes
|
||||
Grafana, Prometheus, Loki, and Alert Manager out-of-the-box, and can be deployed
|
||||
on your existing Grafana instance.
|
||||
|
||||
We recommend that all administrators deploying on Kubernetes or on an existing
|
||||
Prometheus or Grafana stack set the observability bundle up with the control
|
||||
plane from the start. For installation instructions, visit the
|
||||
[observability repository](https://github.com/coder/observability?tab=readme-ov-file#installation),
|
||||
or our [Kubernetes installation guide](../../install/kubernetes.md).
|
||||
|
||||
### Enable Prometheus metrics for Coder
|
||||
|
||||
[Prometheus.io](https://prometheus.io/docs/introduction/overview/#what-is-prometheus)
|
||||
is included as part of the [observability chart](#coder-observability-chart). It
|
||||
offers a variety of
|
||||
[available metrics](../../admin/integrations/prometheus.md#available-metrics),
|
||||
such as `coderd_provisionerd_job_timings_seconds` and
|
||||
`coderd_agentstats_startup_script_seconds`, which measure how long the workspace
|
||||
takes to provision and how long the startup script takes.
|
||||
|
||||
You can
|
||||
[install it separately](https://prometheus.io/docs/prometheus/latest/getting_started/)
|
||||
if you prefer.
|
||||
|
||||
## Provisioners
|
||||
|
||||
`coder server` defaults to three provisioner daemons. Each provisioner daemon
|
||||
can handle one single job, such as start, stop, or delete at a time and can be
|
||||
resource intensive. When all provisioners are busy, workspaces enter a "pending"
|
||||
state until a provisioner becomes available.
|
||||
|
||||
### Increase provisioner daemons
|
||||
|
||||
Provisioners are queue-based to reduce unpredictable load to the Coder server.
|
||||
However, they can be scaled up to allow more concurrent provisioners. You risk
|
||||
overloading the central Coder server if you use too many built-in provisioners,
|
||||
so we recommend a maximum of five provisioners. For more than five provisioners,
|
||||
we recommend that you move to
|
||||
[external provisioners](../../admin/provisioners.md).
|
||||
|
||||
If you can’t move to external provisioners, use the `provisioner-daemons` flag
|
||||
to increase the number of provisioner daemons to five:
|
||||
|
||||
```shell
|
||||
coder server --provisioner-daemons=5
|
||||
```
|
||||
|
||||
Visit the
|
||||
[CLI documentation](../../reference/cli/server.md#--provisioner-daemons) for
|
||||
more information about increasing provisioner daemons, configuring external
|
||||
provisioners, and other options.
|
||||
|
||||
### Adjust provisioner CPU/memory
|
||||
|
||||
We recommend that you deploy Coder to its own respective Kubernetes cluster,
|
||||
separate from production applications. Keep in mind that Coder runs development
|
||||
workloads, so the cluster should be deployed as such, without production-level
|
||||
configurations.
|
||||
|
||||
Adjust the CPU and memory values as shown in
|
||||
[Helm provisioner values.yaml](https://github.com/coder/coder/blob/main/helm/provisioner/values.yaml#L134-L141):
|
||||
|
||||
```yaml
|
||||
…
|
||||
resources:
|
||||
limits:
|
||||
cpu: "0.25"
|
||||
memory: "1Gi"
|
||||
requests:
|
||||
cpu: "0.25"
|
||||
memory: "1Gi"
|
||||
…
|
||||
```
|
||||
|
||||
Visit the
|
||||
[validated architecture documentation](../../admin/infrastructure/validated-architectures/index.md#workspace-nodes)
|
||||
for more information.
|
||||
|
||||
## Set up Terraform provider caching
|
||||
|
||||
By default, Coder downloads each Terraform provider when a workspace starts.
|
||||
This can create unnecessary network and disk I/O.
|
||||
|
||||
`terraform init` generates a `.terraform.lock.hcl` which instructs Coder
|
||||
provisioners to cache specific versions of your providers.
|
||||
|
||||
To use `terraform init` to cache providers:
|
||||
|
||||
1. Pull the templates to your local device:
|
||||
|
||||
```shell
|
||||
coder templates pull
|
||||
```
|
||||
|
||||
1. Run `terraform init` to initialize the directory:
|
||||
|
||||
```shell
|
||||
terraform init
|
||||
```
|
||||
|
||||
1. Push the templates back to your Coder deployment:
|
||||
|
||||
```shell
|
||||
coder templates push
|
||||
```
|
||||
@@ -58,3 +58,12 @@ requires just a few lines of Terraform in your template, see the documentation
|
||||
on our registry for setup.
|
||||
|
||||

|
||||
|
||||
## Amazon DCV Windows
|
||||
|
||||
Our [Amazon DCV Windows](https://registry.coder.com/modules/amazon-dcv-windows)
|
||||
module adds a one-click button to open an Amazon DCV session in the browser.
|
||||
This requires just a few lines of Terraform in your template, see the
|
||||
documentation on our registry for setup.
|
||||
|
||||

|
||||
|
||||
@@ -109,6 +109,19 @@ your template's Terraform file and the target resources on your infrastructure.
|
||||
Unhealthy workspaces are usually caused by a misconfiguration in the agent or
|
||||
workspace startup scripts.
|
||||
|
||||
## Workspace build times
|
||||
|
||||
After a successful build, you can see a timing breakdown of the workspace
|
||||
startup process from the dashboard (starting in v2.17). We capture and display
|
||||
both time taken to provision the workspace's compute and agent startup steps.
|
||||
These include any
|
||||
[`coder_script`](https://registry.terraform.io/providers/coder/coder/latest/docs/resources/script)s
|
||||
such as [dotfiles](./workspace-dotfiles.md) or
|
||||
[`coder_app`](https://registry.terraform.io/providers/coder/coder/latest/docs/resources/app)
|
||||
startups.
|
||||
|
||||

|
||||
|
||||
### Next steps
|
||||
|
||||
- [Connecting to your workspace](./index.md)
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"github.com/coder/coder/v2/enterprise/dbcrypt"
|
||||
"github.com/coder/coder/v2/enterprise/trialer"
|
||||
"github.com/coder/coder/v2/tailnet"
|
||||
"github.com/coder/quartz"
|
||||
"github.com/coder/serpent"
|
||||
|
||||
agplcoderd "github.com/coder/coder/v2/coderd"
|
||||
@@ -95,7 +96,7 @@ func (r *RootCmd) Server(_ func()) *serpent.Command {
|
||||
DefaultQuietHoursSchedule: options.DeploymentValues.UserQuietHoursSchedule.DefaultSchedule.Value(),
|
||||
ProvisionerDaemonPSK: options.DeploymentValues.Provisioner.DaemonPSK.Value(),
|
||||
|
||||
CheckInactiveUsersCancelFunc: dormancy.CheckInactiveUsers(ctx, options.Logger, options.Database),
|
||||
CheckInactiveUsersCancelFunc: dormancy.CheckInactiveUsers(ctx, options.Logger, quartz.NewReal(), options.Database, options.Auditor),
|
||||
}
|
||||
|
||||
if encKeys := options.DeploymentValues.ExternalTokenEncryptionKeys.Value(); len(encKeys) != 0 {
|
||||
|
||||
+69
-3
@@ -107,6 +107,58 @@ Use a YAML configuration file when your server launch become unwieldy.
|
||||
|
||||
Write out the current server config as YAML to stdout.
|
||||
|
||||
EMAIL OPTIONS:
|
||||
Configure how emails are sent.
|
||||
|
||||
--email-force-tls bool, $CODER_EMAIL_FORCE_TLS (default: false)
|
||||
Force a TLS connection to the configured SMTP smarthost.
|
||||
|
||||
--email-from string, $CODER_EMAIL_FROM
|
||||
The sender's address to use.
|
||||
|
||||
--email-hello string, $CODER_EMAIL_HELLO (default: localhost)
|
||||
The hostname identifying the SMTP server.
|
||||
|
||||
--email-smarthost host:port, $CODER_EMAIL_SMARTHOST (default: localhost:587)
|
||||
The intermediary SMTP host through which emails are sent.
|
||||
|
||||
EMAIL / EMAIL AUTHENTICATION OPTIONS:
|
||||
Configure SMTP authentication options.
|
||||
|
||||
--email-auth-identity string, $CODER_EMAIL_AUTH_IDENTITY
|
||||
Identity to use with PLAIN authentication.
|
||||
|
||||
--email-auth-password string, $CODER_EMAIL_AUTH_PASSWORD
|
||||
Password to use with PLAIN/LOGIN authentication.
|
||||
|
||||
--email-auth-password-file string, $CODER_EMAIL_AUTH_PASSWORD_FILE
|
||||
File from which to load password for use with PLAIN/LOGIN
|
||||
authentication.
|
||||
|
||||
--email-auth-username string, $CODER_EMAIL_AUTH_USERNAME
|
||||
Username to use with PLAIN/LOGIN authentication.
|
||||
|
||||
EMAIL / EMAIL TLS OPTIONS:
|
||||
Configure TLS for your SMTP server target.
|
||||
|
||||
--email-tls-ca-cert-file string, $CODER_EMAIL_TLS_CACERTFILE
|
||||
CA certificate file to use.
|
||||
|
||||
--email-tls-cert-file string, $CODER_EMAIL_TLS_CERTFILE
|
||||
Certificate file to use.
|
||||
|
||||
--email-tls-cert-key-file string, $CODER_EMAIL_TLS_CERTKEYFILE
|
||||
Certificate key file to use.
|
||||
|
||||
--email-tls-server-name string, $CODER_EMAIL_TLS_SERVERNAME
|
||||
Server name to verify against the target certificate.
|
||||
|
||||
--email-tls-skip-verify bool, $CODER_EMAIL_TLS_SKIPVERIFY
|
||||
Skip verification of the target server's certificate (insecure).
|
||||
|
||||
--email-tls-starttls bool, $CODER_EMAIL_TLS_STARTTLS
|
||||
Enable STARTTLS to upgrade insecure SMTP connections using TLS.
|
||||
|
||||
INTROSPECTION / HEALTH CHECK OPTIONS:
|
||||
--health-check-refresh duration, $CODER_HEALTH_CHECK_REFRESH (default: 10m0s)
|
||||
Refresh interval for healthchecks.
|
||||
@@ -350,54 +402,68 @@ Configure how notifications are processed and delivered.
|
||||
NOTIFICATIONS / EMAIL OPTIONS:
|
||||
Configure how email notifications are sent.
|
||||
|
||||
--notifications-email-force-tls bool, $CODER_NOTIFICATIONS_EMAIL_FORCE_TLS (default: false)
|
||||
--notifications-email-force-tls bool, $CODER_NOTIFICATIONS_EMAIL_FORCE_TLS
|
||||
Force a TLS connection to the configured SMTP smarthost.
|
||||
DEPRECATED: Use --email-force-tls instead.
|
||||
|
||||
--notifications-email-from string, $CODER_NOTIFICATIONS_EMAIL_FROM
|
||||
The sender's address to use.
|
||||
DEPRECATED: Use --email-from instead.
|
||||
|
||||
--notifications-email-hello string, $CODER_NOTIFICATIONS_EMAIL_HELLO (default: localhost)
|
||||
--notifications-email-hello string, $CODER_NOTIFICATIONS_EMAIL_HELLO
|
||||
The hostname identifying the SMTP server.
|
||||
DEPRECATED: Use --email-hello instead.
|
||||
|
||||
--notifications-email-smarthost host:port, $CODER_NOTIFICATIONS_EMAIL_SMARTHOST (default: localhost:587)
|
||||
--notifications-email-smarthost host:port, $CODER_NOTIFICATIONS_EMAIL_SMARTHOST
|
||||
The intermediary SMTP host through which emails are sent.
|
||||
DEPRECATED: Use --email-smarthost instead.
|
||||
|
||||
NOTIFICATIONS / EMAIL / EMAIL AUTHENTICATION OPTIONS:
|
||||
Configure SMTP authentication options.
|
||||
|
||||
--notifications-email-auth-identity string, $CODER_NOTIFICATIONS_EMAIL_AUTH_IDENTITY
|
||||
Identity to use with PLAIN authentication.
|
||||
DEPRECATED: Use --email-auth-identity instead.
|
||||
|
||||
--notifications-email-auth-password string, $CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD
|
||||
Password to use with PLAIN/LOGIN authentication.
|
||||
DEPRECATED: Use --email-auth-password instead.
|
||||
|
||||
--notifications-email-auth-password-file string, $CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD_FILE
|
||||
File from which to load password for use with PLAIN/LOGIN
|
||||
authentication.
|
||||
DEPRECATED: Use --email-auth-password-file instead.
|
||||
|
||||
--notifications-email-auth-username string, $CODER_NOTIFICATIONS_EMAIL_AUTH_USERNAME
|
||||
Username to use with PLAIN/LOGIN authentication.
|
||||
DEPRECATED: Use --email-auth-username instead.
|
||||
|
||||
NOTIFICATIONS / EMAIL / EMAIL TLS OPTIONS:
|
||||
Configure TLS for your SMTP server target.
|
||||
|
||||
--notifications-email-tls-ca-cert-file string, $CODER_NOTIFICATIONS_EMAIL_TLS_CACERTFILE
|
||||
CA certificate file to use.
|
||||
DEPRECATED: Use --email-tls-ca-cert-file instead.
|
||||
|
||||
--notifications-email-tls-cert-file string, $CODER_NOTIFICATIONS_EMAIL_TLS_CERTFILE
|
||||
Certificate file to use.
|
||||
DEPRECATED: Use --email-tls-cert-file instead.
|
||||
|
||||
--notifications-email-tls-cert-key-file string, $CODER_NOTIFICATIONS_EMAIL_TLS_CERTKEYFILE
|
||||
Certificate key file to use.
|
||||
DEPRECATED: Use --email-tls-cert-key-file instead.
|
||||
|
||||
--notifications-email-tls-server-name string, $CODER_NOTIFICATIONS_EMAIL_TLS_SERVERNAME
|
||||
Server name to verify against the target certificate.
|
||||
DEPRECATED: Use --email-tls-server-name instead.
|
||||
|
||||
--notifications-email-tls-skip-verify bool, $CODER_NOTIFICATIONS_EMAIL_TLS_SKIPVERIFY
|
||||
Skip verification of the target server's certificate (insecure).
|
||||
DEPRECATED: Use --email-tls-skip-verify instead.
|
||||
|
||||
--notifications-email-tls-starttls bool, $CODER_NOTIFICATIONS_EMAIL_TLS_STARTTLS
|
||||
Enable STARTTLS to upgrade insecure SMTP connections using TLS.
|
||||
DEPRECATED: Use --email-tls-starttls instead.
|
||||
|
||||
NOTIFICATIONS / WEBHOOK OPTIONS:
|
||||
--notifications-webhook-endpoint url, $CODER_NOTIFICATIONS_WEBHOOK_ENDPOINT
|
||||
|
||||
@@ -172,6 +172,7 @@ func New(ctx context.Context, options *Options) (_ *API, err error) {
|
||||
}
|
||||
apiKeyMiddleware := httpmw.ExtractAPIKeyMW(httpmw.ExtractAPIKeyConfig{
|
||||
DB: options.Database,
|
||||
ActivateDormantUser: coderd.ActivateDormantUser(options.Logger, &api.AGPL.Auditor, options.Database),
|
||||
OAuth2Configs: oauthConfigs,
|
||||
RedirectToLogin: false,
|
||||
DisableSessionExpiryRefresh: options.DeploymentValues.Sessions.DisableExpiryRefresh.Value(),
|
||||
|
||||
@@ -3,14 +3,17 @@ package dormancy
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/audit"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/quartz"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -22,50 +25,49 @@ const (
|
||||
|
||||
// CheckInactiveUsers function updates status of inactive users from active to dormant
|
||||
// using default parameters.
|
||||
func CheckInactiveUsers(ctx context.Context, logger slog.Logger, db database.Store) func() {
|
||||
return CheckInactiveUsersWithOptions(ctx, logger, db, jobInterval, accountDormancyPeriod)
|
||||
func CheckInactiveUsers(ctx context.Context, logger slog.Logger, clk quartz.Clock, db database.Store, auditor audit.Auditor) func() {
|
||||
return CheckInactiveUsersWithOptions(ctx, logger, clk, db, auditor, jobInterval, accountDormancyPeriod)
|
||||
}
|
||||
|
||||
// CheckInactiveUsersWithOptions function updates status of inactive users from active to dormant
|
||||
// using provided parameters.
|
||||
func CheckInactiveUsersWithOptions(ctx context.Context, logger slog.Logger, db database.Store, checkInterval, dormancyPeriod time.Duration) func() {
|
||||
func CheckInactiveUsersWithOptions(ctx context.Context, logger slog.Logger, clk quartz.Clock, db database.Store, auditor audit.Auditor, checkInterval, dormancyPeriod time.Duration) func() {
|
||||
logger = logger.Named("dormancy")
|
||||
|
||||
ctx, cancelFunc := context.WithCancel(ctx)
|
||||
done := make(chan struct{})
|
||||
ticker := time.NewTicker(checkInterval)
|
||||
go func() {
|
||||
defer close(done)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
}
|
||||
tf := clk.TickerFunc(ctx, checkInterval, func() error {
|
||||
startTime := time.Now()
|
||||
lastSeenAfter := dbtime.Now().Add(-dormancyPeriod)
|
||||
logger.Debug(ctx, "check inactive user accounts", slog.F("dormancy_period", dormancyPeriod), slog.F("last_seen_after", lastSeenAfter))
|
||||
|
||||
startTime := time.Now()
|
||||
lastSeenAfter := dbtime.Now().Add(-dormancyPeriod)
|
||||
logger.Debug(ctx, "check inactive user accounts", slog.F("dormancy_period", dormancyPeriod), slog.F("last_seen_after", lastSeenAfter))
|
||||
|
||||
updatedUsers, err := db.UpdateInactiveUsersToDormant(ctx, database.UpdateInactiveUsersToDormantParams{
|
||||
LastSeenAfter: lastSeenAfter,
|
||||
UpdatedAt: dbtime.Now(),
|
||||
})
|
||||
if err != nil && !xerrors.Is(err, sql.ErrNoRows) {
|
||||
logger.Error(ctx, "can't mark inactive users as dormant", slog.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
for _, u := range updatedUsers {
|
||||
logger.Info(ctx, "account has been marked as dormant", slog.F("email", u.Email), slog.F("last_seen_at", u.LastSeenAt))
|
||||
}
|
||||
logger.Debug(ctx, "checking user accounts is done", slog.F("num_dormant_accounts", len(updatedUsers)), slog.F("execution_time", time.Since(startTime)))
|
||||
updatedUsers, err := db.UpdateInactiveUsersToDormant(ctx, database.UpdateInactiveUsersToDormantParams{
|
||||
LastSeenAfter: lastSeenAfter,
|
||||
UpdatedAt: dbtime.Now(),
|
||||
})
|
||||
if err != nil && !xerrors.Is(err, sql.ErrNoRows) {
|
||||
logger.Error(ctx, "can't mark inactive users as dormant", slog.Error(err))
|
||||
return nil
|
||||
}
|
||||
}()
|
||||
|
||||
for _, u := range updatedUsers {
|
||||
logger.Info(ctx, "account has been marked as dormant", slog.F("email", u.Email), slog.F("last_seen_at", u.LastSeenAt))
|
||||
audit.BackgroundAudit(ctx, &audit.BackgroundAuditParams[database.User]{
|
||||
Audit: auditor,
|
||||
Log: logger,
|
||||
UserID: u.ID,
|
||||
Action: database.AuditActionWrite,
|
||||
Old: database.User{ID: u.ID, Username: u.Username, Status: database.UserStatusActive},
|
||||
New: database.User{ID: u.ID, Username: u.Username, Status: database.UserStatusDormant},
|
||||
Status: http.StatusOK,
|
||||
AdditionalFields: audit.BackgroundTaskFieldsBytes(ctx, logger, audit.BackgroundSubsystemDormancy),
|
||||
})
|
||||
}
|
||||
logger.Debug(ctx, "checking user accounts is done", slog.F("num_dormant_accounts", len(updatedUsers)), slog.F("execution_time", time.Since(startTime)))
|
||||
return nil
|
||||
})
|
||||
|
||||
return func() {
|
||||
cancelFunc()
|
||||
<-done
|
||||
_ = tf.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,10 +10,11 @@ import (
|
||||
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/audit"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbmem"
|
||||
"github.com/coder/coder/v2/enterprise/coderd/dormancy"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
"github.com/coder/quartz"
|
||||
)
|
||||
|
||||
func TestCheckInactiveUsers(t *testing.T) {
|
||||
@@ -42,29 +43,34 @@ func TestCheckInactiveUsers(t *testing.T) {
|
||||
suspendedUser2 := setupUser(ctx, t, db, "suspended-user-2@coder.com", database.UserStatusSuspended, time.Now().Add(-dormancyPeriod).Add(-time.Hour))
|
||||
suspendedUser3 := setupUser(ctx, t, db, "suspended-user-3@coder.com", database.UserStatusSuspended, time.Now().Add(-dormancyPeriod).Add(-6*time.Hour))
|
||||
|
||||
mAudit := audit.NewMock()
|
||||
mClock := quartz.NewMock(t)
|
||||
// Run the periodic job
|
||||
closeFunc := dormancy.CheckInactiveUsersWithOptions(ctx, logger, db, interval, dormancyPeriod)
|
||||
closeFunc := dormancy.CheckInactiveUsersWithOptions(ctx, logger, mClock, db, mAudit, interval, dormancyPeriod)
|
||||
t.Cleanup(closeFunc)
|
||||
|
||||
var rows []database.GetUsersRow
|
||||
var err error
|
||||
require.Eventually(t, func() bool {
|
||||
rows, err = db.GetUsers(ctx, database.GetUsersParams{})
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
dur, w := mClock.AdvanceNext()
|
||||
require.Equal(t, interval, dur)
|
||||
w.MustWait(ctx)
|
||||
|
||||
var dormant, suspended int
|
||||
for _, row := range rows {
|
||||
if row.Status == database.UserStatusDormant {
|
||||
dormant++
|
||||
} else if row.Status == database.UserStatusSuspended {
|
||||
suspended++
|
||||
}
|
||||
rows, err := db.GetUsers(ctx, database.GetUsersParams{})
|
||||
require.NoError(t, err)
|
||||
|
||||
var dormant, suspended int
|
||||
for _, row := range rows {
|
||||
if row.Status == database.UserStatusDormant {
|
||||
dormant++
|
||||
} else if row.Status == database.UserStatusSuspended {
|
||||
suspended++
|
||||
}
|
||||
// 6 users in total, 3 dormant, 3 suspended
|
||||
return len(rows) == 9 && dormant == 3 && suspended == 3
|
||||
}, testutil.WaitShort, testutil.IntervalMedium)
|
||||
}
|
||||
|
||||
// 9 users in total, 3 active, 3 dormant, 3 suspended
|
||||
require.Len(t, rows, 9)
|
||||
require.Equal(t, 3, dormant)
|
||||
require.Equal(t, 3, suspended)
|
||||
|
||||
require.Len(t, mAudit.AuditLogs(), 3)
|
||||
|
||||
allUsers := ignoreUpdatedAt(database.ConvertUserRows(rows))
|
||||
|
||||
|
||||
@@ -2,11 +2,13 @@ package coderd_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -14,6 +16,11 @@ import (
|
||||
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtestutil"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/coderd/util/ptr"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/enterprise/coderd/coderdenttest"
|
||||
@@ -295,6 +302,497 @@ func TestWorkspaceQuota(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
// nolint:paralleltest,tparallel // Tests must run serially
|
||||
func TestWorkspaceSerialization(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if !dbtestutil.WillUsePostgres() {
|
||||
t.Skip("Serialization errors only occur in postgres")
|
||||
}
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
|
||||
user := dbgen.User(t, db, database.User{})
|
||||
otherUser := dbgen.User(t, db, database.User{})
|
||||
|
||||
org := dbfake.Organization(t, db).
|
||||
EveryoneAllowance(20).
|
||||
Members(user, otherUser).
|
||||
Group(database.Group{
|
||||
QuotaAllowance: 10,
|
||||
}, user, otherUser).
|
||||
Group(database.Group{
|
||||
QuotaAllowance: 10,
|
||||
}, user).
|
||||
Do()
|
||||
|
||||
otherOrg := dbfake.Organization(t, db).
|
||||
EveryoneAllowance(20).
|
||||
Members(user, otherUser).
|
||||
Group(database.Group{
|
||||
QuotaAllowance: 10,
|
||||
}, user, otherUser).
|
||||
Group(database.Group{
|
||||
QuotaAllowance: 10,
|
||||
}, user).
|
||||
Do()
|
||||
|
||||
// TX mixing tests. **DO NOT** run these in parallel.
|
||||
// The goal here is to mess around with different ordering of
|
||||
// transactions and queries.
|
||||
|
||||
// UpdateBuildDeadline bumps a workspace deadline while doing a quota
|
||||
// commit to the same workspace build.
|
||||
//
|
||||
// Note: This passes if the interrupt is run before 'GetQuota()'
|
||||
// Passing orders:
|
||||
// - BeginTX -> Bump! -> GetQuota -> GetAllowance -> UpdateCost -> EndTx
|
||||
// - BeginTX -> GetQuota -> GetAllowance -> UpdateCost -> Bump! -> EndTx
|
||||
t.Run("UpdateBuildDeadline", func(t *testing.T) {
|
||||
t.Log("Expected to fail. As long as quota & deadline are on the same " +
|
||||
" table and affect the same row, this will likely always fail.")
|
||||
|
||||
// +------------------------------+------------------+
|
||||
// | Begin Tx | |
|
||||
// +------------------------------+------------------+
|
||||
// | GetQuota(user) | |
|
||||
// +------------------------------+------------------+
|
||||
// | | BumpDeadline(w1) |
|
||||
// +------------------------------+------------------+
|
||||
// | GetAllowance(user) | |
|
||||
// +------------------------------+------------------+
|
||||
// | UpdateWorkspaceBuildCost(w1) | |
|
||||
// +------------------------------+------------------+
|
||||
// | CommitTx() | |
|
||||
// +------------------------------+------------------+
|
||||
// pq: could not serialize access due to concurrent update
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
//nolint:gocritic // testing
|
||||
ctx = dbauthz.AsSystemRestricted(ctx)
|
||||
|
||||
myWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: org.Org.ID,
|
||||
OwnerID: user.ID,
|
||||
}).Do()
|
||||
|
||||
bumpDeadline := func() {
|
||||
err := db.InTx(func(db database.Store) error {
|
||||
err := db.UpdateWorkspaceBuildDeadlineByID(ctx, database.UpdateWorkspaceBuildDeadlineByIDParams{
|
||||
Deadline: dbtime.Now(),
|
||||
MaxDeadline: dbtime.Now(),
|
||||
UpdatedAt: dbtime.Now(),
|
||||
ID: myWorkspace.Build.ID,
|
||||
})
|
||||
return err
|
||||
}, &database.TxOptions{
|
||||
Isolation: sql.LevelSerializable,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
// Start TX
|
||||
// Run order
|
||||
|
||||
quota := newCommitter(t, db, myWorkspace.Workspace, myWorkspace.Build)
|
||||
quota.GetQuota(ctx, t) // Step 1
|
||||
bumpDeadline() // Interrupt
|
||||
quota.GetAllowance(ctx, t) // Step 2
|
||||
|
||||
err := quota.DBTx.UpdateWorkspaceBuildCostByID(ctx, database.UpdateWorkspaceBuildCostByIDParams{
|
||||
ID: myWorkspace.Build.ID,
|
||||
DailyCost: 10,
|
||||
}) // Step 3
|
||||
require.ErrorContains(t, err, "could not serialize access due to concurrent update")
|
||||
// End commit
|
||||
require.ErrorContains(t, quota.Done(), "failed transaction")
|
||||
})
|
||||
|
||||
// UpdateOtherBuildDeadline bumps a user's other workspace deadline
|
||||
// while doing a quota commit.
|
||||
t.Run("UpdateOtherBuildDeadline", func(t *testing.T) {
|
||||
// +------------------------------+------------------+
|
||||
// | Begin Tx | |
|
||||
// +------------------------------+------------------+
|
||||
// | GetQuota(user) | |
|
||||
// +------------------------------+------------------+
|
||||
// | | BumpDeadline(w2) |
|
||||
// +------------------------------+------------------+
|
||||
// | GetAllowance(user) | |
|
||||
// +------------------------------+------------------+
|
||||
// | UpdateWorkspaceBuildCost(w1) | |
|
||||
// +------------------------------+------------------+
|
||||
// | CommitTx() | |
|
||||
// +------------------------------+------------------+
|
||||
// Works!
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
//nolint:gocritic // testing
|
||||
ctx = dbauthz.AsSystemRestricted(ctx)
|
||||
|
||||
myWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: org.Org.ID,
|
||||
OwnerID: user.ID,
|
||||
}).Do()
|
||||
|
||||
// Use the same template
|
||||
otherWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: org.Org.ID,
|
||||
OwnerID: user.ID,
|
||||
}).
|
||||
Seed(database.WorkspaceBuild{
|
||||
TemplateVersionID: myWorkspace.TemplateVersion.ID,
|
||||
}).
|
||||
Do()
|
||||
|
||||
bumpDeadline := func() {
|
||||
err := db.InTx(func(db database.Store) error {
|
||||
err := db.UpdateWorkspaceBuildDeadlineByID(ctx, database.UpdateWorkspaceBuildDeadlineByIDParams{
|
||||
Deadline: dbtime.Now(),
|
||||
MaxDeadline: dbtime.Now(),
|
||||
UpdatedAt: dbtime.Now(),
|
||||
ID: otherWorkspace.Build.ID,
|
||||
})
|
||||
return err
|
||||
}, &database.TxOptions{
|
||||
Isolation: sql.LevelSerializable,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
// Start TX
|
||||
// Run order
|
||||
|
||||
quota := newCommitter(t, db, myWorkspace.Workspace, myWorkspace.Build)
|
||||
quota.GetQuota(ctx, t) // Step 1
|
||||
bumpDeadline() // Interrupt
|
||||
quota.GetAllowance(ctx, t) // Step 2
|
||||
quota.UpdateWorkspaceBuildCostByID(ctx, t, 10) // Step 3
|
||||
// End commit
|
||||
require.NoError(t, quota.Done())
|
||||
})
|
||||
|
||||
t.Run("ActivityBump", func(t *testing.T) {
|
||||
t.Log("Expected to fail. As long as quota & deadline are on the same " +
|
||||
" table and affect the same row, this will likely always fail.")
|
||||
// +---------------------+----------------------------------+
|
||||
// | W1 Quota Tx | |
|
||||
// +---------------------+----------------------------------+
|
||||
// | Begin Tx | |
|
||||
// +---------------------+----------------------------------+
|
||||
// | GetQuota(w1) | |
|
||||
// +---------------------+----------------------------------+
|
||||
// | GetAllowance(w1) | |
|
||||
// +---------------------+----------------------------------+
|
||||
// | | ActivityBump(w1) |
|
||||
// +---------------------+----------------------------------+
|
||||
// | UpdateBuildCost(w1) | |
|
||||
// +---------------------+----------------------------------+
|
||||
// | CommitTx() | |
|
||||
// +---------------------+----------------------------------+
|
||||
// pq: could not serialize access due to concurrent update
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
//nolint:gocritic // testing
|
||||
ctx = dbauthz.AsSystemRestricted(ctx)
|
||||
|
||||
myWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: org.Org.ID,
|
||||
OwnerID: user.ID,
|
||||
}).
|
||||
Seed(database.WorkspaceBuild{
|
||||
// Make sure the bump does something
|
||||
Deadline: dbtime.Now().Add(time.Hour * -20),
|
||||
}).
|
||||
Do()
|
||||
|
||||
one := newCommitter(t, db, myWorkspace.Workspace, myWorkspace.Build)
|
||||
|
||||
// Run order
|
||||
one.GetQuota(ctx, t)
|
||||
one.GetAllowance(ctx, t)
|
||||
|
||||
err := db.ActivityBumpWorkspace(ctx, database.ActivityBumpWorkspaceParams{
|
||||
NextAutostart: time.Now(),
|
||||
WorkspaceID: myWorkspace.Workspace.ID,
|
||||
})
|
||||
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = one.DBTx.UpdateWorkspaceBuildCostByID(ctx, database.UpdateWorkspaceBuildCostByIDParams{
|
||||
ID: myWorkspace.Build.ID,
|
||||
DailyCost: 10,
|
||||
})
|
||||
require.ErrorContains(t, err, "could not serialize access due to concurrent update")
|
||||
|
||||
// End commit
|
||||
assert.ErrorContains(t, one.Done(), "failed transaction")
|
||||
})
|
||||
|
||||
t.Run("BumpLastUsedAt", func(t *testing.T) {
|
||||
// +---------------------+----------------------------------+
|
||||
// | W1 Quota Tx | |
|
||||
// +---------------------+----------------------------------+
|
||||
// | Begin Tx | |
|
||||
// +---------------------+----------------------------------+
|
||||
// | GetQuota(w1) | |
|
||||
// +---------------------+----------------------------------+
|
||||
// | GetAllowance(w1) | |
|
||||
// +---------------------+----------------------------------+
|
||||
// | | UpdateWorkspaceLastUsedAt(w1) |
|
||||
// +---------------------+----------------------------------+
|
||||
// | UpdateBuildCost(w1) | |
|
||||
// +---------------------+----------------------------------+
|
||||
// | CommitTx() | |
|
||||
// +---------------------+----------------------------------+
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
//nolint:gocritic // testing
|
||||
ctx = dbauthz.AsSystemRestricted(ctx)
|
||||
|
||||
myWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: org.Org.ID,
|
||||
OwnerID: user.ID,
|
||||
}).Do()
|
||||
|
||||
one := newCommitter(t, db, myWorkspace.Workspace, myWorkspace.Build)
|
||||
|
||||
// Run order
|
||||
one.GetQuota(ctx, t)
|
||||
one.GetAllowance(ctx, t)
|
||||
|
||||
err := db.UpdateWorkspaceLastUsedAt(ctx, database.UpdateWorkspaceLastUsedAtParams{
|
||||
ID: myWorkspace.Workspace.ID,
|
||||
LastUsedAt: dbtime.Now(),
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
one.UpdateWorkspaceBuildCostByID(ctx, t, 10)
|
||||
|
||||
// End commit
|
||||
assert.NoError(t, one.Done())
|
||||
})
|
||||
|
||||
t.Run("UserMod", func(t *testing.T) {
|
||||
// +---------------------+----------------------------------+
|
||||
// | W1 Quota Tx | |
|
||||
// +---------------------+----------------------------------+
|
||||
// | Begin Tx | |
|
||||
// +---------------------+----------------------------------+
|
||||
// | GetQuota(w1) | |
|
||||
// +---------------------+----------------------------------+
|
||||
// | GetAllowance(w1) | |
|
||||
// +---------------------+----------------------------------+
|
||||
// | | RemoveUserFromOrg |
|
||||
// +---------------------+----------------------------------+
|
||||
// | UpdateBuildCost(w1) | |
|
||||
// +---------------------+----------------------------------+
|
||||
// | CommitTx() | |
|
||||
// +---------------------+----------------------------------+
|
||||
// Works!
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
//nolint:gocritic // testing
|
||||
ctx = dbauthz.AsSystemRestricted(ctx)
|
||||
var err error
|
||||
|
||||
myWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: org.Org.ID,
|
||||
OwnerID: user.ID,
|
||||
}).Do()
|
||||
|
||||
one := newCommitter(t, db, myWorkspace.Workspace, myWorkspace.Build)
|
||||
|
||||
// Run order
|
||||
|
||||
one.GetQuota(ctx, t)
|
||||
one.GetAllowance(ctx, t)
|
||||
|
||||
err = db.DeleteOrganizationMember(ctx, database.DeleteOrganizationMemberParams{
|
||||
OrganizationID: myWorkspace.Workspace.OrganizationID,
|
||||
UserID: user.ID,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
one.UpdateWorkspaceBuildCostByID(ctx, t, 10)
|
||||
|
||||
// End commit
|
||||
assert.NoError(t, one.Done())
|
||||
})
|
||||
|
||||
// QuotaCommit 2 workspaces in different orgs.
|
||||
// Workspaces do not share templates, owners, or orgs
|
||||
t.Run("DoubleQuotaUnrelatedWorkspaces", func(t *testing.T) {
|
||||
// +---------------------+---------------------+
|
||||
// | W1 Quota Tx | W2 Quota Tx |
|
||||
// +---------------------+---------------------+
|
||||
// | Begin Tx | |
|
||||
// +---------------------+---------------------+
|
||||
// | | Begin Tx |
|
||||
// +---------------------+---------------------+
|
||||
// | GetQuota(w1) | |
|
||||
// +---------------------+---------------------+
|
||||
// | GetAllowance(w1) | |
|
||||
// +---------------------+---------------------+
|
||||
// | UpdateBuildCost(w1) | |
|
||||
// +---------------------+---------------------+
|
||||
// | | UpdateBuildCost(w2) |
|
||||
// +---------------------+---------------------+
|
||||
// | | GetQuota(w2) |
|
||||
// +---------------------+---------------------+
|
||||
// | | GetAllowance(w2) |
|
||||
// +---------------------+---------------------+
|
||||
// | CommitTx() | |
|
||||
// +---------------------+---------------------+
|
||||
// | | CommitTx() |
|
||||
// +---------------------+---------------------+
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
//nolint:gocritic // testing
|
||||
ctx = dbauthz.AsSystemRestricted(ctx)
|
||||
|
||||
myWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: org.Org.ID,
|
||||
OwnerID: user.ID,
|
||||
}).Do()
|
||||
|
||||
myOtherWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: otherOrg.Org.ID, // Different org!
|
||||
OwnerID: otherUser.ID,
|
||||
}).Do()
|
||||
|
||||
one := newCommitter(t, db, myWorkspace.Workspace, myWorkspace.Build)
|
||||
two := newCommitter(t, db, myOtherWorkspace.Workspace, myOtherWorkspace.Build)
|
||||
|
||||
// Run order
|
||||
one.GetQuota(ctx, t)
|
||||
one.GetAllowance(ctx, t)
|
||||
|
||||
one.UpdateWorkspaceBuildCostByID(ctx, t, 10)
|
||||
|
||||
two.GetQuota(ctx, t)
|
||||
two.GetAllowance(ctx, t)
|
||||
two.UpdateWorkspaceBuildCostByID(ctx, t, 10)
|
||||
|
||||
// End commit
|
||||
assert.NoError(t, one.Done())
|
||||
assert.NoError(t, two.Done())
|
||||
})
|
||||
|
||||
// QuotaCommit 2 workspaces in different orgs.
|
||||
// Workspaces do not share templates or orgs
|
||||
t.Run("DoubleQuotaUserWorkspacesDiffOrgs", func(t *testing.T) {
|
||||
// +---------------------+---------------------+
|
||||
// | W1 Quota Tx | W2 Quota Tx |
|
||||
// +---------------------+---------------------+
|
||||
// | Begin Tx | |
|
||||
// +---------------------+---------------------+
|
||||
// | | Begin Tx |
|
||||
// +---------------------+---------------------+
|
||||
// | GetQuota(w1) | |
|
||||
// +---------------------+---------------------+
|
||||
// | GetAllowance(w1) | |
|
||||
// +---------------------+---------------------+
|
||||
// | UpdateBuildCost(w1) | |
|
||||
// +---------------------+---------------------+
|
||||
// | | UpdateBuildCost(w2) |
|
||||
// +---------------------+---------------------+
|
||||
// | | GetQuota(w2) |
|
||||
// +---------------------+---------------------+
|
||||
// | | GetAllowance(w2) |
|
||||
// +---------------------+---------------------+
|
||||
// | CommitTx() | |
|
||||
// +---------------------+---------------------+
|
||||
// | | CommitTx() |
|
||||
// +---------------------+---------------------+
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
//nolint:gocritic // testing
|
||||
ctx = dbauthz.AsSystemRestricted(ctx)
|
||||
|
||||
myWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: org.Org.ID,
|
||||
OwnerID: user.ID,
|
||||
}).Do()
|
||||
|
||||
myOtherWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: otherOrg.Org.ID, // Different org!
|
||||
OwnerID: user.ID,
|
||||
}).Do()
|
||||
|
||||
one := newCommitter(t, db, myWorkspace.Workspace, myWorkspace.Build)
|
||||
two := newCommitter(t, db, myOtherWorkspace.Workspace, myOtherWorkspace.Build)
|
||||
|
||||
// Run order
|
||||
one.GetQuota(ctx, t)
|
||||
one.GetAllowance(ctx, t)
|
||||
|
||||
one.UpdateWorkspaceBuildCostByID(ctx, t, 10)
|
||||
|
||||
two.GetQuota(ctx, t)
|
||||
two.GetAllowance(ctx, t)
|
||||
two.UpdateWorkspaceBuildCostByID(ctx, t, 10)
|
||||
|
||||
// End commit
|
||||
assert.NoError(t, one.Done())
|
||||
assert.NoError(t, two.Done())
|
||||
})
|
||||
|
||||
// QuotaCommit 2 workspaces in the same org.
|
||||
// Workspaces do not share templates
|
||||
t.Run("DoubleQuotaUserWorkspaces", func(t *testing.T) {
|
||||
t.Log("Setting a new build cost to a workspace in a org affects other " +
|
||||
"workspaces in the same org. This is expected to fail.")
|
||||
// +---------------------+---------------------+
|
||||
// | W1 Quota Tx | W2 Quota Tx |
|
||||
// +---------------------+---------------------+
|
||||
// | Begin Tx | |
|
||||
// +---------------------+---------------------+
|
||||
// | | Begin Tx |
|
||||
// +---------------------+---------------------+
|
||||
// | GetQuota(w1) | |
|
||||
// +---------------------+---------------------+
|
||||
// | GetAllowance(w1) | |
|
||||
// +---------------------+---------------------+
|
||||
// | UpdateBuildCost(w1) | |
|
||||
// +---------------------+---------------------+
|
||||
// | | UpdateBuildCost(w2) |
|
||||
// +---------------------+---------------------+
|
||||
// | | GetQuota(w2) |
|
||||
// +---------------------+---------------------+
|
||||
// | | GetAllowance(w2) |
|
||||
// +---------------------+---------------------+
|
||||
// | CommitTx() | |
|
||||
// +---------------------+---------------------+
|
||||
// | | CommitTx() |
|
||||
// +---------------------+---------------------+
|
||||
// pq: could not serialize access due to read/write dependencies among transactions
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
//nolint:gocritic // testing
|
||||
ctx = dbauthz.AsSystemRestricted(ctx)
|
||||
|
||||
myWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: org.Org.ID,
|
||||
OwnerID: user.ID,
|
||||
}).Do()
|
||||
|
||||
myOtherWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: org.Org.ID,
|
||||
OwnerID: user.ID,
|
||||
}).Do()
|
||||
|
||||
one := newCommitter(t, db, myWorkspace.Workspace, myWorkspace.Build)
|
||||
two := newCommitter(t, db, myOtherWorkspace.Workspace, myOtherWorkspace.Build)
|
||||
|
||||
// Run order
|
||||
one.GetQuota(ctx, t)
|
||||
one.GetAllowance(ctx, t)
|
||||
|
||||
one.UpdateWorkspaceBuildCostByID(ctx, t, 10)
|
||||
|
||||
two.GetQuota(ctx, t)
|
||||
two.GetAllowance(ctx, t)
|
||||
two.UpdateWorkspaceBuildCostByID(ctx, t, 10)
|
||||
|
||||
// End commit
|
||||
assert.NoError(t, one.Done())
|
||||
assert.ErrorContains(t, two.Done(), "could not serialize access due to read/write dependencies among transactions")
|
||||
})
|
||||
}
|
||||
|
||||
func deprecatedQuotaEndpoint(ctx context.Context, client *codersdk.Client, userID string) (codersdk.WorkspaceQuota, error) {
|
||||
res, err := client.Request(ctx, http.MethodGet, fmt.Sprintf("/api/v2/workspace-quota/%s", userID), nil)
|
||||
if err != nil {
|
||||
@@ -335,3 +833,65 @@ func applyWithCost(cost int32) []*proto.Response {
|
||||
},
|
||||
}}
|
||||
}
|
||||
|
||||
// committer does what the CommitQuota does, but allows
// stepping through the actions in the tx and controlling the
// timing.
// This is a nice wrapper to make the tests more concise.
type committer struct {
	// DBTx is the open serializable transaction the test steps through.
	DBTx *dbtestutil.DBTx
	// w is the workspace whose owner/org quota is being exercised.
	w database.WorkspaceTable
	// b is the workspace build whose daily cost gets updated.
	b database.WorkspaceBuild
}
|
||||
|
||||
func newCommitter(t *testing.T, db database.Store, workspace database.WorkspaceTable, build database.WorkspaceBuild) *committer {
|
||||
quotaTX := dbtestutil.StartTx(t, db, &database.TxOptions{
|
||||
Isolation: sql.LevelSerializable,
|
||||
ReadOnly: false,
|
||||
})
|
||||
return &committer{DBTx: quotaTX, w: workspace, b: build}
|
||||
}
|
||||
|
||||
// GetQuota touches:
|
||||
// - workspace_builds
|
||||
// - workspaces
|
||||
func (c *committer) GetQuota(ctx context.Context, t *testing.T) int64 {
|
||||
t.Helper()
|
||||
|
||||
consumed, err := c.DBTx.GetQuotaConsumedForUser(ctx, database.GetQuotaConsumedForUserParams{
|
||||
OwnerID: c.w.OwnerID,
|
||||
OrganizationID: c.w.OrganizationID,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return consumed
|
||||
}
|
||||
|
||||
// GetAllowance touches:
|
||||
// - group_members_expanded
|
||||
// - users
|
||||
// - groups
|
||||
// - org_members
|
||||
func (c *committer) GetAllowance(ctx context.Context, t *testing.T) int64 {
|
||||
t.Helper()
|
||||
|
||||
allowance, err := c.DBTx.GetQuotaAllowanceForUser(ctx, database.GetQuotaAllowanceForUserParams{
|
||||
UserID: c.w.OwnerID,
|
||||
OrganizationID: c.w.OrganizationID,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return allowance
|
||||
}
|
||||
|
||||
func (c *committer) UpdateWorkspaceBuildCostByID(ctx context.Context, t *testing.T, cost int32) bool {
|
||||
t.Helper()
|
||||
|
||||
err := c.DBTx.UpdateWorkspaceBuildCostByID(ctx, database.UpdateWorkspaceBuildCostByIDParams{
|
||||
ID: c.b.ID,
|
||||
DailyCost: cost,
|
||||
})
|
||||
return assert.NoError(t, err)
|
||||
}
|
||||
|
||||
// Done ends the committer's stepped transaction by delegating to the
// wrapped DBTx, returning any error from finishing it.
func (c *committer) Done() error {
	return c.DBTx.Done()
}
|
||||
|
||||
@@ -174,15 +174,15 @@ require (
|
||||
go.uber.org/atomic v1.11.0
|
||||
go.uber.org/goleak v1.3.1-0.20240429205332-517bace7cc29
|
||||
go4.org/netipx v0.0.0-20230728180743-ad4cb58a6516
|
||||
golang.org/x/crypto v0.28.0
|
||||
golang.org/x/crypto v0.31.0
|
||||
golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa
|
||||
golang.org/x/mod v0.21.0
|
||||
golang.org/x/net v0.30.0
|
||||
golang.org/x/oauth2 v0.23.0
|
||||
golang.org/x/sync v0.8.0
|
||||
golang.org/x/sys v0.26.0
|
||||
golang.org/x/term v0.25.0
|
||||
golang.org/x/text v0.19.0
|
||||
golang.org/x/sync v0.10.0
|
||||
golang.org/x/sys v0.28.0
|
||||
golang.org/x/term v0.27.0
|
||||
golang.org/x/text v0.21.0
|
||||
golang.org/x/tools v0.26.0
|
||||
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da
|
||||
google.golang.org/api v0.203.0
|
||||
|
||||
@@ -1062,8 +1062,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
|
||||
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
|
||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI=
|
||||
golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
|
||||
@@ -1110,8 +1110,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
|
||||
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -1152,8 +1152,9 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
|
||||
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -1161,8 +1162,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
|
||||
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
|
||||
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
@@ -1173,8 +1174,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
|
||||
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
|
||||
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
|
||||
@@ -34,22 +34,23 @@ env:
|
||||
value: "0.0.0.0:2112"
|
||||
{{- if and (empty .Values.provisionerDaemon.pskSecretName) (empty .Values.provisionerDaemon.keySecretName) }}
|
||||
{{ fail "Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified." }}
|
||||
{{- else if and (.Values.provisionerDaemon.pskSecretName) (.Values.provisionerDaemon.keySecretName) }}
|
||||
{{ fail "Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified, but not both." }}
|
||||
{{- end }}
|
||||
{{- if .Values.provisionerDaemon.pskSecretName }}
|
||||
- name: CODER_PROVISIONER_DAEMON_PSK
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Values.provisionerDaemon.pskSecretName | quote }}
|
||||
key: psk
|
||||
{{- end }}
|
||||
{{- if and .Values.provisionerDaemon.keySecretName .Values.provisionerDaemon.keySecretKey }}
|
||||
{{- else if and .Values.provisionerDaemon.keySecretName .Values.provisionerDaemon.keySecretKey }}
|
||||
{{- if and (not (empty .Values.provisionerDaemon.pskSecretName)) (ne .Values.provisionerDaemon.pskSecretName "coder-provisioner-psk") }}
|
||||
{{ fail "Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified, but not both." }}
|
||||
{{- else if .Values.provisionerDaemon.tags }}
|
||||
{{ fail "provisionerDaemon.tags may not be specified with provisionerDaemon.keySecretName." }}
|
||||
{{- end }}
|
||||
- name: CODER_PROVISIONER_DAEMON_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Values.provisionerDaemon.keySecretName | quote }}
|
||||
key: {{ .Values.provisionerDaemon.keySecretKey | quote }}
|
||||
{{- else }}
|
||||
- name: CODER_PROVISIONER_DAEMON_PSK
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Values.provisionerDaemon.pskSecretName | quote }}
|
||||
key: psk
|
||||
{{- end }}
|
||||
{{- if include "provisioner.tags" . }}
|
||||
- name: CODER_PROVISIONERD_TAGS
|
||||
|
||||
@@ -56,6 +56,12 @@ var testCases = []testCase{
|
||||
name: "provisionerd_key",
|
||||
expectedError: "",
|
||||
},
|
||||
// Test explicitly for the workaround where setting provisionerDaemon.pskSecretName=""
|
||||
// was required to use provisioner keys.
|
||||
{
|
||||
name: "provisionerd_key_psk_empty_workaround",
|
||||
expectedError: "",
|
||||
},
|
||||
{
|
||||
name: "provisionerd_psk_and_key",
|
||||
expectedError: `Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified, but not both.`,
|
||||
@@ -64,6 +70,10 @@ var testCases = []testCase{
|
||||
name: "provisionerd_no_psk_or_key",
|
||||
expectedError: `Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified.`,
|
||||
},
|
||||
{
|
||||
name: "provisionerd_key_tags",
|
||||
expectedError: `provisionerDaemon.tags may not be specified with provisionerDaemon.keySecretName.`,
|
||||
},
|
||||
{
|
||||
name: "extra_templates",
|
||||
expectedError: "",
|
||||
|
||||
@@ -112,8 +112,6 @@ spec:
|
||||
secretKeyRef:
|
||||
key: provisionerd-key
|
||||
name: coder-provisionerd-key
|
||||
- name: CODER_PROVISIONERD_TAGS
|
||||
value: clusterType=k8s,location=auh
|
||||
- name: CODER_URL
|
||||
value: http://coder.default.svc.cluster.local
|
||||
image: ghcr.io/coder/coder:latest
|
||||
|
||||
@@ -2,9 +2,5 @@ coder:
|
||||
image:
|
||||
tag: latest
|
||||
provisionerDaemon:
|
||||
pskSecretName: ""
|
||||
keySecretName: "coder-provisionerd-key"
|
||||
keySecretKey: "provisionerd-key"
|
||||
tags:
|
||||
location: auh
|
||||
clusterType: k8s
|
||||
|
||||
+135
@@ -0,0 +1,135 @@
|
||||
---
|
||||
# Source: coder-provisioner/templates/coder.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
annotations: {}
|
||||
labels:
|
||||
app.kubernetes.io/instance: release-name
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/name: coder-provisioner
|
||||
app.kubernetes.io/part-of: coder-provisioner
|
||||
app.kubernetes.io/version: 0.1.0
|
||||
helm.sh/chart: coder-provisioner-0.1.0
|
||||
name: coder-provisioner
|
||||
---
|
||||
# Source: coder-provisioner/templates/rbac.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: coder-provisioner-workspace-perms
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- deletecollection
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims"]
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- deletecollection
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- deletecollection
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
---
|
||||
# Source: coder-provisioner/templates/rbac.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: "coder-provisioner"
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: "coder-provisioner"
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: coder-provisioner-workspace-perms
|
||||
---
|
||||
# Source: coder-provisioner/templates/coder.yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
annotations: {}
|
||||
labels:
|
||||
app.kubernetes.io/instance: release-name
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/name: coder-provisioner
|
||||
app.kubernetes.io/part-of: coder-provisioner
|
||||
app.kubernetes.io/version: 0.1.0
|
||||
helm.sh/chart: coder-provisioner-0.1.0
|
||||
name: coder-provisioner
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/instance: release-name
|
||||
app.kubernetes.io/name: coder-provisioner
|
||||
template:
|
||||
metadata:
|
||||
annotations: {}
|
||||
labels:
|
||||
app.kubernetes.io/instance: release-name
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
app.kubernetes.io/name: coder-provisioner
|
||||
app.kubernetes.io/part-of: coder-provisioner
|
||||
app.kubernetes.io/version: 0.1.0
|
||||
helm.sh/chart: coder-provisioner-0.1.0
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- provisionerd
|
||||
- start
|
||||
command:
|
||||
- /opt/coder
|
||||
env:
|
||||
- name: CODER_PROMETHEUS_ADDRESS
|
||||
value: 0.0.0.0:2112
|
||||
- name: CODER_PROVISIONER_DAEMON_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: provisionerd-key
|
||||
name: coder-provisionerd-key
|
||||
- name: CODER_URL
|
||||
value: http://coder.default.svc.cluster.local
|
||||
image: ghcr.io/coder/coder:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle: {}
|
||||
name: coder
|
||||
ports: null
|
||||
resources: {}
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: null
|
||||
runAsGroup: 1000
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
volumeMounts: []
|
||||
restartPolicy: Always
|
||||
serviceAccountName: coder-provisioner
|
||||
terminationGracePeriodSeconds: 600
|
||||
volumes: []
|
||||
@@ -0,0 +1,7 @@
|
||||
coder:
|
||||
image:
|
||||
tag: latest
|
||||
provisionerDaemon:
|
||||
pskSecretName: ""
|
||||
keySecretName: "coder-provisionerd-key"
|
||||
keySecretKey: "provisionerd-key"
|
||||
@@ -0,0 +1,9 @@
|
||||
coder:
|
||||
image:
|
||||
tag: latest
|
||||
provisionerDaemon:
|
||||
keySecretName: "coder-provisionerd-key"
|
||||
keySecretKey: "provisionerd-key"
|
||||
tags:
|
||||
location: auh
|
||||
clusterType: k8s
|
||||
@@ -4,6 +4,3 @@ coder:
|
||||
provisionerDaemon:
|
||||
pskSecretName: ""
|
||||
keySecretName: ""
|
||||
tags:
|
||||
location: auh
|
||||
clusterType: k8s
|
||||
|
||||
+1
-1
@@ -111,7 +111,7 @@ spec:
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: psk
|
||||
name: coder-provisionerd-psk
|
||||
name: not-the-default-coder-provisioner-psk
|
||||
- name: CODER_PROVISIONERD_TAGS
|
||||
value: clusterType=k8s,location=auh
|
||||
- name: CODER_URL
|
||||
|
||||
+1
-1
@@ -2,7 +2,7 @@ coder:
|
||||
image:
|
||||
tag: latest
|
||||
provisionerDaemon:
|
||||
pskSecretName: "coder-provisionerd-psk"
|
||||
pskSecretName: "not-the-default-coder-provisioner-psk"
|
||||
tags:
|
||||
location: auh
|
||||
clusterType: k8s
|
||||
|
||||
@@ -2,7 +2,7 @@ coder:
|
||||
image:
|
||||
tag: latest
|
||||
provisionerDaemon:
|
||||
pskSecretName: "coder-provisionerd-psk"
|
||||
pskSecretName: "not-the-default-coder-provisioner-psk"
|
||||
keySecretName: "coder-provisionerd-key"
|
||||
keySecretKey: "provisionerd-key"
|
||||
tags:
|
||||
|
||||
@@ -204,14 +204,23 @@ provisionerDaemon:
|
||||
# provisionerDaemon.keySecretName -- The name of the Kubernetes
|
||||
# secret that contains a provisioner key to use to authenticate with Coder.
|
||||
# See: https://coder.com/docs/admin/provisioners#authentication
|
||||
# NOTE: it is not permitted to specify both provisionerDaemon.keySecretName
|
||||
# and provisionerDaemon.pskSecretName. An exception is made for the purposes
|
||||
# of backwards-compatibility: if provisionerDaemon.pskSecretName is unchanged
|
||||
# from the default value and provisionerDaemon.keySecretName is set, then
|
||||
# provisionerDaemon.keySecretName and provisionerDaemon.keySecretKey will take
|
||||
# precedence over provisionerDaemon.pskSecretName.
|
||||
keySecretName: ""
|
||||
# provisionerDaemon.keySecretKey -- The key of the Kubernetes
|
||||
# secret specified in provisionerDaemon.keySecretName that contains
|
||||
# the provisioner key. Defaults to "key".
|
||||
keySecretKey: "key"
|
||||
|
||||
# provisionerDaemon.tags -- Tags to filter provisioner jobs by.
|
||||
# provisionerDaemon.tags -- If using a PSK, specify the set of provisioner
|
||||
# job tags for which this provisioner daemon is responsible.
|
||||
# See: https://coder.com/docs/admin/provisioners#provisioner-tags
|
||||
# NOTE: it is not permitted to specify both provisionerDaemon.tags and
|
||||
# provsionerDaemon.keySecretName.
|
||||
tags:
|
||||
{}
|
||||
# location: usa
|
||||
|
||||
+5
-3
@@ -231,13 +231,15 @@ export const createTemplate = async (
|
||||
* random name.
|
||||
*/
|
||||
export const createGroup = async (page: Page): Promise<string> => {
|
||||
await page.goto("/groups/create", { waitUntil: "domcontentloaded" });
|
||||
await expectUrl(page).toHavePathName("/groups/create");
|
||||
await page.goto("/deployment/groups/create", {
|
||||
waitUntil: "domcontentloaded",
|
||||
});
|
||||
await expectUrl(page).toHavePathName("/deployment/groups/create");
|
||||
|
||||
const name = randomName();
|
||||
await page.getByLabel("Name", { exact: true }).fill(name);
|
||||
await page.getByTestId("form-submit").click();
|
||||
await expectUrl(page).toHavePathName(`/groups/${name}`);
|
||||
await expectUrl(page).toHavePathName(`/deployment/groups/${name}`);
|
||||
return name;
|
||||
};
|
||||
|
||||
|
||||
+1
-1
@@ -1868,7 +1868,7 @@ class ApiMethods {
|
||||
|
||||
uploadFile = async (file: File): Promise<TypesGen.UploadResponse> => {
|
||||
const response = await this.axios.post("/api/v2/files", file, {
|
||||
headers: { "Content-Type": "application/x-tar" },
|
||||
headers: { "Content-Type": file.type },
|
||||
});
|
||||
|
||||
return response.data;
|
||||
|
||||
Generated
+1
@@ -328,6 +328,7 @@ export interface CreateUserRequestWithOrgs {
|
||||
readonly name: string;
|
||||
readonly password: string;
|
||||
readonly login_type: LoginType;
|
||||
readonly user_status?: UserStatus;
|
||||
readonly organization_ids: Readonly<Array<string>>;
|
||||
}
|
||||
|
||||
|
||||
@@ -22,9 +22,3 @@ export default meta;
|
||||
type Story = StoryObj<typeof ActiveUserChart>;
|
||||
|
||||
export const Example: Story = {};
|
||||
|
||||
export const UserLimit: Story = {
|
||||
args: {
|
||||
userLimit: 10,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -14,7 +14,6 @@ import {
|
||||
Tooltip,
|
||||
defaults,
|
||||
} from "chart.js";
|
||||
import annotationPlugin from "chartjs-plugin-annotation";
|
||||
import {
|
||||
HelpTooltip,
|
||||
HelpTooltipContent,
|
||||
@@ -36,21 +35,16 @@ ChartJS.register(
|
||||
Title,
|
||||
Tooltip,
|
||||
Legend,
|
||||
annotationPlugin,
|
||||
);
|
||||
|
||||
const USER_LIMIT_DISPLAY_THRESHOLD = 60;
|
||||
|
||||
export interface ActiveUserChartProps {
|
||||
data: readonly { date: string; amount: number }[];
|
||||
interval: "day" | "week";
|
||||
userLimit: number | undefined;
|
||||
}
|
||||
|
||||
export const ActiveUserChart: FC<ActiveUserChartProps> = ({
|
||||
data,
|
||||
interval,
|
||||
userLimit,
|
||||
}) => {
|
||||
const theme = useTheme();
|
||||
|
||||
@@ -64,24 +58,6 @@ export const ActiveUserChart: FC<ActiveUserChartProps> = ({
|
||||
responsive: true,
|
||||
animation: false,
|
||||
plugins: {
|
||||
annotation: {
|
||||
annotations: [
|
||||
{
|
||||
type: "line",
|
||||
scaleID: "y",
|
||||
display: shouldDisplayUserLimit(userLimit, chartData),
|
||||
value: userLimit,
|
||||
borderColor: theme.palette.secondary.contrastText,
|
||||
borderWidth: 5,
|
||||
label: {
|
||||
content: "User limit",
|
||||
color: theme.palette.primary.contrastText,
|
||||
display: true,
|
||||
font: { weight: "normal" },
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
legend: {
|
||||
display: false,
|
||||
},
|
||||
@@ -103,7 +79,6 @@ export const ActiveUserChart: FC<ActiveUserChartProps> = ({
|
||||
precision: 0,
|
||||
},
|
||||
},
|
||||
|
||||
x: {
|
||||
grid: { color: theme.palette.divider },
|
||||
ticks: {
|
||||
@@ -138,32 +113,26 @@ export const ActiveUserChart: FC<ActiveUserChartProps> = ({
|
||||
);
|
||||
};
|
||||
|
||||
export const ActiveUsersTitle: FC = () => {
|
||||
type ActiveUsersTitleProps = {
|
||||
interval: "day" | "week";
|
||||
};
|
||||
|
||||
export const ActiveUsersTitle: FC<ActiveUsersTitleProps> = ({ interval }) => {
|
||||
return (
|
||||
<div css={{ display: "flex", alignItems: "center", gap: 8 }}>
|
||||
Active Users
|
||||
{interval === "day" ? "Daily" : "Weekly"} Active Users
|
||||
<HelpTooltip>
|
||||
<HelpTooltipTrigger size="small" />
|
||||
<HelpTooltipContent>
|
||||
<HelpTooltipTitle>How do we calculate active users?</HelpTooltipTitle>
|
||||
<HelpTooltipText>
|
||||
When a connection is initiated to a user's workspace they are
|
||||
considered an active user. e.g. apps, web terminal, SSH
|
||||
considered an active user. e.g. apps, web terminal, SSH. This is for
|
||||
measuring user activity and has no connection to license
|
||||
consumption.
|
||||
</HelpTooltipText>
|
||||
</HelpTooltipContent>
|
||||
</HelpTooltip>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
function shouldDisplayUserLimit(
|
||||
userLimit: number | undefined,
|
||||
activeUsers: number[],
|
||||
): boolean {
|
||||
if (!userLimit || activeUsers.length === 0) {
|
||||
return false;
|
||||
}
|
||||
return (
|
||||
Math.max(...activeUsers) >= (userLimit * USER_LIMIT_DISPLAY_THRESHOLD) / 100
|
||||
);
|
||||
}
|
||||
|
||||
@@ -142,6 +142,9 @@ const DeploymentSettingsNavigation: FC<DeploymentSettingsNavigationProps> = ({
|
||||
{permissions.viewAllUsers && (
|
||||
<SidebarNavSubItem href="users">Users</SidebarNavSubItem>
|
||||
)}
|
||||
{permissions.viewAnyGroup && (
|
||||
<SidebarNavSubItem href="groups">Groups</SidebarNavSubItem>
|
||||
)}
|
||||
{permissions.viewNotificationTemplate && (
|
||||
<SidebarNavSubItem href="notifications">
|
||||
<Stack direction="row" alignItems="center" spacing={1}>
|
||||
|
||||
@@ -23,7 +23,7 @@ export const AuditLogDescription: FC<AuditLogDescriptionProps> = ({
|
||||
target = "";
|
||||
}
|
||||
|
||||
// This occurs when SCIM creates a user.
|
||||
// This occurs when SCIM creates a user, or dormancy changes a users status.
|
||||
if (
|
||||
auditLog.resource_type === "user" &&
|
||||
auditLog.additional_fields?.automatic_actor === "coder"
|
||||
|
||||
-7
@@ -50,13 +50,6 @@ type Story = StoryObj<typeof GeneralSettingsPageView>;
|
||||
|
||||
export const Page: Story = {};
|
||||
|
||||
export const WithUserLimit: Story = {
|
||||
args: {
|
||||
deploymentDAUs: MockDeploymentDAUResponse,
|
||||
entitlements: MockEntitlementsWithUserLimit,
|
||||
},
|
||||
};
|
||||
|
||||
export const NoDAUs: Story = {
|
||||
args: {
|
||||
deploymentDAUs: undefined,
|
||||
|
||||
+2
-10
@@ -49,16 +49,8 @@ export const GeneralSettingsPageView: FC<GeneralSettingsPageViewProps> = ({
|
||||
)}
|
||||
{deploymentDAUs && (
|
||||
<div css={{ marginBottom: 24, height: 200 }}>
|
||||
<ChartSection title={<ActiveUsersTitle />}>
|
||||
<ActiveUserChart
|
||||
data={deploymentDAUs.entries}
|
||||
interval="day"
|
||||
userLimit={
|
||||
entitlements?.features.user_limit.enabled
|
||||
? entitlements?.features.user_limit.limit
|
||||
: undefined
|
||||
}
|
||||
/>
|
||||
<ChartSection title={<ActiveUsersTitle interval="day" />}>
|
||||
<ActiveUserChart data={deploymentDAUs.entries} interval="day" />
|
||||
</ChartSection>
|
||||
</div>
|
||||
)}
|
||||
|
||||
@@ -19,7 +19,7 @@ export const CreateGroupPage: FC = () => {
|
||||
<CreateGroupPageView
|
||||
onSubmit={async (data) => {
|
||||
const newGroup = await createGroupMutation.mutateAsync(data);
|
||||
navigate(`/groups/${newGroup.name}`);
|
||||
navigate(`/deployment/groups/${newGroup.name}`);
|
||||
}}
|
||||
error={createGroupMutation.error}
|
||||
isLoading={createGroupMutation.isLoading}
|
||||
|
||||
@@ -44,7 +44,7 @@ export const CreateGroupPageView: FC<CreateGroupPageViewProps> = ({
|
||||
initialTouched,
|
||||
});
|
||||
const getFieldHelpers = getFormHelpers<CreateGroupRequest>(form, error);
|
||||
const onCancel = () => navigate("/groups");
|
||||
const onCancel = () => navigate("/deployment/groups");
|
||||
|
||||
return (
|
||||
<Margins>
|
||||
|
||||
@@ -211,7 +211,7 @@ export const GroupPage: FC = () => {
|
||||
try {
|
||||
await deleteGroupMutation.mutateAsync(groupId);
|
||||
displaySuccess("Group deleted successfully.");
|
||||
navigate("/groups");
|
||||
navigate("/deployment/groups");
|
||||
} catch (error) {
|
||||
displayError(getErrorMessage(error, "Failed to delete group."));
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user