Compare commits

...

3 Commits

Author SHA1 Message Date
Ben Potter 0598aecf90 chore: add cherry-picks for Coder v2.17.1 (#15454)
Co-authored-by: Cian Johnston <cian@coder.com>
2024-11-08 15:24:55 -06:00
Stephen Kirby 5a6d23a4a3 fix(site/static/icon): add filebrowser icon (#15367) (#15370)
Fixes https://github.com/coder/coder/issues/15365

We used to hit
https://raw.githubusercontent.com/filebrowser/logo/master/icon_raw.svg
for the filebrowser icon but coder/modules#334 modified the icon URL to
point to a self-hosted icon.

I simply copied the icon from the `coder/modules` repo.

(cherry picked from commit dc29b81286)

Co-authored-by: Cian Johnston <cian@coder.com>
2024-11-04 16:30:11 -06:00
Stephen Kirby 9a444b3af2 chore: cherry pick PRs for 2.17 (#15339)
- [x]  https://github.com/coder/coder/pull/15305 
- [x]  https://github.com/coder/coder/pull/15307 
- [x]  https://github.com/coder/coder/pull/15270 
- [x]  https://github.com/coder/coder/pull/15261 
- [x]  https://github.com/coder/coder/pull/15281
- [x]  https://github.com/coder/coder/pull/15298
- Release Docs:
    - [x]  https://github.com/coder/coder/pull/15296
    - [x]  https://github.com/coder/coder/pull/15280
    - [x]  https://github.com/coder/coder/pull/15294
    - [x]  https://github.com/coder/coder/pull/15310

---------

Co-authored-by: Steven Masley <Emyrk@users.noreply.github.com>
Co-authored-by: Bruno Quaresma <bruno@coder.com>
Co-authored-by: Danielle Maywood <danielle@themaywoods.com>
Co-authored-by: Colin Adler <colin1adler@gmail.com>
Co-authored-by: Edward Angert <EdwardAngert@users.noreply.github.com>
2024-11-01 13:39:29 -05:00
86 changed files with 2708 additions and 347 deletions
+7 -1
View File
@@ -212,10 +212,16 @@ func enablePrometheus(
options.PrometheusRegistry.MustRegister(collectors.NewGoCollector())
options.PrometheusRegistry.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))
closeUsersFunc, err := prometheusmetrics.ActiveUsers(ctx, options.PrometheusRegistry, options.Database, 0)
closeActiveUsersFunc, err := prometheusmetrics.ActiveUsers(ctx, options.Logger.Named("active_user_metrics"), options.PrometheusRegistry, options.Database, 0)
if err != nil {
return nil, xerrors.Errorf("register active users prometheus metric: %w", err)
}
afterCtx(ctx, closeActiveUsersFunc)
closeUsersFunc, err := prometheusmetrics.Users(ctx, options.Logger.Named("user_metrics"), quartz.NewReal(), options.PrometheusRegistry, options.Database, 0)
if err != nil {
return nil, xerrors.Errorf("register users prometheus metric: %w", err)
}
afterCtx(ctx, closeUsersFunc)
closeWorkspacesFunc, err := prometheusmetrics.Workspaces(ctx, options.Logger.Named("workspaces_metrics"), options.PrometheusRegistry, options.Database, 0)
+1
View File
@@ -197,6 +197,7 @@ func (r *RootCmd) newCreateAdminUserCommand() *serpent.Command {
UpdatedAt: dbtime.Now(),
RBACRoles: []string{rbac.RoleOwner().String()},
LoginType: database.LoginTypePassword,
Status: "",
})
if err != nil {
return xerrors.Errorf("insert user: %w", err)
+69 -3
View File
@@ -106,6 +106,58 @@ Use a YAML configuration file when your server launch becomes unwieldy.
Write out the current server config as YAML to stdout.
EMAIL OPTIONS:
Configure how emails are sent.
--email-force-tls bool, $CODER_EMAIL_FORCE_TLS (default: false)
Force a TLS connection to the configured SMTP smarthost.
--email-from string, $CODER_EMAIL_FROM
The sender's address to use.
--email-hello string, $CODER_EMAIL_HELLO (default: localhost)
The hostname identifying the SMTP server.
--email-smarthost host:port, $CODER_EMAIL_SMARTHOST (default: localhost:587)
The intermediary SMTP host through which emails are sent.
EMAIL / EMAIL AUTHENTICATION OPTIONS:
Configure SMTP authentication options.
--email-auth-identity string, $CODER_EMAIL_AUTH_IDENTITY
Identity to use with PLAIN authentication.
--email-auth-password string, $CODER_EMAIL_AUTH_PASSWORD
Password to use with PLAIN/LOGIN authentication.
--email-auth-password-file string, $CODER_EMAIL_AUTH_PASSWORD_FILE
File from which to load password for use with PLAIN/LOGIN
authentication.
--email-auth-username string, $CODER_EMAIL_AUTH_USERNAME
Username to use with PLAIN/LOGIN authentication.
EMAIL / EMAIL TLS OPTIONS:
Configure TLS for your SMTP server target.
--email-tls-ca-cert-file string, $CODER_EMAIL_TLS_CACERTFILE
CA certificate file to use.
--email-tls-cert-file string, $CODER_EMAIL_TLS_CERTFILE
Certificate file to use.
--email-tls-cert-key-file string, $CODER_EMAIL_TLS_CERTKEYFILE
Certificate key file to use.
--email-tls-server-name string, $CODER_EMAIL_TLS_SERVERNAME
Server name to verify against the target certificate.
--email-tls-skip-verify bool, $CODER_EMAIL_TLS_SKIPVERIFY
Skip verification of the target server's certificate (insecure).
--email-tls-starttls bool, $CODER_EMAIL_TLS_STARTTLS
Enable STARTTLS to upgrade insecure SMTP connections using TLS.
INTROSPECTION / HEALTH CHECK OPTIONS:
--health-check-refresh duration, $CODER_HEALTH_CHECK_REFRESH (default: 10m0s)
Refresh interval for healthchecks.
@@ -349,54 +401,68 @@ Configure how notifications are processed and delivered.
NOTIFICATIONS / EMAIL OPTIONS:
Configure how email notifications are sent.
--notifications-email-force-tls bool, $CODER_NOTIFICATIONS_EMAIL_FORCE_TLS (default: false)
--notifications-email-force-tls bool, $CODER_NOTIFICATIONS_EMAIL_FORCE_TLS
Force a TLS connection to the configured SMTP smarthost.
DEPRECATED: Use --email-force-tls instead.
--notifications-email-from string, $CODER_NOTIFICATIONS_EMAIL_FROM
The sender's address to use.
DEPRECATED: Use --email-from instead.
--notifications-email-hello string, $CODER_NOTIFICATIONS_EMAIL_HELLO (default: localhost)
--notifications-email-hello string, $CODER_NOTIFICATIONS_EMAIL_HELLO
The hostname identifying the SMTP server.
DEPRECATED: Use --email-hello instead.
--notifications-email-smarthost host:port, $CODER_NOTIFICATIONS_EMAIL_SMARTHOST (default: localhost:587)
--notifications-email-smarthost host:port, $CODER_NOTIFICATIONS_EMAIL_SMARTHOST
The intermediary SMTP host through which emails are sent.
DEPRECATED: Use --email-smarthost instead.
NOTIFICATIONS / EMAIL / EMAIL AUTHENTICATION OPTIONS:
Configure SMTP authentication options.
--notifications-email-auth-identity string, $CODER_NOTIFICATIONS_EMAIL_AUTH_IDENTITY
Identity to use with PLAIN authentication.
DEPRECATED: Use --email-auth-identity instead.
--notifications-email-auth-password string, $CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD
Password to use with PLAIN/LOGIN authentication.
DEPRECATED: Use --email-auth-password instead.
--notifications-email-auth-password-file string, $CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD_FILE
File from which to load password for use with PLAIN/LOGIN
authentication.
DEPRECATED: Use --email-auth-password-file instead.
--notifications-email-auth-username string, $CODER_NOTIFICATIONS_EMAIL_AUTH_USERNAME
Username to use with PLAIN/LOGIN authentication.
DEPRECATED: Use --email-auth-username instead.
NOTIFICATIONS / EMAIL / EMAIL TLS OPTIONS:
Configure TLS for your SMTP server target.
--notifications-email-tls-ca-cert-file string, $CODER_NOTIFICATIONS_EMAIL_TLS_CACERTFILE
CA certificate file to use.
DEPRECATED: Use --email-tls-ca-cert-file instead.
--notifications-email-tls-cert-file string, $CODER_NOTIFICATIONS_EMAIL_TLS_CERTFILE
Certificate file to use.
DEPRECATED: Use --email-tls-cert-file instead.
--notifications-email-tls-cert-key-file string, $CODER_NOTIFICATIONS_EMAIL_TLS_CERTKEYFILE
Certificate key file to use.
DEPRECATED: Use --email-tls-cert-key-file instead.
--notifications-email-tls-server-name string, $CODER_NOTIFICATIONS_EMAIL_TLS_SERVERNAME
Server name to verify against the target certificate.
DEPRECATED: Use --email-tls-server-name instead.
--notifications-email-tls-skip-verify bool, $CODER_NOTIFICATIONS_EMAIL_TLS_SKIPVERIFY
Skip verification of the target server's certificate (insecure).
DEPRECATED: Use --email-tls-skip-verify instead.
--notifications-email-tls-starttls bool, $CODER_NOTIFICATIONS_EMAIL_TLS_STARTTLS
Enable STARTTLS to upgrade insecure SMTP connections using TLS.
DEPRECATED: Use --email-tls-starttls instead.
NOTIFICATIONS / WEBHOOK OPTIONS:
--notifications-webhook-endpoint url, $CODER_NOTIFICATIONS_WEBHOOK_ENDPOINT
+48 -3
View File
@@ -518,6 +518,51 @@ userQuietHoursSchedule:
# compatibility reasons, this will be removed in a future release.
# (default: false, type: bool)
allowWorkspaceRenames: false
# Configure how emails are sent.
email:
# The sender's address to use.
# (default: <unset>, type: string)
from: ""
# The intermediary SMTP host through which emails are sent.
# (default: localhost:587, type: host:port)
smarthost: localhost:587
# The hostname identifying the SMTP server.
# (default: localhost, type: string)
hello: localhost
# Force a TLS connection to the configured SMTP smarthost.
# (default: false, type: bool)
forceTLS: false
# Configure SMTP authentication options.
emailAuth:
# Identity to use with PLAIN authentication.
# (default: <unset>, type: string)
identity: ""
# Username to use with PLAIN/LOGIN authentication.
# (default: <unset>, type: string)
username: ""
# File from which to load password for use with PLAIN/LOGIN authentication.
# (default: <unset>, type: string)
passwordFile: ""
# Configure TLS for your SMTP server target.
emailTLS:
# Enable STARTTLS to upgrade insecure SMTP connections using TLS.
# (default: <unset>, type: bool)
startTLS: false
# Server name to verify against the target certificate.
# (default: <unset>, type: string)
serverName: ""
# Skip verification of the target server's certificate (insecure).
# (default: <unset>, type: bool)
insecureSkipVerify: false
# CA certificate file to use.
# (default: <unset>, type: string)
caCertFile: ""
# Certificate file to use.
# (default: <unset>, type: string)
certFile: ""
# Certificate key file to use.
# (default: <unset>, type: string)
certKeyFile: ""
# Configure how notifications are processed and delivered.
notifications:
# Which delivery method to use (available options: 'smtp', 'webhook').
@@ -532,13 +577,13 @@ notifications:
# (default: <unset>, type: string)
from: ""
# The intermediary SMTP host through which emails are sent.
# (default: localhost:587, type: host:port)
# (default: <unset>, type: host:port)
smarthost: localhost:587
# The hostname identifying the SMTP server.
# (default: localhost, type: string)
# (default: <unset>, type: string)
hello: localhost
# Force a TLS connection to the configured SMTP smarthost.
# (default: false, type: bool)
# (default: <unset>, type: bool)
forceTLS: false
# Configure SMTP authentication options.
emailAuth:
+8
View File
@@ -9896,6 +9896,14 @@ const docTemplate = `{
"password": {
"type": "string"
},
"user_status": {
"description": "UserStatus defaults to UserStatusDormant.",
"allOf": [
{
"$ref": "#/definitions/codersdk.UserStatus"
}
]
},
"username": {
"type": "string"
}
+8
View File
@@ -8809,6 +8809,14 @@
"password": {
"type": "string"
},
"user_status": {
"description": "UserStatus defaults to UserStatusDormant.",
"allOf": [
{
"$ref": "#/definitions/codersdk.UserStatus"
}
]
},
"username": {
"type": "string"
}
+33
View File
@@ -0,0 +1,33 @@
package audit
import (
"context"
"encoding/json"
"cdr.dev/slog"
)
// BackgroundSubsystem identifies which automated background process
// performed an audited action on behalf of the system.
type BackgroundSubsystem string

const (
	// BackgroundSubsystemDormancy tags audit entries produced by the
	// automatic dormancy routines.
	BackgroundSubsystemDormancy BackgroundSubsystem = "dormancy"
)

// BackgroundTaskFields returns the additional audit-log fields that mark an
// entry as generated automatically by the given subsystem rather than by a
// human actor.
func BackgroundTaskFields(subsystem BackgroundSubsystem) map[string]string {
	fields := make(map[string]string, 2)
	fields["automatic_actor"] = "coder"
	fields["automatic_subsystem"] = string(subsystem)
	return fields
}
// BackgroundTaskFieldsBytes is the JSON-encoded form of BackgroundTaskFields,
// suitable for storing directly as an audit log's additional fields. On a
// marshalling failure it logs the error and falls back to an empty JSON
// object so callers always receive valid JSON.
func BackgroundTaskFieldsBytes(ctx context.Context, logger slog.Logger, subsystem BackgroundSubsystem) []byte {
	payload, err := json.Marshal(BackgroundTaskFields(subsystem))
	if err != nil {
		logger.Error(ctx, "marshal additional fields for dormancy audit", slog.Error(err))
		return []byte("{}")
	}
	return payload
}
+7 -6
View File
@@ -62,12 +62,13 @@ type BackgroundAuditParams[T Auditable] struct {
Audit Auditor
Log slog.Logger
UserID uuid.UUID
RequestID uuid.UUID
Status int
Action database.AuditAction
OrganizationID uuid.UUID
IP string
UserID uuid.UUID
RequestID uuid.UUID
Status int
Action database.AuditAction
OrganizationID uuid.UUID
IP string
// todo: this should automatically marshal an interface{} instead of accepting a raw message.
AdditionalFields json.RawMessage
New T
+1
View File
@@ -702,6 +702,7 @@ func New(options *Options) *API {
apiKeyMiddleware := httpmw.ExtractAPIKeyMW(httpmw.ExtractAPIKeyConfig{
DB: options.Database,
ActivateDormantUser: ActivateDormantUser(options.Logger, &api.Auditor, options.Database),
OAuth2Configs: oauthConfigs,
RedirectToLogin: false,
DisableSessionExpiryRefresh: options.DeploymentValues.Sessions.DisableExpiryRefresh.Value(),
+3
View File
@@ -718,6 +718,9 @@ func createAnotherUserRetry(t testing.TB, client *codersdk.Client, organizationI
Name: RandomName(t),
Password: "SomeSecurePassword!",
OrganizationIDs: organizationIDs,
// Always create users as active in tests to ignore an extra audit log
// when logging in.
UserStatus: ptr.Ref(codersdk.UserStatusActive),
}
for _, m := range mutators {
m(&req)
+28 -5
View File
@@ -28,6 +28,7 @@ type Store interface {
wrapper
Ping(ctx context.Context) (time.Duration, error)
PGLocks(ctx context.Context) (PGLocks, error)
InTx(func(Store) error, *TxOptions) error
}
@@ -48,13 +49,26 @@ type DBTX interface {
GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error
}
func WithSerialRetryCount(count int) func(*sqlQuerier) {
return func(q *sqlQuerier) {
q.serialRetryCount = count
}
}
// New creates a new database store using a SQL database connection.
func New(sdb *sql.DB) Store {
func New(sdb *sql.DB, opts ...func(*sqlQuerier)) Store {
dbx := sqlx.NewDb(sdb, "postgres")
return &sqlQuerier{
q := &sqlQuerier{
db: dbx,
sdb: dbx,
// This is an arbitrary number.
serialRetryCount: 3,
}
for _, opt := range opts {
opt(q)
}
return q
}
// TxOptions is used to pass some execution metadata to the callers.
@@ -104,6 +118,10 @@ type querier interface {
type sqlQuerier struct {
sdb *sqlx.DB
db DBTX
// serialRetryCount is the number of times to retry a transaction
// if it fails with a serialization error.
serialRetryCount int
}
func (*sqlQuerier) Wrappers() []string {
@@ -143,11 +161,9 @@ func (q *sqlQuerier) InTx(function func(Store) error, txOpts *TxOptions) error {
// If we are in a transaction already, the parent InTx call will handle the retry.
// We do not want to duplicate those retries.
if !inTx && sqlOpts.Isolation == sql.LevelSerializable {
// This is an arbitrarily chosen number.
const retryAmount = 3
var err error
attempts := 0
for attempts = 0; attempts < retryAmount; attempts++ {
for attempts = 0; attempts < q.serialRetryCount; attempts++ {
txOpts.executionCount++
err = q.runTx(function, sqlOpts)
if err == nil {
@@ -203,3 +219,10 @@ func (q *sqlQuerier) runTx(function func(Store) error, txOpts *sql.TxOptions) er
}
return nil
}
// safeString dereferences s, substituting the literal "<nil>" when the
// pointer is nil so callers can format nullable columns without nil checks.
func safeString(s *string) string {
	if s != nil {
		return *s
	}
	return "<nil>"
}
+4
View File
@@ -603,6 +603,10 @@ func (q *querier) Ping(ctx context.Context) (time.Duration, error) {
return q.db.Ping(ctx)
}
func (q *querier) PGLocks(ctx context.Context) (database.PGLocks, error) {
return q.db.PGLocks(ctx)
}
// InTx runs the given function in a transaction.
func (q *querier) InTx(function func(querier database.Store) error, txOpts *database.TxOptions) error {
return q.db.InTx(func(tx database.Store) error {
+4 -1
View File
@@ -152,7 +152,10 @@ func TestDBAuthzRecursive(t *testing.T) {
for i := 2; i < method.Type.NumIn(); i++ {
ins = append(ins, reflect.New(method.Type.In(i)).Elem())
}
if method.Name == "InTx" || method.Name == "Ping" || method.Name == "Wrappers" {
if method.Name == "InTx" ||
method.Name == "Ping" ||
method.Name == "Wrappers" ||
method.Name == "PGLocks" {
continue
}
// Log the name of the last method, so if there is a panic, it is
+1
View File
@@ -34,6 +34,7 @@ var errMatchAny = xerrors.New("match any error")
var skipMethods = map[string]string{
"InTx": "Not relevant",
"Ping": "Not relevant",
"PGLocks": "Not relevant",
"Wrappers": "Not relevant",
"AcquireLock": "Not relevant",
"TryAcquireLock": "Not relevant",
+127
View File
@@ -0,0 +1,127 @@
package dbfake
import (
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/dbgen"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/testutil"
)
// OrganizationBuilder assembles an organization and related rows (members,
// groups) for tests. Configure it with the chained builder methods, then call
// Do to insert everything into the store.
type OrganizationBuilder struct {
	t    *testing.T
	db   database.Store
	seed database.Organization
	// allUsersAllowance, when > 0, overrides the quota allowance on the
	// auto-created "all users" group.
	allUsersAllowance int32
	// members holds the IDs of users to insert as organization members.
	members []uuid.UUID
	// groups maps each group seed to the IDs of the users to add to it.
	groups map[database.Group][]uuid.UUID
}

// Organization returns a builder that will create a new organization in db.
func Organization(t *testing.T, db database.Store) OrganizationBuilder {
	return OrganizationBuilder{
		t:       t,
		db:      db,
		members: []uuid.UUID{},
		groups:  make(map[database.Group][]uuid.UUID),
	}
}

// OrganizationResponse collects every row created by Do.
type OrganizationResponse struct {
	Org           database.Organization
	AllUsersGroup database.Group
	Members       []database.OrganizationMember
	Groups        []database.Group
}
// EveryoneAllowance sets the quota allowance applied to the organization's
// auto-created "all users" group when Do runs.
func (b OrganizationBuilder) EveryoneAllowance(allowance int) OrganizationBuilder {
	//nolint: revive // returns modified struct
	b.allUsersAllowance = int32(allowance)
	return b
}

// Seed provides the organization row used as a template when Do inserts the
// organization.
func (b OrganizationBuilder) Seed(seed database.Organization) OrganizationBuilder {
	//nolint: revive // returns modified struct
	b.seed = seed
	return b
}

// Members queues the given users to be added as organization members by Do.
func (b OrganizationBuilder) Members(users ...database.User) OrganizationBuilder {
	for _, u := range users {
		//nolint: revive // returns modified struct
		b.members = append(b.members, u.ID)
	}
	return b
}
// Group registers a group (built from seed) to be created in the
// organization by Do, with the given users added as its members.
func (b OrganizationBuilder) Group(seed database.Group, members ...database.User) OrganizationBuilder {
	ids := make([]uuid.UUID, 0, len(members))
	for _, member := range members {
		ids = append(ids, member.ID)
	}
	//nolint: revive // returns modified struct
	b.groups[seed] = ids
	return b
}
// Do inserts the organization plus everything configured on the builder and
// returns the created rows. It should only be called once per builder.
func (b OrganizationBuilder) Do() OrganizationResponse {
	org := dbgen.Organization(b.t, b.db, b.seed)

	ctx := testutil.Context(b.t, testutil.WaitShort)
	//nolint:gocritic // builder code needs perms
	ctx = dbauthz.AsSystemRestricted(ctx)
	// Every organization gets the implicit "all users" group.
	everyone, err := b.db.InsertAllUsersGroup(ctx, org.ID)
	require.NoError(b.t, err)

	// Apply the quota allowance configured via EveryoneAllowance, keeping
	// the group's other fields as inserted.
	if b.allUsersAllowance > 0 {
		everyone, err = b.db.UpdateGroupByID(ctx, database.UpdateGroupByIDParams{
			Name:           everyone.Name,
			DisplayName:    everyone.DisplayName,
			AvatarURL:      everyone.AvatarURL,
			QuotaAllowance: b.allUsersAllowance,
			ID:             everyone.ID,
		})
		require.NoError(b.t, err)
	}

	// Insert organization memberships queued via Members.
	members := make([]database.OrganizationMember, 0)
	if len(b.members) > 0 {
		for _, u := range b.members {
			newMem := dbgen.OrganizationMember(b.t, b.db, database.OrganizationMember{
				UserID:         u,
				OrganizationID: org.ID,
				CreatedAt:      dbtime.Now(),
				UpdatedAt:      dbtime.Now(),
				Roles:          nil,
			})
			members = append(members, newMem)
		}
	}

	// Create each group queued via Group, scoped to this organization, and
	// add its members.
	groups := make([]database.Group, 0)
	if len(b.groups) > 0 {
		for g, users := range b.groups {
			g.OrganizationID = org.ID
			group := dbgen.Group(b.t, b.db, g)
			groups = append(groups, group)

			for _, u := range users {
				dbgen.GroupMember(b.t, b.db, database.GroupMemberTable{
					UserID:  u,
					GroupID: group.ID,
				})
			}
		}
	}

	return OrganizationResponse{
		Org:           org,
		AllUsersGroup: everyone,
		Members:       members,
		Groups:        groups,
	}
}
+3
View File
@@ -342,6 +342,7 @@ func User(t testing.TB, db database.Store, orig database.User) database.User {
UpdatedAt: takeFirst(orig.UpdatedAt, dbtime.Now()),
RBACRoles: takeFirstSlice(orig.RBACRoles, []string{}),
LoginType: takeFirst(orig.LoginType, database.LoginTypePassword),
Status: string(takeFirst(orig.Status, database.UserStatusDormant)),
})
require.NoError(t, err, "insert user")
@@ -407,6 +408,8 @@ func OrganizationMember(t testing.TB, db database.Store, orig database.Organizat
}
func Group(t testing.TB, db database.Store, orig database.Group) database.Group {
t.Helper()
name := takeFirst(orig.Name, testutil.GetRandomName(t))
group, err := db.InsertGroup(genCtx, database.InsertGroupParams{
ID: takeFirst(orig.ID, uuid.New()),
+11 -1
View File
@@ -339,6 +339,10 @@ func (*FakeQuerier) Ping(_ context.Context) (time.Duration, error) {
return 0, nil
}
func (*FakeQuerier) PGLocks(_ context.Context) (database.PGLocks, error) {
return []database.PGLock{}, nil
}
func (tx *fakeTx) AcquireLock(_ context.Context, id int64) error {
if _, ok := tx.FakeQuerier.locks[id]; ok {
return xerrors.Errorf("cannot acquire lock %d: already held", id)
@@ -7709,6 +7713,11 @@ func (q *FakeQuerier) InsertUser(_ context.Context, arg database.InsertUserParam
}
}
status := database.UserStatusDormant
if arg.Status != "" {
status = database.UserStatus(arg.Status)
}
user := database.User{
ID: arg.ID,
Email: arg.Email,
@@ -7717,7 +7726,7 @@ func (q *FakeQuerier) InsertUser(_ context.Context, arg database.InsertUserParam
UpdatedAt: arg.UpdatedAt,
Username: arg.Username,
Name: arg.Name,
Status: database.UserStatusDormant,
Status: status,
RBACRoles: arg.RBACRoles,
LoginType: arg.LoginType,
}
@@ -8640,6 +8649,7 @@ func (q *FakeQuerier) UpdateInactiveUsersToDormant(_ context.Context, params dat
updated = append(updated, database.UpdateInactiveUsersToDormantRow{
ID: user.ID,
Email: user.Email,
Username: user.Username,
LastSeenAt: user.LastSeenAt,
})
}
@@ -66,6 +66,13 @@ func (m queryMetricsStore) Ping(ctx context.Context) (time.Duration, error) {
return duration, err
}
func (m queryMetricsStore) PGLocks(ctx context.Context) (database.PGLocks, error) {
start := time.Now()
locks, err := m.s.PGLocks(ctx)
m.queryLatencies.WithLabelValues("PGLocks").Observe(time.Since(start).Seconds())
return locks, err
}
func (m queryMetricsStore) InTx(f func(database.Store) error, options *database.TxOptions) error {
return m.dbMetrics.InTx(f, options)
}
+15
View File
@@ -4299,6 +4299,21 @@ func (mr *MockStoreMockRecorder) OrganizationMembers(arg0, arg1 any) *gomock.Cal
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OrganizationMembers", reflect.TypeOf((*MockStore)(nil).OrganizationMembers), arg0, arg1)
}
// PGLocks mocks base method.
func (m *MockStore) PGLocks(arg0 context.Context) (database.PGLocks, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PGLocks", arg0)
ret0, _ := ret[0].(database.PGLocks)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// PGLocks indicates an expected call of PGLocks.
func (mr *MockStoreMockRecorder) PGLocks(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PGLocks", reflect.TypeOf((*MockStore)(nil).PGLocks), arg0)
}
// Ping mocks base method.
func (m *MockStore) Ping(arg0 context.Context) (time.Duration, error) {
m.ctrl.T.Helper()
+2 -1
View File
@@ -135,7 +135,8 @@ func NewDB(t testing.TB, opts ...Option) (database.Store, pubsub.Pubsub) {
if o.dumpOnFailure {
t.Cleanup(func() { DumpOnFailure(t, connectionURL) })
}
db = database.New(sqlDB)
// Unit tests should not retry serial transaction failures.
db = database.New(sqlDB, database.WithSerialRetryCount(1))
ps, err = pubsub.New(context.Background(), o.logger, sqlDB, connectionURL)
require.NoError(t, err)
+73
View File
@@ -0,0 +1,73 @@
package dbtestutil
import (
"sync"
"testing"
"github.com/stretchr/testify/assert"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/coderd/database"
)
// DBTx wraps a database.Store scoped to a single open transaction, letting a
// test hold the transaction open and release it later via Done.
type DBTx struct {
	database.Store
	mu sync.Mutex
	// done is closed by Done to let the InTx callback return.
	done chan error
	// finalErr delivers the error returned by InTx once it completes.
	finalErr chan error
}
// StartTx starts a transaction and returns a DBTx object. This allows running
// 2 transactions concurrently in a test more easily.
// Example:
//
//	a := StartTx(t, db, opts)
//	b := StartTx(t, db, opts)
//
//	a.GetUsers(...)
//	b.GetUsers(...)
//
//	require.NoError(t, a.Done())
func StartTx(t *testing.T, db database.Store, opts *database.TxOptions) *DBTx {
	done := make(chan error)
	finalErr := make(chan error)
	// txC hands the transaction-scoped store from the InTx callback back to
	// this function exactly once.
	txC := make(chan database.Store)

	go func() {
		t.Helper()
		once := sync.Once{}
		count := 0

		err := db.InTx(func(store database.Store) error {
			// InTx can be retried
			once.Do(func() {
				txC <- store
			})
			count++
			if count > 1 {
				// If you recursively call InTx, then don't use this.
				t.Logf("InTx called more than once: %d", count)
				assert.NoError(t, xerrors.New("InTx called more than once, this is not allowed with the StartTx helper"))
			}

			// Block here until Done closes the channel, keeping the
			// transaction open for the test.
			<-done
			// Just return nil. The caller should be checking their own errors.
			return nil
		}, opts)
		finalErr <- err
	}()

	txStore := <-txC
	close(txC)

	return &DBTx{Store: txStore, done: done, finalErr: finalErr}
}
// Done releases the transaction by unblocking the InTx callback, then waits
// for InTx to return and reports its error.
// Done can only be called once. If you call it twice, it will panic
// (the done channel is closed twice).
func (tx *DBTx) Done() error {
	tx.mu.Lock()
	defer tx.mu.Unlock()

	close(tx.done)
	return <-tx.finalErr
}
+119
View File
@@ -0,0 +1,119 @@
package database
import (
"context"
"fmt"
"reflect"
"sort"
"strings"
"time"
"github.com/jmoiron/sqlx"
"github.com/coder/coder/v2/coderd/util/slice"
)
// PGLock is one row of the pg_locks system view. Most columns are nullable,
// hence the pointer fields.
// PGLock docs see: https://www.postgresql.org/docs/current/view-pg-locks.html#VIEW-PG-LOCKS
type PGLock struct {
	// LockType see: https://www.postgresql.org/docs/current/monitoring-stats.html#WAIT-EVENT-LOCK-TABLE
	LockType           *string    `db:"locktype"`
	Database           *string    `db:"database"` // oid
	Relation           *string    `db:"relation"` // oid
	RelationName       *string    `db:"relation_name"`
	Page               *int       `db:"page"`
	Tuple              *int       `db:"tuple"`
	VirtualXID         *string    `db:"virtualxid"`
	TransactionID      *string    `db:"transactionid"` // xid
	ClassID            *string    `db:"classid"`       // oid
	ObjID              *string    `db:"objid"`         // oid
	ObjSubID           *int       `db:"objsubid"`
	VirtualTransaction *string    `db:"virtualtransaction"`
	PID                int        `db:"pid"`
	Mode               *string    `db:"mode"`
	Granted            bool       `db:"granted"`
	FastPath           *bool      `db:"fastpath"`
	WaitStart          *time.Time `db:"waitstart"`
}

// Equal reports whether every field of the two locks matches.
func (l PGLock) Equal(b PGLock) bool {
	// reflect.DeepEqual follows the pointer fields, so this compares the
	// pointed-to values rather than pointer identity.
	return reflect.DeepEqual(l, b)
}
// String renders the lock as a single human-readable line for debugging:
// pid, transaction id, granted/waiting state, relation, lock type, mode, and
// type-specific details.
func (l PGLock) String() string {
	granted := "granted"
	if !l.Granted {
		granted = "waiting"
	}
	// Page and Tuple are nullable in pg_locks; dereference defensively so a
	// diagnostic printer can never panic. -1 marks a missing value.
	safeInt := func(i *int) int {
		if i == nil {
			return -1
		}
		return *i
	}
	var details string
	switch safeString(l.LockType) {
	case "relation":
		details = ""
	case "page":
		details = fmt.Sprintf("page=%d", safeInt(l.Page))
	case "tuple":
		details = fmt.Sprintf("page=%d tuple=%d", safeInt(l.Page), safeInt(l.Tuple))
	case "virtualxid":
		details = "waiting to acquire virtual tx id lock"
	default:
		details = "???"
	}
	return fmt.Sprintf("%d-%5s [%s] %s/%s/%s: %s",
		l.PID,
		safeString(l.TransactionID),
		granted,
		safeString(l.RelationName),
		safeString(l.LockType),
		safeString(l.Mode),
		details,
	)
}
// PGLocks returns a list of all locks in the database currently in use.
// It queries the pg_locks system view directly with raw SQL (bypassing sqlc)
// and joins in the human-readable relation name. Intended as a debugging aid
// for diagnosing lock contention.
func (q *sqlQuerier) PGLocks(ctx context.Context) (PGLocks, error) {
	rows, err := q.sdb.QueryContext(ctx, `
SELECT
relation::regclass AS relation_name,
*
FROM pg_locks;
`)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var locks []PGLock
	// StructScan maps columns onto the db-tagged fields of PGLock.
	err = sqlx.StructScan(rows, &locks)
	if err != nil {
		return nil, err
	}
	return locks, err
}
// PGLocks is a set of PGLock rows, typically a full snapshot of pg_locks.
type PGLocks []PGLock

// String renders the locks one per line, grouped by relation name for
// readability. Sorting is done on a copy so that printing a lock set never
// reorders the caller's slice as a side effect.
func (l PGLocks) String() string {
	locks := make(PGLocks, len(l))
	copy(locks, l)
	// Try to group things together by relation name.
	sort.Slice(locks, func(i, j int) bool {
		return safeString(locks[i].RelationName) < safeString(locks[j].RelationName)
	})

	var out strings.Builder
	for i, lock := range locks {
		if i != 0 {
			_, _ = out.WriteString("\n")
		}
		_, _ = out.WriteString(lock.String())
	}
	return out.String()
}
// Difference returns the difference between two sets of locks.
// This is helpful to determine what changed between the two sets.
// Locks are matched by full value equality (PGLock.Equal).
// NOTE(review): which return ("new" vs "removed") reflects which side is
// determined by slice.SymmetricDifferenceFunc's argument order — confirm
// against that helper's documentation before relying on it.
func (l PGLocks) Difference(to PGLocks) (new PGLocks, removed PGLocks) {
	return slice.SymmetricDifferenceFunc(l, to, func(a, b PGLock) bool {
		return a.Equal(b)
	})
}
+36 -13
View File
@@ -6736,23 +6736,33 @@ const getQuotaConsumedForUser = `-- name: GetQuotaConsumedForUser :one
WITH latest_builds AS (
SELECT
DISTINCT ON
(workspace_id) id,
workspace_id,
daily_cost
(wb.workspace_id) wb.workspace_id,
wb.daily_cost
FROM
workspace_builds wb
-- This INNER JOIN prevents a seq scan of the workspace_builds table.
-- Limit the rows to the absolute minimum required, which is all workspaces
-- in a given organization for a given user.
INNER JOIN
workspaces on wb.workspace_id = workspaces.id
WHERE
workspaces.owner_id = $1 AND
workspaces.organization_id = $2
ORDER BY
workspace_id,
created_at DESC
wb.workspace_id,
wb.created_at DESC
)
SELECT
coalesce(SUM(daily_cost), 0)::BIGINT
FROM
workspaces
JOIN latest_builds ON
INNER JOIN latest_builds ON
latest_builds.workspace_id = workspaces.id
WHERE NOT
deleted AND
WHERE
NOT deleted AND
-- We can likely remove these conditions since we check above.
-- But it does not hurt to be defensive and make sure future query changes
-- do not break anything.
workspaces.owner_id = $1 AND
workspaces.organization_id = $2
`
@@ -10345,10 +10355,15 @@ INSERT INTO
created_at,
updated_at,
rbac_roles,
login_type
login_type,
status
)
VALUES
($1, $2, $3, $4, $5, $6, $7, $8, $9) RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, theme_preference, name, github_com_user_id, hashed_one_time_passcode, one_time_passcode_expires_at
($1, $2, $3, $4, $5, $6, $7, $8, $9,
-- if the status passed in is empty, fallback to dormant, which is what
-- we were doing before.
COALESCE(NULLIF($10::text, '')::user_status, 'dormant'::user_status)
) RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, theme_preference, name, github_com_user_id, hashed_one_time_passcode, one_time_passcode_expires_at
`
type InsertUserParams struct {
@@ -10361,6 +10376,7 @@ type InsertUserParams struct {
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
RBACRoles pq.StringArray `db:"rbac_roles" json:"rbac_roles"`
LoginType LoginType `db:"login_type" json:"login_type"`
Status string `db:"status" json:"status"`
}
func (q *sqlQuerier) InsertUser(ctx context.Context, arg InsertUserParams) (User, error) {
@@ -10374,6 +10390,7 @@ func (q *sqlQuerier) InsertUser(ctx context.Context, arg InsertUserParams) (User
arg.UpdatedAt,
arg.RBACRoles,
arg.LoginType,
arg.Status,
)
var i User
err := row.Scan(
@@ -10408,7 +10425,7 @@ SET
WHERE
last_seen_at < $2 :: timestamp
AND status = 'active'::user_status
RETURNING id, email, last_seen_at
RETURNING id, email, username, last_seen_at
`
type UpdateInactiveUsersToDormantParams struct {
@@ -10419,6 +10436,7 @@ type UpdateInactiveUsersToDormantParams struct {
type UpdateInactiveUsersToDormantRow struct {
ID uuid.UUID `db:"id" json:"id"`
Email string `db:"email" json:"email"`
Username string `db:"username" json:"username"`
LastSeenAt time.Time `db:"last_seen_at" json:"last_seen_at"`
}
@@ -10431,7 +10449,12 @@ func (q *sqlQuerier) UpdateInactiveUsersToDormant(ctx context.Context, arg Updat
var items []UpdateInactiveUsersToDormantRow
for rows.Next() {
var i UpdateInactiveUsersToDormantRow
if err := rows.Scan(&i.ID, &i.Email, &i.LastSeenAt); err != nil {
if err := rows.Scan(
&i.ID,
&i.Email,
&i.Username,
&i.LastSeenAt,
); err != nil {
return nil, err
}
items = append(items, i)
@@ -14947,7 +14970,7 @@ WHERE
-- Filter by owner_name
AND CASE
WHEN $8 :: text != '' THEN
workspaces.owner_id = (SELECT id FROM users WHERE lower(owner_username) = lower($8) AND deleted = false)
workspaces.owner_id = (SELECT id FROM users WHERE lower(users.username) = lower($8) AND deleted = false)
ELSE true
END
-- Filter by template_name
+18 -8
View File
@@ -18,23 +18,33 @@ INNER JOIN groups ON
WITH latest_builds AS (
SELECT
DISTINCT ON
(workspace_id) id,
workspace_id,
daily_cost
(wb.workspace_id) wb.workspace_id,
wb.daily_cost
FROM
workspace_builds wb
-- This INNER JOIN prevents a seq scan of the workspace_builds table.
-- Limit the rows to the absolute minimum required, which is all workspaces
-- in a given organization for a given user.
INNER JOIN
workspaces on wb.workspace_id = workspaces.id
WHERE
workspaces.owner_id = @owner_id AND
workspaces.organization_id = @organization_id
ORDER BY
workspace_id,
created_at DESC
wb.workspace_id,
wb.created_at DESC
)
SELECT
coalesce(SUM(daily_cost), 0)::BIGINT
FROM
workspaces
JOIN latest_builds ON
INNER JOIN latest_builds ON
latest_builds.workspace_id = workspaces.id
WHERE NOT
deleted AND
WHERE
NOT deleted AND
-- We can likely remove these conditions since we check above.
-- But it does not hurt to be defensive and make sure future query changes
-- do not break anything.
workspaces.owner_id = @owner_id AND
workspaces.organization_id = @organization_id
;
+8 -3
View File
@@ -67,10 +67,15 @@ INSERT INTO
created_at,
updated_at,
rbac_roles,
login_type
login_type,
status
)
VALUES
($1, $2, $3, $4, $5, $6, $7, $8, $9) RETURNING *;
($1, $2, $3, $4, $5, $6, $7, $8, $9,
-- if the status passed in is empty, fallback to dormant, which is what
-- we were doing before.
COALESCE(NULLIF(@status::text, '')::user_status, 'dormant'::user_status)
) RETURNING *;
-- name: UpdateUserProfile :one
UPDATE
@@ -286,7 +291,7 @@ SET
WHERE
last_seen_at < @last_seen_after :: timestamp
AND status = 'active'::user_status
RETURNING id, email, last_seen_at;
RETURNING id, email, username, last_seen_at;
-- AllUserIDs returns all UserIDs regardless of user status or deletion.
-- name: AllUserIDs :many
+1 -1
View File
@@ -233,7 +233,7 @@ WHERE
-- Filter by owner_name
AND CASE
WHEN @owner_username :: text != '' THEN
workspaces.owner_id = (SELECT id FROM users WHERE lower(owner_username) = lower(@owner_username) AND deleted = false)
workspaces.owner_id = (SELECT id FROM users WHERE lower(users.username) = lower(@owner_username) AND deleted = false)
ELSE true
END
-- Filter by template_name
+9 -9
View File
@@ -82,6 +82,7 @@ const (
type ExtractAPIKeyConfig struct {
DB database.Store
ActivateDormantUser func(ctx context.Context, u database.User) (database.User, error)
OAuth2Configs *OAuth2Configs
RedirectToLogin bool
DisableSessionExpiryRefresh bool
@@ -414,21 +415,20 @@ func ExtractAPIKey(rw http.ResponseWriter, r *http.Request, cfg ExtractAPIKeyCon
})
}
if userStatus == database.UserStatusDormant {
// If coder confirms that the dormant user is valid, it can switch their account to active.
// nolint:gocritic
u, err := cfg.DB.UpdateUserStatus(dbauthz.AsSystemRestricted(ctx), database.UpdateUserStatusParams{
ID: key.UserID,
Status: database.UserStatusActive,
UpdatedAt: dbtime.Now(),
if userStatus == database.UserStatusDormant && cfg.ActivateDormantUser != nil {
id, _ := uuid.Parse(actor.ID)
user, err := cfg.ActivateDormantUser(ctx, database.User{
ID: id,
Username: actor.FriendlyName,
Status: userStatus,
})
if err != nil {
return write(http.StatusInternalServerError, codersdk.Response{
Message: internalErrorMessage,
Detail: fmt.Sprintf("can't activate a dormant user: %s", err.Error()),
Detail: fmt.Sprintf("update user status: %s", err.Error()),
})
}
userStatus = u.Status
userStatus = user.Status
}
if userStatus != database.UserStatusActive {
+2 -2
View File
@@ -453,7 +453,7 @@ func (s *SMTPHandler) auth(ctx context.Context, mechs string) (sasl.Client, erro
continue
}
if password == "" {
errs = multierror.Append(errs, xerrors.New("cannot use PLAIN auth, password not defined (see CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD)"))
errs = multierror.Append(errs, xerrors.New("cannot use PLAIN auth, password not defined (see CODER_EMAIL_AUTH_PASSWORD)"))
continue
}
@@ -475,7 +475,7 @@ func (s *SMTPHandler) auth(ctx context.Context, mechs string) (sasl.Client, erro
continue
}
if password == "" {
errs = multierror.Append(errs, xerrors.New("cannot use LOGIN auth, password not defined (see CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD)"))
errs = multierror.Append(errs, xerrors.New("cannot use LOGIN auth, password not defined (see CODER_EMAIL_AUTH_PASSWORD)"))
continue
}
+55 -1
View File
@@ -12,6 +12,7 @@ import (
"github.com/google/uuid"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/xerrors"
"tailscale.com/tailcfg"
"cdr.dev/slog"
@@ -22,12 +23,13 @@ import (
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/tailnet"
"github.com/coder/quartz"
)
const defaultRefreshRate = time.Minute
// ActiveUsers tracks the number of users that have authenticated within the past hour.
func ActiveUsers(ctx context.Context, registerer prometheus.Registerer, db database.Store, duration time.Duration) (func(), error) {
func ActiveUsers(ctx context.Context, logger slog.Logger, registerer prometheus.Registerer, db database.Store, duration time.Duration) (func(), error) {
if duration == 0 {
duration = defaultRefreshRate
}
@@ -58,6 +60,7 @@ func ActiveUsers(ctx context.Context, registerer prometheus.Registerer, db datab
apiKeys, err := db.GetAPIKeysLastUsedAfter(ctx, dbtime.Now().Add(-1*time.Hour))
if err != nil {
logger.Error(ctx, "get api keys for active users prometheus metric", slog.Error(err))
continue
}
distinctUsers := map[uuid.UUID]struct{}{}
@@ -73,6 +76,57 @@ func ActiveUsers(ctx context.Context, registerer prometheus.Registerer, db datab
}, nil
}
// Users tracks the total number of registered users, partitioned by status.
func Users(ctx context.Context, logger slog.Logger, clk quartz.Clock, registerer prometheus.Registerer, db database.Store, duration time.Duration) (func(), error) {
if duration == 0 {
// It's not super important this tracks real-time.
duration = defaultRefreshRate * 5
}
gauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "coderd",
Subsystem: "api",
Name: "total_user_count",
Help: "The total number of registered users, partitioned by status.",
}, []string{"status"})
err := registerer.Register(gauge)
if err != nil {
return nil, xerrors.Errorf("register total_user_count gauge: %w", err)
}
ctx, cancelFunc := context.WithCancel(ctx)
done := make(chan struct{})
ticker := clk.NewTicker(duration)
go func() {
defer close(done)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
}
gauge.Reset()
//nolint:gocritic // This is a system service that needs full access
//to the users table.
users, err := db.GetUsers(dbauthz.AsSystemRestricted(ctx), database.GetUsersParams{})
if err != nil {
logger.Error(ctx, "get all users for prometheus metrics", slog.Error(err))
continue
}
for _, user := range users {
gauge.WithLabelValues(string(user.Status)).Inc()
}
}
}()
return func() {
cancelFunc()
<-done
}, nil
}
// Workspaces tracks the total number of workspaces with labels on status.
func Workspaces(ctx context.Context, logger slog.Logger, registerer prometheus.Registerer, db database.Store, duration time.Duration) (func(), error) {
if duration == 0 {
@@ -38,6 +38,7 @@ import (
"github.com/coder/coder/v2/tailnet"
"github.com/coder/coder/v2/tailnet/tailnettest"
"github.com/coder/coder/v2/testutil"
"github.com/coder/quartz"
)
func TestActiveUsers(t *testing.T) {
@@ -98,7 +99,7 @@ func TestActiveUsers(t *testing.T) {
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
registry := prometheus.NewRegistry()
closeFunc, err := prometheusmetrics.ActiveUsers(context.Background(), registry, tc.Database(t), time.Millisecond)
closeFunc, err := prometheusmetrics.ActiveUsers(context.Background(), slogtest.Make(t, nil), registry, tc.Database(t), time.Millisecond)
require.NoError(t, err)
t.Cleanup(closeFunc)
@@ -112,6 +113,100 @@ func TestActiveUsers(t *testing.T) {
}
}
func TestUsers(t *testing.T) {
t.Parallel()
for _, tc := range []struct {
Name string
Database func(t *testing.T) database.Store
Count map[database.UserStatus]int
}{{
Name: "None",
Database: func(t *testing.T) database.Store {
return dbmem.New()
},
Count: map[database.UserStatus]int{},
}, {
Name: "One",
Database: func(t *testing.T) database.Store {
db := dbmem.New()
dbgen.User(t, db, database.User{Status: database.UserStatusActive})
return db
},
Count: map[database.UserStatus]int{database.UserStatusActive: 1},
}, {
Name: "MultipleStatuses",
Database: func(t *testing.T) database.Store {
db := dbmem.New()
dbgen.User(t, db, database.User{Status: database.UserStatusActive})
dbgen.User(t, db, database.User{Status: database.UserStatusDormant})
return db
},
Count: map[database.UserStatus]int{database.UserStatusActive: 1, database.UserStatusDormant: 1},
}, {
Name: "MultipleActive",
Database: func(t *testing.T) database.Store {
db := dbmem.New()
dbgen.User(t, db, database.User{Status: database.UserStatusActive})
dbgen.User(t, db, database.User{Status: database.UserStatusActive})
dbgen.User(t, db, database.User{Status: database.UserStatusActive})
return db
},
Count: map[database.UserStatus]int{database.UserStatusActive: 3},
}} {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
defer cancel()
registry := prometheus.NewRegistry()
mClock := quartz.NewMock(t)
db := tc.Database(t)
closeFunc, err := prometheusmetrics.Users(context.Background(), slogtest.Make(t, nil), mClock, registry, db, time.Millisecond)
require.NoError(t, err)
t.Cleanup(closeFunc)
_, w := mClock.AdvanceNext()
w.MustWait(ctx)
checkFn := func() bool {
metrics, err := registry.Gather()
if err != nil {
return false
}
// If we get no metrics and we know none should exist, bail
// early. If we get no metrics but we expect some, retry.
if len(metrics) == 0 {
return len(tc.Count) == 0
}
for _, metric := range metrics[0].Metric {
if tc.Count[database.UserStatus(*metric.Label[0].Value)] != int(metric.Gauge.GetValue()) {
return false
}
}
return true
}
require.Eventually(t, checkFn, testutil.WaitShort, testutil.IntervalFast)
// Add another dormant user and ensure it updates
dbgen.User(t, db, database.User{Status: database.UserStatusDormant})
tc.Count[database.UserStatusDormant]++
_, w = mClock.AdvanceNext()
w.MustWait(ctx)
require.Eventually(t, checkFn, testutil.WaitShort, testutil.IntervalFast)
})
}
}
func TestWorkspaceLatestBuildTotals(t *testing.T) {
t.Parallel()
+7 -3
View File
@@ -523,8 +523,8 @@ func TestAcquirer_MatchTags(t *testing.T) {
// Generate a table that can be copy-pasted into docs/admin/provisioners.md
lines := []string{
"\n",
"| Provisioner Tags | Job Tags | Can Run Job? |",
"|------------------|----------|--------------|",
"| Provisioner Tags | Job Tags | Same Org | Can Run Job? |",
"|------------------|----------|----------|--------------|",
}
// turn the JSON map into k=v for readability
kvs := func(m map[string]string) string {
@@ -539,10 +539,14 @@ func TestAcquirer_MatchTags(t *testing.T) {
}
for _, tt := range testCases {
acquire := "✅"
sameOrg := "✅"
if !tt.expectAcquire {
acquire = "❌"
}
s := fmt.Sprintf("| %s | %s | %s |", kvs(tt.acquireJobTags), kvs(tt.provisionerJobTags), acquire)
if tt.unmatchedOrg {
sameOrg = "❌"
}
s := fmt.Sprintf("| %s | %s | %s | %s |", kvs(tt.acquireJobTags), kvs(tt.provisionerJobTags), sameOrg, acquire)
lines = append(lines, s)
}
t.Logf("You can paste this into docs/admin/provisioners.md")
@@ -1063,6 +1063,7 @@ func (s *server) FailJob(ctx context.Context, failJob *proto.FailedJob) (*proto.
wriBytes, err := json.Marshal(buildResourceInfo)
if err != nil {
s.Logger.Error(ctx, "marshal workspace resource info for failed job", slog.Error(err))
wriBytes = []byte("{}")
}
bag := audit.BaggageFromContext(ctx)
+67 -17
View File
@@ -12,6 +12,7 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/coreos/go-oidc/v3/oidc"
@@ -27,6 +28,7 @@ import (
"github.com/coder/coder/v2/coderd/cryptokeys"
"github.com/coder/coder/v2/coderd/idpsync"
"github.com/coder/coder/v2/coderd/jwtutils"
"github.com/coder/coder/v2/coderd/util/ptr"
"github.com/coder/coder/v2/coderd/apikey"
"github.com/coder/coder/v2/coderd/audit"
@@ -565,20 +567,13 @@ func (api *API) loginRequest(ctx context.Context, rw http.ResponseWriter, req co
return user, rbac.Subject{}, false
}
if user.Status == database.UserStatusDormant {
//nolint:gocritic // System needs to update status of the user account (dormant -> active).
user, err = api.Database.UpdateUserStatus(dbauthz.AsSystemRestricted(ctx), database.UpdateUserStatusParams{
ID: user.ID,
Status: database.UserStatusActive,
UpdatedAt: dbtime.Now(),
user, err = ActivateDormantUser(api.Logger, &api.Auditor, api.Database)(ctx, user)
if err != nil {
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
Message: "Internal error.",
Detail: err.Error(),
})
if err != nil {
logger.Error(ctx, "unable to update user status to active", slog.Error(err))
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
Message: "Internal error occurred. Try again later, or contact an admin for assistance.",
})
return user, rbac.Subject{}, false
}
return user, rbac.Subject{}, false
}
subject, userStatus, err := httpmw.UserRBACSubject(ctx, api.Database, user.ID, rbac.ScopeAll)
@@ -601,6 +596,42 @@ func (api *API) loginRequest(ctx context.Context, rw http.ResponseWriter, req co
return user, subject, true
}
func ActivateDormantUser(logger slog.Logger, auditor *atomic.Pointer[audit.Auditor], db database.Store) func(ctx context.Context, user database.User) (database.User, error) {
return func(ctx context.Context, user database.User) (database.User, error) {
if user.ID == uuid.Nil || user.Status != database.UserStatusDormant {
return user, nil
}
//nolint:gocritic // System needs to update status of the user account (dormant -> active).
newUser, err := db.UpdateUserStatus(dbauthz.AsSystemRestricted(ctx), database.UpdateUserStatusParams{
ID: user.ID,
Status: database.UserStatusActive,
UpdatedAt: dbtime.Now(),
})
if err != nil {
logger.Error(ctx, "unable to update user status to active", slog.Error(err))
return user, xerrors.Errorf("update user status: %w", err)
}
oldAuditUser := user
newAuditUser := user
newAuditUser.Status = database.UserStatusActive
audit.BackgroundAudit(ctx, &audit.BackgroundAuditParams[database.User]{
Audit: *auditor.Load(),
Log: logger,
UserID: user.ID,
Action: database.AuditActionWrite,
Old: oldAuditUser,
New: newAuditUser,
Status: http.StatusOK,
AdditionalFields: audit.BackgroundTaskFieldsBytes(ctx, logger, audit.BackgroundSubsystemDormancy),
})
return newUser, nil
}
}
// Clear the user's session cookie.
//
// @Summary Log out user
@@ -1385,10 +1416,22 @@ func (p *oauthLoginParams) CommitAuditLogs() {
func (api *API) oauthLogin(r *http.Request, params *oauthLoginParams) ([]*http.Cookie, database.User, database.APIKey, error) {
var (
ctx = r.Context()
user database.User
cookies []*http.Cookie
logger = api.Logger.Named(userAuthLoggerName)
ctx = r.Context()
user database.User
cookies []*http.Cookie
logger = api.Logger.Named(userAuthLoggerName)
auditor = *api.Auditor.Load()
dormantConvertAudit *audit.Request[database.User]
initDormantAuditOnce = sync.OnceFunc(func() {
dormantConvertAudit = params.initAuditRequest(&audit.RequestParams{
Audit: auditor,
Log: api.Logger,
Request: r,
Action: database.AuditActionWrite,
OrganizationID: uuid.Nil,
AdditionalFields: audit.BackgroundTaskFields(audit.BackgroundSubsystemDormancy),
})
})
)
var isConvertLoginType bool
@@ -1490,6 +1533,7 @@ func (api *API) oauthLogin(r *http.Request, params *oauthLoginParams) ([]*http.C
Email: params.Email,
Username: params.Username,
OrganizationIDs: orgIDs,
UserStatus: ptr.Ref(codersdk.UserStatusActive),
},
LoginType: params.LoginType,
accountCreatorName: "oauth",
@@ -1501,6 +1545,11 @@ func (api *API) oauthLogin(r *http.Request, params *oauthLoginParams) ([]*http.C
// Activate dormant user on sign-in
if user.Status == database.UserStatusDormant {
// This is necessary because transactions can be retried, and we
// only want to add the audit log a single time.
initDormantAuditOnce()
dormantConvertAudit.UserID = user.ID
dormantConvertAudit.Old = user
//nolint:gocritic // System needs to update status of the user account (dormant -> active).
user, err = tx.UpdateUserStatus(dbauthz.AsSystemRestricted(ctx), database.UpdateUserStatusParams{
ID: user.ID,
@@ -1511,6 +1560,7 @@ func (api *API) oauthLogin(r *http.Request, params *oauthLoginParams) ([]*http.C
logger.Error(ctx, "unable to update user status to active", slog.Error(err))
return xerrors.Errorf("update user status: %w", err)
}
dormantConvertAudit.New = user
}
debugContext, err := json.Marshal(params.DebugContext)
+44 -1
View File
@@ -1285,7 +1285,7 @@ func TestUserOIDC(t *testing.T) {
tc.AssertResponse(t, resp)
}
ctx := testutil.Context(t, testutil.WaitLong)
ctx := testutil.Context(t, testutil.WaitShort)
if tc.AssertUser != nil {
user, err := client.User(ctx, "me")
@@ -1300,6 +1300,49 @@ func TestUserOIDC(t *testing.T) {
})
}
t.Run("OIDCDormancy", func(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitShort)
auditor := audit.NewMock()
fake := oidctest.NewFakeIDP(t,
oidctest.WithRefresh(func(_ string) error {
return xerrors.New("refreshing token should never occur")
}),
oidctest.WithServing(),
)
cfg := fake.OIDCConfig(t, nil, func(cfg *coderd.OIDCConfig) {
cfg.AllowSignups = true
})
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
owner, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{
Auditor: auditor,
OIDCConfig: cfg,
Logger: &logger,
})
user := dbgen.User(t, db, database.User{
LoginType: database.LoginTypeOIDC,
Status: database.UserStatusDormant,
})
auditor.ResetLogs()
client, resp := fake.AttemptLogin(t, owner, jwt.MapClaims{
"email": user.Email,
})
require.Equal(t, http.StatusOK, resp.StatusCode)
auditor.Contains(t, database.AuditLog{
ResourceType: database.ResourceTypeUser,
AdditionalFields: json.RawMessage(`{"automatic_actor":"coder","automatic_subsystem":"dormancy"}`),
})
me, err := client.User(ctx, "me")
require.NoError(t, err)
require.Equal(t, codersdk.UserStatusActive, me.Status)
})
t.Run("OIDCConvert", func(t *testing.T) {
t.Parallel()
+13 -4
View File
@@ -28,6 +28,7 @@ import (
"github.com/coder/coder/v2/coderd/searchquery"
"github.com/coder/coder/v2/coderd/telemetry"
"github.com/coder/coder/v2/coderd/userpassword"
"github.com/coder/coder/v2/coderd/util/ptr"
"github.com/coder/coder/v2/coderd/util/slice"
"github.com/coder/coder/v2/codersdk"
)
@@ -188,10 +189,13 @@ func (api *API) postFirstUser(rw http.ResponseWriter, r *http.Request) {
//nolint:gocritic // needed to create first user
user, err := api.CreateUser(dbauthz.AsSystemRestricted(ctx), api.Database, CreateUserRequest{
CreateUserRequestWithOrgs: codersdk.CreateUserRequestWithOrgs{
Email: createUser.Email,
Username: createUser.Username,
Name: createUser.Name,
Password: createUser.Password,
Email: createUser.Email,
Username: createUser.Username,
Name: createUser.Name,
Password: createUser.Password,
// There's no reason to create the first user as dormant, since you have
// to login immediately anyways.
UserStatus: ptr.Ref(codersdk.UserStatusActive),
OrganizationIDs: []uuid.UUID{defaultOrg.ID},
},
LoginType: database.LoginTypePassword,
@@ -1343,6 +1347,10 @@ func (api *API) CreateUser(ctx context.Context, store database.Store, req Create
err := store.InTx(func(tx database.Store) error {
orgRoles := make([]string, 0)
status := ""
if req.UserStatus != nil {
status = string(*req.UserStatus)
}
params := database.InsertUserParams{
ID: uuid.New(),
Email: req.Email,
@@ -1354,6 +1362,7 @@ func (api *API) CreateUser(ctx context.Context, store database.Store, req Create
// All new users are defaulted to members of the site.
RBACRoles: []string{},
LoginType: req.LoginType,
Status: status,
}
// If a user signs up with OAuth, they can have no password!
if req.Password != "" {
+36
View File
@@ -30,6 +30,7 @@ import (
"github.com/coder/coder/v2/coderd/database/dbgen"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/rbac"
"github.com/coder/coder/v2/coderd/util/ptr"
"github.com/coder/coder/v2/coderd/util/slice"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/testutil"
@@ -695,6 +696,41 @@ func TestPostUsers(t *testing.T) {
})
require.NoError(t, err)
// User should default to dormant.
require.Equal(t, codersdk.UserStatusDormant, user.Status)
require.Len(t, auditor.AuditLogs(), numLogs)
require.Equal(t, database.AuditActionCreate, auditor.AuditLogs()[numLogs-1].Action)
require.Equal(t, database.AuditActionLogin, auditor.AuditLogs()[numLogs-2].Action)
require.Len(t, user.OrganizationIDs, 1)
assert.Equal(t, firstUser.OrganizationID, user.OrganizationIDs[0])
})
t.Run("CreateWithStatus", func(t *testing.T) {
t.Parallel()
auditor := audit.NewMock()
client := coderdtest.New(t, &coderdtest.Options{Auditor: auditor})
numLogs := len(auditor.AuditLogs())
firstUser := coderdtest.CreateFirstUser(t, client)
numLogs++ // add an audit log for user create
numLogs++ // add an audit log for login
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
user, err := client.CreateUserWithOrgs(ctx, codersdk.CreateUserRequestWithOrgs{
OrganizationIDs: []uuid.UUID{firstUser.OrganizationID},
Email: "another@user.org",
Username: "someone-else",
Password: "SomeSecurePassword!",
UserStatus: ptr.Ref(codersdk.UserStatusActive),
})
require.NoError(t, err)
require.Equal(t, codersdk.UserStatusActive, user.Status)
require.Len(t, auditor.AuditLogs(), numLogs)
require.Equal(t, database.AuditActionCreate, auditor.AuditLogs()[numLogs-1].Action)
require.Equal(t, database.AuditActionLogin, auditor.AuditLogs()[numLogs-2].Action)
+33
View File
@@ -1313,6 +1313,39 @@ func TestWorkspaceFilterManual(t *testing.T) {
require.NoError(t, err)
require.Len(t, res.Workspaces, 0)
})
t.Run("Owner", func(t *testing.T) {
t.Parallel()
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
user := coderdtest.CreateFirstUser(t, client)
otherUser, _ := coderdtest.CreateAnotherUser(t, client, user.OrganizationID, rbac.RoleOwner())
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
// Add a non-matching workspace
coderdtest.CreateWorkspace(t, otherUser, template.ID)
workspaces := []codersdk.Workspace{
coderdtest.CreateWorkspace(t, client, template.ID),
coderdtest.CreateWorkspace(t, client, template.ID),
}
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
sdkUser, err := client.User(ctx, codersdk.Me)
require.NoError(t, err)
// match owner name
res, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{
FilterQuery: fmt.Sprintf("owner:%s", sdkUser.Username),
})
require.NoError(t, err)
require.Len(t, res.Workspaces, len(workspaces))
for _, found := range res.Workspaces {
require.Equal(t, found.OwnerName, sdkUser.Username)
}
})
t.Run("IDs", func(t *testing.T) {
t.Parallel()
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
+175 -3
View File
@@ -926,6 +926,23 @@ when required by your organization's security policy.`,
Name: "Config",
Description: `Use a YAML configuration file when your server launch become unwieldy.`,
}
deploymentGroupEmail = serpent.Group{
Name: "Email",
Description: "Configure how emails are sent.",
YAML: "email",
}
deploymentGroupEmailAuth = serpent.Group{
Name: "Email Authentication",
Parent: &deploymentGroupEmail,
Description: "Configure SMTP authentication options.",
YAML: "emailAuth",
}
deploymentGroupEmailTLS = serpent.Group{
Name: "Email TLS",
Parent: &deploymentGroupEmail,
Description: "Configure TLS for your SMTP server target.",
YAML: "emailTLS",
}
deploymentGroupNotifications = serpent.Group{
Name: "Notifications",
YAML: "notifications",
@@ -997,6 +1014,135 @@ when required by your organization's security policy.`,
Group: &deploymentGroupIntrospectionLogging,
YAML: "filter",
}
emailFrom := serpent.Option{
Name: "Email: From Address",
Description: "The sender's address to use.",
Flag: "email-from",
Env: "CODER_EMAIL_FROM",
Value: &c.Notifications.SMTP.From,
Group: &deploymentGroupEmail,
YAML: "from",
}
emailSmarthost := serpent.Option{
Name: "Email: Smarthost",
Description: "The intermediary SMTP host through which emails are sent.",
Flag: "email-smarthost",
Env: "CODER_EMAIL_SMARTHOST",
Default: "localhost:587", // To pass validation.
Value: &c.Notifications.SMTP.Smarthost,
Group: &deploymentGroupEmail,
YAML: "smarthost",
}
emailHello := serpent.Option{
Name: "Email: Hello",
Description: "The hostname identifying the SMTP server.",
Flag: "email-hello",
Env: "CODER_EMAIL_HELLO",
Default: "localhost",
Value: &c.Notifications.SMTP.Hello,
Group: &deploymentGroupEmail,
YAML: "hello",
}
emailForceTLS := serpent.Option{
Name: "Email: Force TLS",
Description: "Force a TLS connection to the configured SMTP smarthost.",
Flag: "email-force-tls",
Env: "CODER_EMAIL_FORCE_TLS",
Default: "false",
Value: &c.Notifications.SMTP.ForceTLS,
Group: &deploymentGroupEmail,
YAML: "forceTLS",
}
emailAuthIdentity := serpent.Option{
Name: "Email Auth: Identity",
Description: "Identity to use with PLAIN authentication.",
Flag: "email-auth-identity",
Env: "CODER_EMAIL_AUTH_IDENTITY",
Value: &c.Notifications.SMTP.Auth.Identity,
Group: &deploymentGroupEmailAuth,
YAML: "identity",
}
emailAuthUsername := serpent.Option{
Name: "Email Auth: Username",
Description: "Username to use with PLAIN/LOGIN authentication.",
Flag: "email-auth-username",
Env: "CODER_EMAIL_AUTH_USERNAME",
Value: &c.Notifications.SMTP.Auth.Username,
Group: &deploymentGroupEmailAuth,
YAML: "username",
}
emailAuthPassword := serpent.Option{
Name: "Email Auth: Password",
Description: "Password to use with PLAIN/LOGIN authentication.",
Flag: "email-auth-password",
Env: "CODER_EMAIL_AUTH_PASSWORD",
Annotations: serpent.Annotations{}.Mark(annotationSecretKey, "true"),
Value: &c.Notifications.SMTP.Auth.Password,
Group: &deploymentGroupEmailAuth,
}
emailAuthPasswordFile := serpent.Option{
Name: "Email Auth: Password File",
Description: "File from which to load password for use with PLAIN/LOGIN authentication.",
Flag: "email-auth-password-file",
Env: "CODER_EMAIL_AUTH_PASSWORD_FILE",
Value: &c.Notifications.SMTP.Auth.PasswordFile,
Group: &deploymentGroupEmailAuth,
YAML: "passwordFile",
}
emailTLSStartTLS := serpent.Option{
Name: "Email TLS: StartTLS",
Description: "Enable STARTTLS to upgrade insecure SMTP connections using TLS.",
Flag: "email-tls-starttls",
Env: "CODER_EMAIL_TLS_STARTTLS",
Value: &c.Notifications.SMTP.TLS.StartTLS,
Group: &deploymentGroupEmailTLS,
YAML: "startTLS",
}
emailTLSServerName := serpent.Option{
Name: "Email TLS: Server Name",
Description: "Server name to verify against the target certificate.",
Flag: "email-tls-server-name",
Env: "CODER_EMAIL_TLS_SERVERNAME",
Value: &c.Notifications.SMTP.TLS.ServerName,
Group: &deploymentGroupEmailTLS,
YAML: "serverName",
}
emailTLSSkipCertVerify := serpent.Option{
Name: "Email TLS: Skip Certificate Verification (Insecure)",
Description: "Skip verification of the target server's certificate (insecure).",
Flag: "email-tls-skip-verify",
Env: "CODER_EMAIL_TLS_SKIPVERIFY",
Value: &c.Notifications.SMTP.TLS.InsecureSkipVerify,
Group: &deploymentGroupEmailTLS,
YAML: "insecureSkipVerify",
}
emailTLSCertAuthorityFile := serpent.Option{
Name: "Email TLS: Certificate Authority File",
Description: "CA certificate file to use.",
Flag: "email-tls-ca-cert-file",
Env: "CODER_EMAIL_TLS_CACERTFILE",
Value: &c.Notifications.SMTP.TLS.CAFile,
Group: &deploymentGroupEmailTLS,
YAML: "caCertFile",
}
emailTLSCertFile := serpent.Option{
Name: "Email TLS: Certificate File",
Description: "Certificate file to use.",
Flag: "email-tls-cert-file",
Env: "CODER_EMAIL_TLS_CERTFILE",
Value: &c.Notifications.SMTP.TLS.CertFile,
Group: &deploymentGroupEmailTLS,
YAML: "certFile",
}
emailTLSCertKeyFile := serpent.Option{
Name: "Email TLS: Certificate Key File",
Description: "Certificate key file to use.",
Flag: "email-tls-cert-key-file",
Env: "CODER_EMAIL_TLS_CERTKEYFILE",
Value: &c.Notifications.SMTP.TLS.KeyFile,
Group: &deploymentGroupEmailTLS,
YAML: "certKeyFile",
}
opts := serpent.OptionSet{
{
Name: "Access URL",
@@ -2432,6 +2578,21 @@ Write out the current server config as YAML to stdout.`,
YAML: "thresholdDatabase",
Annotations: serpent.Annotations{}.Mark(annotationFormatDuration, "true"),
},
// Email options
emailFrom,
emailSmarthost,
emailHello,
emailForceTLS,
emailAuthIdentity,
emailAuthUsername,
emailAuthPassword,
emailAuthPasswordFile,
emailTLSStartTLS,
emailTLSServerName,
emailTLSSkipCertVerify,
emailTLSCertAuthorityFile,
emailTLSCertFile,
emailTLSCertKeyFile,
// Notifications Options
{
Name: "Notifications: Method",
@@ -2462,36 +2623,37 @@ Write out the current server config as YAML to stdout.`,
Value: &c.Notifications.SMTP.From,
Group: &deploymentGroupNotificationsEmail,
YAML: "from",
UseInstead: serpent.OptionSet{emailFrom},
},
{
Name: "Notifications: Email: Smarthost",
Description: "The intermediary SMTP host through which emails are sent.",
Flag: "notifications-email-smarthost",
Env: "CODER_NOTIFICATIONS_EMAIL_SMARTHOST",
Default: "localhost:587", // To pass validation.
Value: &c.Notifications.SMTP.Smarthost,
Group: &deploymentGroupNotificationsEmail,
YAML: "smarthost",
UseInstead: serpent.OptionSet{emailSmarthost},
},
{
Name: "Notifications: Email: Hello",
Description: "The hostname identifying the SMTP server.",
Flag: "notifications-email-hello",
Env: "CODER_NOTIFICATIONS_EMAIL_HELLO",
Default: "localhost",
Value: &c.Notifications.SMTP.Hello,
Group: &deploymentGroupNotificationsEmail,
YAML: "hello",
UseInstead: serpent.OptionSet{emailHello},
},
{
Name: "Notifications: Email: Force TLS",
Description: "Force a TLS connection to the configured SMTP smarthost.",
Flag: "notifications-email-force-tls",
Env: "CODER_NOTIFICATIONS_EMAIL_FORCE_TLS",
Default: "false",
Value: &c.Notifications.SMTP.ForceTLS,
Group: &deploymentGroupNotificationsEmail,
YAML: "forceTLS",
UseInstead: serpent.OptionSet{emailForceTLS},
},
{
Name: "Notifications: Email Auth: Identity",
@@ -2501,6 +2663,7 @@ Write out the current server config as YAML to stdout.`,
Value: &c.Notifications.SMTP.Auth.Identity,
Group: &deploymentGroupNotificationsEmailAuth,
YAML: "identity",
UseInstead: serpent.OptionSet{emailAuthIdentity},
},
{
Name: "Notifications: Email Auth: Username",
@@ -2510,6 +2673,7 @@ Write out the current server config as YAML to stdout.`,
Value: &c.Notifications.SMTP.Auth.Username,
Group: &deploymentGroupNotificationsEmailAuth,
YAML: "username",
UseInstead: serpent.OptionSet{emailAuthUsername},
},
{
Name: "Notifications: Email Auth: Password",
@@ -2519,6 +2683,7 @@ Write out the current server config as YAML to stdout.`,
Annotations: serpent.Annotations{}.Mark(annotationSecretKey, "true"),
Value: &c.Notifications.SMTP.Auth.Password,
Group: &deploymentGroupNotificationsEmailAuth,
UseInstead: serpent.OptionSet{emailAuthPassword},
},
{
Name: "Notifications: Email Auth: Password File",
@@ -2528,6 +2693,7 @@ Write out the current server config as YAML to stdout.`,
Value: &c.Notifications.SMTP.Auth.PasswordFile,
Group: &deploymentGroupNotificationsEmailAuth,
YAML: "passwordFile",
UseInstead: serpent.OptionSet{emailAuthPasswordFile},
},
{
Name: "Notifications: Email TLS: StartTLS",
@@ -2537,6 +2703,7 @@ Write out the current server config as YAML to stdout.`,
Value: &c.Notifications.SMTP.TLS.StartTLS,
Group: &deploymentGroupNotificationsEmailTLS,
YAML: "startTLS",
UseInstead: serpent.OptionSet{emailTLSStartTLS},
},
{
Name: "Notifications: Email TLS: Server Name",
@@ -2546,6 +2713,7 @@ Write out the current server config as YAML to stdout.`,
Value: &c.Notifications.SMTP.TLS.ServerName,
Group: &deploymentGroupNotificationsEmailTLS,
YAML: "serverName",
UseInstead: serpent.OptionSet{emailTLSServerName},
},
{
Name: "Notifications: Email TLS: Skip Certificate Verification (Insecure)",
@@ -2555,6 +2723,7 @@ Write out the current server config as YAML to stdout.`,
Value: &c.Notifications.SMTP.TLS.InsecureSkipVerify,
Group: &deploymentGroupNotificationsEmailTLS,
YAML: "insecureSkipVerify",
UseInstead: serpent.OptionSet{emailTLSSkipCertVerify},
},
{
Name: "Notifications: Email TLS: Certificate Authority File",
@@ -2564,6 +2733,7 @@ Write out the current server config as YAML to stdout.`,
Value: &c.Notifications.SMTP.TLS.CAFile,
Group: &deploymentGroupNotificationsEmailTLS,
YAML: "caCertFile",
UseInstead: serpent.OptionSet{emailTLSCertAuthorityFile},
},
{
Name: "Notifications: Email TLS: Certificate File",
@@ -2573,6 +2743,7 @@ Write out the current server config as YAML to stdout.`,
Value: &c.Notifications.SMTP.TLS.CertFile,
Group: &deploymentGroupNotificationsEmailTLS,
YAML: "certFile",
UseInstead: serpent.OptionSet{emailTLSCertFile},
},
{
Name: "Notifications: Email TLS: Certificate Key File",
@@ -2582,6 +2753,7 @@ Write out the current server config as YAML to stdout.`,
Value: &c.Notifications.SMTP.TLS.KeyFile,
Group: &deploymentGroupNotificationsEmailTLS,
YAML: "certKeyFile",
UseInstead: serpent.OptionSet{emailTLSCertKeyFile},
},
{
Name: "Notifications: Webhook: Endpoint",
+3
View File
@@ -78,6 +78,9 @@ func TestDeploymentValues_HighlyConfigurable(t *testing.T) {
"Provisioner Daemon Pre-shared Key (PSK)": {
yaml: true,
},
"Email Auth: Password": {
yaml: true,
},
"Notifications: Email Auth: Password": {
yaml: true,
},
+2
View File
@@ -139,6 +139,8 @@ type CreateUserRequestWithOrgs struct {
Password string `json:"password"`
// UserLoginType defaults to LoginTypePassword.
UserLoginType LoginType `json:"login_type"`
// UserStatus defaults to UserStatusDormant.
UserStatus *UserStatus `json:"user_status"`
// OrganizationIDs is a list of organization IDs that the user should be a member of.
OrganizationIDs []uuid.UUID `json:"organization_ids" validate:"" format:"uuid"`
}
+28 -28
View File
@@ -89,34 +89,34 @@ existing one.
**Server Settings:**
| Required | CLI | Env | Type | Description | Default |
| :------: | --------------------------------- | ------------------------------------- | ----------- | ----------------------------------------- | ------------- |
| ✔️ | `--notifications-email-from` | `CODER_NOTIFICATIONS_EMAIL_FROM` | `string` | The sender's address to use. | |
| ✔️ | `--notifications-email-smarthost` | `CODER_NOTIFICATIONS_EMAIL_SMARTHOST` | `host:port` | The SMTP relay to send messages through. | localhost:587 |
| ✔️ | `--notifications-email-hello` | `CODER_NOTIFICATIONS_EMAIL_HELLO` | `string` | The hostname identifying the SMTP server. | localhost |
| Required | CLI | Env | Type | Description | Default |
| :------: | ------------------- | ----------------------- | ----------- | ----------------------------------------- | ------------- |
| ✔️ | `--email-from` | `CODER_EMAIL_FROM` | `string` | The sender's address to use. | |
| ✔️ | `--email-smarthost` | `CODER_EMAIL_SMARTHOST` | `host:port` | The SMTP relay to send messages through. | localhost:587 |
| ✔️ | `--email-hello` | `CODER_EMAIL_HELLO` | `string` | The hostname identifying the SMTP server. | localhost |
**Authentication Settings:**
| Required | CLI | Env | Type | Description |
| :------: | ------------------------------------------ | ---------------------------------------------- | -------- | ------------------------------------------------------------------------- |
| - | `--notifications-email-auth-username` | `CODER_NOTIFICATIONS_EMAIL_AUTH_USERNAME` | `string` | Username to use with PLAIN/LOGIN authentication. |
| - | `--notifications-email-auth-password` | `CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD` | `string` | Password to use with PLAIN/LOGIN authentication. |
| - | `--notifications-email-auth-password-file` | `CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD_FILE` | `string` | File from which to load password for use with PLAIN/LOGIN authentication. |
| - | `--notifications-email-auth-identity` | `CODER_NOTIFICATIONS_EMAIL_AUTH_IDENTITY` | `string` | Identity to use with PLAIN authentication. |
| Required | CLI | Env | Type | Description |
| :------: | ---------------------------- | -------------------------------- | -------- | ------------------------------------------------------------------------- |
| - | `--email-auth-username` | `CODER_EMAIL_AUTH_USERNAME` | `string` | Username to use with PLAIN/LOGIN authentication. |
| - | `--email-auth-password` | `CODER_EMAIL_AUTH_PASSWORD` | `string` | Password to use with PLAIN/LOGIN authentication. |
| - | `--email-auth-password-file` | `CODER_EMAIL_AUTH_PASSWORD_FILE` | `string` | File from which to load password for use with PLAIN/LOGIN authentication. |
| - | `--email-auth-identity` | `CODER_EMAIL_AUTH_IDENTITY` | `string` | Identity to use with PLAIN authentication. |
**TLS Settings:**
| Required | CLI | Env | Type | Description | Default |
| :------: | ----------------------------------------- | ------------------------------------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| - | `--notifications-email-force-tls` | `CODER_NOTIFICATIONS_EMAIL_FORCE_TLS` | `bool` | Force a TLS connection to the configured SMTP smarthost. If port 465 is used, TLS will be forced. See https://datatracker.ietf.org/doc/html/rfc8314#section-3.3. | false |
| - | `--notifications-email-tls-starttls` | `CODER_NOTIFICATIONS_EMAIL_TLS_STARTTLS` | `bool` | Enable STARTTLS to upgrade insecure SMTP connections using TLS. Ignored if `CODER_NOTIFICATIONS_EMAIL_FORCE_TLS` is set. | false |
| - | `--notifications-email-tls-skip-verify` | `CODER_NOTIFICATIONS_EMAIL_TLS_SKIPVERIFY` | `bool` | Skip verification of the target server's certificate (**insecure**). | false |
| - | `--notifications-email-tls-server-name` | `CODER_NOTIFICATIONS_EMAIL_TLS_SERVERNAME` | `string` | Server name to verify against the target certificate. | |
| - | `--notifications-email-tls-cert-file` | `CODER_NOTIFICATIONS_EMAIL_TLS_CERTFILE` | `string` | Certificate file to use. | |
| - | `--notifications-email-tls-cert-key-file` | `CODER_NOTIFICATIONS_EMAIL_TLS_CERTKEYFILE` | `string` | Certificate key file to use. | |
| Required | CLI | Env | Type | Description | Default |
| :------: | --------------------------- | ----------------------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| - | `--email-force-tls` | `CODER_EMAIL_FORCE_TLS` | `bool` | Force a TLS connection to the configured SMTP smarthost. If port 465 is used, TLS will be forced. See https://datatracker.ietf.org/doc/html/rfc8314#section-3.3. | false |
| - | `--email-tls-starttls` | `CODER_EMAIL_TLS_STARTTLS` | `bool` | Enable STARTTLS to upgrade insecure SMTP connections using TLS. Ignored if `CODER_NOTIFICATIONS_EMAIL_FORCE_TLS` is set. | false |
| - | `--email-tls-skip-verify` | `CODER_EMAIL_TLS_SKIPVERIFY` | `bool` | Skip verification of the target server's certificate (**insecure**). | false |
| - | `--email-tls-server-name` | `CODER_EMAIL_TLS_SERVERNAME` | `string` | Server name to verify against the target certificate. | |
| - | `--email-tls-cert-file` | `CODER_EMAIL_TLS_CERTFILE` | `string` | Certificate file to use. | |
| - | `--email-tls-cert-key-file` | `CODER_EMAIL_TLS_CERTKEYFILE` | `string` | Certificate key file to use. | |
**NOTE:** you _MUST_ use `CODER_NOTIFICATIONS_EMAIL_FORCE_TLS` if your smarthost
supports TLS on a port other than `465`.
**NOTE:** you _MUST_ use `CODER_EMAIL_FORCE_TLS` if your smarthost supports TLS
on a port other than `465`.
### Send emails using G-Suite
@@ -126,9 +126,9 @@ After setting the required fields above:
account you wish to send from
2. Set the following configuration options:
```
CODER_NOTIFICATIONS_EMAIL_SMARTHOST=smtp.gmail.com:465
CODER_NOTIFICATIONS_EMAIL_AUTH_USERNAME=<user>@<domain>
CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD="<app password created above>"
CODER_EMAIL_SMARTHOST=smtp.gmail.com:465
CODER_EMAIL_AUTH_USERNAME=<user>@<domain>
CODER_EMAIL_AUTH_PASSWORD="<app password created above>"
```
See
@@ -142,10 +142,10 @@ After setting the required fields above:
1. Setup an account on Microsoft 365 or outlook.com
2. Set the following configuration options:
```
CODER_NOTIFICATIONS_EMAIL_SMARTHOST=smtp-mail.outlook.com:587
CODER_NOTIFICATIONS_EMAIL_TLS_STARTTLS=true
CODER_NOTIFICATIONS_EMAIL_AUTH_USERNAME=<user>@<domain>
CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD="<account password>"
CODER_EMAIL_SMARTHOST=smtp-mail.outlook.com:587
CODER_EMAIL_TLS_STARTTLS=true
CODER_EMAIL_AUTH_USERNAME=<user>@<domain>
CODER_EMAIL_AUTH_PASSWORD="<account password>"
```
See
+37 -49
View File
@@ -178,7 +178,8 @@ A provisioner can run a given build job if one of the below is true:
1. If a job has any explicit tags, it can only run on a provisioner with those
explicit tags (the provisioner could have additional tags).
The external provisioner in the above example can run build jobs with tags:
The external provisioner in the above example can run build jobs in the same
organization with tags:
- `environment=on_prem`
- `datacenter=chicago`
@@ -186,7 +187,8 @@ The external provisioner in the above example can run build jobs with tags:
However, it will not pick up any build jobs that do not have either of the
`environment` or `datacenter` tags set. It will also not pick up any build jobs
from templates with the tag `scope=user` set.
from templates with the tag `scope=user` set, or build jobs from templates in
different organizations.
> [!NOTE] If you only run tagged provisioners, you will need to specify a set of
> tags that matches at least one provisioner for _all_ template import jobs and
@@ -198,34 +200,35 @@ from templates with the tag `scope=user` set.
This is illustrated in the below table:
| Provisioner Tags | Job Tags | Can Run Job? |
| ----------------------------------------------------------------- | ---------------------------------------------------------------- | ------------ |
| scope=organization owner= | scope=organization owner= | ✅ |
| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem | ✅ |
| scope=organization owner= environment=on-prem datacenter=chicago | scope=organization owner= environment=on-prem | ✅ |
| scope=organization owner= environment=on-prem datacenter=chicago | scope=organization owner= environment=on-prem datacenter=chicago | ✅ |
| scope=user owner=aaa | scope=user owner=aaa | ✅ |
| scope=user owner=aaa environment=on-prem | scope=user owner=aaa | ✅ |
| scope=user owner=aaa environment=on-prem | scope=user owner=aaa environment=on-prem | ✅ |
| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem | ✅ |
| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem datacenter=chicago | ✅ |
| scope=organization owner= | scope=organization owner= environment=on-prem | ❌ |
| scope=organization owner= environment=on-prem | scope=organization owner= | ❌ |
| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem datacenter=chicago | ❌ |
| scope=organization owner= environment=on-prem datacenter=new_york | scope=organization owner= environment=on-prem datacenter=chicago | ❌ |
| scope=user owner=aaa | scope=organization owner= | ❌ |
| scope=user owner=aaa | scope=user owner=bbb | ❌ |
| scope=organization owner= | scope=user owner=aaa | ❌ |
| scope=organization owner= | scope=user owner=aaa environment=on-prem | ❌ |
| scope=user owner=aaa | scope=user owner=aaa environment=on-prem | ❌ |
| scope=user owner=aaa environment=on-prem | scope=user owner=aaa environment=on-prem datacenter=chicago | ❌ |
| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem datacenter=new_york | ❌ |
| Provisioner Tags | Job Tags | Same Org | Can Run Job? |
| ----------------------------------------------------------------- | ---------------------------------------------------------------- | -------- | ------------ |
| scope=organization owner= | scope=organization owner= | ✅ | ✅ |
| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem | ✅ | ✅ |
| scope=organization owner= environment=on-prem datacenter=chicago | scope=organization owner= environment=on-prem | ✅ | ✅ |
| scope=organization owner= environment=on-prem datacenter=chicago | scope=organization owner= environment=on-prem datacenter=chicago | ✅ | ✅ |
| scope=user owner=aaa | scope=user owner=aaa | ✅ | ✅ |
| scope=user owner=aaa environment=on-prem | scope=user owner=aaa | ✅ | ✅ |
| scope=user owner=aaa environment=on-prem | scope=user owner=aaa environment=on-prem | ✅ | ✅ |
| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem | ✅ | ✅ |
| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem datacenter=chicago | ✅ | ✅ |
| scope=organization owner= | scope=organization owner= environment=on-prem | ✅ | ❌ |
| scope=organization owner= environment=on-prem | scope=organization owner= | ✅ | ❌ |
| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem datacenter=chicago | ✅ | ❌ |
| scope=organization owner= environment=on-prem datacenter=new_york | scope=organization owner= environment=on-prem datacenter=chicago | ✅ | ❌ |
| scope=user owner=aaa | scope=organization owner= | ✅ | ❌ |
| scope=user owner=aaa | scope=user owner=bbb | ✅ | ❌ |
| scope=organization owner= | scope=user owner=aaa | ✅ | ❌ |
| scope=organization owner= | scope=user owner=aaa environment=on-prem | ✅ | ❌ |
| scope=user owner=aaa | scope=user owner=aaa environment=on-prem | ✅ | ❌ |
| scope=user owner=aaa environment=on-prem | scope=user owner=aaa environment=on-prem datacenter=chicago | ✅ | ❌ |
| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem datacenter=new_york | ✅ | ❌ |
| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem | ❌ | ❌ |
> **Note to maintainers:** to generate this table, run the following command and
> copy the output:
>
> ```
> go test -v -count=1 ./coderd/provisionerserver/ -test.run='^TestAcquirer_MatchTags/GenTable$'
> go test -v -count=1 ./coderd/provisionerdserver/ -test.run='^TestAcquirer_MatchTags/GenTable$'
> ```
## Types of provisioners
@@ -288,8 +291,7 @@ will use in concert with the Helm chart for deploying the Coder server.
```sh
coder provisioner keys create my-cool-key --org default
# Optionally, you can specify tags for the provisioner key:
# coder provisioner keys create my-cool-key --org default --tags location=auh kind=k8s
```
# coder provisioner keys create my-cool-key --org default --tag location=auh --tag kind=k8s
Successfully created provisioner key kubernetes-key! Save this authentication
token, it will not be shown again.
@@ -300,25 +302,7 @@ will use in concert with the Helm chart for deploying the Coder server.
1. Store the key in a kubernetes secret:
```sh
kubectl create secret generic coder-provisioner-psk --from-literal=key1=`<key omitted>`
```
1. Modify your Coder `values.yaml` to include
```yaml
provisionerDaemon:
keySecretName: "coder-provisioner-keys"
keySecretKey: "key1"
```
1. Redeploy Coder with the new `values.yaml` to roll out the PSK. You can omit
`--version <your version>` to also upgrade Coder to the latest version.
```sh
helm upgrade coder coder-v2/coder \
--namespace coder \
--version <your version> \
--values values.yaml
kubectl create secret generic coder-provisioner-psk --from-literal=my-cool-key=`<key omitted>`
```
1. Create a `provisioner-values.yaml` file for the provisioner daemons Helm
@@ -331,13 +315,17 @@ will use in concert with the Helm chart for deploying the Coder server.
value: "https://coder.example.com"
replicaCount: 10
provisionerDaemon:
# NOTE: in older versions of the Helm chart (2.17.0 and below), it is required to set this to an empty string.
pskSecretName: ""
keySecretName: "coder-provisioner-keys"
keySecretKey: "key1"
keySecretKey: "my-cool-key"
```
This example creates a deployment of 10 provisioner daemons (for 10
concurrent builds) with the listed tags. For generic provisioners, remove the
tags.
concurrent builds) authenticating using the above key. The daemons will
authenticate using the provisioner key created in the previous step and
acquire jobs matching the tags specified when the provisioner key was
created. The set of tags is inferred automatically from the provisioner key.
> Refer to the
> [values.yaml](https://github.com/coder/coder/blob/main/helm/provisioner/values.yaml)
+14
View File
@@ -154,3 +154,17 @@ the top of the script to exit on error.
> **Note:** If you aren't seeing any logs, check that the `dir` directive points
> to a valid directory in the file system.
## Slow workspace startup times
If your workspaces are taking longer to start than expected, or longer than
desired, you can diagnose which steps have the highest impact in the workspace
build timings UI (available in v2.17 and beyond). Admins can
programmatically pull startup times for individual workspace builds using our
[build timings API endpoint](../../reference/api/builds.md#get-workspace-build-timings-by-id).
See our
[guide on optimizing workspace build times](../../tutorials/best-practices/speed-up-templates.md)
to optimize your templates based on this data.
![Workspace build timings UI](../../images/admin/templates/troubleshooting/workspace-build-timings-ui.png)
+43
View File
@@ -31,6 +31,49 @@ Roles determine which actions users can take within the platform.
A user may have one or more roles. All users have an implicit Member role that
may use personal workspaces.
## Custom Roles (Premium) (Beta)
Starting in v2.16.0, Premium Coder deployments can configure custom roles on the
[Organization](./organizations.md) level. You can create and assign custom roles
in the dashboard under **Organizations** -> **My Organization** -> **Roles**.
> Note: This requires a Premium license.
> [Contact your account team](https://coder.com/contact) for more details.
![Custom roles](../../images/admin/users/roles/custom-roles.PNG)
### Example roles
- The `Banking Compliance Auditor` custom role cannot create workspaces, but can
read template source code and view audit logs
- The `Organization Lead` role can access user workspaces for troubleshooting
purposes, but cannot edit templates
- The `Platform Member` role cannot edit or create workspaces as they are
created via a third-party system
Custom roles can also be applied to
[headless user accounts](./headless-auth.md):
- A `Health Check` role can view deployment status but cannot create workspaces,
manage templates, or view users
- A `CI` role can update and manage templates but cannot create workspaces or view
  users
### Creating custom roles
Clicking "Create custom role" opens a UI to select the desired permissions for a
given persona.
![Creating a custom role](../../images/admin/users/roles/creating-custom-role.PNG)
From there, you can assign the custom role to any user in the organization under
the **Users** settings in the dashboard.
![Assigning a custom role](../../images/admin/users/roles/assigning-custom-role.PNG)
Note that these permissions only apply to the scope of an
[organization](./organizations.md), not across the deployment.
### Security notes
A malicious Template Admin could write a template that executes commands on the
+6 -1
View File
@@ -143,7 +143,12 @@ Confirm the user activation by typing **yes** and pressing **enter**.
## Reset a password
To reset a user's password via the web UI:
As of 2.17.0, users can reset their password independently on the login screen
by clicking "Forgot Password." This feature requires
[email notifications](../monitoring/notifications/index.md#smtp-email) to be
configured on the deployment.
To reset a user's password as an administrator via the web UI:
1. Go to **Users**.
2. Find the user whose password you want to reset, click the vertical ellipsis
Binary file not shown.

After

Width:  |  Height:  |  Size: 141 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 62 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 82 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 105 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 110 KiB

+29 -17
View File
@@ -1,6 +1,6 @@
# Install Coder on Kubernetes
You can install Coder on Kubernetes using Helm. We run on most Kubernetes
You can install Coder on Kubernetes (K8s) using Helm. We run on most Kubernetes
distributions, including [OpenShift](./openshift.md).
## Requirements
@@ -121,27 +121,27 @@ coder:
We support two release channels: mainline and stable - read the
[Releases](./releases.md) page to learn more about which best suits your team.
For the **mainline** Coder release:
- **Mainline** Coder release:
<!-- autoversion(mainline): "--version [version]" -->
<!-- autoversion(mainline): "--version [version]" -->
```shell
helm install coder coder-v2/coder \
--namespace coder \
--values values.yaml \
--version 2.15.0
```
```shell
helm install coder coder-v2/coder \
--namespace coder \
--values values.yaml \
--version 2.15.0
```
For the **stable** Coder release:
- **Stable** Coder release:
<!-- autoversion(stable): "--version [version]" -->
<!-- autoversion(stable): "--version [version]" -->
```shell
helm install coder coder-v2/coder \
--namespace coder \
--values values.yaml \
--version 2.15.1
```
```shell
helm install coder coder-v2/coder \
--namespace coder \
--values values.yaml \
--version 2.15.1
```
You can watch Coder start up by running `kubectl get pods -n coder`. Once Coder
has started, the `coder-*` pods should enter the `Running` state.
@@ -167,6 +167,18 @@ helm upgrade coder coder-v2/coder \
-f values.yaml
```
## Coder Observability Chart
Use the [Observability Helm chart](https://github.com/coder/observability) for a
pre-built set of dashboards to monitor your control plane over time. It includes
Grafana, Prometheus, Loki, and Alert Manager out-of-the-box, and can be deployed
on your existing Grafana instance.
We recommend that all administrators deploying on Kubernetes set the
observability bundle up with the control plane from the start. For installation
instructions, visit the
[observability repository](https://github.com/coder/observability?tab=readme-ov-file#installation).
## Kubernetes Security Reference
Below are common requirements we see from our enterprise customers when
+12
View File
@@ -723,6 +723,18 @@
"title": "FAQs",
"description": "Miscellaneous FAQs from our community",
"path": "./tutorials/faqs.md"
},
{
"title": "Best practices",
"description": "Guides to help you make the most of your Coder experience",
"path": "./tutorials/best-practices/index.md",
"children": [
{
"title": "Speed up your workspaces",
"description": "Speed up your Coder templates and workspaces",
"path": "./tutorials/best-practices/speed-up-templates.md"
}
]
}
]
},
+10 -8
View File
@@ -1342,20 +1342,22 @@ AuthorizationObject can represent a "set" of objects, such as: all workspaces in
"name": "string",
"organization_ids": ["497f6eca-6276-4993-bfeb-53cbbbba6f08"],
"password": "string",
"user_status": "active",
"username": "string"
}
```
### Properties
| Name | Type | Required | Restrictions | Description |
| ------------------ | ---------------------------------------- | -------- | ------------ | ----------------------------------------------------------------------------------- |
| `email` | string | true | | |
| `login_type` | [codersdk.LoginType](#codersdklogintype) | false | | Login type defaults to LoginTypePassword. |
| `name` | string | false | | |
| `organization_ids` | array of string | false | | Organization ids is a list of organization IDs that the user should be a member of. |
| `password` | string | false | | |
| `username` | string | true | | |
| Name | Type | Required | Restrictions | Description |
| ------------------ | ------------------------------------------ | -------- | ------------ | ----------------------------------------------------------------------------------- |
| `email` | string | true | | |
| `login_type` | [codersdk.LoginType](#codersdklogintype) | false | | Login type defaults to LoginTypePassword. |
| `name` | string | false | | |
| `organization_ids` | array of string | false | | Organization ids is a list of organization IDs that the user should be a member of. |
| `password` | string | false | | |
| `user_status` | [codersdk.UserStatus](#codersdkuserstatus) | false | | User status defaults to UserStatusDormant. |
| `username` | string | true | | |
## codersdk.CreateWorkspaceBuildRequest
+1
View File
@@ -86,6 +86,7 @@ curl -X POST http://coder-server:8080/api/v2/users \
"name": "string",
"organization_ids": ["497f6eca-6276-4993-bfeb-53cbbbba6f08"],
"password": "string",
"user_status": "active",
"username": "string"
}
```
+142 -3
View File
@@ -1249,6 +1249,148 @@ Refresh interval for healthchecks.
The threshold for the database health check. If the median latency of the database exceeds this threshold over 5 attempts, the database is considered unhealthy. The default value is 15ms.
### --email-from
| | |
| ----------- | ------------------------------ |
| Type | <code>string</code> |
| Environment | <code>$CODER_EMAIL_FROM</code> |
| YAML | <code>email.from</code> |
The sender's address to use.
### --email-smarthost
| | |
| ----------- | ----------------------------------- |
| Type | <code>host:port</code> |
| Environment | <code>$CODER_EMAIL_SMARTHOST</code> |
| YAML | <code>email.smarthost</code> |
| Default | <code>localhost:587</code> |
The intermediary SMTP host through which emails are sent.
### --email-hello
| | |
| ----------- | ------------------------------- |
| Type | <code>string</code> |
| Environment | <code>$CODER_EMAIL_HELLO</code> |
| YAML | <code>email.hello</code> |
| Default | <code>localhost</code> |
The hostname identifying the SMTP server.
### --email-force-tls
| | |
| ----------- | ----------------------------------- |
| Type | <code>bool</code> |
| Environment | <code>$CODER_EMAIL_FORCE_TLS</code> |
| YAML | <code>email.forceTLS</code> |
| Default | <code>false</code> |
Force a TLS connection to the configured SMTP smarthost.
### --email-auth-identity
| | |
| ----------- | --------------------------------------- |
| Type | <code>string</code> |
| Environment | <code>$CODER_EMAIL_AUTH_IDENTITY</code> |
| YAML | <code>email.emailAuth.identity</code> |
Identity to use with PLAIN authentication.
### --email-auth-username
| | |
| ----------- | --------------------------------------- |
| Type | <code>string</code> |
| Environment | <code>$CODER_EMAIL_AUTH_USERNAME</code> |
| YAML | <code>email.emailAuth.username</code> |
Username to use with PLAIN/LOGIN authentication.
### --email-auth-password
| | |
| ----------- | --------------------------------------- |
| Type | <code>string</code> |
| Environment | <code>$CODER_EMAIL_AUTH_PASSWORD</code> |
Password to use with PLAIN/LOGIN authentication.
### --email-auth-password-file
| | |
| ----------- | -------------------------------------------- |
| Type | <code>string</code> |
| Environment | <code>$CODER_EMAIL_AUTH_PASSWORD_FILE</code> |
| YAML | <code>email.emailAuth.passwordFile</code> |
File from which to load password for use with PLAIN/LOGIN authentication.
### --email-tls-starttls
| | |
| ----------- | -------------------------------------- |
| Type | <code>bool</code> |
| Environment | <code>$CODER_EMAIL_TLS_STARTTLS</code> |
| YAML | <code>email.emailTLS.startTLS</code> |
Enable STARTTLS to upgrade insecure SMTP connections using TLS.
### --email-tls-server-name
| | |
| ----------- | ---------------------------------------- |
| Type | <code>string</code> |
| Environment | <code>$CODER_EMAIL_TLS_SERVERNAME</code> |
| YAML | <code>email.emailTLS.serverName</code> |
Server name to verify against the target certificate.
### --email-tls-skip-verify
| | |
| ----------- | ---------------------------------------------- |
| Type | <code>bool</code> |
| Environment | <code>$CODER_EMAIL_TLS_SKIPVERIFY</code> |
| YAML | <code>email.emailTLS.insecureSkipVerify</code> |
Skip verification of the target server's certificate (insecure).
### --email-tls-ca-cert-file
| | |
| ----------- | ---------------------------------------- |
| Type | <code>string</code> |
| Environment | <code>$CODER_EMAIL_TLS_CACERTFILE</code> |
| YAML | <code>email.emailTLS.caCertFile</code> |
CA certificate file to use.
### --email-tls-cert-file
| | |
| ----------- | -------------------------------------- |
| Type | <code>string</code> |
| Environment | <code>$CODER_EMAIL_TLS_CERTFILE</code> |
| YAML | <code>email.emailTLS.certFile</code> |
Certificate file to use.
### --email-tls-cert-key-file
| | |
| ----------- | ----------------------------------------- |
| Type | <code>string</code> |
| Environment | <code>$CODER_EMAIL_TLS_CERTKEYFILE</code> |
| YAML | <code>email.emailTLS.certKeyFile</code> |
Certificate key file to use.
### --notifications-method
| | |
@@ -1288,7 +1430,6 @@ The sender's address to use.
| Type | <code>host:port</code> |
| Environment | <code>$CODER_NOTIFICATIONS_EMAIL_SMARTHOST</code> |
| YAML | <code>notifications.email.smarthost</code> |
| Default | <code>localhost:587</code> |
The intermediary SMTP host through which emails are sent.
@@ -1299,7 +1440,6 @@ The intermediary SMTP host through which emails are sent.
| Type | <code>string</code> |
| Environment | <code>$CODER_NOTIFICATIONS_EMAIL_HELLO</code> |
| YAML | <code>notifications.email.hello</code> |
| Default | <code>localhost</code> |
The hostname identifying the SMTP server.
@@ -1310,7 +1450,6 @@ The hostname identifying the SMTP server.
| Type | <code>bool</code> |
| Environment | <code>$CODER_NOTIFICATIONS_EMAIL_FORCE_TLS</code> |
| YAML | <code>notifications.email.forceTLS</code> |
| Default | <code>false</code> |
Force a TLS connection to the configured SMTP smarthost.
+5
View File
@@ -0,0 +1,5 @@
# Best practices
Guides to help you make the most of your Coder experience.
<children></children>
@@ -0,0 +1,143 @@
# Speed up your Coder templates and workspaces
October 31, 2024
---
If it takes your workspace a long time to start, find out why and make some
changes to your Coder templates to help speed things up.
## Monitoring
You can monitor [Coder logs](../../admin/monitoring/logs.md) through the
system-native tools on your deployment platform, or stream logs to tools like
Splunk, Datadog, Grafana Loki, and others.
### Workspace build timeline
Use the **Build timeline** to monitor the time it takes to start specific
workspaces. Identify long scripts, resources, and other things you can
potentially optimize within the template.
![Screenshot of a workspace and its build timeline](../../images/best-practice/build-timeline.png)
Adjust this request to match your Coder access URL and workspace:
```shell
curl -X GET https://coder.example.com/api/v2/workspacebuilds/{workspacebuild}/timings \
-H 'Accept: application/json' \
-H 'Coder-Session-Token: API_KEY'
```
Visit the
[API documentation](../../reference/api/builds.md#get-workspace-build-timings-by-id)
for more information.
### Coder Observability Chart
Use the [Observability Helm chart](https://github.com/coder/observability) for a
pre-built set of dashboards to monitor your control plane over time. It includes
Grafana, Prometheus, Loki, and Alert Manager out-of-the-box, and can be deployed
on your existing Grafana instance.
We recommend that all administrators deploying on Kubernetes or on an existing
Prometheus or Grafana stack set the observability bundle up with the control
plane from the start. For installation instructions, visit the
[observability repository](https://github.com/coder/observability?tab=readme-ov-file#installation),
or our [Kubernetes installation guide](../../install/kubernetes.md).
### Enable Prometheus metrics for Coder
[Prometheus.io](https://prometheus.io/docs/introduction/overview/#what-is-prometheus)
is included as part of the [observability chart](#coder-observability-chart). It
offers a variety of
[available metrics](../../admin/integrations/prometheus.md#available-metrics),
such as `coderd_provisionerd_job_timings_seconds` and
`coderd_agentstats_startup_script_seconds`, which measure how long the workspace
takes to provision and how long the startup script takes.
You can
[install it separately](https://prometheus.io/docs/prometheus/latest/getting_started/)
if you prefer.
## Provisioners
`coder server` defaults to three provisioner daemons. Each provisioner daemon
can handle one job at a time (such as a start, stop, or delete) and can be
resource intensive. When all provisioners are busy, workspaces enter a "pending"
state until a provisioner becomes available.
### Increase provisioner daemons
Provisioners are queue-based to reduce unpredictable load to the Coder server.
However, they can be scaled up to allow more concurrent provisioners. You risk
overloading the central Coder server if you use too many built-in provisioners,
so we recommend a maximum of five provisioners. For more than five provisioners,
we recommend that you move to
[external provisioners](../../admin/provisioners.md).
If you can't move to external provisioners, use the `provisioner-daemons` flag
to increase the number of provisioner daemons to five:
```shell
coder server --provisioner-daemons=5
```
Visit the
[CLI documentation](../../reference/cli/server.md#--provisioner-daemons) for
more information about increasing provisioner daemons, configuring external
provisioners, and other options.
### Adjust provisioner CPU/memory
We recommend that you deploy Coder to its own respective Kubernetes cluster,
separate from production applications. Keep in mind that Coder runs development
workloads, so the cluster should be deployed as such, without production-level
configurations.
Adjust the CPU and memory values as shown in
[Helm provisioner values.yaml](https://github.com/coder/coder/blob/main/helm/provisioner/values.yaml#L134-L141):
```yaml
resources:
limits:
cpu: "0.25"
memory: "1Gi"
requests:
cpu: "0.25"
memory: "1Gi"
```
Visit the
[validated architecture documentation](../../admin/infrastructure/validated-architectures/index.md#workspace-nodes)
for more information.
## Set up Terraform provider caching
By default, Coder downloads each Terraform provider when a workspace starts.
This can create unnecessary network and disk I/O.
`terraform init` generates a `.terraform.lock.hcl` which instructs Coder
provisioners to cache specific versions of your providers.
To use `terraform init` to cache providers:
1. Pull the templates to your local device:
```shell
coder templates pull
```
1. Run `terraform init` to initialize the directory:
```shell
terraform init
```
1. Push the templates back to your Coder deployment:
```shell
coder templates push
```
+13
View File
@@ -109,6 +109,19 @@ your template's Terraform file and the target resources on your infrastructure.
Unhealthy workspaces are usually caused by a misconfiguration in the agent or
workspace startup scripts.
## Workspace build times
After a successful build, you can see a timing breakdown of the workspace
startup process from the dashboard (starting in v2.17). We capture and display
both the time taken to provision the workspace's compute and the agent startup
steps.
These include any
[`coder_script`](https://registry.terraform.io/providers/coder/coder/latest/docs/resources/script)s
such as [dotfiles](./workspace-dotfiles.md) or
[`coder_app`](https://registry.terraform.io/providers/coder/coder/latest/docs/resources/app)
startups.
![Workspace build timings UI](../images/admin/templates/troubleshooting/workspace-build-timings-ui.png)
### Next steps
- [Connecting to your workspace](./index.md)
+2 -1
View File
@@ -23,6 +23,7 @@ import (
"github.com/coder/coder/v2/enterprise/dbcrypt"
"github.com/coder/coder/v2/enterprise/trialer"
"github.com/coder/coder/v2/tailnet"
"github.com/coder/quartz"
"github.com/coder/serpent"
agplcoderd "github.com/coder/coder/v2/coderd"
@@ -95,7 +96,7 @@ func (r *RootCmd) Server(_ func()) *serpent.Command {
DefaultQuietHoursSchedule: options.DeploymentValues.UserQuietHoursSchedule.DefaultSchedule.Value(),
ProvisionerDaemonPSK: options.DeploymentValues.Provisioner.DaemonPSK.Value(),
CheckInactiveUsersCancelFunc: dormancy.CheckInactiveUsers(ctx, options.Logger, options.Database),
CheckInactiveUsersCancelFunc: dormancy.CheckInactiveUsers(ctx, options.Logger, quartz.NewReal(), options.Database, options.Auditor),
}
if encKeys := options.DeploymentValues.ExternalTokenEncryptionKeys.Value(); len(encKeys) != 0 {
+69 -3
View File
@@ -107,6 +107,58 @@ Use a YAML configuration file when your server launch become unwieldy.
Write out the current server config as YAML to stdout.
EMAIL OPTIONS:
Configure how emails are sent.
--email-force-tls bool, $CODER_EMAIL_FORCE_TLS (default: false)
Force a TLS connection to the configured SMTP smarthost.
--email-from string, $CODER_EMAIL_FROM
The sender's address to use.
--email-hello string, $CODER_EMAIL_HELLO (default: localhost)
The hostname identifying the SMTP server.
--email-smarthost host:port, $CODER_EMAIL_SMARTHOST (default: localhost:587)
The intermediary SMTP host through which emails are sent.
EMAIL / EMAIL AUTHENTICATION OPTIONS:
Configure SMTP authentication options.
--email-auth-identity string, $CODER_EMAIL_AUTH_IDENTITY
Identity to use with PLAIN authentication.
--email-auth-password string, $CODER_EMAIL_AUTH_PASSWORD
Password to use with PLAIN/LOGIN authentication.
--email-auth-password-file string, $CODER_EMAIL_AUTH_PASSWORD_FILE
File from which to load password for use with PLAIN/LOGIN
authentication.
--email-auth-username string, $CODER_EMAIL_AUTH_USERNAME
Username to use with PLAIN/LOGIN authentication.
EMAIL / EMAIL TLS OPTIONS:
Configure TLS for your SMTP server target.
--email-tls-ca-cert-file string, $CODER_EMAIL_TLS_CACERTFILE
CA certificate file to use.
--email-tls-cert-file string, $CODER_EMAIL_TLS_CERTFILE
Certificate file to use.
--email-tls-cert-key-file string, $CODER_EMAIL_TLS_CERTKEYFILE
Certificate key file to use.
--email-tls-server-name string, $CODER_EMAIL_TLS_SERVERNAME
Server name to verify against the target certificate.
--email-tls-skip-verify bool, $CODER_EMAIL_TLS_SKIPVERIFY
Skip verification of the target server's certificate (insecure).
--email-tls-starttls bool, $CODER_EMAIL_TLS_STARTTLS
Enable STARTTLS to upgrade insecure SMTP connections using TLS.
INTROSPECTION / HEALTH CHECK OPTIONS:
--health-check-refresh duration, $CODER_HEALTH_CHECK_REFRESH (default: 10m0s)
Refresh interval for healthchecks.
@@ -350,54 +402,68 @@ Configure how notifications are processed and delivered.
NOTIFICATIONS / EMAIL OPTIONS:
Configure how email notifications are sent.
--notifications-email-force-tls bool, $CODER_NOTIFICATIONS_EMAIL_FORCE_TLS (default: false)
--notifications-email-force-tls bool, $CODER_NOTIFICATIONS_EMAIL_FORCE_TLS
Force a TLS connection to the configured SMTP smarthost.
DEPRECATED: Use --email-force-tls instead.
--notifications-email-from string, $CODER_NOTIFICATIONS_EMAIL_FROM
The sender's address to use.
DEPRECATED: Use --email-from instead.
--notifications-email-hello string, $CODER_NOTIFICATIONS_EMAIL_HELLO (default: localhost)
--notifications-email-hello string, $CODER_NOTIFICATIONS_EMAIL_HELLO
The hostname identifying the SMTP server.
DEPRECATED: Use --email-hello instead.
--notifications-email-smarthost host:port, $CODER_NOTIFICATIONS_EMAIL_SMARTHOST (default: localhost:587)
--notifications-email-smarthost host:port, $CODER_NOTIFICATIONS_EMAIL_SMARTHOST
The intermediary SMTP host through which emails are sent.
DEPRECATED: Use --email-smarthost instead.
NOTIFICATIONS / EMAIL / EMAIL AUTHENTICATION OPTIONS:
Configure SMTP authentication options.
--notifications-email-auth-identity string, $CODER_NOTIFICATIONS_EMAIL_AUTH_IDENTITY
Identity to use with PLAIN authentication.
DEPRECATED: Use --email-auth-identity instead.
--notifications-email-auth-password string, $CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD
Password to use with PLAIN/LOGIN authentication.
DEPRECATED: Use --email-auth-password instead.
--notifications-email-auth-password-file string, $CODER_NOTIFICATIONS_EMAIL_AUTH_PASSWORD_FILE
File from which to load password for use with PLAIN/LOGIN
authentication.
DEPRECATED: Use --email-auth-password-file instead.
--notifications-email-auth-username string, $CODER_NOTIFICATIONS_EMAIL_AUTH_USERNAME
Username to use with PLAIN/LOGIN authentication.
DEPRECATED: Use --email-auth-username instead.
NOTIFICATIONS / EMAIL / EMAIL TLS OPTIONS:
Configure TLS for your SMTP server target.
--notifications-email-tls-ca-cert-file string, $CODER_NOTIFICATIONS_EMAIL_TLS_CACERTFILE
CA certificate file to use.
DEPRECATED: Use --email-tls-ca-cert-file instead.
--notifications-email-tls-cert-file string, $CODER_NOTIFICATIONS_EMAIL_TLS_CERTFILE
Certificate file to use.
DEPRECATED: Use --email-tls-cert-file instead.
--notifications-email-tls-cert-key-file string, $CODER_NOTIFICATIONS_EMAIL_TLS_CERTKEYFILE
Certificate key file to use.
DEPRECATED: Use --email-tls-cert-key-file instead.
--notifications-email-tls-server-name string, $CODER_NOTIFICATIONS_EMAIL_TLS_SERVERNAME
Server name to verify against the target certificate.
DEPRECATED: Use --email-tls-server-name instead.
--notifications-email-tls-skip-verify bool, $CODER_NOTIFICATIONS_EMAIL_TLS_SKIPVERIFY
Skip verification of the target server's certificate (insecure).
DEPRECATED: Use --email-tls-skip-verify instead.
--notifications-email-tls-starttls bool, $CODER_NOTIFICATIONS_EMAIL_TLS_STARTTLS
Enable STARTTLS to upgrade insecure SMTP connections using TLS.
DEPRECATED: Use --email-tls-starttls instead.
NOTIFICATIONS / WEBHOOK OPTIONS:
--notifications-webhook-endpoint url, $CODER_NOTIFICATIONS_WEBHOOK_ENDPOINT
+1
View File
@@ -172,6 +172,7 @@ func New(ctx context.Context, options *Options) (_ *API, err error) {
}
apiKeyMiddleware := httpmw.ExtractAPIKeyMW(httpmw.ExtractAPIKeyConfig{
DB: options.Database,
ActivateDormantUser: coderd.ActivateDormantUser(options.Logger, &api.AGPL.Auditor, options.Database),
OAuth2Configs: oauthConfigs,
RedirectToLogin: false,
DisableSessionExpiryRefresh: options.DeploymentValues.Sessions.DisableExpiryRefresh.Value(),
+35 -33
View File
@@ -3,14 +3,17 @@ package dormancy
import (
"context"
"database/sql"
"net/http"
"time"
"golang.org/x/xerrors"
"cdr.dev/slog"
"github.com/coder/coder/v2/coderd/audit"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/quartz"
)
const (
@@ -22,50 +25,49 @@ const (
// CheckInactiveUsers function updates status of inactive users from active to dormant
// using default parameters.
func CheckInactiveUsers(ctx context.Context, logger slog.Logger, db database.Store) func() {
return CheckInactiveUsersWithOptions(ctx, logger, db, jobInterval, accountDormancyPeriod)
func CheckInactiveUsers(ctx context.Context, logger slog.Logger, clk quartz.Clock, db database.Store, auditor audit.Auditor) func() {
return CheckInactiveUsersWithOptions(ctx, logger, clk, db, auditor, jobInterval, accountDormancyPeriod)
}
// CheckInactiveUsersWithOptions function updates status of inactive users from active to dormant
// using provided parameters.
func CheckInactiveUsersWithOptions(ctx context.Context, logger slog.Logger, db database.Store, checkInterval, dormancyPeriod time.Duration) func() {
func CheckInactiveUsersWithOptions(ctx context.Context, logger slog.Logger, clk quartz.Clock, db database.Store, auditor audit.Auditor, checkInterval, dormancyPeriod time.Duration) func() {
logger = logger.Named("dormancy")
ctx, cancelFunc := context.WithCancel(ctx)
done := make(chan struct{})
ticker := time.NewTicker(checkInterval)
go func() {
defer close(done)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
}
tf := clk.TickerFunc(ctx, checkInterval, func() error {
startTime := time.Now()
lastSeenAfter := dbtime.Now().Add(-dormancyPeriod)
logger.Debug(ctx, "check inactive user accounts", slog.F("dormancy_period", dormancyPeriod), slog.F("last_seen_after", lastSeenAfter))
startTime := time.Now()
lastSeenAfter := dbtime.Now().Add(-dormancyPeriod)
logger.Debug(ctx, "check inactive user accounts", slog.F("dormancy_period", dormancyPeriod), slog.F("last_seen_after", lastSeenAfter))
updatedUsers, err := db.UpdateInactiveUsersToDormant(ctx, database.UpdateInactiveUsersToDormantParams{
LastSeenAfter: lastSeenAfter,
UpdatedAt: dbtime.Now(),
})
if err != nil && !xerrors.Is(err, sql.ErrNoRows) {
logger.Error(ctx, "can't mark inactive users as dormant", slog.Error(err))
continue
}
for _, u := range updatedUsers {
logger.Info(ctx, "account has been marked as dormant", slog.F("email", u.Email), slog.F("last_seen_at", u.LastSeenAt))
}
logger.Debug(ctx, "checking user accounts is done", slog.F("num_dormant_accounts", len(updatedUsers)), slog.F("execution_time", time.Since(startTime)))
updatedUsers, err := db.UpdateInactiveUsersToDormant(ctx, database.UpdateInactiveUsersToDormantParams{
LastSeenAfter: lastSeenAfter,
UpdatedAt: dbtime.Now(),
})
if err != nil && !xerrors.Is(err, sql.ErrNoRows) {
logger.Error(ctx, "can't mark inactive users as dormant", slog.Error(err))
return nil
}
}()
for _, u := range updatedUsers {
logger.Info(ctx, "account has been marked as dormant", slog.F("email", u.Email), slog.F("last_seen_at", u.LastSeenAt))
audit.BackgroundAudit(ctx, &audit.BackgroundAuditParams[database.User]{
Audit: auditor,
Log: logger,
UserID: u.ID,
Action: database.AuditActionWrite,
Old: database.User{ID: u.ID, Username: u.Username, Status: database.UserStatusActive},
New: database.User{ID: u.ID, Username: u.Username, Status: database.UserStatusDormant},
Status: http.StatusOK,
AdditionalFields: audit.BackgroundTaskFieldsBytes(ctx, logger, audit.BackgroundSubsystemDormancy),
})
}
logger.Debug(ctx, "checking user accounts is done", slog.F("num_dormant_accounts", len(updatedUsers)), slog.F("execution_time", time.Since(startTime)))
return nil
})
return func() {
cancelFunc()
<-done
_ = tf.Wait()
}
}
@@ -10,10 +10,11 @@ import (
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/coder/v2/coderd/audit"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbmem"
"github.com/coder/coder/v2/enterprise/coderd/dormancy"
"github.com/coder/coder/v2/testutil"
"github.com/coder/quartz"
)
func TestCheckInactiveUsers(t *testing.T) {
@@ -42,29 +43,34 @@ func TestCheckInactiveUsers(t *testing.T) {
suspendedUser2 := setupUser(ctx, t, db, "suspended-user-2@coder.com", database.UserStatusSuspended, time.Now().Add(-dormancyPeriod).Add(-time.Hour))
suspendedUser3 := setupUser(ctx, t, db, "suspended-user-3@coder.com", database.UserStatusSuspended, time.Now().Add(-dormancyPeriod).Add(-6*time.Hour))
mAudit := audit.NewMock()
mClock := quartz.NewMock(t)
// Run the periodic job
closeFunc := dormancy.CheckInactiveUsersWithOptions(ctx, logger, db, interval, dormancyPeriod)
closeFunc := dormancy.CheckInactiveUsersWithOptions(ctx, logger, mClock, db, mAudit, interval, dormancyPeriod)
t.Cleanup(closeFunc)
var rows []database.GetUsersRow
var err error
require.Eventually(t, func() bool {
rows, err = db.GetUsers(ctx, database.GetUsersParams{})
if err != nil {
return false
}
dur, w := mClock.AdvanceNext()
require.Equal(t, interval, dur)
w.MustWait(ctx)
var dormant, suspended int
for _, row := range rows {
if row.Status == database.UserStatusDormant {
dormant++
} else if row.Status == database.UserStatusSuspended {
suspended++
}
rows, err := db.GetUsers(ctx, database.GetUsersParams{})
require.NoError(t, err)
var dormant, suspended int
for _, row := range rows {
if row.Status == database.UserStatusDormant {
dormant++
} else if row.Status == database.UserStatusSuspended {
suspended++
}
// 6 users in total, 3 dormant, 3 suspended
return len(rows) == 9 && dormant == 3 && suspended == 3
}, testutil.WaitShort, testutil.IntervalMedium)
}
// 9 users in total, 3 active, 3 dormant, 3 suspended
require.Len(t, rows, 9)
require.Equal(t, 3, dormant)
require.Equal(t, 3, suspended)
require.Len(t, mAudit.AuditLogs(), 3)
allUsers := ignoreUpdatedAt(database.ConvertUserRows(rows))
+560
View File
@@ -2,11 +2,13 @@ package coderd_test
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"net/http"
"sync"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
@@ -14,6 +16,11 @@ import (
"github.com/coder/coder/v2/coderd/coderdtest"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbauthz"
"github.com/coder/coder/v2/coderd/database/dbfake"
"github.com/coder/coder/v2/coderd/database/dbgen"
"github.com/coder/coder/v2/coderd/database/dbtestutil"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/util/ptr"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/enterprise/coderd/coderdenttest"
@@ -295,6 +302,497 @@ func TestWorkspaceQuota(t *testing.T) {
})
}
// nolint:paralleltest,tparallel // Tests must run serially
func TestWorkspaceSerialization(t *testing.T) {
t.Parallel()
if !dbtestutil.WillUsePostgres() {
t.Skip("Serialization errors only occur in postgres")
}
db, _ := dbtestutil.NewDB(t)
user := dbgen.User(t, db, database.User{})
otherUser := dbgen.User(t, db, database.User{})
org := dbfake.Organization(t, db).
EveryoneAllowance(20).
Members(user, otherUser).
Group(database.Group{
QuotaAllowance: 10,
}, user, otherUser).
Group(database.Group{
QuotaAllowance: 10,
}, user).
Do()
otherOrg := dbfake.Organization(t, db).
EveryoneAllowance(20).
Members(user, otherUser).
Group(database.Group{
QuotaAllowance: 10,
}, user, otherUser).
Group(database.Group{
QuotaAllowance: 10,
}, user).
Do()
// TX mixing tests. **DO NOT** run these in parallel.
// The goal here is to mess around with different ordering of
// transactions and queries.
// UpdateBuildDeadline bumps a workspace deadline while doing a quota
// commit to the same workspace build.
//
// Note: This passes if the interrupt is run before 'GetQuota()'
// Passing orders:
// - BeginTX -> Bump! -> GetQuota -> GetAllowance -> UpdateCost -> EndTx
// - BeginTX -> GetQuota -> GetAllowance -> UpdateCost -> Bump! -> EndTx
t.Run("UpdateBuildDeadline", func(t *testing.T) {
t.Log("Expected to fail. As long as quota & deadline are on the same " +
" table and affect the same row, this will likely always fail.")
// +------------------------------+------------------+
// | Begin Tx | |
// +------------------------------+------------------+
// | GetQuota(user) | |
// +------------------------------+------------------+
// | | BumpDeadline(w1) |
// +------------------------------+------------------+
// | GetAllowance(user) | |
// +------------------------------+------------------+
// | UpdateWorkspaceBuildCost(w1) | |
// +------------------------------+------------------+
// | CommitTx() | |
// +------------------------------+------------------+
// pq: could not serialize access due to concurrent update
ctx := testutil.Context(t, testutil.WaitLong)
//nolint:gocritic // testing
ctx = dbauthz.AsSystemRestricted(ctx)
myWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
OrganizationID: org.Org.ID,
OwnerID: user.ID,
}).Do()
bumpDeadline := func() {
err := db.InTx(func(db database.Store) error {
err := db.UpdateWorkspaceBuildDeadlineByID(ctx, database.UpdateWorkspaceBuildDeadlineByIDParams{
Deadline: dbtime.Now(),
MaxDeadline: dbtime.Now(),
UpdatedAt: dbtime.Now(),
ID: myWorkspace.Build.ID,
})
return err
}, &database.TxOptions{
Isolation: sql.LevelSerializable,
})
assert.NoError(t, err)
}
// Start TX
// Run order
quota := newCommitter(t, db, myWorkspace.Workspace, myWorkspace.Build)
quota.GetQuota(ctx, t) // Step 1
bumpDeadline() // Interrupt
quota.GetAllowance(ctx, t) // Step 2
err := quota.DBTx.UpdateWorkspaceBuildCostByID(ctx, database.UpdateWorkspaceBuildCostByIDParams{
ID: myWorkspace.Build.ID,
DailyCost: 10,
}) // Step 3
require.ErrorContains(t, err, "could not serialize access due to concurrent update")
// End commit
require.ErrorContains(t, quota.Done(), "failed transaction")
})
// UpdateOtherBuildDeadline bumps a user's other workspace deadline
// while doing a quota commit.
t.Run("UpdateOtherBuildDeadline", func(t *testing.T) {
// +------------------------------+------------------+
// | Begin Tx | |
// +------------------------------+------------------+
// | GetQuota(user) | |
// +------------------------------+------------------+
// | | BumpDeadline(w2) |
// +------------------------------+------------------+
// | GetAllowance(user) | |
// +------------------------------+------------------+
// | UpdateWorkspaceBuildCost(w1) | |
// +------------------------------+------------------+
// | CommitTx() | |
// +------------------------------+------------------+
// Works!
ctx := testutil.Context(t, testutil.WaitLong)
//nolint:gocritic // testing
ctx = dbauthz.AsSystemRestricted(ctx)
myWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
OrganizationID: org.Org.ID,
OwnerID: user.ID,
}).Do()
// Use the same template
otherWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
OrganizationID: org.Org.ID,
OwnerID: user.ID,
}).
Seed(database.WorkspaceBuild{
TemplateVersionID: myWorkspace.TemplateVersion.ID,
}).
Do()
bumpDeadline := func() {
err := db.InTx(func(db database.Store) error {
err := db.UpdateWorkspaceBuildDeadlineByID(ctx, database.UpdateWorkspaceBuildDeadlineByIDParams{
Deadline: dbtime.Now(),
MaxDeadline: dbtime.Now(),
UpdatedAt: dbtime.Now(),
ID: otherWorkspace.Build.ID,
})
return err
}, &database.TxOptions{
Isolation: sql.LevelSerializable,
})
assert.NoError(t, err)
}
// Start TX
// Run order
quota := newCommitter(t, db, myWorkspace.Workspace, myWorkspace.Build)
quota.GetQuota(ctx, t) // Step 1
bumpDeadline() // Interrupt
quota.GetAllowance(ctx, t) // Step 2
quota.UpdateWorkspaceBuildCostByID(ctx, t, 10) // Step 3
// End commit
require.NoError(t, quota.Done())
})
t.Run("ActivityBump", func(t *testing.T) {
t.Log("Expected to fail. As long as quota & deadline are on the same " +
" table and affect the same row, this will likely always fail.")
// +---------------------+----------------------------------+
// | W1 Quota Tx | |
// +---------------------+----------------------------------+
// | Begin Tx | |
// +---------------------+----------------------------------+
// | GetQuota(w1) | |
// +---------------------+----------------------------------+
// | GetAllowance(w1) | |
// +---------------------+----------------------------------+
// | | ActivityBump(w1) |
// +---------------------+----------------------------------+
// | UpdateBuildCost(w1) | |
// +---------------------+----------------------------------+
// | CommitTx() | |
// +---------------------+----------------------------------+
// pq: could not serialize access due to concurrent update
ctx := testutil.Context(t, testutil.WaitShort)
//nolint:gocritic // testing
ctx = dbauthz.AsSystemRestricted(ctx)
myWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
OrganizationID: org.Org.ID,
OwnerID: user.ID,
}).
Seed(database.WorkspaceBuild{
// Make sure the bump does something
Deadline: dbtime.Now().Add(time.Hour * -20),
}).
Do()
one := newCommitter(t, db, myWorkspace.Workspace, myWorkspace.Build)
// Run order
one.GetQuota(ctx, t)
one.GetAllowance(ctx, t)
err := db.ActivityBumpWorkspace(ctx, database.ActivityBumpWorkspaceParams{
NextAutostart: time.Now(),
WorkspaceID: myWorkspace.Workspace.ID,
})
assert.NoError(t, err)
err = one.DBTx.UpdateWorkspaceBuildCostByID(ctx, database.UpdateWorkspaceBuildCostByIDParams{
ID: myWorkspace.Build.ID,
DailyCost: 10,
})
require.ErrorContains(t, err, "could not serialize access due to concurrent update")
// End commit
assert.ErrorContains(t, one.Done(), "failed transaction")
})
t.Run("BumpLastUsedAt", func(t *testing.T) {
// +---------------------+----------------------------------+
// | W1 Quota Tx | |
// +---------------------+----------------------------------+
// | Begin Tx | |
// +---------------------+----------------------------------+
// | GetQuota(w1) | |
// +---------------------+----------------------------------+
// | GetAllowance(w1) | |
// +---------------------+----------------------------------+
// | | UpdateWorkspaceLastUsedAt(w1) |
// +---------------------+----------------------------------+
// | UpdateBuildCost(w1) | |
// +---------------------+----------------------------------+
// | CommitTx() | |
// +---------------------+----------------------------------+
ctx := testutil.Context(t, testutil.WaitShort)
//nolint:gocritic // testing
ctx = dbauthz.AsSystemRestricted(ctx)
myWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
OrganizationID: org.Org.ID,
OwnerID: user.ID,
}).Do()
one := newCommitter(t, db, myWorkspace.Workspace, myWorkspace.Build)
// Run order
one.GetQuota(ctx, t)
one.GetAllowance(ctx, t)
err := db.UpdateWorkspaceLastUsedAt(ctx, database.UpdateWorkspaceLastUsedAtParams{
ID: myWorkspace.Workspace.ID,
LastUsedAt: dbtime.Now(),
})
assert.NoError(t, err)
one.UpdateWorkspaceBuildCostByID(ctx, t, 10)
// End commit
assert.NoError(t, one.Done())
})
t.Run("UserMod", func(t *testing.T) {
// +---------------------+----------------------------------+
// | W1 Quota Tx | |
// +---------------------+----------------------------------+
// | Begin Tx | |
// +---------------------+----------------------------------+
// | GetQuota(w1) | |
// +---------------------+----------------------------------+
// | GetAllowance(w1) | |
// +---------------------+----------------------------------+
// | | RemoveUserFromOrg |
// +---------------------+----------------------------------+
// | UpdateBuildCost(w1) | |
// +---------------------+----------------------------------+
// | CommitTx() | |
// +---------------------+----------------------------------+
// Works!
ctx := testutil.Context(t, testutil.WaitShort)
//nolint:gocritic // testing
ctx = dbauthz.AsSystemRestricted(ctx)
var err error
myWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
OrganizationID: org.Org.ID,
OwnerID: user.ID,
}).Do()
one := newCommitter(t, db, myWorkspace.Workspace, myWorkspace.Build)
// Run order
one.GetQuota(ctx, t)
one.GetAllowance(ctx, t)
err = db.DeleteOrganizationMember(ctx, database.DeleteOrganizationMemberParams{
OrganizationID: myWorkspace.Workspace.OrganizationID,
UserID: user.ID,
})
assert.NoError(t, err)
one.UpdateWorkspaceBuildCostByID(ctx, t, 10)
// End commit
assert.NoError(t, one.Done())
})
// QuotaCommit 2 workspaces in different orgs.
// Workspaces do not share templates, owners, or orgs
t.Run("DoubleQuotaUnrelatedWorkspaces", func(t *testing.T) {
// +---------------------+---------------------+
// | W1 Quota Tx | W2 Quota Tx |
// +---------------------+---------------------+
// | Begin Tx | |
// +---------------------+---------------------+
// | | Begin Tx |
// +---------------------+---------------------+
// | GetQuota(w1) | |
// +---------------------+---------------------+
// | GetAllowance(w1) | |
// +---------------------+---------------------+
// | UpdateBuildCost(w1) | |
// +---------------------+---------------------+
// | | UpdateBuildCost(w2) |
// +---------------------+---------------------+
// | | GetQuota(w2) |
// +---------------------+---------------------+
// | | GetAllowance(w2) |
// +---------------------+---------------------+
// | CommitTx() | |
// +---------------------+---------------------+
// | | CommitTx() |
// +---------------------+---------------------+
ctx := testutil.Context(t, testutil.WaitLong)
//nolint:gocritic // testing
ctx = dbauthz.AsSystemRestricted(ctx)
myWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
OrganizationID: org.Org.ID,
OwnerID: user.ID,
}).Do()
myOtherWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
OrganizationID: otherOrg.Org.ID, // Different org!
OwnerID: otherUser.ID,
}).Do()
one := newCommitter(t, db, myWorkspace.Workspace, myWorkspace.Build)
two := newCommitter(t, db, myOtherWorkspace.Workspace, myOtherWorkspace.Build)
// Run order
one.GetQuota(ctx, t)
one.GetAllowance(ctx, t)
one.UpdateWorkspaceBuildCostByID(ctx, t, 10)
two.GetQuota(ctx, t)
two.GetAllowance(ctx, t)
two.UpdateWorkspaceBuildCostByID(ctx, t, 10)
// End commit
assert.NoError(t, one.Done())
assert.NoError(t, two.Done())
})
// QuotaCommit 2 workspaces in different orgs.
// Workspaces do not share templates or orgs
t.Run("DoubleQuotaUserWorkspacesDiffOrgs", func(t *testing.T) {
// +---------------------+---------------------+
// | W1 Quota Tx | W2 Quota Tx |
// +---------------------+---------------------+
// | Begin Tx | |
// +---------------------+---------------------+
// | | Begin Tx |
// +---------------------+---------------------+
// | GetQuota(w1) | |
// +---------------------+---------------------+
// | GetAllowance(w1) | |
// +---------------------+---------------------+
// | UpdateBuildCost(w1) | |
// +---------------------+---------------------+
// | | UpdateBuildCost(w2) |
// +---------------------+---------------------+
// | | GetQuota(w2) |
// +---------------------+---------------------+
// | | GetAllowance(w2) |
// +---------------------+---------------------+
// | CommitTx() | |
// +---------------------+---------------------+
// | | CommitTx() |
// +---------------------+---------------------+
ctx := testutil.Context(t, testutil.WaitLong)
//nolint:gocritic // testing
ctx = dbauthz.AsSystemRestricted(ctx)
myWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
OrganizationID: org.Org.ID,
OwnerID: user.ID,
}).Do()
myOtherWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
OrganizationID: otherOrg.Org.ID, // Different org!
OwnerID: user.ID,
}).Do()
one := newCommitter(t, db, myWorkspace.Workspace, myWorkspace.Build)
two := newCommitter(t, db, myOtherWorkspace.Workspace, myOtherWorkspace.Build)
// Run order
one.GetQuota(ctx, t)
one.GetAllowance(ctx, t)
one.UpdateWorkspaceBuildCostByID(ctx, t, 10)
two.GetQuota(ctx, t)
two.GetAllowance(ctx, t)
two.UpdateWorkspaceBuildCostByID(ctx, t, 10)
// End commit
assert.NoError(t, one.Done())
assert.NoError(t, two.Done())
})
// QuotaCommit 2 workspaces in the same org.
// Workspaces do not share templates
t.Run("DoubleQuotaUserWorkspaces", func(t *testing.T) {
t.Log("Setting a new build cost to a workspace in a org affects other " +
"workspaces in the same org. This is expected to fail.")
// +---------------------+---------------------+
// | W1 Quota Tx | W2 Quota Tx |
// +---------------------+---------------------+
// | Begin Tx | |
// +---------------------+---------------------+
// | | Begin Tx |
// +---------------------+---------------------+
// | GetQuota(w1) | |
// +---------------------+---------------------+
// | GetAllowance(w1) | |
// +---------------------+---------------------+
// | UpdateBuildCost(w1) | |
// +---------------------+---------------------+
// | | UpdateBuildCost(w2) |
// +---------------------+---------------------+
// | | GetQuota(w2) |
// +---------------------+---------------------+
// | | GetAllowance(w2) |
// +---------------------+---------------------+
// | CommitTx() | |
// +---------------------+---------------------+
// | | CommitTx() |
// +---------------------+---------------------+
// pq: could not serialize access due to read/write dependencies among transactions
ctx := testutil.Context(t, testutil.WaitLong)
//nolint:gocritic // testing
ctx = dbauthz.AsSystemRestricted(ctx)
myWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
OrganizationID: org.Org.ID,
OwnerID: user.ID,
}).Do()
myOtherWorkspace := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
OrganizationID: org.Org.ID,
OwnerID: user.ID,
}).Do()
one := newCommitter(t, db, myWorkspace.Workspace, myWorkspace.Build)
two := newCommitter(t, db, myOtherWorkspace.Workspace, myOtherWorkspace.Build)
// Run order
one.GetQuota(ctx, t)
one.GetAllowance(ctx, t)
one.UpdateWorkspaceBuildCostByID(ctx, t, 10)
two.GetQuota(ctx, t)
two.GetAllowance(ctx, t)
two.UpdateWorkspaceBuildCostByID(ctx, t, 10)
// End commit
assert.NoError(t, one.Done())
assert.ErrorContains(t, two.Done(), "could not serialize access due to read/write dependencies among transactions")
})
}
func deprecatedQuotaEndpoint(ctx context.Context, client *codersdk.Client, userID string) (codersdk.WorkspaceQuota, error) {
res, err := client.Request(ctx, http.MethodGet, fmt.Sprintf("/api/v2/workspace-quota/%s", userID), nil)
if err != nil {
@@ -335,3 +833,65 @@ func applyWithCost(cost int32) []*proto.Response {
},
}}
}
// committer wraps a single open database transaction together with the
// workspace and build it operates on. It performs the same steps as
// CommitQuota, but exposes each action (GetQuota, GetAllowance,
// UpdateWorkspaceBuildCostByID) as an individual method so a test can
// interleave two committers' actions and control the timing, then finish
// each tx with Done. This keeps the serialization-conflict tests concise.
type committer struct {
	DBTx *dbtestutil.DBTx        // open tx started by newCommitter (serializable isolation); finished via Done
	w    database.WorkspaceTable // workspace whose OwnerID/OrganizationID scope the quota queries
	b    database.WorkspaceBuild // build whose DailyCost is updated
}
// newCommitter opens a serializable read-write transaction against db and
// returns a committer bound to the given workspace and build. The caller is
// expected to drive the quota steps manually and end the tx with Done.
func newCommitter(t *testing.T, db database.Store, workspace database.WorkspaceTable, build database.WorkspaceBuild) *committer {
	tx := dbtestutil.StartTx(t, db, &database.TxOptions{
		Isolation: sql.LevelSerializable,
		ReadOnly:  false,
	})
	return &committer{
		DBTx: tx,
		w:    workspace,
		b:    build,
	}
}
// GetQuota returns the quota consumed by the workspace owner within the
// workspace's organization, as seen by this committer's transaction.
// Touches:
// - workspace_builds
// - workspaces
func (c *committer) GetQuota(ctx context.Context, t *testing.T) int64 {
	t.Helper()
	params := database.GetQuotaConsumedForUserParams{
		OwnerID:        c.w.OwnerID,
		OrganizationID: c.w.OrganizationID,
	}
	consumed, err := c.DBTx.GetQuotaConsumedForUser(ctx, params)
	require.NoError(t, err)
	return consumed
}
// GetAllowance returns the quota allowance granted to the workspace owner in
// the workspace's organization, as seen by this committer's transaction.
// Touches:
// - group_members_expanded
// - users
// - groups
// - org_members
func (c *committer) GetAllowance(ctx context.Context, t *testing.T) int64 {
	t.Helper()
	params := database.GetQuotaAllowanceForUserParams{
		UserID:         c.w.OwnerID,
		OrganizationID: c.w.OrganizationID,
	}
	allowance, err := c.DBTx.GetQuotaAllowanceForUser(ctx, params)
	require.NoError(t, err)
	return allowance
}
// UpdateWorkspaceBuildCostByID sets the daily cost of this committer's build
// inside its transaction. It reports success via assert (rather than require)
// so the test keeps running and the tx can still be finished with Done.
func (c *committer) UpdateWorkspaceBuildCostByID(ctx context.Context, t *testing.T, cost int32) bool {
	t.Helper()
	params := database.UpdateWorkspaceBuildCostByIDParams{
		ID:        c.b.ID,
		DailyCost: cost,
	}
	return assert.NoError(t, c.DBTx.UpdateWorkspaceBuildCostByID(ctx, params))
}
// Done finishes the committer's transaction via the underlying DBTx and
// returns any resulting error. The tests above expect serialization
// failures (e.g. "could not serialize access ...") to surface here, which
// suggests Done commits the tx — confirm against dbtestutil.DBTx.Done.
func (c *committer) Done() error {
	return c.DBTx.Done()
}
+12 -11
View File
@@ -34,22 +34,23 @@ env:
value: "0.0.0.0:2112"
{{- if and (empty .Values.provisionerDaemon.pskSecretName) (empty .Values.provisionerDaemon.keySecretName) }}
{{ fail "Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified." }}
{{- else if and (.Values.provisionerDaemon.pskSecretName) (.Values.provisionerDaemon.keySecretName) }}
{{ fail "Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified, but not both." }}
{{- end }}
{{- if .Values.provisionerDaemon.pskSecretName }}
- name: CODER_PROVISIONER_DAEMON_PSK
valueFrom:
secretKeyRef:
name: {{ .Values.provisionerDaemon.pskSecretName | quote }}
key: psk
{{- end }}
{{- if and .Values.provisionerDaemon.keySecretName .Values.provisionerDaemon.keySecretKey }}
{{- else if and .Values.provisionerDaemon.keySecretName .Values.provisionerDaemon.keySecretKey }}
{{- if and (not (empty .Values.provisionerDaemon.pskSecretName)) (ne .Values.provisionerDaemon.pskSecretName "coder-provisioner-psk") }}
{{ fail "Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified, but not both." }}
{{- else if .Values.provisionerDaemon.tags }}
{{ fail "provisionerDaemon.tags may not be specified with provisionerDaemon.keySecretName." }}
{{- end }}
- name: CODER_PROVISIONER_DAEMON_KEY
valueFrom:
secretKeyRef:
name: {{ .Values.provisionerDaemon.keySecretName | quote }}
key: {{ .Values.provisionerDaemon.keySecretKey | quote }}
{{- else }}
- name: CODER_PROVISIONER_DAEMON_PSK
valueFrom:
secretKeyRef:
name: {{ .Values.provisionerDaemon.pskSecretName | quote }}
key: psk
{{- end }}
{{- if include "provisioner.tags" . }}
- name: CODER_PROVISIONERD_TAGS
+10
View File
@@ -56,6 +56,12 @@ var testCases = []testCase{
name: "provisionerd_key",
expectedError: "",
},
// Test explicitly for the workaround where setting provisionerDaemon.pskSecretName=""
// was required to use provisioner keys.
{
name: "provisionerd_key_psk_empty_workaround",
expectedError: "",
},
{
name: "provisionerd_psk_and_key",
expectedError: `Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified, but not both.`,
@@ -64,6 +70,10 @@ var testCases = []testCase{
name: "provisionerd_no_psk_or_key",
expectedError: `Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified.`,
},
{
name: "provisionerd_key_tags",
expectedError: `provisionerDaemon.tags may not be specified with provisionerDaemon.keySecretName.`,
},
{
name: "extra_templates",
expectedError: "",
@@ -112,8 +112,6 @@ spec:
secretKeyRef:
key: provisionerd-key
name: coder-provisionerd-key
- name: CODER_PROVISIONERD_TAGS
value: clusterType=k8s,location=auh
- name: CODER_URL
value: http://coder.default.svc.cluster.local
image: ghcr.io/coder/coder:latest
-4
View File
@@ -2,9 +2,5 @@ coder:
image:
tag: latest
provisionerDaemon:
pskSecretName: ""
keySecretName: "coder-provisionerd-key"
keySecretKey: "provisionerd-key"
tags:
location: auh
clusterType: k8s
@@ -0,0 +1,135 @@
---
# Source: coder-provisioner/templates/coder.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
annotations: {}
labels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: coder-provisioner
app.kubernetes.io/part-of: coder-provisioner
app.kubernetes.io/version: 0.1.0
helm.sh/chart: coder-provisioner-0.1.0
name: coder-provisioner
---
# Source: coder-provisioner/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: coder-provisioner-workspace-perms
rules:
- apiGroups: [""]
resources: ["pods"]
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- deployments
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
---
# Source: coder-provisioner/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: "coder-provisioner"
subjects:
- kind: ServiceAccount
name: "coder-provisioner"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: coder-provisioner-workspace-perms
---
# Source: coder-provisioner/templates/coder.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: {}
labels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: coder-provisioner
app.kubernetes.io/part-of: coder-provisioner
app.kubernetes.io/version: 0.1.0
helm.sh/chart: coder-provisioner-0.1.0
name: coder-provisioner
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: coder-provisioner
template:
metadata:
annotations: {}
labels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: coder-provisioner
app.kubernetes.io/part-of: coder-provisioner
app.kubernetes.io/version: 0.1.0
helm.sh/chart: coder-provisioner-0.1.0
spec:
containers:
- args:
- provisionerd
- start
command:
- /opt/coder
env:
- name: CODER_PROMETHEUS_ADDRESS
value: 0.0.0.0:2112
- name: CODER_PROVISIONER_DAEMON_KEY
valueFrom:
secretKeyRef:
key: provisionerd-key
name: coder-provisionerd-key
- name: CODER_URL
value: http://coder.default.svc.cluster.local
image: ghcr.io/coder/coder:latest
imagePullPolicy: IfNotPresent
lifecycle: {}
name: coder
ports: null
resources: {}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: null
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
volumeMounts: []
restartPolicy: Always
serviceAccountName: coder-provisioner
terminationGracePeriodSeconds: 600
volumes: []
@@ -0,0 +1,7 @@
coder:
image:
tag: latest
provisionerDaemon:
pskSecretName: ""
keySecretName: "coder-provisionerd-key"
keySecretKey: "provisionerd-key"
@@ -0,0 +1,9 @@
coder:
image:
tag: latest
provisionerDaemon:
keySecretName: "coder-provisionerd-key"
keySecretKey: "provisionerd-key"
tags:
location: auh
clusterType: k8s
@@ -4,6 +4,3 @@ coder:
provisionerDaemon:
pskSecretName: ""
keySecretName: ""
tags:
location: auh
clusterType: k8s
+1 -1
View File
@@ -111,7 +111,7 @@ spec:
valueFrom:
secretKeyRef:
key: psk
name: coder-provisionerd-psk
name: not-the-default-coder-provisioner-psk
- name: CODER_PROVISIONERD_TAGS
value: clusterType=k8s,location=auh
- name: CODER_URL
+1 -1
View File
@@ -2,7 +2,7 @@ coder:
image:
tag: latest
provisionerDaemon:
pskSecretName: "coder-provisionerd-psk"
pskSecretName: "not-the-default-coder-provisioner-psk"
tags:
location: auh
clusterType: k8s
@@ -2,7 +2,7 @@ coder:
image:
tag: latest
provisionerDaemon:
pskSecretName: "coder-provisionerd-psk"
pskSecretName: "not-the-default-coder-provisioner-psk"
keySecretName: "coder-provisionerd-key"
keySecretKey: "provisionerd-key"
tags:
+10 -1
View File
@@ -204,14 +204,23 @@ provisionerDaemon:
# provisionerDaemon.keySecretName -- The name of the Kubernetes
# secret that contains a provisioner key to use to authenticate with Coder.
# See: https://coder.com/docs/admin/provisioners#authentication
# NOTE: it is not permitted to specify both provisionerDaemon.keySecretName
# and provisionerDaemon.pskSecretName. An exception is made for the purposes
# of backwards-compatibility: if provisionerDaemon.pskSecretName is unchanged
# from the default value and provisionerDaemon.keySecretName is set, then
# provisionerDaemon.keySecretName and provisionerDaemon.keySecretKey will take
# precedence over provisionerDaemon.pskSecretName.
keySecretName: ""
# provisionerDaemon.keySecretKey -- The key of the Kubernetes
# secret specified in provisionerDaemon.keySecretName that contains
# the provisioner key. Defaults to "key".
keySecretKey: "key"
# provisionerDaemon.tags -- Tags to filter provisioner jobs by.
# provisionerDaemon.tags -- If using a PSK, specify the set of provisioner
# job tags for which this provisioner daemon is responsible.
# See: https://coder.com/docs/admin/provisioners#provisioner-tags
# NOTE: it is not permitted to specify both provisionerDaemon.tags and
# provisionerDaemon.keySecretName.
tags:
{}
# location: usa
+1
View File
@@ -328,6 +328,7 @@ export interface CreateUserRequestWithOrgs {
readonly name: string;
readonly password: string;
readonly login_type: LoginType;
readonly user_status?: UserStatus;
readonly organization_ids: Readonly<Array<string>>;
}
@@ -22,9 +22,3 @@ export default meta;
type Story = StoryObj<typeof ActiveUserChart>;
export const Example: Story = {};
export const UserLimit: Story = {
args: {
userLimit: 10,
},
};
@@ -14,7 +14,6 @@ import {
Tooltip,
defaults,
} from "chart.js";
import annotationPlugin from "chartjs-plugin-annotation";
import {
HelpTooltip,
HelpTooltipContent,
@@ -36,21 +35,16 @@ ChartJS.register(
Title,
Tooltip,
Legend,
annotationPlugin,
);
const USER_LIMIT_DISPLAY_THRESHOLD = 60;
export interface ActiveUserChartProps {
data: readonly { date: string; amount: number }[];
interval: "day" | "week";
userLimit: number | undefined;
}
export const ActiveUserChart: FC<ActiveUserChartProps> = ({
data,
interval,
userLimit,
}) => {
const theme = useTheme();
@@ -64,24 +58,6 @@ export const ActiveUserChart: FC<ActiveUserChartProps> = ({
responsive: true,
animation: false,
plugins: {
annotation: {
annotations: [
{
type: "line",
scaleID: "y",
display: shouldDisplayUserLimit(userLimit, chartData),
value: userLimit,
borderColor: theme.palette.secondary.contrastText,
borderWidth: 5,
label: {
content: "User limit",
color: theme.palette.primary.contrastText,
display: true,
font: { weight: "normal" },
},
},
],
},
legend: {
display: false,
},
@@ -103,7 +79,6 @@ export const ActiveUserChart: FC<ActiveUserChartProps> = ({
precision: 0,
},
},
x: {
grid: { color: theme.palette.divider },
ticks: {
@@ -138,32 +113,26 @@ export const ActiveUserChart: FC<ActiveUserChartProps> = ({
);
};
export const ActiveUsersTitle: FC = () => {
type ActiveUsersTitleProps = {
interval: "day" | "week";
};
export const ActiveUsersTitle: FC<ActiveUsersTitleProps> = ({ interval }) => {
return (
<div css={{ display: "flex", alignItems: "center", gap: 8 }}>
Active Users
{interval === "day" ? "Daily" : "Weekly"} Active Users
<HelpTooltip>
<HelpTooltipTrigger size="small" />
<HelpTooltipContent>
<HelpTooltipTitle>How do we calculate active users?</HelpTooltipTitle>
<HelpTooltipText>
When a connection is initiated to a user&apos;s workspace they are
considered an active user. e.g. apps, web terminal, SSH
considered an active user. e.g. apps, web terminal, SSH. This is for
measuring user activity and has no connection to license
consumption.
</HelpTooltipText>
</HelpTooltipContent>
</HelpTooltip>
</div>
);
};
function shouldDisplayUserLimit(
userLimit: number | undefined,
activeUsers: number[],
): boolean {
if (!userLimit || activeUsers.length === 0) {
return false;
}
return (
Math.max(...activeUsers) >= (userLimit * USER_LIMIT_DISPLAY_THRESHOLD) / 100
);
}
@@ -23,7 +23,7 @@ export const AuditLogDescription: FC<AuditLogDescriptionProps> = ({
target = "";
}
// This occurs when SCIM creates a user.
// This occurs when SCIM creates a user, or dormancy changes a user's status.
if (
auditLog.resource_type === "user" &&
auditLog.additional_fields?.automatic_actor === "coder"
@@ -50,13 +50,6 @@ type Story = StoryObj<typeof GeneralSettingsPageView>;
export const Page: Story = {};
export const WithUserLimit: Story = {
args: {
deploymentDAUs: MockDeploymentDAUResponse,
entitlements: MockEntitlementsWithUserLimit,
},
};
export const NoDAUs: Story = {
args: {
deploymentDAUs: undefined,
@@ -49,16 +49,8 @@ export const GeneralSettingsPageView: FC<GeneralSettingsPageViewProps> = ({
)}
{deploymentDAUs && (
<div css={{ marginBottom: 24, height: 200 }}>
<ChartSection title={<ActiveUsersTitle />}>
<ActiveUserChart
data={deploymentDAUs.entries}
interval="day"
userLimit={
entitlements?.features.user_limit.enabled
? entitlements?.features.user_limit.limit
: undefined
}
/>
<ChartSection title={<ActiveUsersTitle interval="day" />}>
<ActiveUserChart data={deploymentDAUs.entries} interval="day" />
</ChartSection>
</div>
)}
@@ -868,11 +868,3 @@ export const Loaded: Story = {
},
},
};
export const LoadedWithUserLimit: Story = {
...Loaded,
args: {
...Loaded.args,
entitlements: MockEntitlementsWithUserLimit,
},
};
@@ -249,7 +249,7 @@ const ActiveUsersPanel: FC<ActiveUsersPanelProps> = ({
<Panel {...panelProps}>
<PanelHeader>
<PanelTitle>
<ActiveUsersTitle />
<ActiveUsersTitle interval={interval} />
</PanelTitle>
</PanelHeader>
<PanelContent>
@@ -258,7 +258,6 @@ const ActiveUsersPanel: FC<ActiveUsersPanelProps> = ({
{data && data.length > 0 && (
<ActiveUserChart
interval={interval}
userLimit={userLimit}
data={data.map((d) => ({
amount: d.active_users,
date: d.start_time,
+1
View File
@@ -35,6 +35,7 @@
"dotfiles.svg",
"dotnet.svg",
"fedora.svg",
"filebrowser.svg",
"fleet.svg",
"fly.io.svg",
"folder.svg",
+147
View File
@@ -0,0 +1,147 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xml:space="preserve"
width="560"
height="560"
version="1.1"
style="clip-rule:evenodd;fill-rule:evenodd;image-rendering:optimizeQuality;shape-rendering:geometricPrecision;text-rendering:geometricPrecision"
viewBox="0 0 560 560"
id="svg44"
sodipodi:docname="icon_raw.svg"
inkscape:version="0.92.3 (2405546, 2018-03-11)"
inkscape:export-filename="/home/umarcor/filebrowser/logo/icon_raw.svg.png"
inkscape:export-xdpi="96"
inkscape:export-ydpi="96"><metadata
id="metadata48"><rdf:RDF><cc:Work
rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><sodipodi:namedview
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1"
objecttolerance="10"
gridtolerance="10"
guidetolerance="10"
inkscape:pageopacity="0"
inkscape:pageshadow="2"
inkscape:window-width="1366"
inkscape:window-height="711"
id="namedview46"
showgrid="false"
inkscape:zoom="0.33714286"
inkscape:cx="-172.33051"
inkscape:cy="280"
inkscape:window-x="0"
inkscape:window-y="20"
inkscape:window-maximized="1"
inkscape:current-layer="svg44" />
<defs
id="defs4">
<style
type="text/css"
id="style2">
<![CDATA[
.fil1 {fill:#FEFEFE}
.fil6 {fill:#006498}
.fil7 {fill:#0EA5EB}
.fil8 {fill:#2979FF}
.fil3 {fill:#2BBCFF}
.fil0 {fill:#455A64}
.fil4 {fill:#53C6FC}
.fil5 {fill:#BDEAFF}
.fil2 {fill:#332C2B;fill-opacity:0.149020}
]]>
</style>
</defs>
<g
id="g85"
transform="translate(-70,-70)"><path
class="fil1"
d="M 350,71 C 504,71 629,196 629,350 629,504 504,629 350,629 196,629 71,504 71,350 71,196 196,71 350,71 Z"
id="path9"
inkscape:connector-curvature="0"
style="fill:#fefefe" /><path
class="fil2"
d="M 475,236 593,387 C 596,503 444,639 301,585 L 225,486 339,330 c 0,0 138,-95 136,-94 z"
id="path11"
inkscape:connector-curvature="0"
style="fill:#332c2b;fill-opacity:0.14902003" /><path
class="fil3"
d="m 231,211 h 208 l 38,24 v 246 c 0,5 -3,8 -8,8 H 231 c -5,0 -8,-3 -8,-8 V 219 c 0,-5 3,-8 8,-8 z"
id="path13"
inkscape:connector-curvature="0"
style="fill:#2bbcff" /><path
class="fil4"
d="m 231,211 h 208 l 38,24 v 2 L 440,214 H 231 c -4,0 -7,3 -7,7 v 263 c -1,-1 -1,-2 -1,-3 V 219 c 0,-5 3,-8 8,-8 z"
id="path15"
inkscape:connector-curvature="0"
style="fill:#53c6fc" /><polygon
class="fil5"
points="305,212 418,212 418,310 305,310 "
id="polygon17"
style="fill:#bdeaff" /><path
class="fil5"
d="m 255,363 h 189 c 3,0 5,2 5,4 V 483 H 250 V 367 c 0,-2 2,-4 5,-4 z"
id="path19"
inkscape:connector-curvature="0"
style="fill:#bdeaff" /><polygon
class="fil6"
points="250,470 449,470 449,483 250,483 "
id="polygon21"
style="fill:#006498" /><path
class="fil6"
d="m 380,226 h 10 c 3,0 6,2 6,5 v 40 c 0,3 -3,6 -6,6 h -10 c -3,0 -6,-3 -6,-6 v -40 c 0,-3 3,-5 6,-5 z"
id="path23"
inkscape:connector-curvature="0"
style="fill:#006498" /><path
class="fil1"
d="m 254,226 c 10,0 17,7 17,17 0,9 -7,16 -17,16 -9,0 -17,-7 -17,-16 0,-10 8,-17 17,-17 z"
id="path25"
inkscape:connector-curvature="0"
style="fill:#fefefe" /><path
class="fil6"
d="m 267,448 h 165 c 2,0 3,1 3,3 v 0 c 0,1 -1,3 -3,3 H 267 c -2,0 -3,-2 -3,-3 v 0 c 0,-2 1,-3 3,-3 z"
id="path27"
inkscape:connector-curvature="0"
style="fill:#006498" /><path
class="fil6"
d="m 267,415 h 165 c 2,0 3,1 3,3 v 0 c 0,1 -1,2 -3,2 H 267 c -2,0 -3,-1 -3,-2 v 0 c 0,-2 1,-3 3,-3 z"
id="path29"
inkscape:connector-curvature="0"
style="fill:#006498" /><path
class="fil6"
d="m 267,381 h 165 c 2,0 3,2 3,3 v 0 c 0,2 -1,3 -3,3 H 267 c -2,0 -3,-1 -3,-3 v 0 c 0,-1 1,-3 3,-3 z"
id="path31"
inkscape:connector-curvature="0"
style="fill:#006498" /><path
class="fil1"
d="m 236,472 c 3,0 5,2 5,5 0,2 -2,4 -5,4 -3,0 -5,-2 -5,-4 0,-3 2,-5 5,-5 z"
id="path33"
inkscape:connector-curvature="0"
style="fill:#fefefe" /><path
class="fil1"
d="m 463,472 c 3,0 5,2 5,5 0,2 -2,4 -5,4 -3,0 -5,-2 -5,-4 0,-3 2,-5 5,-5 z"
id="path35"
inkscape:connector-curvature="0"
style="fill:#fefefe" /><polygon
class="fil6"
points="305,212 284,212 284,310 305,310 "
id="polygon37"
style="fill:#006498" /><path
class="fil7"
d="m 477,479 v 2 c 0,5 -3,8 -8,8 H 231 c -5,0 -8,-3 -8,-8 v -2 c 0,4 3,8 8,8 h 238 c 5,0 8,-4 8,-8 z"
id="path39"
inkscape:connector-curvature="0"
style="fill:#0ea5eb" /><path
class="fil8"
d="M 350,70 C 505,70 630,195 630,350 630,505 505,630 350,630 195,630 70,505 70,350 70,195 195,70 350,70 Z m 0,46 C 479,116 584,221 584,350 584,479 479,584 350,584 221,584 116,479 116,350 116,221 221,116 350,116 Z"
id="path41"
inkscape:connector-curvature="0"
style="fill:#2979ff" /></g>
</svg>

After

Width:  |  Height:  |  Size: 5.4 KiB