Compare commits
70 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| e2bb3a090e | |||
| 4b3889e4f9 | |||
| 7224977fa6 | |||
| 47a621cd4e | |||
| a35f9810d0 | |||
| 06039a51ff | |||
| 1591f42d9b | |||
| 0822cbdafe | |||
| 6ed10c05af | |||
| 0df864fb88 | |||
| ebd7ab11cb | |||
| 00713385fb | |||
| 6d41d98b65 | |||
| 01f06671a1 | |||
| a613ffa3d6 | |||
| df84cea924 | |||
| 55d1a32424 | |||
| bcb437d281 | |||
| 45280d5516 | |||
| 8e947e506f | |||
| b7f08811c3 | |||
| a9180d406e | |||
| b1d5f77cf0 | |||
| ef25baf581 | |||
| 2cc8cc59fa | |||
| 5f3be6b288 | |||
| b1f48f8d47 | |||
| 21d4d0196d | |||
| 1e1d312cab | |||
| c9909817a8 | |||
| 72438a0e55 | |||
| 220b9f3cc5 | |||
| 60e3ab7632 | |||
| 35c7cda760 | |||
| adc7775405 | |||
| 194d79402e | |||
| 47b8ca940c | |||
| 7f7ff9cd40 | |||
| 5cf97955a0 | |||
| 8e9638c750 | |||
| fcf431c1d7 | |||
| 0938981ebf | |||
| 87b382cc85 | |||
| be94af386c | |||
| e27c4dcd92 | |||
| c2c2b6f16f | |||
| 058f8f1f7c | |||
| 0ab54fd63a | |||
| 6ac0244960 | |||
| 6338be3b30 | |||
| 72d7b6567b | |||
| 342d2e4bed | |||
| 8bcfeab500 | |||
| 5224387c5a | |||
| 52af6eac68 | |||
| 8990a107a0 | |||
| 53ceea918b | |||
| 19d24075da | |||
| d017c27eaf | |||
| 0bab4a2042 | |||
| f3cd74d9d8 | |||
| e3b4099c9d | |||
| fa2481c650 | |||
| 2c0ffdd590 | |||
| e8fa04404f | |||
| f11a8086b0 | |||
| 95b3bc9c7a | |||
| 93b000776f | |||
| e6fbf501ac | |||
| d3036d569e |
@@ -0,0 +1,4 @@
|
||||
# All artifacts of the build processed are dumped here.
|
||||
# Ignore it for docker context, as all Dockerfiles should build their own
|
||||
# binaries.
|
||||
build
|
||||
@@ -181,7 +181,7 @@ jobs:
|
||||
echo "LINT_CACHE_DIR=$dir" >> "$GITHUB_ENV"
|
||||
|
||||
- name: golangci-lint cache
|
||||
uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2
|
||||
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
|
||||
with:
|
||||
path: |
|
||||
${{ env.LINT_CACHE_DIR }}
|
||||
@@ -241,7 +241,9 @@ jobs:
|
||||
|
||||
lint-actions:
|
||||
needs: changes
|
||||
if: needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
# Only run this job if changes to CI workflow files are detected. This job
|
||||
# can flake as it reaches out to GitHub to check referenced actions.
|
||||
if: needs.changes.outputs.ci == 'true'
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
@@ -1184,7 +1186,7 @@ jobs:
|
||||
persist-credentials: false
|
||||
|
||||
- name: GHCR Login
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
@@ -1391,7 +1393,7 @@ jobs:
|
||||
id: attest_main
|
||||
if: github.ref == 'refs/heads/main'
|
||||
continue-on-error: true
|
||||
uses: actions/attest@7667f588f2f73a90cea6c7ac70e78266c4f76616 # v3.1.0
|
||||
uses: actions/attest@e59cbc1ad1ac2d59339667419eb8cdde6eb61e3d # v3.2.0
|
||||
with:
|
||||
subject-name: "ghcr.io/coder/coder-preview:main"
|
||||
predicate-type: "https://slsa.dev/provenance/v1"
|
||||
@@ -1428,7 +1430,7 @@ jobs:
|
||||
id: attest_latest
|
||||
if: github.ref == 'refs/heads/main'
|
||||
continue-on-error: true
|
||||
uses: actions/attest@7667f588f2f73a90cea6c7ac70e78266c4f76616 # v3.1.0
|
||||
uses: actions/attest@e59cbc1ad1ac2d59339667419eb8cdde6eb61e3d # v3.2.0
|
||||
with:
|
||||
subject-name: "ghcr.io/coder/coder-preview:latest"
|
||||
predicate-type: "https://slsa.dev/provenance/v1"
|
||||
@@ -1465,7 +1467,7 @@ jobs:
|
||||
id: attest_version
|
||||
if: github.ref == 'refs/heads/main'
|
||||
continue-on-error: true
|
||||
uses: actions/attest@7667f588f2f73a90cea6c7ac70e78266c4f76616 # v3.1.0
|
||||
uses: actions/attest@e59cbc1ad1ac2d59339667419eb8cdde6eb61e3d # v3.2.0
|
||||
with:
|
||||
subject-name: "ghcr.io/coder/coder-preview:${{ steps.build-docker.outputs.tag }}"
|
||||
predicate-type: "https://slsa.dev/provenance/v1"
|
||||
|
||||
@@ -76,7 +76,7 @@ jobs:
|
||||
persist-credentials: false
|
||||
|
||||
- name: GHCR Login
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
|
||||
@@ -48,7 +48,7 @@ jobs:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Docker login
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
|
||||
@@ -42,7 +42,7 @@ jobs:
|
||||
# on version 2.29 and above.
|
||||
nix_version: "2.28.5"
|
||||
|
||||
- uses: nix-community/cache-nix-action@106bba72ed8e29c8357661199511ef07790175e9 # v7.0.1
|
||||
- uses: nix-community/cache-nix-action@7df957e333c1e5da7721f60227dbba6d06080569 # v7.0.2
|
||||
with:
|
||||
# restore and save a cache using this key
|
||||
primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
|
||||
@@ -82,7 +82,7 @@ jobs:
|
||||
|
||||
- name: Login to DockerHub
|
||||
if: github.ref == 'refs/heads/main'
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
|
||||
@@ -248,7 +248,7 @@ jobs:
|
||||
uses: ./.github/actions/setup-sqlc
|
||||
|
||||
- name: GHCR Login
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
|
||||
@@ -233,7 +233,7 @@ jobs:
|
||||
cat "$CODER_RELEASE_NOTES_FILE"
|
||||
|
||||
- name: Docker Login
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
@@ -448,7 +448,7 @@ jobs:
|
||||
id: attest_base
|
||||
if: ${{ !inputs.dry_run && steps.image-base-tag.outputs.tag != '' }}
|
||||
continue-on-error: true
|
||||
uses: actions/attest@7667f588f2f73a90cea6c7ac70e78266c4f76616 # v3.1.0
|
||||
uses: actions/attest@e59cbc1ad1ac2d59339667419eb8cdde6eb61e3d # v3.2.0
|
||||
with:
|
||||
subject-name: ${{ steps.image-base-tag.outputs.tag }}
|
||||
predicate-type: "https://slsa.dev/provenance/v1"
|
||||
@@ -564,7 +564,7 @@ jobs:
|
||||
id: attest_main
|
||||
if: ${{ !inputs.dry_run }}
|
||||
continue-on-error: true
|
||||
uses: actions/attest@7667f588f2f73a90cea6c7ac70e78266c4f76616 # v3.1.0
|
||||
uses: actions/attest@e59cbc1ad1ac2d59339667419eb8cdde6eb61e3d # v3.2.0
|
||||
with:
|
||||
subject-name: ${{ steps.build_docker.outputs.multiarch_image }}
|
||||
predicate-type: "https://slsa.dev/provenance/v1"
|
||||
@@ -608,7 +608,7 @@ jobs:
|
||||
id: attest_latest
|
||||
if: ${{ !inputs.dry_run && steps.build_docker.outputs.created_latest_tag == 'true' }}
|
||||
continue-on-error: true
|
||||
uses: actions/attest@7667f588f2f73a90cea6c7ac70e78266c4f76616 # v3.1.0
|
||||
uses: actions/attest@e59cbc1ad1ac2d59339667419eb8cdde6eb61e3d # v3.2.0
|
||||
with:
|
||||
subject-name: ${{ steps.latest_tag.outputs.tag }}
|
||||
predicate-type: "https://slsa.dev/provenance/v1"
|
||||
|
||||
@@ -909,7 +909,10 @@ site/src/api/countriesGenerated.ts: site/node_modules/.installed scripts/typegen
|
||||
(cd site/ && pnpm exec biome format --write src/api/countriesGenerated.ts)
|
||||
touch "$@"
|
||||
|
||||
docs/admin/integrations/prometheus.md: node_modules/.installed scripts/metricsdocgen/main.go scripts/metricsdocgen/metrics
|
||||
scripts/metricsdocgen/generated_metrics: $(GO_SRC_FILES)
|
||||
go run ./scripts/metricsdocgen/scanner > $@
|
||||
|
||||
docs/admin/integrations/prometheus.md: node_modules/.installed scripts/metricsdocgen/main.go scripts/metricsdocgen/metrics scripts/metricsdocgen/generated_metrics
|
||||
go run scripts/metricsdocgen/main.go
|
||||
pnpm exec markdownlint-cli2 --fix ./docs/admin/integrations/prometheus.md
|
||||
pnpm exec markdown-table-formatter ./docs/admin/integrations/prometheus.md
|
||||
|
||||
+25
-21
@@ -3,11 +3,11 @@
|
||||
"enabled": true,
|
||||
"clientKind": "git",
|
||||
"useIgnoreFile": true,
|
||||
"defaultBranch": "main"
|
||||
"defaultBranch": "main",
|
||||
},
|
||||
"files": {
|
||||
"includes": ["**", "!**/pnpm-lock.yaml"],
|
||||
"ignoreUnknown": true
|
||||
"ignoreUnknown": true,
|
||||
},
|
||||
"linter": {
|
||||
"rules": {
|
||||
@@ -15,18 +15,18 @@
|
||||
"noSvgWithoutTitle": "off",
|
||||
"useButtonType": "off",
|
||||
"useSemanticElements": "off",
|
||||
"noStaticElementInteractions": "off"
|
||||
"noStaticElementInteractions": "off",
|
||||
},
|
||||
"correctness": {
|
||||
"noUnusedImports": "warn",
|
||||
"correctness": {
|
||||
"noUnusedImports": "warn",
|
||||
"useUniqueElementIds": "off", // TODO: This is new but we want to fix it
|
||||
"noNestedComponentDefinitions": "off", // TODO: Investigate, since it is used by shadcn components
|
||||
"noUnusedVariables": {
|
||||
"level": "warn",
|
||||
"noUnusedVariables": {
|
||||
"level": "warn",
|
||||
"options": {
|
||||
"ignoreRestSiblings": true
|
||||
}
|
||||
}
|
||||
"ignoreRestSiblings": true,
|
||||
},
|
||||
},
|
||||
},
|
||||
"style": {
|
||||
"noNonNullAssertion": "off",
|
||||
@@ -45,6 +45,10 @@
|
||||
"level": "error",
|
||||
"options": {
|
||||
"paths": {
|
||||
"react": {
|
||||
"message": "React 19 no longer requires forwardRef. Use ref as a prop instead.",
|
||||
"importNames": ["forwardRef"],
|
||||
},
|
||||
// "@mui/material/Alert": "Use components/Alert/Alert instead.",
|
||||
// "@mui/material/AlertTitle": "Use components/Alert/Alert instead.",
|
||||
// "@mui/material/Autocomplete": "Use shadcn/ui Combobox instead.",
|
||||
@@ -111,10 +115,10 @@
|
||||
"@emotion/styled": "Use Tailwind CSS instead.",
|
||||
// "@emotion/cache": "Use Tailwind CSS instead.",
|
||||
// "components/Stack/Stack": "Use Tailwind flex utilities instead (e.g., <div className='flex flex-col gap-4'>).",
|
||||
"lodash": "Use lodash/<name> instead."
|
||||
}
|
||||
}
|
||||
}
|
||||
"lodash": "Use lodash/<name> instead.",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"suspicious": {
|
||||
"noArrayIndexKey": "off",
|
||||
@@ -125,14 +129,14 @@
|
||||
"noConsole": {
|
||||
"level": "error",
|
||||
"options": {
|
||||
"allow": ["error", "info", "warn"]
|
||||
}
|
||||
}
|
||||
"allow": ["error", "info", "warn"],
|
||||
},
|
||||
},
|
||||
},
|
||||
"complexity": {
|
||||
"noImportantStyles": "off" // TODO: check and fix !important styles
|
||||
}
|
||||
}
|
||||
"noImportantStyles": "off", // TODO: check and fix !important styles
|
||||
},
|
||||
},
|
||||
},
|
||||
"$schema": "./node_modules/@biomejs/biome/configuration_schema.json"
|
||||
"$schema": "./node_modules/@biomejs/biome/configuration_schema.json",
|
||||
}
|
||||
|
||||
+15
-4
@@ -884,16 +884,27 @@ func (o *OrganizationContext) Selected(inv *serpent.Invocation, client *codersdk
|
||||
index := slices.IndexFunc(orgs, func(org codersdk.Organization) bool {
|
||||
return org.Name == o.FlagSelect || org.ID.String() == o.FlagSelect
|
||||
})
|
||||
if index >= 0 {
|
||||
return orgs[index], nil
|
||||
}
|
||||
|
||||
if index < 0 {
|
||||
// Not in membership list - try direct fetch.
|
||||
// This allows site-wide admins (e.g., Owners) to use orgs they aren't
|
||||
// members of.
|
||||
org, err := client.OrganizationByName(inv.Context(), o.FlagSelect)
|
||||
if err != nil {
|
||||
var names []string
|
||||
for _, org := range orgs {
|
||||
names = append(names, org.Name)
|
||||
}
|
||||
return codersdk.Organization{}, xerrors.Errorf("organization %q not found, are you sure you are a member of this organization? "+
|
||||
"Valid options for '--org=' are [%s].", o.FlagSelect, strings.Join(names, ", "))
|
||||
var sdkErr *codersdk.Error
|
||||
if errors.As(err, &sdkErr) && sdkErr.StatusCode() == http.StatusNotFound {
|
||||
return codersdk.Organization{}, xerrors.Errorf("organization %q not found, are you sure you are a member of this organization? "+
|
||||
"Valid options for '--org=' are [%s].", o.FlagSelect, strings.Join(names, ", "))
|
||||
}
|
||||
return codersdk.Organization{}, xerrors.Errorf("get organization %q: %w", o.FlagSelect, err)
|
||||
}
|
||||
return orgs[index], nil
|
||||
return org, nil
|
||||
}
|
||||
|
||||
if len(orgs) == 1 {
|
||||
|
||||
+8
-1
@@ -95,6 +95,7 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/webpush"
|
||||
"github.com/coder/coder/v2/coderd/workspaceapps/appurl"
|
||||
"github.com/coder/coder/v2/coderd/workspacestats"
|
||||
"github.com/coder/coder/v2/coderd/wsbuilder"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/drpcsdk"
|
||||
"github.com/coder/coder/v2/cryptorand"
|
||||
@@ -935,6 +936,12 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
|
||||
options.StatsBatcher = batcher
|
||||
defer closeBatcher()
|
||||
|
||||
wsBuilderMetrics, err := wsbuilder.NewMetrics(options.PrometheusRegistry)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to register workspace builder metrics: %w", err)
|
||||
}
|
||||
options.WorkspaceBuilderMetrics = wsBuilderMetrics
|
||||
|
||||
// Manage notifications.
|
||||
var (
|
||||
notificationsCfg = options.DeploymentValues.Notifications
|
||||
@@ -1118,7 +1125,7 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
|
||||
autobuildTicker := time.NewTicker(vals.AutobuildPollInterval.Value())
|
||||
defer autobuildTicker.Stop()
|
||||
autobuildExecutor := autobuild.NewExecutor(
|
||||
ctx, options.Database, options.Pubsub, coderAPI.FileCache, options.PrometheusRegistry, coderAPI.TemplateScheduleStore, &coderAPI.Auditor, coderAPI.AccessControlStore, coderAPI.BuildUsageChecker, logger, autobuildTicker.C, options.NotificationsEnqueuer, coderAPI.Experiments)
|
||||
ctx, options.Database, options.Pubsub, coderAPI.FileCache, options.PrometheusRegistry, coderAPI.TemplateScheduleStore, &coderAPI.Auditor, coderAPI.AccessControlStore, coderAPI.BuildUsageChecker, logger, autobuildTicker.C, options.NotificationsEnqueuer, coderAPI.Experiments, coderAPI.WorkspaceBuilderMetrics)
|
||||
autobuildExecutor.Run()
|
||||
|
||||
jobReaperTicker := time.NewTicker(vals.JobReaperDetectorInterval.Value())
|
||||
|
||||
+24
-19
@@ -2244,6 +2244,7 @@ type runServerOpts struct {
|
||||
waitForSnapshot bool
|
||||
telemetryDisabled bool
|
||||
waitForTelemetryDisabledCheck bool
|
||||
name string
|
||||
}
|
||||
|
||||
func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
|
||||
@@ -2266,25 +2267,23 @@ func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
|
||||
"--cache-dir", cacheDir,
|
||||
"--log-filter", ".*",
|
||||
)
|
||||
finished := make(chan bool, 2)
|
||||
inv.Logger = inv.Logger.Named(opts.name)
|
||||
|
||||
errChan := make(chan error, 1)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
pty := ptytest.New(t).Named(opts.name).Attach(inv)
|
||||
go func() {
|
||||
errChan <- inv.WithContext(ctx).Run()
|
||||
finished <- true
|
||||
// close the pty here so that we can start tearing down resources. This test creates multiple servers with
|
||||
// associated ptys. There is a `t.Cleanup()` that does this, but it waits until the whole test is complete.
|
||||
_ = pty.Close()
|
||||
}()
|
||||
go func() {
|
||||
defer func() {
|
||||
finished <- true
|
||||
}()
|
||||
if opts.waitForSnapshot {
|
||||
pty.ExpectMatchContext(testutil.Context(t, testutil.WaitLong), "submitted snapshot")
|
||||
}
|
||||
if opts.waitForTelemetryDisabledCheck {
|
||||
pty.ExpectMatchContext(testutil.Context(t, testutil.WaitLong), "finished telemetry status check")
|
||||
}
|
||||
}()
|
||||
<-finished
|
||||
|
||||
if opts.waitForSnapshot {
|
||||
pty.ExpectMatchContext(testutil.Context(t, testutil.WaitLong), "submitted snapshot")
|
||||
}
|
||||
if opts.waitForTelemetryDisabledCheck {
|
||||
pty.ExpectMatchContext(testutil.Context(t, testutil.WaitLong), "finished telemetry status check")
|
||||
}
|
||||
return errChan, cancelFunc
|
||||
}
|
||||
waitForShutdown := func(t *testing.T, errChan chan error) error {
|
||||
@@ -2298,7 +2297,9 @@ func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
|
||||
errChan, cancelFunc := runServer(t, runServerOpts{telemetryDisabled: true, waitForTelemetryDisabledCheck: true})
|
||||
errChan, cancelFunc := runServer(t, runServerOpts{
|
||||
telemetryDisabled: true, waitForTelemetryDisabledCheck: true, name: "0disabled",
|
||||
})
|
||||
cancelFunc()
|
||||
require.NoError(t, waitForShutdown(t, errChan))
|
||||
|
||||
@@ -2306,7 +2307,7 @@ func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
|
||||
require.Empty(t, deployment)
|
||||
require.Empty(t, snapshot)
|
||||
|
||||
errChan, cancelFunc = runServer(t, runServerOpts{waitForSnapshot: true})
|
||||
errChan, cancelFunc = runServer(t, runServerOpts{waitForSnapshot: true, name: "1enabled"})
|
||||
cancelFunc()
|
||||
require.NoError(t, waitForShutdown(t, errChan))
|
||||
// we expect to see a deployment and a snapshot twice:
|
||||
@@ -2325,7 +2326,9 @@ func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
errChan, cancelFunc = runServer(t, runServerOpts{telemetryDisabled: true, waitForTelemetryDisabledCheck: true})
|
||||
errChan, cancelFunc = runServer(t, runServerOpts{
|
||||
telemetryDisabled: true, waitForTelemetryDisabledCheck: true, name: "2disabled",
|
||||
})
|
||||
cancelFunc()
|
||||
require.NoError(t, waitForShutdown(t, errChan))
|
||||
|
||||
@@ -2341,7 +2344,9 @@ func TestServer_TelemetryDisabled_FinalReport(t *testing.T) {
|
||||
t.Fatalf("timed out waiting for snapshot")
|
||||
}
|
||||
|
||||
errChan, cancelFunc = runServer(t, runServerOpts{telemetryDisabled: true, waitForTelemetryDisabledCheck: true})
|
||||
errChan, cancelFunc = runServer(t, runServerOpts{
|
||||
telemetryDisabled: true, waitForTelemetryDisabledCheck: true, name: "3disabled",
|
||||
})
|
||||
cancelFunc()
|
||||
require.NoError(t, waitForShutdown(t, errChan))
|
||||
// Since telemetry is disabled and we've already sent a snapshot, we expect no
|
||||
|
||||
-58
@@ -24,7 +24,6 @@ import (
|
||||
"github.com/gofrs/flock"
|
||||
"github.com/google/uuid"
|
||||
"github.com/mattn/go-isatty"
|
||||
"github.com/shirou/gopsutil/v4/process"
|
||||
"github.com/spf13/afero"
|
||||
gossh "golang.org/x/crypto/ssh"
|
||||
gosshagent "golang.org/x/crypto/ssh/agent"
|
||||
@@ -85,9 +84,6 @@ func (r *RootCmd) ssh() *serpent.Command {
|
||||
|
||||
containerName string
|
||||
containerUser string
|
||||
|
||||
// Used in tests to simulate the parent exiting.
|
||||
testForcePPID int64
|
||||
)
|
||||
cmd := &serpent.Command{
|
||||
Annotations: workspaceCommand,
|
||||
@@ -179,24 +175,6 @@ func (r *RootCmd) ssh() *serpent.Command {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
// When running as a ProxyCommand (stdio mode), monitor the parent process
|
||||
// and exit if it dies to avoid leaving orphaned processes. This is
|
||||
// particularly important when editors like VSCode/Cursor spawn SSH
|
||||
// connections and then crash or are killed - we don't want zombie
|
||||
// `coder ssh` processes accumulating.
|
||||
// Note: using gopsutil to check the parent process as this handles
|
||||
// windows processes as well in a standard way.
|
||||
if stdio {
|
||||
ppid := int32(os.Getppid()) // nolint:gosec
|
||||
checkParentInterval := 10 * time.Second // Arbitrary interval to not be too frequent
|
||||
if testForcePPID > 0 {
|
||||
ppid = int32(testForcePPID) // nolint:gosec
|
||||
checkParentInterval = 100 * time.Millisecond // Shorter interval for testing
|
||||
}
|
||||
ctx, cancel = watchParentContext(ctx, quartz.NewReal(), ppid, process.PidExistsWithContext, checkParentInterval)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
// Prevent unnecessary logs from the stdlib from messing up the TTY.
|
||||
// See: https://github.com/coder/coder/issues/13144
|
||||
log.SetOutput(io.Discard)
|
||||
@@ -797,12 +775,6 @@ func (r *RootCmd) ssh() *serpent.Command {
|
||||
Value: serpent.BoolOf(&forceNewTunnel),
|
||||
Hidden: true,
|
||||
},
|
||||
{
|
||||
Flag: "test.force-ppid",
|
||||
Description: "Override the parent process ID to simulate a different parent process. ONLY USE THIS IN TESTS.",
|
||||
Value: serpent.Int64Of(&testForcePPID),
|
||||
Hidden: true,
|
||||
},
|
||||
sshDisableAutostartOption(serpent.BoolOf(&disableAutostart)),
|
||||
}
|
||||
return cmd
|
||||
@@ -1690,33 +1662,3 @@ func normalizeWorkspaceInput(input string) string {
|
||||
return input // Fallback
|
||||
}
|
||||
}
|
||||
|
||||
// watchParentContext returns a context that is canceled when the parent process
|
||||
// dies. It polls using the provided clock and checks if the parent is alive
|
||||
// using the provided pidExists function.
|
||||
func watchParentContext(ctx context.Context, clock quartz.Clock, originalPPID int32, pidExists func(context.Context, int32) (bool, error), interval time.Duration) (context.Context, context.CancelFunc) {
|
||||
ctx, cancel := context.WithCancel(ctx) // intentionally shadowed
|
||||
|
||||
go func() {
|
||||
ticker := clock.NewTicker(interval)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
alive, err := pidExists(ctx, originalPPID)
|
||||
// If we get an error checking the parent process (e.g., permission
|
||||
// denied, the process is in an unknown state), we assume the parent
|
||||
// is still alive to avoid disrupting the SSH connection. We only
|
||||
// cancel when we definitively know the parent is gone (alive=false, err=nil).
|
||||
if !alive && err == nil {
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return ctx, cancel
|
||||
}
|
||||
|
||||
@@ -312,102 +312,6 @@ type fakeCloser struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func TestWatchParentContext(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("CancelsWhenParentDies", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
mClock := quartz.NewMock(t)
|
||||
trap := mClock.Trap().NewTicker()
|
||||
defer trap.Close()
|
||||
|
||||
parentAlive := true
|
||||
childCtx, cancel := watchParentContext(ctx, mClock, 1234, func(context.Context, int32) (bool, error) {
|
||||
return parentAlive, nil
|
||||
}, testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
// Wait for the ticker to be created
|
||||
trap.MustWait(ctx).MustRelease(ctx)
|
||||
|
||||
// When: we simulate parent death and advance the clock
|
||||
parentAlive = false
|
||||
mClock.AdvanceNext()
|
||||
|
||||
// Then: The context should be canceled
|
||||
_ = testutil.TryReceive(ctx, t, childCtx.Done())
|
||||
})
|
||||
|
||||
t.Run("DoesNotCancelWhenParentAlive", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
mClock := quartz.NewMock(t)
|
||||
trap := mClock.Trap().NewTicker()
|
||||
defer trap.Close()
|
||||
|
||||
childCtx, cancel := watchParentContext(ctx, mClock, 1234, func(context.Context, int32) (bool, error) {
|
||||
return true, nil // Parent always alive
|
||||
}, testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
// Wait for the ticker to be created
|
||||
trap.MustWait(ctx).MustRelease(ctx)
|
||||
|
||||
// When: we advance the clock several times with the parent alive
|
||||
for range 3 {
|
||||
mClock.AdvanceNext()
|
||||
}
|
||||
|
||||
// Then: context should not be canceled
|
||||
require.NoError(t, childCtx.Err())
|
||||
})
|
||||
|
||||
t.Run("RespectsParentContext", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx, cancelParent := context.WithCancel(context.Background())
|
||||
mClock := quartz.NewMock(t)
|
||||
|
||||
childCtx, cancel := watchParentContext(ctx, mClock, 1234, func(context.Context, int32) (bool, error) {
|
||||
return true, nil
|
||||
}, testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
// When: we cancel the parent context
|
||||
cancelParent()
|
||||
|
||||
// Then: The context should be canceled
|
||||
require.ErrorIs(t, childCtx.Err(), context.Canceled)
|
||||
})
|
||||
|
||||
t.Run("DoesNotCancelOnError", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
mClock := quartz.NewMock(t)
|
||||
trap := mClock.Trap().NewTicker()
|
||||
defer trap.Close()
|
||||
|
||||
// Simulate an error checking parent status (e.g., permission denied).
|
||||
// We should not cancel the context in this case to avoid disrupting
|
||||
// the SSH connection.
|
||||
childCtx, cancel := watchParentContext(ctx, mClock, 1234, func(context.Context, int32) (bool, error) {
|
||||
return false, xerrors.New("permission denied")
|
||||
}, testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
// Wait for the ticker to be created
|
||||
trap.MustWait(ctx).MustRelease(ctx)
|
||||
|
||||
// When: we advance clock several times
|
||||
for range 3 {
|
||||
mClock.AdvanceNext()
|
||||
}
|
||||
|
||||
// Context should NOT be canceled since we got an error (not a definitive "not alive")
|
||||
require.NoError(t, childCtx.Err(), "context was canceled even though pidExists returned an error")
|
||||
})
|
||||
}
|
||||
|
||||
func (c *fakeCloser) Close() error {
|
||||
*c.closes = append(*c.closes, c)
|
||||
return c.err
|
||||
|
||||
-101
@@ -1122,107 +1122,6 @@ func TestSSH(t *testing.T) {
|
||||
}
|
||||
})
|
||||
|
||||
// This test ensures that the SSH session exits when the parent process dies.
|
||||
t.Run("StdioExitOnParentDeath", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitSuperLong)
|
||||
defer cancel()
|
||||
|
||||
// sleepStart -> agentReady -> sessionStarted -> sleepKill -> sleepDone -> cmdDone
|
||||
sleepStart := make(chan int)
|
||||
agentReady := make(chan struct{})
|
||||
sessionStarted := make(chan struct{})
|
||||
sleepKill := make(chan struct{})
|
||||
sleepDone := make(chan struct{})
|
||||
|
||||
// Start a sleep process which we will pretend is the parent.
|
||||
go func() {
|
||||
sleepCmd := exec.Command("sleep", "infinity")
|
||||
if !assert.NoError(t, sleepCmd.Start(), "failed to start sleep command") {
|
||||
return
|
||||
}
|
||||
sleepStart <- sleepCmd.Process.Pid
|
||||
defer close(sleepDone)
|
||||
<-sleepKill
|
||||
sleepCmd.Process.Kill()
|
||||
_ = sleepCmd.Wait()
|
||||
}()
|
||||
|
||||
client, workspace, agentToken := setupWorkspaceForAgent(t)
|
||||
go func() {
|
||||
defer close(agentReady)
|
||||
_ = agenttest.New(t, client.URL, agentToken)
|
||||
coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).WaitFor(coderdtest.AgentsReady)
|
||||
}()
|
||||
|
||||
clientOutput, clientInput := io.Pipe()
|
||||
serverOutput, serverInput := io.Pipe()
|
||||
defer func() {
|
||||
for _, c := range []io.Closer{clientOutput, clientInput, serverOutput, serverInput} {
|
||||
_ = c.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
// Start a connection to the agent once it's ready
|
||||
go func() {
|
||||
<-agentReady
|
||||
conn, channels, requests, err := ssh.NewClientConn(&testutil.ReaderWriterConn{
|
||||
Reader: serverOutput,
|
||||
Writer: clientInput,
|
||||
}, "", &ssh.ClientConfig{
|
||||
// #nosec
|
||||
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
|
||||
})
|
||||
if !assert.NoError(t, err, "failed to create SSH client connection") {
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
sshClient := ssh.NewClient(conn, channels, requests)
|
||||
defer sshClient.Close()
|
||||
|
||||
session, err := sshClient.NewSession()
|
||||
if !assert.NoError(t, err, "failed to create SSH session") {
|
||||
return
|
||||
}
|
||||
close(sessionStarted)
|
||||
<-sleepDone
|
||||
// Ref: https://github.com/coder/internal/issues/1289
|
||||
// This may return either a nil error or io.EOF.
|
||||
// There is an inherent race here:
|
||||
// 1. Sleep process is killed -> sleepDone is closed.
|
||||
// 2. watchParentContext detects parent death, cancels context,
|
||||
// causing SSH session teardown.
|
||||
// 3. We receive from sleepDone and attempt to call session.Close()
|
||||
// Now either:
|
||||
// a. Session teardown completes before we call Close(), resulting in io.EOF
|
||||
// b. We call Close() first, resulting in a nil error.
|
||||
_ = session.Close()
|
||||
}()
|
||||
|
||||
// Wait for our "parent" process to start
|
||||
sleepPid := testutil.RequireReceive(ctx, t, sleepStart)
|
||||
// Wait for the agent to be ready
|
||||
testutil.SoftTryReceive(ctx, t, agentReady)
|
||||
inv, root := clitest.New(t, "ssh", "--stdio", workspace.Name, "--test.force-ppid", fmt.Sprintf("%d", sleepPid))
|
||||
clitest.SetupConfig(t, client, root)
|
||||
inv.Stdin = clientOutput
|
||||
inv.Stdout = serverInput
|
||||
inv.Stderr = io.Discard
|
||||
|
||||
// Start the command
|
||||
clitest.Start(t, inv.WithContext(ctx))
|
||||
|
||||
// Wait for a session to be established
|
||||
testutil.SoftTryReceive(ctx, t, sessionStarted)
|
||||
// Now kill the fake "parent"
|
||||
close(sleepKill)
|
||||
// The sleep process should exit
|
||||
testutil.SoftTryReceive(ctx, t, sleepDone)
|
||||
// And then the command should exit. This is tracked by clitest.Start.
|
||||
})
|
||||
|
||||
t.Run("ForwardAgent", func(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Test not supported on windows")
|
||||
|
||||
@@ -17,6 +17,7 @@ func (r *RootCmd) tasksCommand() *serpent.Command {
|
||||
r.taskDelete(),
|
||||
r.taskList(),
|
||||
r.taskLogs(),
|
||||
r.taskPause(),
|
||||
r.taskSend(),
|
||||
r.taskStatus(),
|
||||
},
|
||||
|
||||
+5
-10
@@ -41,8 +41,7 @@ func Test_TaskLogs_Golden(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskLogsOK(testMessages))
|
||||
userClient := client // user already has access to their own workspace
|
||||
_, userClient, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskLogsOK(testMessages))
|
||||
|
||||
inv, root := clitest.New(t, "task", "logs", task.Name, "--output", "json")
|
||||
output := clitest.Capture(inv)
|
||||
@@ -65,8 +64,7 @@ func Test_TaskLogs_Golden(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskLogsOK(testMessages))
|
||||
userClient := client
|
||||
_, userClient, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskLogsOK(testMessages))
|
||||
|
||||
inv, root := clitest.New(t, "task", "logs", task.ID.String(), "--output", "json")
|
||||
output := clitest.Capture(inv)
|
||||
@@ -89,8 +87,7 @@ func Test_TaskLogs_Golden(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskLogsOK(testMessages))
|
||||
userClient := client
|
||||
_, userClient, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskLogsOK(testMessages))
|
||||
|
||||
inv, root := clitest.New(t, "task", "logs", task.ID.String())
|
||||
output := clitest.Capture(inv)
|
||||
@@ -144,8 +141,7 @@ func Test_TaskLogs_Golden(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskLogsErr(assert.AnError))
|
||||
userClient := client
|
||||
_, userClient, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskLogsErr(assert.AnError))
|
||||
|
||||
inv, root := clitest.New(t, "task", "logs", task.ID.String())
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
@@ -201,8 +197,7 @@ func Test_TaskLogs_Golden(t *testing.T) {
|
||||
t.Run("SnapshotWithoutLogs_NoSnapshotCaptured", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, task := setupCLITaskTestWithoutSnapshot(t, codersdk.TaskStatusPaused)
|
||||
userClient := client
|
||||
userClient, task := setupCLITaskTestWithoutSnapshot(t, codersdk.TaskStatusPaused)
|
||||
|
||||
inv, root := clitest.New(t, "task", "logs", task.Name)
|
||||
output := clitest.Capture(inv)
|
||||
|
||||
@@ -0,0 +1,90 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/pretty"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
|
||||
func (r *RootCmd) taskPause() *serpent.Command {
|
||||
cmd := &serpent.Command{
|
||||
Use: "pause <task>",
|
||||
Short: "Pause a task",
|
||||
Long: FormatExamples(
|
||||
Example{
|
||||
Description: "Pause a task by name",
|
||||
Command: "coder task pause my-task",
|
||||
},
|
||||
Example{
|
||||
Description: "Pause another user's task",
|
||||
Command: "coder task pause alice/my-task",
|
||||
},
|
||||
Example{
|
||||
Description: "Pause a task without confirmation",
|
||||
Command: "coder task pause my-task --yes",
|
||||
},
|
||||
),
|
||||
Middleware: serpent.Chain(
|
||||
serpent.RequireNArgs(1),
|
||||
),
|
||||
Options: serpent.OptionSet{
|
||||
cliui.SkipPromptOption(),
|
||||
},
|
||||
Handler: func(inv *serpent.Invocation) error {
|
||||
ctx := inv.Context()
|
||||
client, err := r.InitClient(inv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
task, err := client.TaskByIdentifier(ctx, inv.Args[0])
|
||||
if err != nil {
|
||||
return xerrors.Errorf("resolve task %q: %w", inv.Args[0], err)
|
||||
}
|
||||
|
||||
display := fmt.Sprintf("%s/%s", task.OwnerName, task.Name)
|
||||
|
||||
if task.Status == codersdk.TaskStatusPaused {
|
||||
return xerrors.Errorf("task %q is already paused", display)
|
||||
}
|
||||
|
||||
_, err = cliui.Prompt(inv, cliui.PromptOptions{
|
||||
Text: fmt.Sprintf("Pause task %s?", pretty.Sprint(cliui.DefaultStyles.Code, display)),
|
||||
IsConfirm: true,
|
||||
Default: cliui.ConfirmNo,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := client.PauseTask(ctx, task.OwnerName, task.ID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("pause task %q: %w", display, err)
|
||||
}
|
||||
|
||||
if resp.WorkspaceBuild == nil {
|
||||
return xerrors.Errorf("pause task %q: no workspace build returned", display)
|
||||
}
|
||||
|
||||
err = cliui.WorkspaceBuild(ctx, inv.Stdout, client, resp.WorkspaceBuild.ID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("watch pause build for task %q: %w", display, err)
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintf(
|
||||
inv.Stdout,
|
||||
"\nThe %s task has been paused at %s!\n",
|
||||
cliui.Keyword(task.Name),
|
||||
cliui.Timestamp(time.Now()),
|
||||
)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
@@ -0,0 +1,144 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
func TestExpTaskPause(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("WithYesFlag", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Given: A running task
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
_, userClient, task := setupCLITaskTest(setupCtx, t, nil)
|
||||
|
||||
// When: We attempt to pause the task
|
||||
inv, root := clitest.New(t, "task", "pause", task.Name, "--yes")
|
||||
output := clitest.Capture(inv)
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
// Then: Expect the task to be paused
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, output.Stdout(), "has been paused")
|
||||
|
||||
updated, err := userClient.TaskByIdentifier(ctx, task.Name)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, codersdk.TaskStatusPaused, updated.Status)
|
||||
})
|
||||
|
||||
// OtherUserTask verifies that an admin can pause a task owned by
|
||||
// another user using the "owner/name" identifier format.
|
||||
t.Run("OtherUserTask", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Given: A different user's running task
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
adminClient, _, task := setupCLITaskTest(setupCtx, t, nil)
|
||||
|
||||
// When: We attempt to pause their task
|
||||
identifier := fmt.Sprintf("%s/%s", task.OwnerName, task.Name)
|
||||
inv, root := clitest.New(t, "task", "pause", identifier, "--yes")
|
||||
output := clitest.Capture(inv)
|
||||
clitest.SetupConfig(t, adminClient, root)
|
||||
|
||||
// Then: We expect the task to be paused
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, output.Stdout(), "has been paused")
|
||||
|
||||
updated, err := adminClient.TaskByIdentifier(ctx, identifier)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, codersdk.TaskStatusPaused, updated.Status)
|
||||
})
|
||||
|
||||
t.Run("PromptConfirm", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Given: A running task
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
_, userClient, task := setupCLITaskTest(setupCtx, t, nil)
|
||||
|
||||
// When: We attempt to pause the task
|
||||
inv, root := clitest.New(t, "task", "pause", task.Name)
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
// And: We confirm we want to pause the task
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
inv = inv.WithContext(ctx)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
w := clitest.StartWithWaiter(t, inv)
|
||||
pty.ExpectMatchContext(ctx, "Pause task")
|
||||
pty.WriteLine("yes")
|
||||
|
||||
// Then: We expect the task to be paused
|
||||
pty.ExpectMatchContext(ctx, "has been paused")
|
||||
require.NoError(t, w.Wait())
|
||||
|
||||
updated, err := userClient.TaskByIdentifier(ctx, task.Name)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, codersdk.TaskStatusPaused, updated.Status)
|
||||
})
|
||||
|
||||
t.Run("PromptDecline", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Given: A running task
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
_, userClient, task := setupCLITaskTest(setupCtx, t, nil)
|
||||
|
||||
// When: We attempt to pause the task
|
||||
inv, root := clitest.New(t, "task", "pause", task.Name)
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
// But: We say no at the confirmation screen
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
inv = inv.WithContext(ctx)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
w := clitest.StartWithWaiter(t, inv)
|
||||
pty.ExpectMatchContext(ctx, "Pause task")
|
||||
pty.WriteLine("no")
|
||||
require.Error(t, w.Wait())
|
||||
|
||||
// Then: We expect the task to not be paused
|
||||
updated, err := userClient.TaskByIdentifier(ctx, task.Name)
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, codersdk.TaskStatusPaused, updated.Status)
|
||||
})
|
||||
|
||||
t.Run("TaskAlreadyPaused", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Given: A running task
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
_, userClient, task := setupCLITaskTest(setupCtx, t, nil)
|
||||
|
||||
// And: We paused the running task
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
resp, err := userClient.PauseTask(ctx, task.OwnerName, task.ID)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp.WorkspaceBuild)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, userClient, resp.WorkspaceBuild.ID)
|
||||
|
||||
// When: We attempt to pause the task again
|
||||
inv, root := clitest.New(t, "task", "pause", task.Name, "--yes")
|
||||
clitest.SetupConfig(t, userClient, root)
|
||||
|
||||
// Then: We expect to get an error that the task is already paused
|
||||
err = inv.WithContext(ctx).Run()
|
||||
require.ErrorContains(t, err, "is already paused")
|
||||
})
|
||||
}
|
||||
@@ -25,8 +25,7 @@ func Test_TaskSend(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
|
||||
userClient := client
|
||||
_, userClient, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
|
||||
|
||||
var stdout strings.Builder
|
||||
inv, root := clitest.New(t, "task", "send", task.Name, "carry on with the task")
|
||||
@@ -42,8 +41,7 @@ func Test_TaskSend(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
|
||||
userClient := client
|
||||
_, userClient, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
|
||||
|
||||
var stdout strings.Builder
|
||||
inv, root := clitest.New(t, "task", "send", task.ID.String(), "carry on with the task")
|
||||
@@ -59,8 +57,7 @@ func Test_TaskSend(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
client, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
|
||||
userClient := client
|
||||
_, userClient, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskSendOK(t, "carry on with the task", "you got it"))
|
||||
|
||||
var stdout strings.Builder
|
||||
inv, root := clitest.New(t, "task", "send", task.Name, "--stdin")
|
||||
@@ -113,7 +110,7 @@ func Test_TaskSend(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
setupCtx := testutil.Context(t, testutil.WaitLong)
|
||||
userClient, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskSendErr(t, assert.AnError))
|
||||
_, userClient, task := setupCLITaskTest(setupCtx, t, fakeAgentAPITaskSendErr(t, assert.AnError))
|
||||
|
||||
var stdout strings.Builder
|
||||
inv, root := clitest.New(t, "task", "send", task.Name, "some task input")
|
||||
|
||||
+27
-10
@@ -120,6 +120,23 @@ func Test_Tasks(t *testing.T) {
|
||||
require.Equal(t, logs[2].Type, codersdk.TaskLogTypeOutput, "third message should be an output")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "pause task",
|
||||
cmdArgs: []string{"task", "pause", taskName, "--yes"},
|
||||
assertFn: func(stdout string, userClient *codersdk.Client) {
|
||||
require.Contains(t, stdout, "has been paused", "pause output should confirm task was paused")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "get task status after pause",
|
||||
cmdArgs: []string{"task", "status", taskName, "--output", "json"},
|
||||
assertFn: func(stdout string, userClient *codersdk.Client) {
|
||||
var task codersdk.Task
|
||||
require.NoError(t, json.NewDecoder(strings.NewReader(stdout)).Decode(&task), "should unmarshal task status")
|
||||
require.Equal(t, taskName, task.Name, "task name should match")
|
||||
require.Equal(t, codersdk.TaskStatusPaused, task.Status, "task should be paused")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "delete task",
|
||||
cmdArgs: []string{"task", "delete", taskName, "--yes"},
|
||||
@@ -238,17 +255,17 @@ func fakeAgentAPIEcho(ctx context.Context, t testing.TB, initMsg agentapisdk.Mes
|
||||
// setupCLITaskTest creates a test workspace with an AI task template and agent,
|
||||
// with a fake agent API configured with the provided set of handlers.
|
||||
// Returns the user client and workspace.
|
||||
func setupCLITaskTest(ctx context.Context, t *testing.T, agentAPIHandlers map[string]http.HandlerFunc) (*codersdk.Client, codersdk.Task) {
|
||||
func setupCLITaskTest(ctx context.Context, t *testing.T, agentAPIHandlers map[string]http.HandlerFunc) (ownerClient *codersdk.Client, memberClient *codersdk.Client, task codersdk.Task) {
|
||||
t.Helper()
|
||||
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
userClient, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
ownerClient = coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
owner := coderdtest.CreateFirstUser(t, ownerClient)
|
||||
userClient, _ := coderdtest.CreateAnotherUser(t, ownerClient, owner.OrganizationID)
|
||||
|
||||
fakeAPI := startFakeAgentAPI(t, agentAPIHandlers)
|
||||
|
||||
authToken := uuid.NewString()
|
||||
template := createAITaskTemplate(t, client, owner.OrganizationID, withSidebarURL(fakeAPI.URL()), withAgentToken(authToken))
|
||||
template := createAITaskTemplate(t, ownerClient, owner.OrganizationID, withSidebarURL(fakeAPI.URL()), withAgentToken(authToken))
|
||||
|
||||
wantPrompt := "test prompt"
|
||||
task, err := userClient.CreateTask(ctx, codersdk.Me, codersdk.CreateTaskRequest{
|
||||
@@ -262,17 +279,17 @@ func setupCLITaskTest(ctx context.Context, t *testing.T, agentAPIHandlers map[st
|
||||
require.True(t, task.WorkspaceID.Valid, "task should have a workspace ID")
|
||||
workspace, err := userClient.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, userClient, workspace.LatestBuild.ID)
|
||||
|
||||
agentClient := agentsdk.New(client.URL, agentsdk.WithFixedToken(authToken))
|
||||
_ = agenttest.New(t, client.URL, authToken, func(o *agent.Options) {
|
||||
agentClient := agentsdk.New(userClient.URL, agentsdk.WithFixedToken(authToken))
|
||||
_ = agenttest.New(t, userClient.URL, authToken, func(o *agent.Options) {
|
||||
o.Client = agentClient
|
||||
})
|
||||
|
||||
coderdtest.NewWorkspaceAgentWaiter(t, client, workspace.ID).
|
||||
coderdtest.NewWorkspaceAgentWaiter(t, userClient, workspace.ID).
|
||||
WaitFor(coderdtest.AgentsReady)
|
||||
|
||||
return userClient, task
|
||||
return ownerClient, userClient, task
|
||||
}
|
||||
|
||||
// setupCLITaskTestWithSnapshot creates a task in the specified status with a log snapshot.
|
||||
|
||||
+1
@@ -12,6 +12,7 @@ SUBCOMMANDS:
|
||||
delete Delete tasks
|
||||
list List tasks
|
||||
logs Show a task's logs
|
||||
pause Pause a task
|
||||
send Send input to a task
|
||||
status Show the status of a task.
|
||||
|
||||
|
||||
+25
@@ -0,0 +1,25 @@
|
||||
coder v0.0.0-devel
|
||||
|
||||
USAGE:
|
||||
coder task pause [flags] <task>
|
||||
|
||||
Pause a task
|
||||
|
||||
- Pause a task by name:
|
||||
|
||||
$ coder task pause my-task
|
||||
|
||||
- Pause another user's task:
|
||||
|
||||
$ coder task pause alice/my-task
|
||||
|
||||
- Pause a task without confirmation:
|
||||
|
||||
$ coder task pause my-task --yes
|
||||
|
||||
OPTIONS:
|
||||
-y, --yes bool
|
||||
Bypass confirmation prompts.
|
||||
|
||||
———
|
||||
Run `coder --help` for a list of global options.
|
||||
@@ -1,24 +0,0 @@
|
||||
//go:build !windows && !darwin
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
|
||||
func (*RootCmd) vpnDaemonRun() *serpent.Command {
|
||||
cmd := &serpent.Command{
|
||||
Use: "run",
|
||||
Short: "Run the VPN daemon on Windows.",
|
||||
Middleware: serpent.Chain(
|
||||
serpent.RequireNArgs(0),
|
||||
),
|
||||
Handler: func(_ *serpent.Invocation) error {
|
||||
return xerrors.New("vpn-daemon subcommand is not supported on this platform")
|
||||
},
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
//go:build windows
|
||||
//go:build windows || linux
|
||||
|
||||
package cli
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
|
||||
func (r *RootCmd) vpnDaemonRun() *serpent.Command {
|
||||
func (*RootCmd) vpnDaemonRun() *serpent.Command {
|
||||
var (
|
||||
rpcReadHandleInt int64
|
||||
rpcWriteHandleInt int64
|
||||
@@ -19,7 +19,7 @@ func (r *RootCmd) vpnDaemonRun() *serpent.Command {
|
||||
|
||||
cmd := &serpent.Command{
|
||||
Use: "run",
|
||||
Short: "Run the VPN daemon on Windows.",
|
||||
Short: "Run the VPN daemon on Windows and Linux.",
|
||||
Middleware: serpent.Chain(
|
||||
serpent.RequireNArgs(0),
|
||||
),
|
||||
@@ -53,8 +53,8 @@ func (r *RootCmd) vpnDaemonRun() *serpent.Command {
|
||||
return xerrors.Errorf("rpc-read-handle (%v) and rpc-write-handle (%v) must be different", rpcReadHandleInt, rpcWriteHandleInt)
|
||||
}
|
||||
|
||||
// We don't need to worry about duplicating the handles on Windows,
|
||||
// which is different from Unix.
|
||||
// The manager passes the read and write descriptors directly to the
|
||||
// daemon, so we can open the RPC pipe from the raw values.
|
||||
logger.Info(ctx, "opening bidirectional RPC pipe", slog.F("rpc_read_handle", rpcReadHandleInt), slog.F("rpc_write_handle", rpcWriteHandleInt))
|
||||
pipe, err := vpn.NewBidirectionalPipe(uintptr(rpcReadHandleInt), uintptr(rpcWriteHandleInt))
|
||||
if err != nil {
|
||||
@@ -62,7 +62,7 @@ func (r *RootCmd) vpnDaemonRun() *serpent.Command {
|
||||
}
|
||||
defer pipe.Close()
|
||||
|
||||
logger.Info(ctx, "starting tunnel")
|
||||
logger.Info(ctx, "starting VPN tunnel")
|
||||
tunnel, err := vpn.NewTunnel(ctx, logger, pipe, vpn.NewClient(), vpn.UseOSNetworkingStack())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create new tunnel for client: %w", err)
|
||||
@@ -1,4 +1,4 @@
|
||||
//go:build windows
|
||||
//go:build windows || linux
|
||||
|
||||
package cli_test
|
||||
|
||||
@@ -67,22 +67,35 @@ func TestVPNDaemonRun(t *testing.T) {
|
||||
|
||||
r1, w1, err := os.Pipe()
|
||||
require.NoError(t, err)
|
||||
defer r1.Close()
|
||||
defer w1.Close()
|
||||
|
||||
r2, w2, err := os.Pipe()
|
||||
require.NoError(t, err)
|
||||
defer r2.Close()
|
||||
defer w2.Close()
|
||||
|
||||
// The daemon closes the handles passed via NewBidirectionalPipe. Since our
|
||||
// CLI tests run in-process, pass duplicated handles so we can close the
|
||||
// originals without risking a double-close on FD reuse.
|
||||
rpcReadHandle := dupHandle(t, r1)
|
||||
rpcWriteHandle := dupHandle(t, w2)
|
||||
require.NoError(t, r1.Close())
|
||||
require.NoError(t, w2.Close())
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
inv, _ := clitest.New(t, "vpn-daemon", "run", "--rpc-read-handle", fmt.Sprint(r1.Fd()), "--rpc-write-handle", fmt.Sprint(w2.Fd()))
|
||||
inv, _ := clitest.New(t,
|
||||
"vpn-daemon",
|
||||
"run",
|
||||
"--rpc-read-handle",
|
||||
fmt.Sprint(rpcReadHandle),
|
||||
"--rpc-write-handle",
|
||||
fmt.Sprint(rpcWriteHandle),
|
||||
)
|
||||
waiter := clitest.StartWithWaiter(t, inv.WithContext(ctx))
|
||||
|
||||
// Send garbage which should cause the handshake to fail and the daemon
|
||||
// to exit.
|
||||
_, err = w1.Write([]byte("garbage"))
|
||||
// Send an invalid header, including a newline delimiter, so the handshake
|
||||
// fails without requiring context cancellation.
|
||||
_, err = w1.Write([]byte("garbage\n"))
|
||||
require.NoError(t, err)
|
||||
waiter.Cancel()
|
||||
err = waiter.Wait()
|
||||
require.ErrorContains(t, err, "handshake failed")
|
||||
})
|
||||
@@ -0,0 +1,19 @@
|
||||
//go:build linux
|
||||
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func dupHandle(t *testing.T, f *os.File) uintptr {
|
||||
t.Helper()
|
||||
|
||||
dupFD, err := unix.Dup(int(f.Fd()))
|
||||
require.NoError(t, err)
|
||||
return uintptr(dupFD)
|
||||
}
|
||||
@@ -0,0 +1,33 @@
|
||||
//go:build windows
|
||||
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func dupHandle(t *testing.T, f *os.File) uintptr {
|
||||
t.Helper()
|
||||
|
||||
src := syscall.Handle(f.Fd())
|
||||
var dup syscall.Handle
|
||||
|
||||
proc, err := syscall.GetCurrentProcess()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = syscall.DuplicateHandle(
|
||||
proc,
|
||||
src,
|
||||
proc,
|
||||
&dup,
|
||||
0,
|
||||
false,
|
||||
syscall.DUPLICATE_SAME_ACCESS,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
return uintptr(dup)
|
||||
}
|
||||
@@ -1244,3 +1244,150 @@ func (api *API) postWorkspaceAgentTaskLogSnapshot(rw http.ResponseWriter, r *htt
|
||||
|
||||
rw.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
// @Summary Pause task
|
||||
// @ID pause-task
|
||||
// @Security CoderSessionToken
|
||||
// @Accept json
|
||||
// @Tags Tasks
|
||||
// @Param user path string true "Username, user ID, or 'me' for the authenticated user"
|
||||
// @Param task path string true "Task ID" format(uuid)
|
||||
// @Success 202 {object} codersdk.PauseTaskResponse
|
||||
// @Router /tasks/{user}/{task}/pause [post]
|
||||
func (api *API) pauseTask(rw http.ResponseWriter, r *http.Request) {
|
||||
var (
|
||||
ctx = r.Context()
|
||||
apiKey = httpmw.APIKey(r)
|
||||
task = httpmw.TaskParam(r)
|
||||
)
|
||||
|
||||
if !task.WorkspaceID.Valid {
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Task does not have a workspace.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
workspace, err := api.Database.GetWorkspaceByID(ctx, task.WorkspaceID.UUID)
|
||||
if err != nil {
|
||||
if httpapi.Is404Error(err) {
|
||||
httpapi.ResourceNotFound(rw)
|
||||
return
|
||||
}
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error fetching task workspace.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
buildReq := codersdk.CreateWorkspaceBuildRequest{
|
||||
Transition: codersdk.WorkspaceTransitionStop,
|
||||
Reason: codersdk.CreateWorkspaceBuildReasonTaskManualPause,
|
||||
}
|
||||
build, err := api.postWorkspaceBuildsInternal(
|
||||
ctx,
|
||||
apiKey,
|
||||
workspace,
|
||||
buildReq,
|
||||
func(action policy.Action, object rbac.Objecter) bool {
|
||||
return api.Authorize(r, action, object)
|
||||
},
|
||||
audit.WorkspaceBuildBaggageFromRequest(r),
|
||||
)
|
||||
if err != nil {
|
||||
httperror.WriteWorkspaceBuildError(ctx, rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
httpapi.Write(ctx, rw, http.StatusAccepted, codersdk.PauseTaskResponse{
|
||||
WorkspaceBuild: &build,
|
||||
})
|
||||
}
|
||||
|
||||
// @Summary Resume task
|
||||
// @ID resume-task
|
||||
// @Security CoderSessionToken
|
||||
// @Accept json
|
||||
// @Tags Tasks
|
||||
// @Param user path string true "Username, user ID, or 'me' for the authenticated user"
|
||||
// @Param task path string true "Task ID" format(uuid)
|
||||
// @Success 202 {object} codersdk.ResumeTaskResponse
|
||||
// @Router /tasks/{user}/{task}/resume [post]
|
||||
func (api *API) resumeTask(rw http.ResponseWriter, r *http.Request) {
|
||||
var (
|
||||
ctx = r.Context()
|
||||
apiKey = httpmw.APIKey(r)
|
||||
task = httpmw.TaskParam(r)
|
||||
)
|
||||
|
||||
if !task.WorkspaceID.Valid {
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Task does not have a workspace.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
workspace, err := api.Database.GetWorkspaceByID(ctx, task.WorkspaceID.UUID)
|
||||
if err != nil {
|
||||
if httpapi.Is404Error(err) {
|
||||
httpapi.ResourceNotFound(rw)
|
||||
return
|
||||
}
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error fetching task workspace.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
latestBuild, err := api.Database.GetLatestWorkspaceBuildByWorkspaceID(ctx, workspace.ID)
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error fetching task workspace build.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
job, err := api.Database.GetProvisionerJobByID(ctx, latestBuild.JobID)
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error fetching task workspace build job.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
workspaceStatus := codersdk.ConvertWorkspaceStatus(
|
||||
codersdk.ProvisionerJobStatus(job.JobStatus),
|
||||
codersdk.WorkspaceTransition(latestBuild.Transition),
|
||||
)
|
||||
if workspaceStatus == codersdk.WorkspaceStatusRunning {
|
||||
httpapi.Write(ctx, rw, http.StatusConflict, codersdk.Response{
|
||||
Message: "Task workspace is already running.",
|
||||
Detail: fmt.Sprintf("Workspace status is %q.", workspaceStatus),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
buildReq := codersdk.CreateWorkspaceBuildRequest{
|
||||
Transition: codersdk.WorkspaceTransitionStart,
|
||||
Reason: codersdk.CreateWorkspaceBuildReasonTaskResume,
|
||||
}
|
||||
build, err := api.postWorkspaceBuildsInternal(
|
||||
ctx,
|
||||
apiKey,
|
||||
workspace,
|
||||
buildReq,
|
||||
func(action policy.Action, object rbac.Objecter) bool {
|
||||
return api.Authorize(r, action, object)
|
||||
},
|
||||
audit.WorkspaceBuildBaggageFromRequest(r),
|
||||
)
|
||||
if err != nil {
|
||||
httperror.WriteWorkspaceBuildError(ctx, rw, err)
|
||||
return
|
||||
}
|
||||
httpapi.Write(ctx, rw, http.StatusAccepted, codersdk.ResumeTaskResponse{
|
||||
WorkspaceBuild: &build,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
agentapisdk "github.com/coder/agentapi-sdk-go"
|
||||
"github.com/coder/coder/v2/agent"
|
||||
@@ -26,11 +27,14 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtestutil"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/coderd/database/pubsub"
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
"github.com/coder/coder/v2/coderd/notifications"
|
||||
"github.com/coder/coder/v2/coderd/notifications/notificationstest"
|
||||
"github.com/coder/coder/v2/coderd/rbac"
|
||||
"github.com/coder/coder/v2/coderd/rbac/policy"
|
||||
"github.com/coder/coder/v2/coderd/util/slice"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
@@ -100,6 +104,36 @@ func createTaskInState(db database.Store, ownerSubject rbac.Subject, ownerOrgID,
|
||||
}
|
||||
}
|
||||
|
||||
type aiTaskStoreWrapper struct {
|
||||
database.Store
|
||||
getWorkspaceByID func(ctx context.Context, id uuid.UUID) (database.Workspace, error)
|
||||
insertWorkspaceBuild func(ctx context.Context, arg database.InsertWorkspaceBuildParams) error
|
||||
}
|
||||
|
||||
func (s aiTaskStoreWrapper) GetWorkspaceByID(ctx context.Context, id uuid.UUID) (database.Workspace, error) {
|
||||
if s.getWorkspaceByID != nil {
|
||||
return s.getWorkspaceByID(ctx, id)
|
||||
}
|
||||
return s.Store.GetWorkspaceByID(ctx, id)
|
||||
}
|
||||
|
||||
func (s aiTaskStoreWrapper) InsertWorkspaceBuild(ctx context.Context, arg database.InsertWorkspaceBuildParams) error {
|
||||
if s.insertWorkspaceBuild != nil {
|
||||
return s.insertWorkspaceBuild(ctx, arg)
|
||||
}
|
||||
return s.Store.InsertWorkspaceBuild(ctx, arg)
|
||||
}
|
||||
|
||||
func (s aiTaskStoreWrapper) InTx(fn func(database.Store) error, opts *database.TxOptions) error {
|
||||
return s.Store.InTx(func(tx database.Store) error {
|
||||
return fn(aiTaskStoreWrapper{
|
||||
Store: tx,
|
||||
getWorkspaceByID: s.getWorkspaceByID,
|
||||
insertWorkspaceBuild: s.insertWorkspaceBuild,
|
||||
})
|
||||
}, opts)
|
||||
}
|
||||
|
||||
func TestTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -2422,3 +2456,664 @@ func TestPostWorkspaceAgentTaskSnapshot(t *testing.T) {
|
||||
require.Equal(t, http.StatusUnauthorized, res.StatusCode)
|
||||
})
|
||||
}
|
||||
|
||||
func TestPauseTask(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
setupClient := func(t *testing.T, db database.Store, ps pubsub.Pubsub, authorizer rbac.Authorizer) *codersdk.Client {
|
||||
t.Helper()
|
||||
client, _, _ := coderdtest.NewWithAPI(t, &coderdtest.Options{
|
||||
Database: db,
|
||||
Pubsub: ps,
|
||||
Authorizer: authorizer,
|
||||
})
|
||||
return client
|
||||
}
|
||||
|
||||
setupWorkspaceTask := func(t *testing.T, db database.Store, user codersdk.CreateFirstUserResponse) (database.Task, uuid.UUID) {
|
||||
t.Helper()
|
||||
workspaceBuild := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).WithTask(database.TaskTable{
|
||||
Prompt: "pause me",
|
||||
}, nil).Do()
|
||||
return workspaceBuild.Task, workspaceBuild.Workspace.ID
|
||||
}
|
||||
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
ProvisionGraph: []*proto.Response{
|
||||
{Type: &proto.Response_Graph{Graph: &proto.GraphComplete{
|
||||
HasAiTasks: true,
|
||||
}}},
|
||||
},
|
||||
})
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
|
||||
task, err := client.CreateTask(ctx, codersdk.Me, codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: "pause me",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.True(t, task.WorkspaceID.Valid)
|
||||
|
||||
workspace, err := client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
resp, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
|
||||
// Verify that the request was accepted correctly:
|
||||
require.NoError(t, err)
|
||||
build := *resp.WorkspaceBuild
|
||||
require.Equal(t, codersdk.WorkspaceTransitionStop, build.Transition)
|
||||
require.Equal(t, task.WorkspaceID.UUID, build.WorkspaceID)
|
||||
require.Equal(t, workspace.LatestBuild.BuildNumber+1, build.BuildNumber)
|
||||
require.Equal(t, string(codersdk.CreateWorkspaceBuildReasonTaskManualPause), string(build.Reason))
|
||||
|
||||
// Verify that the accepted request was processed correctly:
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, build.ID)
|
||||
workspace, err = client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, codersdk.WorkspaceStatusStopped, workspace.LatestBuild.Status)
|
||||
})
|
||||
|
||||
t.Run("Non-owner role access", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
client := setupClient(t, db, ps, nil)
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
roles []rbac.RoleIdentifier
|
||||
expectedStatus int
|
||||
}{
|
||||
{
|
||||
name: "org_member",
|
||||
expectedStatus: http.StatusNotFound,
|
||||
},
|
||||
{
|
||||
name: "org_admin",
|
||||
roles: []rbac.RoleIdentifier{rbac.ScopedRoleOrgAdmin(owner.OrganizationID)},
|
||||
expectedStatus: http.StatusAccepted,
|
||||
},
|
||||
{
|
||||
name: "sitewide_member",
|
||||
roles: []rbac.RoleIdentifier{rbac.RoleMember()},
|
||||
expectedStatus: http.StatusNotFound,
|
||||
},
|
||||
{
|
||||
name: "sitewide_admin",
|
||||
roles: []rbac.RoleIdentifier{rbac.RoleOwner()},
|
||||
expectedStatus: http.StatusAccepted,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
task, _ := setupWorkspaceTask(t, db, owner)
|
||||
userClient, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID, tc.roles...)
|
||||
|
||||
resp, err := userClient.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
if tc.expectedStatus == http.StatusAccepted {
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp.WorkspaceBuild)
|
||||
require.NotEqual(t, uuid.Nil, resp.WorkspaceBuild.ID)
|
||||
return
|
||||
}
|
||||
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, tc.expectedStatus, apiErr.StatusCode())
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Task not found", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
_ = coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
_, err := client.PauseTask(ctx, codersdk.Me, uuid.New())
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusNotFound, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Task lookup forbidden", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
auth := &coderdtest.FakeAuthorizer{
|
||||
ConditionalReturn: func(_ context.Context, _ rbac.Subject, action policy.Action, object rbac.Object) error {
|
||||
if action == policy.ActionRead && object.Type == rbac.ResourceTask.Type {
|
||||
return rbac.UnauthorizedError{}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
client := setupClient(t, db, ps, auth)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
task, _ := setupWorkspaceTask(t, db, user)
|
||||
|
||||
_, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusNotFound, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Workspace lookup forbidden", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
auth := &coderdtest.FakeAuthorizer{
|
||||
ConditionalReturn: func(_ context.Context, _ rbac.Subject, action policy.Action, object rbac.Object) error {
|
||||
if action == policy.ActionRead && object.Type == rbac.ResourceWorkspace.Type {
|
||||
return rbac.UnauthorizedError{}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
client := setupClient(t, db, ps, auth)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
task, _ := setupWorkspaceTask(t, db, user)
|
||||
|
||||
_, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusNotFound, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("No Workspace for Task", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
client := setupClient(t, db, ps, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
workspaceBuild := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).Do()
|
||||
task := dbgen.Task(t, db, database.TaskTable{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
TemplateVersionID: workspaceBuild.Build.TemplateVersionID,
|
||||
Prompt: "no workspace",
|
||||
})
|
||||
|
||||
_, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusInternalServerError, apiErr.StatusCode())
|
||||
require.Equal(t, "Task does not have a workspace.", apiErr.Message)
|
||||
})
|
||||
|
||||
t.Run("Workspace not found", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
var workspaceID uuid.UUID
|
||||
wrapped := aiTaskStoreWrapper{
|
||||
Store: db,
|
||||
getWorkspaceByID: func(ctx context.Context, id uuid.UUID) (database.Workspace, error) {
|
||||
if id == workspaceID && id != uuid.Nil {
|
||||
return database.Workspace{}, sql.ErrNoRows
|
||||
}
|
||||
return db.GetWorkspaceByID(ctx, id)
|
||||
},
|
||||
}
|
||||
client := setupClient(t, wrapped, ps, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
task, workspaceIDValue := setupWorkspaceTask(t, db, user)
|
||||
workspaceID = workspaceIDValue
|
||||
|
||||
_, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusNotFound, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Workspace lookup internal error", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
var workspaceID uuid.UUID
|
||||
wrapped := aiTaskStoreWrapper{
|
||||
Store: db,
|
||||
getWorkspaceByID: func(ctx context.Context, id uuid.UUID) (database.Workspace, error) {
|
||||
if id == workspaceID && id != uuid.Nil {
|
||||
return database.Workspace{}, xerrors.New("boom")
|
||||
}
|
||||
return db.GetWorkspaceByID(ctx, id)
|
||||
},
|
||||
}
|
||||
client := setupClient(t, wrapped, ps, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
task, workspaceIDValue := setupWorkspaceTask(t, db, user)
|
||||
workspaceID = workspaceIDValue
|
||||
|
||||
_, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusInternalServerError, apiErr.StatusCode())
|
||||
require.Equal(t, "Internal error fetching task workspace.", apiErr.Message)
|
||||
})
|
||||
|
||||
t.Run("Build Forbidden", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
auth := &coderdtest.FakeAuthorizer{
|
||||
ConditionalReturn: func(_ context.Context, _ rbac.Subject, action policy.Action, object rbac.Object) error {
|
||||
if action == policy.ActionWorkspaceStop && object.Type == rbac.ResourceWorkspace.Type {
|
||||
return rbac.UnauthorizedError{}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
client := setupClient(t, db, ps, auth)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
task, _ := setupWorkspaceTask(t, db, user)
|
||||
|
||||
_, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusForbidden, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Job already in progress", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
client := setupClient(t, db, ps, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
workspaceBuild := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).
|
||||
WithTask(database.TaskTable{
|
||||
Prompt: "pause me",
|
||||
}, nil).
|
||||
Starting().
|
||||
Do()
|
||||
|
||||
_, err := client.PauseTask(ctx, codersdk.Me, workspaceBuild.Task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusConflict, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Build Internal Error", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
wrapped := aiTaskStoreWrapper{
|
||||
Store: db,
|
||||
insertWorkspaceBuild: func(ctx context.Context, arg database.InsertWorkspaceBuildParams) error {
|
||||
return xerrors.New("insert failed")
|
||||
},
|
||||
}
|
||||
client := setupClient(t, wrapped, ps, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
task, _ := setupWorkspaceTask(t, db, user)
|
||||
|
||||
_, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusInternalServerError, apiErr.StatusCode())
|
||||
})
|
||||
}
|
||||
|
||||
func TestResumeTask(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
setupClient := func(t *testing.T, db database.Store, ps pubsub.Pubsub, authorizer rbac.Authorizer) *codersdk.Client {
|
||||
t.Helper()
|
||||
client, _, _ := coderdtest.NewWithAPI(t, &coderdtest.Options{
|
||||
Database: db,
|
||||
Pubsub: ps,
|
||||
Authorizer: authorizer,
|
||||
IncludeProvisionerDaemon: true,
|
||||
})
|
||||
return client
|
||||
}
|
||||
|
||||
setupWorkspaceTask := func(t *testing.T, db database.Store, user codersdk.CreateFirstUserResponse) (database.Task, uuid.UUID) {
|
||||
t.Helper()
|
||||
workspaceBuild := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).WithTask(database.TaskTable{
|
||||
Prompt: "resume me",
|
||||
}, nil).Do()
|
||||
return workspaceBuild.Task, workspaceBuild.Workspace.ID
|
||||
}
|
||||
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
ProvisionGraph: []*proto.Response{
|
||||
{Type: &proto.Response_Graph{Graph: &proto.GraphComplete{
|
||||
HasAiTasks: true,
|
||||
}}},
|
||||
},
|
||||
})
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
task, err := client.CreateTask(ctx, codersdk.Me, codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: "resume me",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
workspace, err := client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
pauseResp, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, pauseResp.WorkspaceBuild.ID)
|
||||
|
||||
resumeResp, err := client.ResumeTask(ctx, codersdk.Me, task.ID)
|
||||
require.NoError(t, err)
|
||||
build := *resumeResp.WorkspaceBuild
|
||||
require.Equal(t, codersdk.WorkspaceTransitionStart, build.Transition)
|
||||
require.Equal(t, task.WorkspaceID.UUID, build.WorkspaceID)
|
||||
require.Equal(t, workspace.LatestBuild.BuildNumber+2, build.BuildNumber)
|
||||
require.Equal(t, string(codersdk.CreateWorkspaceBuildReasonTaskResume), string(build.Reason))
|
||||
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, build.ID)
|
||||
workspace, err = client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, codersdk.WorkspaceStatusRunning, workspace.LatestBuild.Status)
|
||||
})
|
||||
|
||||
t.Run("Resume a task that is not paused", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
client := setupClient(t, db, ps, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
workspaceBuild := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).
|
||||
WithTask(database.TaskTable{
|
||||
Prompt: "pause me",
|
||||
}, nil).
|
||||
Succeeded().
|
||||
Do()
|
||||
|
||||
_, err := client.ResumeTask(ctx, codersdk.Me, workspaceBuild.Task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusConflict, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Task not found", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
_ = coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
_, err := client.ResumeTask(ctx, codersdk.Me, uuid.New())
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusNotFound, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Task lookup forbidden", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
auth := &coderdtest.FakeAuthorizer{
|
||||
ConditionalReturn: func(_ context.Context, _ rbac.Subject, action policy.Action, object rbac.Object) error {
|
||||
if action == policy.ActionRead && object.Type == rbac.ResourceTask.Type {
|
||||
return rbac.UnauthorizedError{}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
client := setupClient(t, db, ps, auth)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
task, _ := setupWorkspaceTask(t, db, user)
|
||||
|
||||
_, err := client.ResumeTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusNotFound, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Workspace lookup forbidden", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
auth := &coderdtest.FakeAuthorizer{
|
||||
ConditionalReturn: func(_ context.Context, _ rbac.Subject, action policy.Action, object rbac.Object) error {
|
||||
if action == policy.ActionRead && object.Type == rbac.ResourceWorkspace.Type {
|
||||
return rbac.UnauthorizedError{}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
client := setupClient(t, db, ps, auth)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
task, _ := setupWorkspaceTask(t, db, user)
|
||||
|
||||
_, err := client.ResumeTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusNotFound, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("No Workspace for Task", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
client := setupClient(t, db, ps, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
workspaceBuild := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).Do()
|
||||
task := dbgen.Task(t, db, database.TaskTable{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
TemplateVersionID: workspaceBuild.Build.TemplateVersionID,
|
||||
Prompt: "no workspace",
|
||||
})
|
||||
|
||||
_, err := client.ResumeTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusInternalServerError, apiErr.StatusCode())
|
||||
require.Equal(t, "Task does not have a workspace.", apiErr.Message)
|
||||
})
|
||||
|
||||
t.Run("Workspace not found", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
var workspaceID uuid.UUID
|
||||
wrapped := aiTaskStoreWrapper{
|
||||
Store: db,
|
||||
getWorkspaceByID: func(ctx context.Context, id uuid.UUID) (database.Workspace, error) {
|
||||
if id == workspaceID && id != uuid.Nil {
|
||||
return database.Workspace{}, sql.ErrNoRows
|
||||
}
|
||||
return db.GetWorkspaceByID(ctx, id)
|
||||
},
|
||||
}
|
||||
client := setupClient(t, wrapped, ps, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
task, workspaceIDValue := setupWorkspaceTask(t, db, user)
|
||||
workspaceID = workspaceIDValue
|
||||
|
||||
_, err := client.ResumeTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusNotFound, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Workspace lookup internal error", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
var workspaceID uuid.UUID
|
||||
wrapped := aiTaskStoreWrapper{
|
||||
Store: db,
|
||||
getWorkspaceByID: func(ctx context.Context, id uuid.UUID) (database.Workspace, error) {
|
||||
if id == workspaceID && id != uuid.Nil {
|
||||
return database.Workspace{}, xerrors.New("boom")
|
||||
}
|
||||
return db.GetWorkspaceByID(ctx, id)
|
||||
},
|
||||
}
|
||||
client := setupClient(t, wrapped, ps, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
task, workspaceIDValue := setupWorkspaceTask(t, db, user)
|
||||
workspaceID = workspaceIDValue
|
||||
|
||||
_, err := client.ResumeTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusInternalServerError, apiErr.StatusCode())
|
||||
require.Equal(t, "Internal error fetching task workspace.", apiErr.Message)
|
||||
})
|
||||
|
||||
t.Run("Build Forbidden", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
auth := &coderdtest.FakeAuthorizer{
|
||||
ConditionalReturn: func(_ context.Context, _ rbac.Subject, action policy.Action, object rbac.Object) error {
|
||||
if action == policy.ActionWorkspaceStart && object.Type == rbac.ResourceWorkspace.Type {
|
||||
return rbac.UnauthorizedError{}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
client := setupClient(t, db, ps, auth)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
task, _ := setupWorkspaceTask(t, db, user)
|
||||
|
||||
pauseResp, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, pauseResp.WorkspaceBuild.ID)
|
||||
|
||||
_, err = client.ResumeTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusForbidden, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Job already in progress", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
client := setupClient(t, db, ps, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
workspaceBuild := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).
|
||||
WithTask(database.TaskTable{
|
||||
Prompt: "resume me",
|
||||
}, nil).
|
||||
Starting().
|
||||
Do()
|
||||
|
||||
_, err := client.ResumeTask(ctx, codersdk.Me, workspaceBuild.Task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusConflict, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Build Internal Error", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
db, ps := dbtestutil.NewDB(t)
|
||||
wrapped := aiTaskStoreWrapper{
|
||||
Store: db,
|
||||
}
|
||||
|
||||
client := setupClient(t, &wrapped, ps, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: echo.ApplyComplete,
|
||||
ProvisionGraph: []*proto.Response{
|
||||
{Type: &proto.Response_Graph{Graph: &proto.GraphComplete{
|
||||
HasAiTasks: true,
|
||||
}}},
|
||||
},
|
||||
})
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
task, err := client.CreateTask(ctx, codersdk.Me, codersdk.CreateTaskRequest{
|
||||
TemplateVersionID: template.ActiveVersionID,
|
||||
Input: "resume me",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
workspace, err := client.Workspace(ctx, task.WorkspaceID.UUID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
pauseResp, err := client.PauseTask(ctx, codersdk.Me, task.ID)
|
||||
require.NoError(t, err)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, pauseResp.WorkspaceBuild.ID)
|
||||
|
||||
// Induce a transient failure in the database after the task has been paused.
|
||||
wrapped.insertWorkspaceBuild = func(ctx context.Context, arg database.InsertWorkspaceBuildParams) error {
|
||||
return xerrors.New("insert failed")
|
||||
}
|
||||
_, err = client.ResumeTask(ctx, codersdk.Me, task.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusInternalServerError, apiErr.StatusCode())
|
||||
})
|
||||
}
|
||||
|
||||
Generated
+111
-3
@@ -5824,6 +5824,90 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/tasks/{user}/{task}/pause": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Tasks"
|
||||
],
|
||||
"summary": "Pause task",
|
||||
"operationId": "pause-task",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Username, user ID, or 'me' for the authenticated user",
|
||||
"name": "user",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Task ID",
|
||||
"name": "task",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"202": {
|
||||
"description": "Accepted",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.PauseTaskResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/tasks/{user}/{task}/resume": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Tasks"
|
||||
],
|
||||
"summary": "Resume task",
|
||||
"operationId": "resume-task",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Username, user ID, or 'me' for the authenticated user",
|
||||
"name": "user",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Task ID",
|
||||
"name": "task",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"202": {
|
||||
"description": "Accepted",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.ResumeTaskResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/tasks/{user}/{task}/send": {
|
||||
"post": {
|
||||
"security": [
|
||||
@@ -14102,14 +14186,18 @@ const docTemplate = `{
|
||||
"cli",
|
||||
"ssh_connection",
|
||||
"vscode_connection",
|
||||
"jetbrains_connection"
|
||||
"jetbrains_connection",
|
||||
"task_manual_pause",
|
||||
"task_resume"
|
||||
],
|
||||
"x-enum-varnames": [
|
||||
"CreateWorkspaceBuildReasonDashboard",
|
||||
"CreateWorkspaceBuildReasonCLI",
|
||||
"CreateWorkspaceBuildReasonSSHConnection",
|
||||
"CreateWorkspaceBuildReasonVSCodeConnection",
|
||||
"CreateWorkspaceBuildReasonJetbrainsConnection"
|
||||
"CreateWorkspaceBuildReasonJetbrainsConnection",
|
||||
"CreateWorkspaceBuildReasonTaskManualPause",
|
||||
"CreateWorkspaceBuildReasonTaskResume"
|
||||
]
|
||||
},
|
||||
"codersdk.CreateWorkspaceBuildRequest": {
|
||||
@@ -14143,7 +14231,8 @@ const docTemplate = `{
|
||||
"cli",
|
||||
"ssh_connection",
|
||||
"vscode_connection",
|
||||
"jetbrains_connection"
|
||||
"jetbrains_connection",
|
||||
"task_manual_pause"
|
||||
],
|
||||
"allOf": [
|
||||
{
|
||||
@@ -17014,6 +17103,14 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.PauseTaskResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"workspace_build": {
|
||||
"$ref": "#/definitions/codersdk.WorkspaceBuild"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.Permission": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -18182,6 +18279,14 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.ResumeTaskResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"workspace_build": {
|
||||
"$ref": "#/definitions/codersdk.WorkspaceBuild"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.RetentionConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -18814,6 +18919,9 @@ const docTemplate = `{
|
||||
"default_ttl_ms": {
|
||||
"type": "integer"
|
||||
},
|
||||
"deleted": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"deprecated": {
|
||||
"type": "boolean"
|
||||
},
|
||||
|
||||
Generated
+103
-3
@@ -5147,6 +5147,82 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/tasks/{user}/{task}/pause": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"consumes": ["application/json"],
|
||||
"tags": ["Tasks"],
|
||||
"summary": "Pause task",
|
||||
"operationId": "pause-task",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Username, user ID, or 'me' for the authenticated user",
|
||||
"name": "user",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Task ID",
|
||||
"name": "task",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"202": {
|
||||
"description": "Accepted",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.PauseTaskResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/tasks/{user}/{task}/resume": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"consumes": ["application/json"],
|
||||
"tags": ["Tasks"],
|
||||
"summary": "Resume task",
|
||||
"operationId": "resume-task",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Username, user ID, or 'me' for the authenticated user",
|
||||
"name": "user",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"format": "uuid",
|
||||
"description": "Task ID",
|
||||
"name": "task",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"202": {
|
||||
"description": "Accepted",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.ResumeTaskResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/tasks/{user}/{task}/send": {
|
||||
"post": {
|
||||
"security": [
|
||||
@@ -12662,14 +12738,18 @@
|
||||
"cli",
|
||||
"ssh_connection",
|
||||
"vscode_connection",
|
||||
"jetbrains_connection"
|
||||
"jetbrains_connection",
|
||||
"task_manual_pause",
|
||||
"task_resume"
|
||||
],
|
||||
"x-enum-varnames": [
|
||||
"CreateWorkspaceBuildReasonDashboard",
|
||||
"CreateWorkspaceBuildReasonCLI",
|
||||
"CreateWorkspaceBuildReasonSSHConnection",
|
||||
"CreateWorkspaceBuildReasonVSCodeConnection",
|
||||
"CreateWorkspaceBuildReasonJetbrainsConnection"
|
||||
"CreateWorkspaceBuildReasonJetbrainsConnection",
|
||||
"CreateWorkspaceBuildReasonTaskManualPause",
|
||||
"CreateWorkspaceBuildReasonTaskResume"
|
||||
]
|
||||
},
|
||||
"codersdk.CreateWorkspaceBuildRequest": {
|
||||
@@ -12699,7 +12779,8 @@
|
||||
"cli",
|
||||
"ssh_connection",
|
||||
"vscode_connection",
|
||||
"jetbrains_connection"
|
||||
"jetbrains_connection",
|
||||
"task_manual_pause"
|
||||
],
|
||||
"allOf": [
|
||||
{
|
||||
@@ -15477,6 +15558,14 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.PauseTaskResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"workspace_build": {
|
||||
"$ref": "#/definitions/codersdk.WorkspaceBuild"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.Permission": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -16598,6 +16687,14 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.ResumeTaskResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"workspace_build": {
|
||||
"$ref": "#/definitions/codersdk.WorkspaceBuild"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.RetentionConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -17209,6 +17306,9 @@
|
||||
"default_ttl_ms": {
|
||||
"type": "integer"
|
||||
},
|
||||
"deleted": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"deprecated": {
|
||||
"type": "boolean"
|
||||
},
|
||||
|
||||
@@ -400,7 +400,7 @@ func TestAPIKey_Deleted(t *testing.T) {
|
||||
require.Error(t, err)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusBadRequest, apiErr.StatusCode())
|
||||
require.Equal(t, http.StatusNotFound, apiErr.StatusCode())
|
||||
}
|
||||
|
||||
func TestAPIKey_SetDefault(t *testing.T) {
|
||||
|
||||
@@ -48,9 +48,10 @@ type Executor struct {
|
||||
tick <-chan time.Time
|
||||
statsCh chan<- Stats
|
||||
// NotificationsEnqueuer handles enqueueing notifications for delivery by SMTP, webhook, etc.
|
||||
notificationsEnqueuer notifications.Enqueuer
|
||||
reg prometheus.Registerer
|
||||
experiments codersdk.Experiments
|
||||
notificationsEnqueuer notifications.Enqueuer
|
||||
reg prometheus.Registerer
|
||||
experiments codersdk.Experiments
|
||||
workspaceBuilderMetrics *wsbuilder.Metrics
|
||||
|
||||
metrics executorMetrics
|
||||
}
|
||||
@@ -67,23 +68,24 @@ type Stats struct {
|
||||
}
|
||||
|
||||
// New returns a new wsactions executor.
|
||||
func NewExecutor(ctx context.Context, db database.Store, ps pubsub.Pubsub, fc *files.Cache, reg prometheus.Registerer, tss *atomic.Pointer[schedule.TemplateScheduleStore], auditor *atomic.Pointer[audit.Auditor], acs *atomic.Pointer[dbauthz.AccessControlStore], buildUsageChecker *atomic.Pointer[wsbuilder.UsageChecker], log slog.Logger, tick <-chan time.Time, enqueuer notifications.Enqueuer, exp codersdk.Experiments) *Executor {
|
||||
func NewExecutor(ctx context.Context, db database.Store, ps pubsub.Pubsub, fc *files.Cache, reg prometheus.Registerer, tss *atomic.Pointer[schedule.TemplateScheduleStore], auditor *atomic.Pointer[audit.Auditor], acs *atomic.Pointer[dbauthz.AccessControlStore], buildUsageChecker *atomic.Pointer[wsbuilder.UsageChecker], log slog.Logger, tick <-chan time.Time, enqueuer notifications.Enqueuer, exp codersdk.Experiments, workspaceBuilderMetrics *wsbuilder.Metrics) *Executor {
|
||||
factory := promauto.With(reg)
|
||||
le := &Executor{
|
||||
//nolint:gocritic // Autostart has a limited set of permissions.
|
||||
ctx: dbauthz.AsAutostart(ctx),
|
||||
db: db,
|
||||
ps: ps,
|
||||
fileCache: fc,
|
||||
templateScheduleStore: tss,
|
||||
tick: tick,
|
||||
log: log.Named("autobuild"),
|
||||
auditor: auditor,
|
||||
accessControlStore: acs,
|
||||
buildUsageChecker: buildUsageChecker,
|
||||
notificationsEnqueuer: enqueuer,
|
||||
reg: reg,
|
||||
experiments: exp,
|
||||
ctx: dbauthz.AsAutostart(ctx),
|
||||
db: db,
|
||||
ps: ps,
|
||||
fileCache: fc,
|
||||
templateScheduleStore: tss,
|
||||
tick: tick,
|
||||
log: log.Named("autobuild"),
|
||||
auditor: auditor,
|
||||
accessControlStore: acs,
|
||||
buildUsageChecker: buildUsageChecker,
|
||||
notificationsEnqueuer: enqueuer,
|
||||
reg: reg,
|
||||
experiments: exp,
|
||||
workspaceBuilderMetrics: workspaceBuilderMetrics,
|
||||
metrics: executorMetrics{
|
||||
autobuildExecutionDuration: factory.NewHistogram(prometheus.HistogramOpts{
|
||||
Namespace: "coderd",
|
||||
@@ -335,7 +337,8 @@ func (e *Executor) runOnce(t time.Time) Stats {
|
||||
SetLastWorkspaceBuildInTx(&latestBuild).
|
||||
SetLastWorkspaceBuildJobInTx(&latestJob).
|
||||
Experiments(e.experiments).
|
||||
Reason(reason)
|
||||
Reason(reason).
|
||||
BuildMetrics(e.workspaceBuilderMetrics)
|
||||
log.Debug(e.ctx, "auto building workspace", slog.F("transition", nextTransition))
|
||||
if nextTransition == database.WorkspaceTransitionStart &&
|
||||
useActiveVersion(accessControl, ws) {
|
||||
|
||||
@@ -245,6 +245,7 @@ type Options struct {
|
||||
MetadataBatcherOptions []metadatabatcher.Option
|
||||
|
||||
ProvisionerdServerMetrics *provisionerdserver.Metrics
|
||||
WorkspaceBuilderMetrics *wsbuilder.Metrics
|
||||
|
||||
// WorkspaceAppAuditSessionTimeout allows changing the timeout for audit
|
||||
// sessions. Raising or lowering this value will directly affect the write
|
||||
@@ -1078,6 +1079,8 @@ func New(options *Options) *API {
|
||||
r.Patch("/input", api.taskUpdateInput)
|
||||
r.Post("/send", api.taskSend)
|
||||
r.Get("/logs", api.taskLogs)
|
||||
r.Post("/pause", api.pauseTask)
|
||||
r.Post("/resume", api.resumeTask)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@@ -191,6 +191,7 @@ type Options struct {
|
||||
TelemetryReporter telemetry.Reporter
|
||||
|
||||
ProvisionerdServerMetrics *provisionerdserver.Metrics
|
||||
WorkspaceBuilderMetrics *wsbuilder.Metrics
|
||||
UsageInserter usage.Inserter
|
||||
}
|
||||
|
||||
@@ -399,6 +400,7 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can
|
||||
options.AutobuildTicker,
|
||||
options.NotificationsEnqueuer,
|
||||
experiments,
|
||||
options.WorkspaceBuilderMetrics,
|
||||
).WithStatsChannel(options.AutobuildStats)
|
||||
|
||||
lifecycleExecutor.Run()
|
||||
@@ -620,6 +622,7 @@ func NewOptions(t testing.TB, options *Options) (func(http.Handler), context.Can
|
||||
AppEncryptionKeyCache: options.APIKeyEncryptionCache,
|
||||
OIDCConvertKeyCache: options.OIDCConvertKeyCache,
|
||||
ProvisionerdServerMetrics: options.ProvisionerdServerMetrics,
|
||||
WorkspaceBuilderMetrics: options.WorkspaceBuilderMetrics,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -17,4 +17,6 @@ const (
|
||||
CheckTelemetryLockEventTypeConstraint CheckConstraint = "telemetry_lock_event_type_constraint" // telemetry_locks
|
||||
CheckValidationMonotonicOrder CheckConstraint = "validation_monotonic_order" // template_version_parameters
|
||||
CheckUsageEventTypeCheck CheckConstraint = "usage_event_type_check" // usage_events
|
||||
CheckGroupAclIsObject CheckConstraint = "group_acl_is_object" // workspaces
|
||||
CheckUserAclIsObject CheckConstraint = "user_acl_is_object" // workspaces
|
||||
)
|
||||
|
||||
@@ -93,7 +93,6 @@ type TxOptions struct {
|
||||
|
||||
// IncrementExecutionCount is a helper function for external packages
|
||||
// to increment the unexported count.
|
||||
// Mainly for `dbmem`.
|
||||
func IncrementExecutionCount(opts *TxOptions) {
|
||||
opts.executionCount++
|
||||
}
|
||||
|
||||
@@ -19,7 +19,6 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog/v3"
|
||||
"github.com/coder/coder/v2/coderd/apikey"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/db2sdk"
|
||||
@@ -30,7 +29,6 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/rbac"
|
||||
"github.com/coder/coder/v2/coderd/rbac/policy"
|
||||
"github.com/coder/coder/v2/coderd/rbac/rolestore"
|
||||
"github.com/coder/coder/v2/coderd/taskname"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/cryptorand"
|
||||
"github.com/coder/coder/v2/provisionerd/proto"
|
||||
@@ -1664,13 +1662,12 @@ func Task(t testing.TB, db database.Store, orig database.TaskTable) database.Tas
|
||||
parameters = json.RawMessage([]byte("{}"))
|
||||
}
|
||||
|
||||
taskName := taskname.Generate(genCtx, slog.Make(), orig.Prompt)
|
||||
task, err := db.InsertTask(genCtx, database.InsertTaskParams{
|
||||
ID: takeFirst(orig.ID, uuid.New()),
|
||||
OrganizationID: orig.OrganizationID,
|
||||
OwnerID: orig.OwnerID,
|
||||
Name: takeFirst(orig.Name, taskName.Name),
|
||||
DisplayName: takeFirst(orig.DisplayName, taskName.DisplayName),
|
||||
Name: takeFirst(orig.Name, testutil.GetRandomNameHyphenated(t)),
|
||||
DisplayName: takeFirst(orig.DisplayName, testutil.GetRandomNameHyphenated(t)),
|
||||
WorkspaceID: orig.WorkspaceID,
|
||||
TemplateVersionID: orig.TemplateVersionID,
|
||||
TemplateParameters: parameters,
|
||||
|
||||
Generated
+3
-1
@@ -2736,7 +2736,9 @@ CREATE TABLE workspaces (
|
||||
favorite boolean DEFAULT false NOT NULL,
|
||||
next_start_at timestamp with time zone,
|
||||
group_acl jsonb DEFAULT '{}'::jsonb NOT NULL,
|
||||
user_acl jsonb DEFAULT '{}'::jsonb NOT NULL
|
||||
user_acl jsonb DEFAULT '{}'::jsonb NOT NULL,
|
||||
CONSTRAINT group_acl_is_object CHECK ((jsonb_typeof(group_acl) = 'object'::text)),
|
||||
CONSTRAINT user_acl_is_object CHECK ((jsonb_typeof(user_acl) = 'object'::text))
|
||||
);
|
||||
|
||||
COMMENT ON COLUMN workspaces.favorite IS 'Favorite is true if the workspace owner has favorited the workspace.';
|
||||
|
||||
@@ -0,0 +1,3 @@
|
||||
ALTER TABLE workspaces
|
||||
DROP CONSTRAINT IF EXISTS group_acl_is_object,
|
||||
DROP CONSTRAINT IF EXISTS user_acl_is_object;
|
||||
@@ -0,0 +1,9 @@
|
||||
-- Add constraints that reject 'null'::jsonb for group and user ACLs
|
||||
-- because they would break the new workspace_expanded view.
|
||||
|
||||
UPDATE workspaces SET group_acl = '{}'::jsonb WHERE group_acl = 'null'::jsonb;
|
||||
UPDATE workspaces SET user_acl = '{}'::jsonb WHERE user_acl = 'null'::jsonb;
|
||||
|
||||
ALTER TABLE workspaces
|
||||
ADD CONSTRAINT group_acl_is_object CHECK (jsonb_typeof(group_acl) = 'object'),
|
||||
ADD CONSTRAINT user_acl_is_object CHECK (jsonb_typeof(user_acl) = 'object');
|
||||
Vendored
+35
@@ -0,0 +1,35 @@
|
||||
-- Fixture for migration 000417_workspace_acl_object_constraint.
|
||||
-- Inserts a workspace with 'null'::json ACLs to ensure the migration
|
||||
-- correctly normalizes such values.
|
||||
|
||||
INSERT INTO workspaces (
|
||||
id,
|
||||
created_at,
|
||||
updated_at,
|
||||
owner_id,
|
||||
organization_id,
|
||||
template_id,
|
||||
deleted,
|
||||
name,
|
||||
last_used_at,
|
||||
automatic_updates,
|
||||
favorite,
|
||||
group_acl,
|
||||
user_acl
|
||||
)
|
||||
VALUES (
|
||||
'6f6fdbee-4c18-4a5c-8a8d-9b811c9f0a28',
|
||||
'2024-02-10 00:00:00+00',
|
||||
'2024-02-10 00:00:00+00',
|
||||
'30095c71-380b-457a-8995-97b8ee6e5307',
|
||||
'bb640d07-ca8a-4869-b6bc-ae61ebb2fda1',
|
||||
'4cc1f466-f326-477e-8762-9d0c6781fc56',
|
||||
false,
|
||||
'acl-null-workspace',
|
||||
'0001-01-01 00:00:00+00',
|
||||
'never',
|
||||
false,
|
||||
'null'::jsonb,
|
||||
'null'::jsonb
|
||||
)
|
||||
ON CONFLICT DO NOTHING;
|
||||
@@ -6765,6 +6765,65 @@ func TestWorkspaceBuildDeadlineConstraint(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestWorkspaceACLObjectConstraint(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, _ := dbtestutil.NewDB(t)
|
||||
org := dbgen.Organization(t, db, database.Organization{})
|
||||
user := dbgen.User(t, db, database.User{})
|
||||
template := dbgen.Template(t, db, database.Template{
|
||||
CreatedBy: user.ID,
|
||||
OrganizationID: org.ID,
|
||||
})
|
||||
workspace := dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: user.ID,
|
||||
TemplateID: template.ID,
|
||||
Deleted: false,
|
||||
})
|
||||
|
||||
t.Run("GroupACLNull", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var nilACL database.WorkspaceACL
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
err := db.UpdateWorkspaceACLByID(ctx, database.UpdateWorkspaceACLByIDParams{
|
||||
ID: workspace.ID,
|
||||
GroupACL: nilACL,
|
||||
UserACL: database.WorkspaceACL{},
|
||||
})
|
||||
require.Error(t, err)
|
||||
require.True(t, database.IsCheckViolation(err, database.CheckGroupAclIsObject))
|
||||
})
|
||||
|
||||
t.Run("UserACLNull", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var nilACL database.WorkspaceACL
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
err := db.UpdateWorkspaceACLByID(ctx, database.UpdateWorkspaceACLByIDParams{
|
||||
ID: workspace.ID,
|
||||
GroupACL: database.WorkspaceACL{},
|
||||
UserACL: nilACL,
|
||||
})
|
||||
require.Error(t, err)
|
||||
require.True(t, database.IsCheckViolation(err, database.CheckUserAclIsObject))
|
||||
})
|
||||
|
||||
t.Run("ValidEmptyObjects", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
err := db.UpdateWorkspaceACLByID(ctx, database.UpdateWorkspaceACLByIDParams{
|
||||
ID: workspace.ID,
|
||||
GroupACL: database.WorkspaceACL{},
|
||||
UserACL: database.WorkspaceACL{},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
// TestGetLatestWorkspaceBuildsByWorkspaceIDs populates the database with
|
||||
// workspaces and builds. It then tests that
|
||||
// GetLatestWorkspaceBuildsByWorkspaceIDs returns the latest build for some
|
||||
|
||||
@@ -106,6 +106,10 @@ func ExtractUserContext(ctx context.Context, db database.Store, rw http.Response
|
||||
if userID, err := uuid.Parse(userQuery); err == nil {
|
||||
user, err = db.GetUserByID(ctx, userID)
|
||||
if err != nil {
|
||||
if httpapi.Is404Error(err) {
|
||||
httpapi.ResourceNotFound(rw)
|
||||
return database.User{}, false
|
||||
}
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: userErrorMessage,
|
||||
Detail: fmt.Sprintf("queried user=%q", userQuery),
|
||||
@@ -120,6 +124,10 @@ func ExtractUserContext(ctx context.Context, db database.Store, rw http.Response
|
||||
Username: userQuery,
|
||||
})
|
||||
if err != nil {
|
||||
if httpapi.Is404Error(err) {
|
||||
httpapi.ResourceNotFound(rw)
|
||||
return database.User{}, false
|
||||
}
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: userErrorMessage,
|
||||
Detail: fmt.Sprintf("queried user=%q", userQuery),
|
||||
|
||||
@@ -71,7 +71,53 @@ func TestUserParam(t *testing.T) {
|
||||
})).ServeHTTP(rw, r)
|
||||
res := rw.Result()
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusBadRequest, res.StatusCode)
|
||||
// User "ben" doesn't exist, so expect 404.
|
||||
require.Equal(t, http.StatusNotFound, res.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("NotFoundByUsername", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
db, rw, r := setup(t)
|
||||
|
||||
httpmw.ExtractAPIKeyMW(httpmw.ExtractAPIKeyConfig{
|
||||
DB: db,
|
||||
RedirectToLogin: false,
|
||||
})(http.HandlerFunc(func(rw http.ResponseWriter, returnedRequest *http.Request) {
|
||||
r = returnedRequest
|
||||
})).ServeHTTP(rw, r)
|
||||
|
||||
routeContext := chi.NewRouteContext()
|
||||
routeContext.URLParams.Add("user", "nonexistent-user")
|
||||
r = r.WithContext(context.WithValue(r.Context(), chi.RouteCtxKey, routeContext))
|
||||
httpmw.ExtractUserParam(db)(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.WriteHeader(http.StatusOK)
|
||||
})).ServeHTTP(rw, r)
|
||||
res := rw.Result()
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusNotFound, res.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("NotFoundByUUID", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
db, rw, r := setup(t)
|
||||
|
||||
httpmw.ExtractAPIKeyMW(httpmw.ExtractAPIKeyConfig{
|
||||
DB: db,
|
||||
RedirectToLogin: false,
|
||||
})(http.HandlerFunc(func(rw http.ResponseWriter, returnedRequest *http.Request) {
|
||||
r = returnedRequest
|
||||
})).ServeHTTP(rw, r)
|
||||
|
||||
routeContext := chi.NewRouteContext()
|
||||
// Use a valid UUID that doesn't exist in the database.
|
||||
routeContext.URLParams.Add("user", "88888888-4444-4444-4444-121212121212")
|
||||
r = r.WithContext(context.WithValue(r.Context(), chi.RouteCtxKey, routeContext))
|
||||
httpmw.ExtractUserParam(db)(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.WriteHeader(http.StatusOK)
|
||||
})).ServeHTTP(rw, r)
|
||||
res := rw.Result()
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusNotFound, res.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("me", func(t *testing.T) {
|
||||
|
||||
@@ -262,8 +262,6 @@ func TestWebhookDispatch(t *testing.T) {
|
||||
// This is not strictly necessary for this test, but it's testing some side logic which is too small for its own test.
|
||||
require.Equal(t, payload.Payload.UserName, name)
|
||||
require.Equal(t, payload.Payload.UserUsername, username)
|
||||
// Right now we don't have a way to query notification templates by ID in dbmem, and it's not necessary to add this
|
||||
// just to satisfy this test. We can safely assume that as long as this value is not empty that the given value was delivered.
|
||||
require.NotEmpty(t, payload.Payload.NotificationName)
|
||||
}
|
||||
|
||||
|
||||
@@ -150,7 +150,7 @@ func TestNotificationPreferences(t *testing.T) {
|
||||
require.ErrorAsf(t, err, &sdkError, "error should be of type *codersdk.Error")
|
||||
// NOTE: ExtractUserParam gets in the way here, and returns a 400 Bad Request instead of a 403 Forbidden.
|
||||
// This is not ideal, and we should probably change this behavior.
|
||||
require.Equal(t, http.StatusBadRequest, sdkError.StatusCode())
|
||||
require.Equal(t, http.StatusNotFound, sdkError.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("Admin may read any users' preferences", func(t *testing.T) {
|
||||
|
||||
@@ -13,6 +13,7 @@ type Metrics struct {
|
||||
logger slog.Logger
|
||||
workspaceCreationTimings *prometheus.HistogramVec
|
||||
workspaceClaimTimings *prometheus.HistogramVec
|
||||
jobQueueWait *prometheus.HistogramVec
|
||||
}
|
||||
|
||||
type WorkspaceTimingType int
|
||||
@@ -29,6 +30,12 @@ const (
|
||||
workspaceTypePrebuild = "prebuild"
|
||||
)
|
||||
|
||||
// BuildReasonPrebuild is the build_reason metric label value for prebuild
|
||||
// operations. This is distinct from database.BuildReason values since prebuilds
|
||||
// use BuildReasonInitiator in the database but we want to track them separately
|
||||
// in metrics. This is also used as a label value by the metrics in wsbuilder.
|
||||
const BuildReasonPrebuild = workspaceTypePrebuild
|
||||
|
||||
type WorkspaceTimingFlags struct {
|
||||
IsPrebuild bool
|
||||
IsClaim bool
|
||||
@@ -90,6 +97,30 @@ func NewMetrics(logger slog.Logger) *Metrics {
|
||||
NativeHistogramZeroThreshold: 0,
|
||||
NativeHistogramMaxZeroThreshold: 0,
|
||||
}, []string{"organization_name", "template_name", "preset_name"}),
|
||||
jobQueueWait: prometheus.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Namespace: "coderd",
|
||||
Name: "provisioner_job_queue_wait_seconds",
|
||||
Help: "Time from job creation to acquisition by a provisioner daemon.",
|
||||
Buckets: []float64{
|
||||
0.1, // 100ms
|
||||
0.5, // 500ms
|
||||
1, // 1s
|
||||
5, // 5s
|
||||
10, // 10s
|
||||
30, // 30s
|
||||
60, // 1m
|
||||
120, // 2m
|
||||
300, // 5m
|
||||
600, // 10m
|
||||
900, // 15m
|
||||
1800, // 30m
|
||||
},
|
||||
NativeHistogramBucketFactor: 1.1,
|
||||
NativeHistogramMaxBucketNumber: 100,
|
||||
NativeHistogramMinResetDuration: time.Hour,
|
||||
NativeHistogramZeroThreshold: 0,
|
||||
NativeHistogramMaxZeroThreshold: 0,
|
||||
}, []string{"provisioner_type", "job_type", "transition", "build_reason"}),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -97,7 +128,10 @@ func (m *Metrics) Register(reg prometheus.Registerer) error {
|
||||
if err := reg.Register(m.workspaceCreationTimings); err != nil {
|
||||
return err
|
||||
}
|
||||
return reg.Register(m.workspaceClaimTimings)
|
||||
if err := reg.Register(m.workspaceClaimTimings); err != nil {
|
||||
return err
|
||||
}
|
||||
return reg.Register(m.jobQueueWait)
|
||||
}
|
||||
|
||||
// IsTrackable returns true if the workspace build should be tracked in metrics.
|
||||
@@ -162,3 +196,9 @@ func (m *Metrics) UpdateWorkspaceTimingsMetrics(
|
||||
// Not a trackable build type (e.g. restart, stop, subsequent builds)
|
||||
}
|
||||
}
|
||||
|
||||
// ObserveJobQueueWait records the time a provisioner job spent waiting in the queue.
|
||||
// For non-workspace-build jobs, transition and buildReason should be empty strings.
|
||||
func (m *Metrics) ObserveJobQueueWait(provisionerType, jobType, transition, buildReason string, waitSeconds float64) {
|
||||
m.jobQueueWait.WithLabelValues(provisionerType, jobType, transition, buildReason).Observe(waitSeconds)
|
||||
}
|
||||
|
||||
@@ -478,6 +478,10 @@ func (s *server) acquireProtoJob(ctx context.Context, job database.ProvisionerJo
|
||||
TraceMetadata: jobTraceMetadata,
|
||||
}
|
||||
|
||||
// jobTransition and jobBuildReason are used for metrics; only set for workspace builds.
|
||||
var jobTransition string
|
||||
var jobBuildReason string
|
||||
|
||||
switch job.Type {
|
||||
case database.ProvisionerJobTypeWorkspaceBuild:
|
||||
var input WorkspaceProvisionJob
|
||||
@@ -584,6 +588,15 @@ func (s *server) acquireProtoJob(ctx context.Context, job database.ProvisionerJo
|
||||
if err != nil {
|
||||
return nil, failJob(fmt.Sprintf("convert workspace transition: %s", err))
|
||||
}
|
||||
jobTransition = string(workspaceBuild.Transition)
|
||||
// Prebuilds use BuildReasonInitiator in the database but we want to
|
||||
// track them separately in metrics. Check the initiator ID to detect
|
||||
// prebuild jobs.
|
||||
if job.InitiatorID == database.PrebuildsSystemUserID {
|
||||
jobBuildReason = BuildReasonPrebuild
|
||||
} else {
|
||||
jobBuildReason = string(workspaceBuild.Reason)
|
||||
}
|
||||
|
||||
// A previous workspace build exists
|
||||
var lastWorkspaceBuildParameters []database.WorkspaceBuildParameter
|
||||
@@ -825,6 +838,12 @@ func (s *server) acquireProtoJob(ctx context.Context, job database.ProvisionerJo
|
||||
return nil, failJob(fmt.Sprintf("payload was too big: %d > %d", protobuf.Size(protoJob), drpcsdk.MaxMessageSize))
|
||||
}
|
||||
|
||||
// Record the time the job spent waiting in the queue.
|
||||
if s.metrics != nil && job.StartedAt.Valid && job.Provisioner.Valid() {
|
||||
queueWaitSeconds := job.StartedAt.Time.Sub(job.CreatedAt).Seconds()
|
||||
s.metrics.ObserveJobQueueWait(string(job.Provisioner), string(job.Type), jobTransition, jobBuildReason, queueWaitSeconds)
|
||||
}
|
||||
|
||||
return protoJob, err
|
||||
}
|
||||
|
||||
|
||||
@@ -1131,6 +1131,7 @@ func (api *API) convertTemplate(
|
||||
RequireActiveVersion: templateAccessControl.RequireActiveVersion,
|
||||
Deprecated: templateAccessControl.IsDeprecated(),
|
||||
DeprecationMessage: templateAccessControl.Deprecated,
|
||||
Deleted: template.Deleted,
|
||||
MaxPortShareLevel: maxPortShareLevel,
|
||||
UseClassicParameterFlow: template.UseClassicParameterFlow,
|
||||
CORSBehavior: codersdk.CORSBehavior(template.CorsBehavior),
|
||||
|
||||
@@ -1801,6 +1801,49 @@ func TestDeleteTemplate(t *testing.T) {
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusForbidden, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("DeletedIsSet", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
// Verify the deleted field is exposed in the SDK and set to false for active templates
|
||||
got, err := client.Template(ctx, template.ID)
|
||||
require.NoError(t, err)
|
||||
require.False(t, got.Deleted)
|
||||
})
|
||||
|
||||
t.Run("DeletedIsTrue", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
err := client.DeleteTemplate(ctx, template.ID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the deleted field is set to true by listing templates with
|
||||
// deleted:true filter.
|
||||
templates, err := client.Templates(ctx, codersdk.TemplateFilter{
|
||||
OrganizationID: user.OrganizationID,
|
||||
SearchQuery: "deleted:true",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, templates, 1)
|
||||
require.Equal(t, template.ID, templates[0].ID)
|
||||
require.True(t, templates[0].Deleted)
|
||||
})
|
||||
}
|
||||
|
||||
func TestTemplateMetrics(t *testing.T) {
|
||||
|
||||
@@ -349,7 +349,7 @@ func TestDeleteUser(t *testing.T) {
|
||||
err := client.DeleteUser(context.Background(), firstUser.UserID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusBadRequest, apiErr.StatusCode())
|
||||
require.Equal(t, http.StatusNotFound, apiErr.StatusCode())
|
||||
})
|
||||
t.Run("HasWorkspaces", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
@@ -1010,7 +1010,7 @@ func TestUpdateUserProfile(t *testing.T) {
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
// Right now, we are raising a BAD request error because we don't support a
|
||||
// user accessing other users info
|
||||
require.Equal(t, http.StatusBadRequest, apiErr.StatusCode())
|
||||
require.Equal(t, http.StatusNotFound, apiErr.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("ConflictingUsername", func(t *testing.T) {
|
||||
@@ -2602,7 +2602,7 @@ func TestUserAutofillParameters(t *testing.T) {
|
||||
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusBadRequest, apiErr.StatusCode())
|
||||
require.Equal(t, http.StatusNotFound, apiErr.StatusCode())
|
||||
|
||||
// u1 should be able to read u2's parameters as u1 is site admin.
|
||||
_, err = client1.UserAutofillParameters(
|
||||
|
||||
@@ -68,27 +68,30 @@ func SubdomainAppSessionTokenCookie(hostname string) string {
|
||||
// the wrong value.
|
||||
//
|
||||
// We use different cookie names for:
|
||||
// - path apps on primary access URL: coder_session_token
|
||||
// - path apps on proxies: coder_path_app_session_token
|
||||
// - path apps: coder_path_app_session_token
|
||||
// - subdomain apps: coder_subdomain_app_session_token_{unique_hash}
|
||||
//
|
||||
// First we try the default function to get a token from request, which supports
|
||||
// query parameters, the Coder-Session-Token header and the coder_session_token
|
||||
// cookie.
|
||||
//
|
||||
// Then we try the specific cookie name for the access method.
|
||||
// We prefer the access-method-specific cookie first, then fall back to standard
|
||||
// Coder token extraction (query parameters, Coder-Session-Token header, etc.).
|
||||
func (c AppCookies) TokenFromRequest(r *http.Request, accessMethod AccessMethod) string {
|
||||
// Try the default function first.
|
||||
token := httpmw.APITokenFromRequest(r)
|
||||
if token != "" {
|
||||
return token
|
||||
}
|
||||
|
||||
// Then try the specific cookie name for the access method.
|
||||
// Prefer the access-method-specific cookie first.
|
||||
//
|
||||
// Workspace app requests commonly include an `Authorization` header intended
|
||||
// for the upstream app (e.g. API calls). `httpmw.APITokenFromRequest` supports
|
||||
// RFC 6750 bearer tokens, so if we consult it first we'd incorrectly treat
|
||||
// that upstream header as a Coder session token and ignore the app session
|
||||
// cookie, breaking token renewal for subdomain apps.
|
||||
cookie, err := r.Cookie(c.CookieNameForAccessMethod(accessMethod))
|
||||
if err == nil && cookie.Value != "" {
|
||||
return cookie.Value
|
||||
}
|
||||
|
||||
// Fall back to standard Coder token extraction (session cookie, query param,
|
||||
// Coder-Session-Token header, and then Authorization: Bearer).
|
||||
token := httpmw.APITokenFromRequest(r)
|
||||
if token != "" {
|
||||
return token
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package workspaceapps_test
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -32,3 +34,19 @@ func TestAppCookies(t *testing.T) {
|
||||
newCookies := workspaceapps.NewAppCookies("different.com")
|
||||
require.NotEqual(t, cookies.SubdomainAppSessionToken, newCookies.SubdomainAppSessionToken)
|
||||
}
|
||||
|
||||
func TestAppCookies_TokenFromRequest_PrefersAppCookieOverAuthorizationBearer(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cookies := workspaceapps.NewAppCookies("apps.example.com")
|
||||
|
||||
req := httptest.NewRequest("GET", "https://8081--agent--workspace--user.apps.example.com/", nil)
|
||||
req.Header.Set("Authorization", "Bearer whatever")
|
||||
req.AddCookie(&http.Cookie{
|
||||
Name: cookies.CookieNameForAccessMethod(workspaceapps.AccessMethodSubdomain),
|
||||
Value: "subdomain-session-token",
|
||||
})
|
||||
|
||||
got := cookies.TokenFromRequest(req, workspaceapps.AccessMethodSubdomain)
|
||||
require.Equal(t, "subdomain-session-token", got)
|
||||
}
|
||||
|
||||
@@ -382,9 +382,10 @@ func (api *API) postWorkspaceBuildsInternal(
|
||||
LogLevel(string(createBuild.LogLevel)).
|
||||
DeploymentValues(api.Options.DeploymentValues).
|
||||
Experiments(api.Experiments).
|
||||
TemplateVersionPresetID(createBuild.TemplateVersionPresetID)
|
||||
TemplateVersionPresetID(createBuild.TemplateVersionPresetID).
|
||||
BuildMetrics(api.WorkspaceBuilderMetrics)
|
||||
|
||||
if transition == database.WorkspaceTransitionStart && createBuild.Reason != "" {
|
||||
if (transition == database.WorkspaceTransitionStart || transition == database.WorkspaceTransitionStop) && createBuild.Reason != "" {
|
||||
builder = builder.Reason(database.BuildReason(createBuild.Reason))
|
||||
}
|
||||
|
||||
|
||||
@@ -787,7 +787,8 @@ func createWorkspace(
|
||||
ActiveVersion().
|
||||
Experiments(api.Experiments).
|
||||
DeploymentValues(api.DeploymentValues).
|
||||
RichParameterValues(req.RichParameterValues)
|
||||
RichParameterValues(req.RichParameterValues).
|
||||
BuildMetrics(api.WorkspaceBuilderMetrics)
|
||||
if req.TemplateVersionID != uuid.Nil {
|
||||
builder = builder.VersionID(req.TemplateVersionID)
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
@@ -21,7 +22,9 @@ import (
|
||||
"github.com/coder/coder/v2/agent/agenttest"
|
||||
"github.com/coder/coder/v2/coderd"
|
||||
"github.com/coder/coder/v2/coderd/audit"
|
||||
"github.com/coder/coder/v2/coderd/autobuild"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest/promhelp"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
@@ -30,6 +33,7 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/coderd/notifications"
|
||||
"github.com/coder/coder/v2/coderd/notifications/notificationstest"
|
||||
"github.com/coder/coder/v2/coderd/provisionerdserver"
|
||||
"github.com/coder/coder/v2/coderd/rbac"
|
||||
"github.com/coder/coder/v2/coderd/rbac/policy"
|
||||
"github.com/coder/coder/v2/coderd/render"
|
||||
@@ -37,6 +41,7 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/schedule/cron"
|
||||
"github.com/coder/coder/v2/coderd/util/ptr"
|
||||
"github.com/coder/coder/v2/coderd/util/slice"
|
||||
"github.com/coder/coder/v2/coderd/wsbuilder"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/cryptorand"
|
||||
"github.com/coder/coder/v2/provisioner/echo"
|
||||
@@ -5901,3 +5906,135 @@ func TestWorkspaceCreateWithImplicitPreset(t *testing.T) {
|
||||
require.Equal(t, preset2ID, *ws2.LatestBuild.TemplateVersionPresetID)
|
||||
})
|
||||
}
|
||||
|
||||
func TestProvisionerJobQueueWaitMetric(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
logger := testutil.Logger(t)
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := provisionerdserver.NewMetrics(logger)
|
||||
err := metrics.Register(reg)
|
||||
require.NoError(t, err)
|
||||
|
||||
client := coderdtest.New(t, &coderdtest.Options{
|
||||
IncludeProvisionerDaemon: true,
|
||||
ProvisionerdServerMetrics: metrics,
|
||||
})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Create a template version - this triggers a template_version_import job.
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
|
||||
// Check that the queue wait metric was recorded for the template_version_import job.
|
||||
importMetric := promhelp.MetricValue(t, reg, "coderd_provisioner_job_queue_wait_seconds", prometheus.Labels{
|
||||
"provisioner_type": string(database.ProvisionerTypeEcho),
|
||||
"job_type": string(database.ProvisionerJobTypeTemplateVersionImport),
|
||||
"transition": "",
|
||||
"build_reason": "",
|
||||
})
|
||||
require.NotNil(t, importMetric, "import job metric should be recorded")
|
||||
importHistogram := importMetric.GetHistogram()
|
||||
require.NotNil(t, importHistogram)
|
||||
require.Equal(t, uint64(1), importHistogram.GetSampleCount(), "import job should have 1 sample")
|
||||
require.Greater(t, importHistogram.GetSampleSum(), 0.0, "import job queue wait should be non-zero")
|
||||
|
||||
// Create a template and workspace - this triggers a workspace_build job.
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, client, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
// Check that the queue wait metric was recorded for the workspace_build job.
|
||||
buildMetric := promhelp.MetricValue(t, reg, "coderd_provisioner_job_queue_wait_seconds", prometheus.Labels{
|
||||
"provisioner_type": string(database.ProvisionerTypeEcho),
|
||||
"job_type": string(database.ProvisionerJobTypeWorkspaceBuild),
|
||||
"transition": string(database.WorkspaceTransitionStart),
|
||||
"build_reason": string(database.BuildReasonInitiator),
|
||||
})
|
||||
require.NotNil(t, buildMetric, "workspace build job metric should be recorded")
|
||||
buildHistogram := buildMetric.GetHistogram()
|
||||
require.NotNil(t, buildHistogram)
|
||||
require.Equal(t, uint64(1), buildHistogram.GetSampleCount(), "workspace build job should have 1 sample")
|
||||
require.Greater(t, buildHistogram.GetSampleSum(), 0.0, "workspace build job queue wait should be non-zero")
|
||||
}
|
||||
|
||||
func TestWorkspaceBuildsEnqueuedMetric(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
logger = testutil.Logger(t)
|
||||
reg = prometheus.NewRegistry()
|
||||
metrics = provisionerdserver.NewMetrics(logger)
|
||||
|
||||
sched = mustSchedule(t, "CRON_TZ=UTC 0 * * * *")
|
||||
tickCh = make(chan time.Time)
|
||||
statsCh = make(chan autobuild.Stats)
|
||||
)
|
||||
|
||||
err := metrics.Register(reg)
|
||||
require.NoError(t, err)
|
||||
|
||||
wsBuilderMetrics, err := wsbuilder.NewMetrics(reg)
|
||||
require.NoError(t, err)
|
||||
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
IncludeProvisionerDaemon: true,
|
||||
ProvisionerdServerMetrics: metrics,
|
||||
WorkspaceBuilderMetrics: wsBuilderMetrics,
|
||||
AutobuildTicker: tickCh,
|
||||
AutobuildStats: statsCh,
|
||||
})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Create a template and workspace with autostart schedule.
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, client, template.ID, func(cwr *codersdk.CreateWorkspaceRequest) {
|
||||
cwr.AutostartSchedule = ptr.Ref(sched.String())
|
||||
})
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
// Stop the workspace to prepare for autostart.
|
||||
workspace = coderdtest.MustTransitionWorkspace(t, client, workspace.ID, codersdk.WorkspaceTransitionStart, codersdk.WorkspaceTransitionStop)
|
||||
|
||||
// Trigger an autostart build via the autobuild ticker. This verifies that
|
||||
// autostart builds are recorded with build_reason="autostart".
|
||||
p, err := coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, map[string]string{})
|
||||
require.NoError(t, err)
|
||||
|
||||
go func() {
|
||||
tickTime := sched.Next(workspace.LatestBuild.CreatedAt)
|
||||
coderdtest.UpdateProvisionerLastSeenAt(t, db, p.ID, tickTime)
|
||||
tickCh <- tickTime
|
||||
close(tickCh)
|
||||
}()
|
||||
|
||||
// Wait for the autostart to complete.
|
||||
stats := <-statsCh
|
||||
require.Len(t, stats.Errors, 0)
|
||||
require.Len(t, stats.Transitions, 1)
|
||||
require.Contains(t, stats.Transitions, workspace.ID)
|
||||
require.Equal(t, database.WorkspaceTransitionStart, stats.Transitions[workspace.ID])
|
||||
|
||||
// Verify the workspace was autostarted.
|
||||
workspace = coderdtest.MustWorkspace(t, client, workspace.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
require.Equal(t, codersdk.BuildReasonAutostart, workspace.LatestBuild.Reason)
|
||||
|
||||
// Now check the autostart metric was recorded.
|
||||
autostartCount := promhelp.CounterValue(t, reg, "coderd_workspace_builds_enqueued_total", prometheus.Labels{
|
||||
"provisioner_type": string(database.ProvisionerTypeEcho),
|
||||
"build_reason": string(database.BuildReasonAutostart),
|
||||
"transition": string(database.WorkspaceTransitionStart),
|
||||
"status": wsbuilder.BuildStatusSuccess,
|
||||
})
|
||||
require.Equal(t, 1, autostartCount, "autostart should record 1 enqueue with build_reason=autostart")
|
||||
}
|
||||
|
||||
// mustSchedule parses s as a cron specification via cron.Weekly and fails the
// test immediately if the spec is invalid.
func mustSchedule(t *testing.T, s string) *cron.Schedule {
	t.Helper()
	sched, err := cron.Weekly(s)
	require.NoError(t, err)
	return sched
}
|
||||
|
||||
@@ -0,0 +1,42 @@
|
||||
package wsbuilder

import "github.com/prometheus/client_golang/prometheus"

// Metrics holds metrics related to workspace build creation.
type Metrics struct {
	// workspaceBuildsEnqueued counts workspace build enqueue attempts,
	// labeled by provisioner_type, build_reason, transition, and status.
	workspaceBuildsEnqueued *prometheus.CounterVec
}
|
||||
|
||||
// Metric label values for build status.
const (
	// BuildStatusSuccess is the "status" label value for builds that were
	// enqueued without error.
	BuildStatusSuccess = "success"
	// BuildStatusFailed is the "status" label value for enqueue attempts
	// that returned an error.
	BuildStatusFailed = "failed"
)
|
||||
|
||||
func NewMetrics(reg prometheus.Registerer) (*Metrics, error) {
|
||||
m := &Metrics{
|
||||
workspaceBuildsEnqueued: prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: "coderd",
|
||||
Name: "workspace_builds_enqueued_total",
|
||||
Help: "Total number of workspace build enqueue attempts.",
|
||||
}, []string{"provisioner_type", "build_reason", "transition", "status"}),
|
||||
}
|
||||
|
||||
if reg != nil {
|
||||
if err := reg.Register(m.workspaceBuildsEnqueued); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// RecordBuildEnqueued records a workspace build enqueue attempt. It determines
|
||||
// the status based on whether an error occurred and increments the counter.
|
||||
func (m *Metrics) RecordBuildEnqueued(provisionerType, buildReason, transition string, err error) {
|
||||
status := BuildStatusSuccess
|
||||
if err != nil {
|
||||
status = BuildStatusFailed
|
||||
}
|
||||
m.workspaceBuildsEnqueued.WithLabelValues(provisionerType, buildReason, transition, status).Inc()
|
||||
}
|
||||
@@ -90,6 +90,8 @@ type Builder struct {
|
||||
|
||||
prebuiltWorkspaceBuildStage sdkproto.PrebuiltWorkspaceBuildStage
|
||||
verifyNoLegacyParametersOnce bool
|
||||
|
||||
buildMetrics *Metrics
|
||||
}
|
||||
|
||||
type UsageChecker interface {
|
||||
@@ -253,6 +255,12 @@ func (b Builder) TemplateVersionPresetID(id uuid.UUID) Builder {
|
||||
return b
|
||||
}
|
||||
|
||||
// BuildMetrics returns a copy of the Builder that records build enqueue
// metrics to m. A nil m leaves metric recording disabled.
func (b Builder) BuildMetrics(m *Metrics) Builder {
	// nolint: revive
	b.buildMetrics = m
	return b
}
|
||||
|
||||
type BuildError struct {
|
||||
// Status is a suitable HTTP status code
|
||||
Status int
|
||||
@@ -313,11 +321,34 @@ func (b *Builder) Build(
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
b.recordBuildMetrics(provisionerJob, err)
|
||||
return nil, nil, nil, xerrors.Errorf("build tx: %w", err)
|
||||
}
|
||||
b.recordBuildMetrics(provisionerJob, nil)
|
||||
return workspaceBuild, provisionerJob, provisionerDaemons, nil
|
||||
}
|
||||
|
||||
// recordBuildMetrics records the workspace build enqueue metric if metrics are
|
||||
// configured. It determines the appropriate build reason label, using "prebuild"
|
||||
// for prebuild operations instead of the database reason.
|
||||
func (b *Builder) recordBuildMetrics(job *database.ProvisionerJob, err error) {
|
||||
if b.buildMetrics == nil {
|
||||
return
|
||||
}
|
||||
if job == nil || !job.Provisioner.Valid() {
|
||||
return
|
||||
}
|
||||
|
||||
// Determine the build reason for metrics. Prebuilds use BuildReasonInitiator
|
||||
// in the database but we want to track them separately in metrics.
|
||||
buildReason := string(b.reason)
|
||||
if b.prebuiltWorkspaceBuildStage == sdkproto.PrebuiltWorkspaceBuildStage_CREATE {
|
||||
buildReason = provisionerdserver.BuildReasonPrebuild
|
||||
}
|
||||
|
||||
b.buildMetrics.RecordBuildEnqueued(string(job.Provisioner), buildReason, string(b.trans), err)
|
||||
}
|
||||
|
||||
// buildTx contains the business logic of computing a new build. Attributes of the new database objects are computed
|
||||
// in a functional style, rather than imperative, to emphasize the logic of how they are defined. A simple cache
|
||||
// of database-fetched objects is stored on the struct to ensure we only fetch things once, even if they are used in
|
||||
|
||||
@@ -329,6 +329,54 @@ func (c *Client) UpdateTaskInput(ctx context.Context, user string, id uuid.UUID,
|
||||
return nil
|
||||
}
|
||||
|
||||
// PauseTaskResponse represents the response from pausing a task.
type PauseTaskResponse struct {
	// WorkspaceBuild is the workspace build created by the pause request.
	WorkspaceBuild *WorkspaceBuild `json:"workspace_build"`
}
|
||||
|
||||
// PauseTask pauses a task by stopping its workspace.
|
||||
// Experimental: uses the /api/experimental endpoint.
|
||||
func (c *Client) PauseTask(ctx context.Context, user string, id uuid.UUID) (PauseTaskResponse, error) {
|
||||
res, err := c.Request(ctx, http.MethodPost, fmt.Sprintf("/api/experimental/tasks/%s/%s/pause", user, id.String()), nil)
|
||||
if err != nil {
|
||||
return PauseTaskResponse{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusAccepted {
|
||||
return PauseTaskResponse{}, ReadBodyAsError(res)
|
||||
}
|
||||
|
||||
var resp PauseTaskResponse
|
||||
if err := json.NewDecoder(res.Body).Decode(&resp); err != nil {
|
||||
return PauseTaskResponse{}, err
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// ResumeTaskResponse represents the response from resuming a task.
type ResumeTaskResponse struct {
	// WorkspaceBuild is the workspace build created by the resume request.
	WorkspaceBuild *WorkspaceBuild `json:"workspace_build"`
}
|
||||
|
||||
// ResumeTask resumes a task.
// Experimental: uses the /api/experimental endpoint.
func (c *Client) ResumeTask(ctx context.Context, user string, id uuid.UUID) (ResumeTaskResponse, error) {
	res, err := c.Request(ctx, http.MethodPost, fmt.Sprintf("/api/experimental/tasks/%s/%s/resume", user, id.String()), nil)
	if err != nil {
		return ResumeTaskResponse{}, err
	}
	defer res.Body.Close()
	// The resume endpoint responds with 202 Accepted on success.
	if res.StatusCode != http.StatusAccepted {
		return ResumeTaskResponse{}, ReadBodyAsError(res)
	}

	var resp ResumeTaskResponse
	if err := json.NewDecoder(res.Body).Decode(&resp); err != nil {
		return ResumeTaskResponse{}, err
	}

	return resp, nil
}
|
||||
|
||||
// TaskLogType indicates the source of a task log entry.
type TaskLogType string
|
||||
|
||||
|
||||
@@ -32,6 +32,7 @@ type Template struct {
|
||||
Description string `json:"description"`
|
||||
Deprecated bool `json:"deprecated"`
|
||||
DeprecationMessage string `json:"deprecation_message"`
|
||||
Deleted bool `json:"deleted"`
|
||||
Icon string `json:"icon"`
|
||||
DefaultTTLMillis int64 `json:"default_ttl_ms"`
|
||||
ActivityBumpMillis int64 `json:"activity_bump_ms"`
|
||||
|
||||
@@ -109,6 +109,8 @@ const (
|
||||
CreateWorkspaceBuildReasonSSHConnection CreateWorkspaceBuildReason = "ssh_connection"
|
||||
CreateWorkspaceBuildReasonVSCodeConnection CreateWorkspaceBuildReason = "vscode_connection"
|
||||
CreateWorkspaceBuildReasonJetbrainsConnection CreateWorkspaceBuildReason = "jetbrains_connection"
|
||||
CreateWorkspaceBuildReasonTaskManualPause CreateWorkspaceBuildReason = "task_manual_pause"
|
||||
CreateWorkspaceBuildReasonTaskResume CreateWorkspaceBuildReason = "task_resume"
|
||||
)
|
||||
|
||||
// CreateWorkspaceBuildRequest provides options to update the latest workspace build.
|
||||
@@ -129,7 +131,7 @@ type CreateWorkspaceBuildRequest struct {
|
||||
// TemplateVersionPresetID is the ID of the template version preset to use for the build.
|
||||
TemplateVersionPresetID uuid.UUID `json:"template_version_preset_id,omitempty" format:"uuid"`
|
||||
// Reason sets the reason for the workspace build.
|
||||
Reason CreateWorkspaceBuildReason `json:"reason,omitempty" validate:"omitempty,oneof=dashboard cli ssh_connection vscode_connection jetbrains_connection"`
|
||||
Reason CreateWorkspaceBuildReason `json:"reason,omitempty" validate:"omitempty,oneof=dashboard cli ssh_connection vscode_connection jetbrains_connection task_manual_pause"`
|
||||
}
|
||||
|
||||
type WorkspaceOptions struct {
|
||||
|
||||
@@ -119,9 +119,7 @@ this:
|
||||
- Run `./scripts/deploy-pr.sh`
|
||||
- Manually trigger the
|
||||
[`pr-deploy.yaml`](https://github.com/coder/coder/actions/workflows/pr-deploy.yaml)
|
||||
GitHub Action workflow:
|
||||
|
||||
<Image src="./images/deploy-pr-manually.png" alt="Deploy PR manually" height="348px" align="center" />
|
||||
GitHub Action workflow.
|
||||
|
||||
#### Available options
|
||||
|
||||
|
||||
@@ -220,16 +220,12 @@ screen-readers; a placeholder text value is not enough for all users.
|
||||
When possible, make sure that all image/graphic elements have accompanying text
|
||||
that describes the image. `<img />` elements should have an `alt` text value. In
|
||||
other situations, it might make sense to place invisible, descriptive text
|
||||
inside the component itself using MUI's `visuallyHidden` utility function.
|
||||
inside the component itself using Tailwind's `sr-only` class.
|
||||
|
||||
```tsx
|
||||
import { visuallyHidden } from "@mui/utils";
|
||||
|
||||
<Button>
|
||||
<GearIcon />
|
||||
<Box component="span" sx={visuallyHidden}>
|
||||
Settings
|
||||
</Box>
|
||||
<span className="sr-only">Settings</span>
|
||||
</Button>;
|
||||
```
|
||||
|
||||
|
||||
@@ -104,105 +104,170 @@ deployment. They will always be available from the agent.
|
||||
|
||||
<!-- Code generated by 'make docs/admin/integrations/prometheus.md'. DO NOT EDIT -->
|
||||
|
||||
| Name | Type | Description | Labels |
|
||||
|---------------------------------------------------------------|-----------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------|
|
||||
| `agent_scripts_executed_total` | counter | Total number of scripts executed by the Coder agent. Includes cron scheduled scripts. | `agent_name` `success` `template_name` `username` `workspace_name` |
|
||||
| `coder_aibridged_injected_tool_invocations_total` | counter | The number of times an injected MCP tool was invoked by aibridge. | `model` `name` `provider` `server` |
|
||||
| `coder_aibridged_interceptions_duration_seconds` | histogram | The total duration of intercepted requests, in seconds. The majority of this time will be the upstream processing of the request. aibridge has no control over upstream processing time, so it's just an illustrative metric. | `model` `provider` |
|
||||
| `coder_aibridged_interceptions_inflight` | gauge | The number of intercepted requests which are being processed. | `model` `provider` `route` |
|
||||
| `coder_aibridged_interceptions_total` | counter | The count of intercepted requests. | `initiator_id` `method` `model` `provider` `route` `status` |
|
||||
| `coder_aibridged_non_injected_tool_selections_total` | counter | The number of times an AI model selected a tool to be invoked by the client. | `model` `name` `provider` |
|
||||
| `coder_aibridged_prompts_total` | counter | The number of prompts issued by users (initiators). | `initiator_id` `model` `provider` |
|
||||
| `coder_aibridged_tokens_total` | counter | The number of tokens used by intercepted requests. | `initiator_id` `model` `provider` `type` |
|
||||
| `coderd_agentapi_metadata_batch_size` | histogram | Total number of metadata entries in each batch, updated before flushes. | |
|
||||
| `coderd_agentapi_metadata_batch_utilization` | histogram | Number of metadata keys per agent in each batch, updated before flushes. | |
|
||||
| `coderd_agentapi_metadata_batches_total` | counter | Total number of metadata batches flushed. | `reason` |
|
||||
| `coderd_agentapi_metadata_dropped_keys_total` | counter | Total number of metadata keys dropped due to capacity limits. | |
|
||||
| `coderd_agentapi_metadata_flush_duration_seconds` | histogram | Time taken to flush metadata batch to database and pubsub. | `reason` |
|
||||
| `coderd_agentapi_metadata_flushed_total` | counter | Total number of unique metadatas flushed. | |
|
||||
| `coderd_agentapi_metadata_publish_errors_total` | counter | Total number of metadata batch pubsub publish calls that have resulted in an error. | |
|
||||
| `coderd_agents_apps` | gauge | Agent applications with statuses. | `agent_name` `app_name` `health` `username` `workspace_name` |
|
||||
| `coderd_agents_connection_latencies_seconds` | gauge | Agent connection latencies in seconds. | `agent_name` `derp_region` `preferred` `username` `workspace_name` |
|
||||
| `coderd_agents_connections` | gauge | Agent connections with statuses. | `agent_name` `lifecycle_state` `status` `tailnet_node` `username` `workspace_name` |
|
||||
| `coderd_agents_up` | gauge | The number of active agents per workspace. | `template_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_connection_count` | gauge | The number of established connections by agent | `agent_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_connection_median_latency_seconds` | gauge | The median agent connection latency | `agent_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_currently_reachable_peers` | gauge | The number of peers (e.g. clients) that are currently reachable over the encrypted network. | `agent_name` `connection_type` `template_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_rx_bytes` | gauge | Agent Rx bytes | `agent_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_session_count_jetbrains` | gauge | The number of session established by JetBrains | `agent_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_session_count_reconnecting_pty` | gauge | The number of session established by reconnecting PTY | `agent_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_session_count_ssh` | gauge | The number of session established by SSH | `agent_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_session_count_vscode` | gauge | The number of session established by VSCode | `agent_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_startup_script_seconds` | gauge | The number of seconds the startup script took to execute. | `agent_name` `success` `template_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_tx_bytes` | gauge | Agent Tx bytes | `agent_name` `username` `workspace_name` |
|
||||
| `coderd_api_active_users_duration_hour` | gauge | The number of users that have been active within the last hour. | |
|
||||
| `coderd_api_concurrent_requests` | gauge | The number of concurrent API requests. | |
|
||||
| `coderd_api_concurrent_websockets` | gauge | The total number of concurrent API websockets. | |
|
||||
| `coderd_api_request_latencies_seconds` | histogram | Latency distribution of requests in seconds. | `method` `path` |
|
||||
| `coderd_api_requests_processed_total` | counter | The total number of processed API requests | `code` `method` `path` |
|
||||
| `coderd_api_websocket_durations_seconds` | histogram | Websocket duration distribution of requests in seconds. | `path` |
|
||||
| `coderd_api_workspace_latest_build` | gauge | The latest workspace builds with a status. | `status` |
|
||||
| `coderd_insights_applications_usage_seconds` | gauge | The application usage per template. | `application_name` `slug` `template_name` |
|
||||
| `coderd_insights_parameters` | gauge | The parameter usage per template. | `parameter_name` `parameter_type` `parameter_value` `template_name` |
|
||||
| `coderd_insights_templates_active_users` | gauge | The number of active users of the template. | `template_name` |
|
||||
| `coderd_license_active_users` | gauge | The number of active users. | |
|
||||
| `coderd_license_errors` | gauge | The number of active license errors. | |
|
||||
| `coderd_license_limit_users` | gauge | The user seats limit based on the active Coder license. | |
|
||||
| `coderd_license_user_limit_enabled` | gauge | Returns 1 if the current license enforces the user limit. | |
|
||||
| `coderd_license_warnings` | gauge | The number of active license warnings. | |
|
||||
| `coderd_metrics_collector_agents_execution_seconds` | histogram | Histogram for duration of agents metrics collection in seconds. | |
|
||||
| `coderd_oauth2_external_requests_rate_limit` | gauge | The total number of allowed requests per interval. | `name` `resource` |
|
||||
| `coderd_oauth2_external_requests_rate_limit_next_reset_unix` | gauge | Unix timestamp of the next interval | `name` `resource` |
|
||||
| `coderd_oauth2_external_requests_rate_limit_remaining` | gauge | The remaining number of allowed requests in this interval. | `name` `resource` |
|
||||
| `coderd_oauth2_external_requests_rate_limit_reset_in_seconds` | gauge | Seconds until the next interval | `name` `resource` |
|
||||
| `coderd_oauth2_external_requests_rate_limit_used` | gauge | The number of requests made in this interval. | `name` `resource` |
|
||||
| `coderd_oauth2_external_requests_total` | counter | The total number of api calls made to external oauth2 providers. 'status_code' will be 0 if the request failed with no response. | `name` `source` `status_code` |
|
||||
| `coderd_prebuilt_workspace_claim_duration_seconds` | histogram | Time to claim a prebuilt workspace by organization, template, and preset. | `organization_name` `preset_name` `template_name` |
|
||||
| `coderd_provisionerd_job_timings_seconds` | histogram | The provisioner job time duration in seconds. | `provisioner` `status` |
|
||||
| `coderd_provisionerd_jobs_current` | gauge | The number of currently running provisioner jobs. | `provisioner` |
|
||||
| `coderd_provisionerd_num_daemons` | gauge | The number of provisioner daemons. | |
|
||||
| `coderd_provisionerd_workspace_build_timings_seconds` | histogram | The time taken for a workspace to build. | `status` `template_name` `template_version` `workspace_transition` |
|
||||
| `coderd_template_workspace_build_duration_seconds` | histogram | Duration from workspace build creation to agent ready, by template. | `is_prebuild` `organization_name` `status` `template_name` `transition` |
|
||||
| `coderd_workspace_builds_total` | counter | The number of workspaces started, updated, or deleted. | `action` `owner_email` `status` `template_name` `template_version` `workspace_name` |
|
||||
| `coderd_workspace_creation_duration_seconds` | histogram | Time to create a workspace by organization, template, preset, and type (regular or prebuild). | `organization_name` `preset_name` `template_name` `type` |
|
||||
| `coderd_workspace_creation_total` | counter | Total regular (non-prebuilt) workspace creations by organization, template, and preset. | `organization_name` `preset_name` `template_name` |
|
||||
| `coderd_workspace_latest_build_status` | gauge | The current workspace statuses by template, transition, and owner. | `status` `template_name` `template_version` `workspace_owner` `workspace_transition` |
|
||||
| `go_gc_duration_seconds` | summary | A summary of the pause duration of garbage collection cycles. | |
|
||||
| `go_goroutines` | gauge | Number of goroutines that currently exist. | |
|
||||
| `go_info` | gauge | Information about the Go environment. | `version` |
|
||||
| `go_memstats_alloc_bytes` | gauge | Number of bytes allocated and still in use. | |
|
||||
| `go_memstats_alloc_bytes_total` | counter | Total number of bytes allocated, even if freed. | |
|
||||
| `go_memstats_buck_hash_sys_bytes` | gauge | Number of bytes used by the profiling bucket hash table. | |
|
||||
| `go_memstats_frees_total` | counter | Total number of frees. | |
|
||||
| `go_memstats_gc_sys_bytes` | gauge | Number of bytes used for garbage collection system metadata. | |
|
||||
| `go_memstats_heap_alloc_bytes` | gauge | Number of heap bytes allocated and still in use. | |
|
||||
| `go_memstats_heap_idle_bytes` | gauge | Number of heap bytes waiting to be used. | |
|
||||
| `go_memstats_heap_inuse_bytes` | gauge | Number of heap bytes that are in use. | |
|
||||
| `go_memstats_heap_objects` | gauge | Number of allocated objects. | |
|
||||
| `go_memstats_heap_released_bytes` | gauge | Number of heap bytes released to OS. | |
|
||||
| `go_memstats_heap_sys_bytes` | gauge | Number of heap bytes obtained from system. | |
|
||||
| `go_memstats_last_gc_time_seconds` | gauge | Number of seconds since 1970 of last garbage collection. | |
|
||||
| `go_memstats_lookups_total` | counter | Total number of pointer lookups. | |
|
||||
| `go_memstats_mallocs_total` | counter | Total number of mallocs. | |
|
||||
| `go_memstats_mcache_inuse_bytes` | gauge | Number of bytes in use by mcache structures. | |
|
||||
| `go_memstats_mcache_sys_bytes` | gauge | Number of bytes used for mcache structures obtained from system. | |
|
||||
| `go_memstats_mspan_inuse_bytes` | gauge | Number of bytes in use by mspan structures. | |
|
||||
| `go_memstats_mspan_sys_bytes` | gauge | Number of bytes used for mspan structures obtained from system. | |
|
||||
| `go_memstats_next_gc_bytes` | gauge | Number of heap bytes when next garbage collection will take place. | |
|
||||
| `go_memstats_other_sys_bytes` | gauge | Number of bytes used for other system allocations. | |
|
||||
| `go_memstats_stack_inuse_bytes` | gauge | Number of bytes in use by the stack allocator. | |
|
||||
| `go_memstats_stack_sys_bytes` | gauge | Number of bytes obtained from system for stack allocator. | |
|
||||
| `go_memstats_sys_bytes` | gauge | Number of bytes obtained from system. | |
|
||||
| `go_threads` | gauge | Number of OS threads created. | |
|
||||
| `process_cpu_seconds_total` | counter | Total user and system CPU time spent in seconds. | |
|
||||
| `process_max_fds` | gauge | Maximum number of open file descriptors. | |
|
||||
| `process_open_fds` | gauge | Number of open file descriptors. | |
|
||||
| `process_resident_memory_bytes` | gauge | Resident memory size in bytes. | |
|
||||
| `process_start_time_seconds` | gauge | Start time of the process since unix epoch in seconds. | |
|
||||
| `process_virtual_memory_bytes` | gauge | Virtual memory size in bytes. | |
|
||||
| `process_virtual_memory_max_bytes` | gauge | Maximum amount of virtual memory available in bytes. | |
|
||||
| `promhttp_metric_handler_requests_in_flight` | gauge | Current number of scrapes being served. | |
|
||||
| `promhttp_metric_handler_requests_total` | counter | Total number of scrapes by HTTP status code. | `code` |
|
||||
| Name | Type | Description | Labels |
|
||||
|-------------------------------------------------------------------------|-----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------|
|
||||
| `agent_scripts_executed_total` | counter | Total number of scripts executed by the Coder agent. Includes cron scheduled scripts. | `agent_name` `success` `template_name` `username` `workspace_name` |
|
||||
| `coder_aibridged_circuit_breaker_rejects_total` | counter | Total number of requests rejected due to open circuit breaker. | `endpoint` `model` `provider` |
|
||||
| `coder_aibridged_circuit_breaker_state` | gauge | Current state of the circuit breaker (0=closed, 0.5=half-open, 1=open). | `endpoint` `model` `provider` |
|
||||
| `coder_aibridged_circuit_breaker_trips_total` | counter | Total number of times the circuit breaker transitioned to open state. | `endpoint` `model` `provider` |
|
||||
| `coder_aibridged_injected_tool_invocations_total` | counter | The number of times an injected MCP tool was invoked by aibridge. | `model` `name` `provider` `server` |
|
||||
| `coder_aibridged_interceptions_duration_seconds` | histogram | The total duration of intercepted requests, in seconds. The majority of this time will be the upstream processing of the request. aibridge has no control over upstream processing time, so it's just an illustrative metric. | `model` `provider` |
|
||||
| `coder_aibridged_interceptions_inflight` | gauge | The number of intercepted requests which are being processed. | `model` `provider` `route` |
|
||||
| `coder_aibridged_interceptions_total` | counter | The count of intercepted requests. | `initiator_id` `method` `model` `provider` `route` `status` |
|
||||
| `coder_aibridged_non_injected_tool_selections_total` | counter | The number of times an AI model selected a tool to be invoked by the client. | `model` `name` `provider` |
|
||||
| `coder_aibridged_passthrough_total` | counter | The count of requests which were not intercepted but passed through to the upstream. | `method` `provider` `route` |
|
||||
| `coder_aibridged_prompts_total` | counter | The number of prompts issued by users (initiators). | `initiator_id` `model` `provider` |
|
||||
| `coder_aibridged_tokens_total` | counter | The number of tokens used by intercepted requests. | `initiator_id` `model` `provider` `type` |
|
||||
| `coder_aibridgeproxyd_connect_sessions_total` | counter | Total number of CONNECT sessions established. | `type` |
|
||||
| `coder_aibridgeproxyd_inflight_mitm_requests` | gauge | Number of MITM requests currently being processed. | `provider` |
|
||||
| `coder_aibridgeproxyd_mitm_requests_total` | counter | Total number of MITM requests handled by the proxy. | `provider` |
|
||||
| `coder_aibridgeproxyd_mitm_responses_total` | counter | Total number of MITM responses by HTTP status code class. | `code` `provider` |
|
||||
| `coder_pubsub_connected` | gauge | Whether we are connected (1) or not connected (0) to postgres | |
|
||||
| `coder_pubsub_current_events` | gauge | The current number of pubsub event channels listened for | |
|
||||
| `coder_pubsub_current_subscribers` | gauge | The current number of active pubsub subscribers | |
|
||||
| `coder_pubsub_disconnections_total` | counter | Total number of times we disconnected unexpectedly from postgres | |
|
||||
| `coder_pubsub_latency_measure_errs_total` | counter | The number of pubsub latency measurement failures | |
|
||||
| `coder_pubsub_latency_measures_total` | counter | The number of pubsub latency measurements | |
|
||||
| `coder_pubsub_messages_total` | counter | Total number of messages received from postgres | `size` |
|
||||
| `coder_pubsub_published_bytes_total` | counter | Total number of bytes successfully published across all publishes | |
|
||||
| `coder_pubsub_publishes_total` | counter | Total number of calls to Publish | `success` |
|
||||
| `coder_pubsub_receive_latency_seconds` | gauge | The time taken to receive a message from a pubsub event channel | |
|
||||
| `coder_pubsub_received_bytes_total` | counter | Total number of bytes received across all messages | |
|
||||
| `coder_pubsub_send_latency_seconds` | gauge | The time taken to send a message into a pubsub event channel | |
|
||||
| `coder_pubsub_subscribes_total` | counter | Total number of calls to Subscribe/SubscribeWithErr | `success` |
|
||||
| `coder_servertailnet_connections_total` | counter | Total number of TCP connections made to workspace agents. | `network` |
|
||||
| `coder_servertailnet_open_connections` | gauge | Total number of TCP connections currently open to workspace agents. | `network` |
|
||||
| `coderd_agentapi_metadata_batch_size` | histogram | Total number of metadata entries in each batch, updated before flushes. | |
|
||||
| `coderd_agentapi_metadata_batch_utilization` | histogram | Number of metadata keys per agent in each batch, updated before flushes. | |
|
||||
| `coderd_agentapi_metadata_batches_total` | counter | Total number of metadata batches flushed. | `reason` |
|
||||
| `coderd_agentapi_metadata_dropped_keys_total` | counter | Total number of metadata keys dropped due to capacity limits. | |
|
||||
| `coderd_agentapi_metadata_flush_duration_seconds` | histogram | Time taken to flush metadata batch to database and pubsub. | `reason` |
|
||||
| `coderd_agentapi_metadata_flushed_total` | counter | Total number of unique metadatas flushed. | |
|
||||
| `coderd_agentapi_metadata_publish_errors_total` | counter | Total number of metadata batch pubsub publish calls that have resulted in an error. | |
|
||||
| `coderd_agents_apps` | gauge | Agent applications with statuses. | `agent_name` `app_name` `health` `username` `workspace_name` |
|
||||
| `coderd_agents_connection_latencies_seconds` | gauge | Agent connection latencies in seconds. | `agent_name` `derp_region` `preferred` `username` `workspace_name` |
|
||||
| `coderd_agents_connections` | gauge | Agent connections with statuses. | `agent_name` `lifecycle_state` `status` `tailnet_node` `username` `workspace_name` |
|
||||
| `coderd_agents_up` | gauge | The number of active agents per workspace. | `template_name` `template_version` `username` `workspace_name` |
|
||||
| `coderd_agentstats_connection_count` | gauge | The number of established connections by agent | `agent_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_connection_median_latency_seconds` | gauge | The median agent connection latency | `agent_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_currently_reachable_peers` | gauge | The number of peers (e.g. clients) that are currently reachable over the encrypted network. | `agent_name` `connection_type` `template_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_rx_bytes` | gauge | Agent Rx bytes | `agent_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_session_count_jetbrains` | gauge | The number of session established by JetBrains | `agent_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_session_count_reconnecting_pty` | gauge | The number of session established by reconnecting PTY | `agent_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_session_count_ssh` | gauge | The number of session established by SSH | `agent_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_session_count_vscode` | gauge | The number of session established by VSCode | `agent_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_startup_script_seconds` | gauge | The number of seconds the startup script took to execute. | `agent_name` `success` `template_name` `username` `workspace_name` |
|
||||
| `coderd_agentstats_tx_bytes` | gauge | Agent Tx bytes | `agent_name` `username` `workspace_name` |
|
||||
| `coderd_api_active_users_duration_hour` | gauge | The number of users that have been active within the last hour. | |
|
||||
| `coderd_api_concurrent_requests` | gauge | The number of concurrent API requests. | `method` `path` |
|
||||
| `coderd_api_concurrent_websockets` | gauge | The total number of concurrent API websockets. | `path` |
|
||||
| `coderd_api_request_latencies_seconds` | histogram | Latency distribution of requests in seconds. | `method` `path` |
|
||||
| `coderd_api_requests_processed_total` | counter | The total number of processed API requests | `code` `method` `path` |
|
||||
| `coderd_api_total_user_count` | gauge | The total number of registered users, partitioned by status. | `status` |
|
||||
| `coderd_api_websocket_durations_seconds` | histogram | Websocket duration distribution of requests in seconds. | `path` |
|
||||
| `coderd_api_workspace_latest_build` | gauge | The current number of workspace builds by status for all non-deleted workspaces. | `status` |
|
||||
| `coderd_authz_authorize_duration_seconds` | histogram | Duration of the 'Authorize' call in seconds. Only counts calls that succeed. | `allowed` |
|
||||
| `coderd_authz_prepare_authorize_duration_seconds` | histogram | Duration of the 'PrepareAuthorize' call in seconds. | |
|
||||
| `coderd_db_query_counts_total` | counter | Total number of queries labelled by HTTP route, method, and query name. | `method` `query` `route` |
|
||||
| `coderd_db_query_latencies_seconds` | histogram | Latency distribution of queries in seconds. | `query` |
|
||||
| `coderd_db_tx_duration_seconds` | histogram | Duration of transactions in seconds. | `success` `tx_id` |
|
||||
| `coderd_db_tx_executions_count` | counter | Total count of transactions executed. 'retries' is expected to be 0 for a successful transaction. | `retries` `success` `tx_id` |
|
||||
| `coderd_dbpurge_iteration_duration_seconds` | histogram | Duration of each dbpurge iteration in seconds. | `success` |
|
||||
| `coderd_dbpurge_records_purged_total` | counter | Total number of records purged by type. | `record_type` |
|
||||
| `coderd_experiments` | gauge | Indicates whether each experiment is enabled (1) or not (0) | `experiment` |
|
||||
| `coderd_insights_applications_usage_seconds` | gauge | The application usage per template. | `application_name` `slug` `template_name` |
|
||||
| `coderd_insights_parameters` | gauge | The parameter usage per template. | `parameter_name` `parameter_type` `parameter_value` `template_name` |
|
||||
| `coderd_insights_templates_active_users` | gauge | The number of active users of the template. | `template_name` |
|
||||
| `coderd_license_active_users` | gauge | The number of active users. | |
|
||||
| `coderd_license_errors` | gauge | The number of active license errors. | |
|
||||
| `coderd_license_limit_users` | gauge | The user seats limit based on the active Coder license. | |
|
||||
| `coderd_license_user_limit_enabled` | gauge | Returns 1 if the current license enforces the user limit. | |
|
||||
| `coderd_license_warnings` | gauge | The number of active license warnings. | |
|
||||
| `coderd_lifecycle_autobuild_execution_duration_seconds` | histogram | Duration of each autobuild execution. | |
|
||||
| `coderd_notifications_dispatcher_send_seconds` | histogram | The time taken to dispatch notifications. | `method` |
|
||||
| `coderd_notifications_inflight_dispatches` | gauge | The number of dispatch attempts which are currently in progress. | `method` `notification_template_id` |
|
||||
| `coderd_notifications_pending_updates` | gauge | The number of dispatch attempt results waiting to be flushed to the store. | |
|
||||
| `coderd_notifications_queued_seconds` | histogram | The time elapsed between a notification being enqueued in the store and retrieved for dispatching (measures the latency of the notifications system). This should generally be within CODER_NOTIFICATIONS_FETCH_INTERVAL seconds; higher values for a sustained period indicates delayed processing and CODER_NOTIFICATIONS_LEASE_COUNT can be increased to accommodate this. | `method` |
|
||||
| `coderd_notifications_retry_count` | counter | The count of notification dispatch retry attempts. | `method` `notification_template_id` |
|
||||
| `coderd_notifications_synced_updates_total` | counter | The number of dispatch attempt results flushed to the store. | |
|
||||
| `coderd_oauth2_external_requests_rate_limit` | gauge | The total number of allowed requests per interval. | `name` `resource` |
|
||||
| `coderd_oauth2_external_requests_rate_limit_next_reset_unix` | gauge | Unix timestamp for when the next interval starts | `name` `resource` |
|
||||
| `coderd_oauth2_external_requests_rate_limit_remaining` | gauge | The remaining number of allowed requests in this interval. | `name` `resource` |
|
||||
| `coderd_oauth2_external_requests_rate_limit_reset_in_seconds` | gauge | Seconds until the next interval | `name` `resource` |
|
||||
| `coderd_oauth2_external_requests_rate_limit_used` | gauge | The number of requests made in this interval. | `name` `resource` |
|
||||
| `coderd_oauth2_external_requests_total` | counter | The total number of api calls made to external oauth2 providers. 'status_code' will be 0 if the request failed with no response. | `name` `source` `status_code` |
|
||||
| `coderd_open_file_refs_current` | gauge | The count of file references currently open in the file cache. Multiple references can be held for the same file. | |
|
||||
| `coderd_open_file_refs_total` | counter | The total number of file references ever opened in the file cache. The 'hit' label indicates if the file was loaded from the cache. | `hit` |
|
||||
| `coderd_open_files_current` | gauge | The count of unique files currently open in the file cache. | |
|
||||
| `coderd_open_files_size_bytes_current` | gauge | The current amount of memory of all files currently open in the file cache. | |
|
||||
| `coderd_open_files_size_bytes_total` | counter | The total amount of memory ever opened in the file cache. This number never decrements. | |
|
||||
| `coderd_open_files_total` | counter | The total count of unique files ever opened in the file cache. | |
|
||||
| `coderd_prebuilds_reconciliation_duration_seconds` | histogram | Duration of each prebuilds reconciliation cycle. | |
|
||||
| `coderd_prebuilt_workspace_claim_duration_seconds` | histogram | Time to claim a prebuilt workspace by organization, template, and preset. | `organization_name` `preset_name` `template_name` |
|
||||
| `coderd_prebuilt_workspaces_claimed_total` | counter | Total number of prebuilt workspaces which were claimed by users. Claiming refers to creating a workspace with a preset selected for which eligible prebuilt workspaces are available and one is reassigned to a user. | `organization_name` `preset_name` `template_name` |
|
||||
| `coderd_prebuilt_workspaces_created_total` | counter | Total number of prebuilt workspaces that have been created to meet the desired instance count of each template preset. | `organization_name` `preset_name` `template_name` |
|
||||
| `coderd_prebuilt_workspaces_desired` | gauge | Target number of prebuilt workspaces that should be available for each template preset. | `organization_name` `preset_name` `template_name` |
|
||||
| `coderd_prebuilt_workspaces_eligible` | gauge | Current number of prebuilt workspaces that are eligible to be claimed by users. These are workspaces that have completed their build process with their agent reporting 'ready' status. | `organization_name` `preset_name` `template_name` |
|
||||
| `coderd_prebuilt_workspaces_failed_total` | counter | Total number of prebuilt workspaces that failed to build. | `organization_name` `preset_name` `template_name` |
|
||||
| `coderd_prebuilt_workspaces_metrics_last_updated` | gauge | The unix timestamp when the metrics related to prebuilt workspaces were last updated; these metrics are cached. | |
|
||||
| `coderd_prebuilt_workspaces_preset_hard_limited` | gauge | Indicates whether a given preset has reached the hard failure limit (1 = hard-limited). Metric is omitted otherwise. | `organization_name` `preset_name` `template_name` |
|
||||
| `coderd_prebuilt_workspaces_reconciliation_paused` | gauge | Indicates whether prebuilds reconciliation is currently paused (1 = paused, 0 = not paused). | |
|
||||
| `coderd_prebuilt_workspaces_resource_replacements_total` | counter | Total number of prebuilt workspaces whose resource(s) got replaced upon being claimed. In Terraform, drift on immutable attributes results in resource replacement. This represents a worst-case scenario for prebuilt workspaces because the pre-provisioned resource would have been recreated when claiming, thus obviating the point of pre-provisioning. See https://coder.com/docs/admin/templates/extending-templates/prebuilt-workspaces#preventing-resource-replacement | `organization_name` `preset_name` `template_name` |
|
||||
| `coderd_prebuilt_workspaces_running` | gauge | Current number of prebuilt workspaces that are in a running state. These workspaces have started successfully but may not yet be claimable by users (see coderd_prebuilt_workspaces_eligible). | `organization_name` `preset_name` `template_name` |
|
||||
| `coderd_prometheusmetrics_agents_execution_seconds` | histogram | Histogram for duration of agents metrics collection in seconds. | |
|
||||
| `coderd_prometheusmetrics_agentstats_execution_seconds` | histogram | Histogram for duration of agent stats metrics collection in seconds. | |
|
||||
| `coderd_prometheusmetrics_metrics_aggregator_execution_cleanup_seconds` | histogram | Histogram for duration of metrics aggregator cleanup in seconds. | |
|
||||
| `coderd_prometheusmetrics_metrics_aggregator_execution_update_seconds` | histogram | Histogram for duration of metrics aggregator update in seconds. | |
|
||||
| `coderd_prometheusmetrics_metrics_aggregator_store_size` | gauge | The number of metrics stored in the aggregator | |
|
||||
| `coderd_provisioner_job_queue_wait_seconds` | histogram | Time from job creation to acquisition by a provisioner daemon. | `build_reason` `job_type` `provisioner_type` `transition` |
|
||||
| `coderd_provisionerd_job_timings_seconds` | histogram | The provisioner job time duration in seconds. | `provisioner` `status` |
|
||||
| `coderd_provisionerd_jobs_current` | gauge | The number of currently running provisioner jobs. | `provisioner` |
|
||||
| `coderd_provisionerd_num_daemons` | gauge | The number of provisioner daemons. | |
|
||||
| `coderd_provisionerd_workspace_build_timings_seconds` | histogram | The time taken for a workspace to build. | `status` `template_name` `template_version` `workspace_transition` |
|
||||
| `coderd_proxyhealth_health_check_duration_seconds` | histogram | Histogram for duration of proxy health collection in seconds. | |
|
||||
| `coderd_proxyhealth_health_check_results` | gauge | This endpoint returns a number to indicate the health status. -3 (unknown), -2 (Unreachable), -1 (Unhealthy), 0 (Unregistered), 1 (Healthy) | `proxy_id` |
|
||||
| `coderd_template_workspace_build_duration_seconds` | histogram | Duration from workspace build creation to agent ready, by template. | `is_prebuild` `organization_name` `status` `template_name` `transition` |
|
||||
| `coderd_workspace_builds_enqueued_total` | counter | Total number of workspace build enqueue attempts. | `build_reason` `provisioner_type` `status` `transition` |
|
||||
| `coderd_workspace_builds_total` | counter | The number of workspaces started, updated, or deleted. | `status` `template_name` `template_version` `workspace_name` `workspace_owner` `workspace_transition` |
|
||||
| `coderd_workspace_creation_duration_seconds` | histogram | Time to create a workspace by organization, template, preset, and type (regular or prebuild). | `organization_name` `preset_name` `template_name` `type` |
|
||||
| `coderd_workspace_creation_total` | counter | Total regular (non-prebuilt) workspace creations by organization, template, and preset. | `organization_name` `preset_name` `template_name` |
|
||||
| `coderd_workspace_latest_build_status` | gauge | The current workspace statuses by template, transition, and owner for all non-deleted workspaces. | `status` `template_name` `template_version` `workspace_owner` `workspace_transition` |
|
||||
| `go_gc_duration_seconds` | summary | A summary of the pause duration of garbage collection cycles. | |
|
||||
| `go_goroutines` | gauge | Number of goroutines that currently exist. | |
|
||||
| `go_info` | gauge | Information about the Go environment. | `version` |
|
||||
| `go_memstats_alloc_bytes` | gauge | Number of bytes allocated and still in use. | |
|
||||
| `go_memstats_alloc_bytes_total` | counter | Total number of bytes allocated, even if freed. | |
|
||||
| `go_memstats_buck_hash_sys_bytes` | gauge | Number of bytes used by the profiling bucket hash table. | |
|
||||
| `go_memstats_frees_total` | counter | Total number of frees. | |
|
||||
| `go_memstats_gc_sys_bytes` | gauge | Number of bytes used for garbage collection system metadata. | |
|
||||
| `go_memstats_heap_alloc_bytes` | gauge | Number of heap bytes allocated and still in use. | |
|
||||
| `go_memstats_heap_idle_bytes` | gauge | Number of heap bytes waiting to be used. | |
|
||||
| `go_memstats_heap_inuse_bytes` | gauge | Number of heap bytes that are in use. | |
|
||||
| `go_memstats_heap_objects` | gauge | Number of allocated objects. | |
|
||||
| `go_memstats_heap_released_bytes` | gauge | Number of heap bytes released to OS. | |
|
||||
| `go_memstats_heap_sys_bytes` | gauge | Number of heap bytes obtained from system. | |
|
||||
| `go_memstats_last_gc_time_seconds` | gauge | Number of seconds since 1970 of last garbage collection. | |
|
||||
| `go_memstats_lookups_total` | counter | Total number of pointer lookups. | |
|
||||
| `go_memstats_mallocs_total` | counter | Total number of mallocs. | |
|
||||
| `go_memstats_mcache_inuse_bytes` | gauge | Number of bytes in use by mcache structures. | |
|
||||
| `go_memstats_mcache_sys_bytes` | gauge | Number of bytes used for mcache structures obtained from system. | |
|
||||
| `go_memstats_mspan_inuse_bytes` | gauge | Number of bytes in use by mspan structures. | |
|
||||
| `go_memstats_mspan_sys_bytes` | gauge | Number of bytes used for mspan structures obtained from system. | |
|
||||
| `go_memstats_next_gc_bytes` | gauge | Number of heap bytes when next garbage collection will take place. | |
|
||||
| `go_memstats_other_sys_bytes` | gauge | Number of bytes used for other system allocations. | |
|
||||
| `go_memstats_stack_inuse_bytes` | gauge | Number of bytes in use by the stack allocator. | |
|
||||
| `go_memstats_stack_sys_bytes` | gauge | Number of bytes obtained from system for stack allocator. | |
|
||||
| `go_memstats_sys_bytes` | gauge | Number of bytes obtained from system. | |
|
||||
| `go_threads` | gauge | Number of OS threads created. | |
|
||||
| `process_cpu_seconds_total` | counter | Total user and system CPU time spent in seconds. | |
|
||||
| `process_max_fds` | gauge | Maximum number of open file descriptors. | |
|
||||
| `process_open_fds` | gauge | Number of open file descriptors. | |
|
||||
| `process_resident_memory_bytes` | gauge | Resident memory size in bytes. | |
|
||||
| `process_start_time_seconds` | gauge | Start time of the process since unix epoch in seconds. | |
|
||||
| `process_virtual_memory_bytes` | gauge | Virtual memory size in bytes. | |
|
||||
| `process_virtual_memory_max_bytes` | gauge | Maximum amount of virtual memory available in bytes. | |
|
||||
| `promhttp_metric_handler_requests_in_flight` | gauge | Current number of scrapes being served. | |
|
||||
| `promhttp_metric_handler_requests_total` | counter | Total number of scrapes by HTTP status code. | `code` |
|
||||
|
||||
<!-- End generated by 'make docs/admin/integrations/prometheus.md'. -->
|
||||
|
||||
|
||||
@@ -115,6 +115,25 @@ specified in your template in the `disable_params` search params list
|
||||
[](https://YOUR_ACCESS_URL/templates/YOUR_TEMPLATE/workspace?disable_params=first_parameter,second_parameter)
|
||||
```
|
||||
|
||||
### Security: consent dialog for automatic creation
|
||||
|
||||
When using `mode=auto` with prefilled `param.*` values, Coder displays a
|
||||
security consent dialog before creating the workspace. This protects users
|
||||
from malicious links that could provision workspaces with untrusted
|
||||
configurations, such as dotfiles or startup scripts from unknown sources.
|
||||
|
||||
The dialog shows:
|
||||
|
||||
- A warning that a workspace is about to be created automatically from a link
|
||||
- All prefilled `param.*` values from the URL
|
||||
- **Confirm and Create** and **Cancel** buttons
|
||||
|
||||
The workspace is only created if the user explicitly clicks **Confirm and
|
||||
Create**. Clicking **Cancel** falls back to the standard creation form where
|
||||
all parameters can be reviewed manually.
|
||||
|
||||

|
||||
|
||||
### Example: Kubernetes
|
||||
|
||||
For a full example of the Open in Coder flow in Kubernetes, check out
|
||||
|
||||
@@ -13,7 +13,8 @@ AI Bridge runs inside the Coder control plane (`coderd`), requiring no separate
|
||||
You will need to enable AI Bridge explicitly:
|
||||
|
||||
```sh
|
||||
CODER_AIBRIDGE_ENABLED=true coder server
|
||||
export CODER_AIBRIDGE_ENABLED=true
|
||||
coder server
|
||||
# or
|
||||
coder server --aibridge-enabled=true
|
||||
```
|
||||
|
||||
Binary file not shown.
|
After Width: | Height: | Size: 52 KiB |
@@ -2009,6 +2009,11 @@
|
||||
"description": "Show a task's logs",
|
||||
"path": "reference/cli/task_logs.md"
|
||||
},
|
||||
{
|
||||
"title": "task pause",
|
||||
"description": "Pause a task",
|
||||
"path": "reference/cli/task_pause.md"
|
||||
},
|
||||
{
|
||||
"title": "task send",
|
||||
"description": "Send input to a task",
|
||||
|
||||
Generated
+448
-8
@@ -2184,9 +2184,9 @@ This is required on creation to enable a user-flow of validating a template work
|
||||
|
||||
#### Enumerated Values
|
||||
|
||||
| Value(s) |
|
||||
|-----------------------------------------------------------------------------------|
|
||||
| `cli`, `dashboard`, `jetbrains_connection`, `ssh_connection`, `vscode_connection` |
|
||||
| Value(s) |
|
||||
|-----------------------------------------------------------------------------------------------------------------------|
|
||||
| `cli`, `dashboard`, `jetbrains_connection`, `ssh_connection`, `task_manual_pause`, `task_resume`, `vscode_connection` |
|
||||
|
||||
## codersdk.CreateWorkspaceBuildRequest
|
||||
|
||||
@@ -2227,11 +2227,11 @@ This is required on creation to enable a user-flow of validating a template work
|
||||
|
||||
#### Enumerated Values
|
||||
|
||||
| Property | Value(s) |
|
||||
|--------------|-----------------------------------------------------------------------------------|
|
||||
| `log_level` | `debug` |
|
||||
| `reason` | `cli`, `dashboard`, `jetbrains_connection`, `ssh_connection`, `vscode_connection` |
|
||||
| `transition` | `delete`, `start`, `stop` |
|
||||
| Property | Value(s) |
|
||||
|--------------|--------------------------------------------------------------------------------------------------------|
|
||||
| `log_level` | `debug` |
|
||||
| `reason` | `cli`, `dashboard`, `jetbrains_connection`, `ssh_connection`, `task_manual_pause`, `vscode_connection` |
|
||||
| `transition` | `delete`, `start`, `stop` |
|
||||
|
||||
## codersdk.CreateWorkspaceProxyRequest
|
||||
|
||||
@@ -6178,6 +6178,225 @@ Only certain features set these fields: - FeatureManagedAgentLimit|
|
||||
| `name` | string | true | | |
|
||||
| `regenerate_token` | boolean | false | | |
|
||||
|
||||
## codersdk.PauseTaskResponse
|
||||
|
||||
```json
|
||||
{
|
||||
"workspace_build": {
|
||||
"build_number": 0,
|
||||
"created_at": "2019-08-24T14:15:22Z",
|
||||
"daily_cost": 0,
|
||||
"deadline": "2019-08-24T14:15:22Z",
|
||||
"has_ai_task": true,
|
||||
"has_external_agent": true,
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"initiator_id": "06588898-9a84-4b35-ba8f-f9cbd64946f3",
|
||||
"initiator_name": "string",
|
||||
"job": {
|
||||
"available_workers": [
|
||||
"497f6eca-6276-4993-bfeb-53cbbbba6f08"
|
||||
],
|
||||
"canceled_at": "2019-08-24T14:15:22Z",
|
||||
"completed_at": "2019-08-24T14:15:22Z",
|
||||
"created_at": "2019-08-24T14:15:22Z",
|
||||
"error": "string",
|
||||
"error_code": "REQUIRED_TEMPLATE_VARIABLES",
|
||||
"file_id": "8a0cfb4f-ddc9-436d-91bb-75133c583767",
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"initiator_id": "06588898-9a84-4b35-ba8f-f9cbd64946f3",
|
||||
"input": {
|
||||
"error": "string",
|
||||
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
|
||||
"workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478"
|
||||
},
|
||||
"logs_overflowed": true,
|
||||
"metadata": {
|
||||
"template_display_name": "string",
|
||||
"template_icon": "string",
|
||||
"template_id": "c6d67e98-83ea-49f0-8812-e4abae2b68bc",
|
||||
"template_name": "string",
|
||||
"template_version_name": "string",
|
||||
"workspace_id": "0967198e-ec7b-4c6b-b4d3-f71244cadbe9",
|
||||
"workspace_name": "string"
|
||||
},
|
||||
"organization_id": "7c60d51f-b44e-4682-87d6-449835ea4de6",
|
||||
"queue_position": 0,
|
||||
"queue_size": 0,
|
||||
"started_at": "2019-08-24T14:15:22Z",
|
||||
"status": "pending",
|
||||
"tags": {
|
||||
"property1": "string",
|
||||
"property2": "string"
|
||||
},
|
||||
"type": "template_version_import",
|
||||
"worker_id": "ae5fa6f7-c55b-40c1-b40a-b36ac467652b",
|
||||
"worker_name": "string"
|
||||
},
|
||||
"matched_provisioners": {
|
||||
"available": 0,
|
||||
"count": 0,
|
||||
"most_recently_seen": "2019-08-24T14:15:22Z"
|
||||
},
|
||||
"max_deadline": "2019-08-24T14:15:22Z",
|
||||
"reason": "initiator",
|
||||
"resources": [
|
||||
{
|
||||
"agents": [
|
||||
{
|
||||
"api_version": "string",
|
||||
"apps": [
|
||||
{
|
||||
"command": "string",
|
||||
"display_name": "string",
|
||||
"external": true,
|
||||
"group": "string",
|
||||
"health": "disabled",
|
||||
"healthcheck": {
|
||||
"interval": 0,
|
||||
"threshold": 0,
|
||||
"url": "string"
|
||||
},
|
||||
"hidden": true,
|
||||
"icon": "string",
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"open_in": "slim-window",
|
||||
"sharing_level": "owner",
|
||||
"slug": "string",
|
||||
"statuses": [
|
||||
{
|
||||
"agent_id": "2b1e3b65-2c04-4fa2-a2d7-467901e98978",
|
||||
"app_id": "affd1d10-9538-4fc8-9e0b-4594a28c1335",
|
||||
"created_at": "2019-08-24T14:15:22Z",
|
||||
"icon": "string",
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"message": "string",
|
||||
"needs_user_attention": true,
|
||||
"state": "working",
|
||||
"uri": "string",
|
||||
"workspace_id": "0967198e-ec7b-4c6b-b4d3-f71244cadbe9"
|
||||
}
|
||||
],
|
||||
"subdomain": true,
|
||||
"subdomain_name": "string",
|
||||
"tooltip": "string",
|
||||
"url": "string"
|
||||
}
|
||||
],
|
||||
"architecture": "string",
|
||||
"connection_timeout_seconds": 0,
|
||||
"created_at": "2019-08-24T14:15:22Z",
|
||||
"directory": "string",
|
||||
"disconnected_at": "2019-08-24T14:15:22Z",
|
||||
"display_apps": [
|
||||
"vscode"
|
||||
],
|
||||
"environment_variables": {
|
||||
"property1": "string",
|
||||
"property2": "string"
|
||||
},
|
||||
"expanded_directory": "string",
|
||||
"first_connected_at": "2019-08-24T14:15:22Z",
|
||||
"health": {
|
||||
"healthy": false,
|
||||
"reason": "agent has lost connection"
|
||||
},
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"instance_id": "string",
|
||||
"last_connected_at": "2019-08-24T14:15:22Z",
|
||||
"latency": {
|
||||
"property1": {
|
||||
"latency_ms": 0,
|
||||
"preferred": true
|
||||
},
|
||||
"property2": {
|
||||
"latency_ms": 0,
|
||||
"preferred": true
|
||||
}
|
||||
},
|
||||
"lifecycle_state": "created",
|
||||
"log_sources": [
|
||||
{
|
||||
"created_at": "2019-08-24T14:15:22Z",
|
||||
"display_name": "string",
|
||||
"icon": "string",
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"workspace_agent_id": "7ad2e618-fea7-4c1a-b70a-f501566a72f1"
|
||||
}
|
||||
],
|
||||
"logs_length": 0,
|
||||
"logs_overflowed": true,
|
||||
"name": "string",
|
||||
"operating_system": "string",
|
||||
"parent_id": {
|
||||
"uuid": "string",
|
||||
"valid": true
|
||||
},
|
||||
"ready_at": "2019-08-24T14:15:22Z",
|
||||
"resource_id": "4d5215ed-38bb-48ed-879a-fdb9ca58522f",
|
||||
"scripts": [
|
||||
{
|
||||
"cron": "string",
|
||||
"display_name": "string",
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"log_path": "string",
|
||||
"log_source_id": "4197ab25-95cf-4b91-9c78-f7f2af5d353a",
|
||||
"run_on_start": true,
|
||||
"run_on_stop": true,
|
||||
"script": "string",
|
||||
"start_blocks_login": true,
|
||||
"timeout": 0
|
||||
}
|
||||
],
|
||||
"started_at": "2019-08-24T14:15:22Z",
|
||||
"startup_script_behavior": "blocking",
|
||||
"status": "connecting",
|
||||
"subsystems": [
|
||||
"envbox"
|
||||
],
|
||||
"troubleshooting_url": "string",
|
||||
"updated_at": "2019-08-24T14:15:22Z",
|
||||
"version": "string"
|
||||
}
|
||||
],
|
||||
"created_at": "2019-08-24T14:15:22Z",
|
||||
"daily_cost": 0,
|
||||
"hide": true,
|
||||
"icon": "string",
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"job_id": "453bd7d7-5355-4d6d-a38e-d9e7eb218c3f",
|
||||
"metadata": [
|
||||
{
|
||||
"key": "string",
|
||||
"sensitive": true,
|
||||
"value": "string"
|
||||
}
|
||||
],
|
||||
"name": "string",
|
||||
"type": "string",
|
||||
"workspace_transition": "start"
|
||||
}
|
||||
],
|
||||
"status": "pending",
|
||||
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
|
||||
"template_version_name": "string",
|
||||
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
|
||||
"transition": "start",
|
||||
"updated_at": "2019-08-24T14:15:22Z",
|
||||
"workspace_id": "0967198e-ec7b-4c6b-b4d3-f71244cadbe9",
|
||||
"workspace_name": "string",
|
||||
"workspace_owner_avatar_url": "string",
|
||||
"workspace_owner_id": "e7078695-5279-4c86-8774-3ac2367a2fc7",
|
||||
"workspace_owner_name": "string"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Properties
|
||||
|
||||
| Name | Type | Required | Restrictions | Description |
|
||||
|-------------------|----------------------------------------------------|----------|--------------|-------------|
|
||||
| `workspace_build` | [codersdk.WorkspaceBuild](#codersdkworkspacebuild) | false | | |
|
||||
|
||||
## codersdk.Permission
|
||||
|
||||
```json
|
||||
@@ -7303,6 +7522,225 @@ Only certain features set these fields: - FeatureManagedAgentLimit|
|
||||
| `message` | string | false | | Message is an actionable message that depicts actions the request took. These messages should be fully formed sentences with proper punctuation. Examples: - "A user has been created." - "Failed to create a user." |
|
||||
| `validations` | array of [codersdk.ValidationError](#codersdkvalidationerror) | false | | Validations are form field-specific friendly error messages. They will be shown on a form field in the UI. These can also be used to add additional context if there is a set of errors in the primary 'Message'. |
|
||||
|
||||
## codersdk.ResumeTaskResponse
|
||||
|
||||
```json
|
||||
{
|
||||
"workspace_build": {
|
||||
"build_number": 0,
|
||||
"created_at": "2019-08-24T14:15:22Z",
|
||||
"daily_cost": 0,
|
||||
"deadline": "2019-08-24T14:15:22Z",
|
||||
"has_ai_task": true,
|
||||
"has_external_agent": true,
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"initiator_id": "06588898-9a84-4b35-ba8f-f9cbd64946f3",
|
||||
"initiator_name": "string",
|
||||
"job": {
|
||||
"available_workers": [
|
||||
"497f6eca-6276-4993-bfeb-53cbbbba6f08"
|
||||
],
|
||||
"canceled_at": "2019-08-24T14:15:22Z",
|
||||
"completed_at": "2019-08-24T14:15:22Z",
|
||||
"created_at": "2019-08-24T14:15:22Z",
|
||||
"error": "string",
|
||||
"error_code": "REQUIRED_TEMPLATE_VARIABLES",
|
||||
"file_id": "8a0cfb4f-ddc9-436d-91bb-75133c583767",
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"initiator_id": "06588898-9a84-4b35-ba8f-f9cbd64946f3",
|
||||
"input": {
|
||||
"error": "string",
|
||||
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
|
||||
"workspace_build_id": "badaf2eb-96c5-4050-9f1d-db2d39ca5478"
|
||||
},
|
||||
"logs_overflowed": true,
|
||||
"metadata": {
|
||||
"template_display_name": "string",
|
||||
"template_icon": "string",
|
||||
"template_id": "c6d67e98-83ea-49f0-8812-e4abae2b68bc",
|
||||
"template_name": "string",
|
||||
"template_version_name": "string",
|
||||
"workspace_id": "0967198e-ec7b-4c6b-b4d3-f71244cadbe9",
|
||||
"workspace_name": "string"
|
||||
},
|
||||
"organization_id": "7c60d51f-b44e-4682-87d6-449835ea4de6",
|
||||
"queue_position": 0,
|
||||
"queue_size": 0,
|
||||
"started_at": "2019-08-24T14:15:22Z",
|
||||
"status": "pending",
|
||||
"tags": {
|
||||
"property1": "string",
|
||||
"property2": "string"
|
||||
},
|
||||
"type": "template_version_import",
|
||||
"worker_id": "ae5fa6f7-c55b-40c1-b40a-b36ac467652b",
|
||||
"worker_name": "string"
|
||||
},
|
||||
"matched_provisioners": {
|
||||
"available": 0,
|
||||
"count": 0,
|
||||
"most_recently_seen": "2019-08-24T14:15:22Z"
|
||||
},
|
||||
"max_deadline": "2019-08-24T14:15:22Z",
|
||||
"reason": "initiator",
|
||||
"resources": [
|
||||
{
|
||||
"agents": [
|
||||
{
|
||||
"api_version": "string",
|
||||
"apps": [
|
||||
{
|
||||
"command": "string",
|
||||
"display_name": "string",
|
||||
"external": true,
|
||||
"group": "string",
|
||||
"health": "disabled",
|
||||
"healthcheck": {
|
||||
"interval": 0,
|
||||
"threshold": 0,
|
||||
"url": "string"
|
||||
},
|
||||
"hidden": true,
|
||||
"icon": "string",
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"open_in": "slim-window",
|
||||
"sharing_level": "owner",
|
||||
"slug": "string",
|
||||
"statuses": [
|
||||
{
|
||||
"agent_id": "2b1e3b65-2c04-4fa2-a2d7-467901e98978",
|
||||
"app_id": "affd1d10-9538-4fc8-9e0b-4594a28c1335",
|
||||
"created_at": "2019-08-24T14:15:22Z",
|
||||
"icon": "string",
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"message": "string",
|
||||
"needs_user_attention": true,
|
||||
"state": "working",
|
||||
"uri": "string",
|
||||
"workspace_id": "0967198e-ec7b-4c6b-b4d3-f71244cadbe9"
|
||||
}
|
||||
],
|
||||
"subdomain": true,
|
||||
"subdomain_name": "string",
|
||||
"tooltip": "string",
|
||||
"url": "string"
|
||||
}
|
||||
],
|
||||
"architecture": "string",
|
||||
"connection_timeout_seconds": 0,
|
||||
"created_at": "2019-08-24T14:15:22Z",
|
||||
"directory": "string",
|
||||
"disconnected_at": "2019-08-24T14:15:22Z",
|
||||
"display_apps": [
|
||||
"vscode"
|
||||
],
|
||||
"environment_variables": {
|
||||
"property1": "string",
|
||||
"property2": "string"
|
||||
},
|
||||
"expanded_directory": "string",
|
||||
"first_connected_at": "2019-08-24T14:15:22Z",
|
||||
"health": {
|
||||
"healthy": false,
|
||||
"reason": "agent has lost connection"
|
||||
},
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"instance_id": "string",
|
||||
"last_connected_at": "2019-08-24T14:15:22Z",
|
||||
"latency": {
|
||||
"property1": {
|
||||
"latency_ms": 0,
|
||||
"preferred": true
|
||||
},
|
||||
"property2": {
|
||||
"latency_ms": 0,
|
||||
"preferred": true
|
||||
}
|
||||
},
|
||||
"lifecycle_state": "created",
|
||||
"log_sources": [
|
||||
{
|
||||
"created_at": "2019-08-24T14:15:22Z",
|
||||
"display_name": "string",
|
||||
"icon": "string",
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"workspace_agent_id": "7ad2e618-fea7-4c1a-b70a-f501566a72f1"
|
||||
}
|
||||
],
|
||||
"logs_length": 0,
|
||||
"logs_overflowed": true,
|
||||
"name": "string",
|
||||
"operating_system": "string",
|
||||
"parent_id": {
|
||||
"uuid": "string",
|
||||
"valid": true
|
||||
},
|
||||
"ready_at": "2019-08-24T14:15:22Z",
|
||||
"resource_id": "4d5215ed-38bb-48ed-879a-fdb9ca58522f",
|
||||
"scripts": [
|
||||
{
|
||||
"cron": "string",
|
||||
"display_name": "string",
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"log_path": "string",
|
||||
"log_source_id": "4197ab25-95cf-4b91-9c78-f7f2af5d353a",
|
||||
"run_on_start": true,
|
||||
"run_on_stop": true,
|
||||
"script": "string",
|
||||
"start_blocks_login": true,
|
||||
"timeout": 0
|
||||
}
|
||||
],
|
||||
"started_at": "2019-08-24T14:15:22Z",
|
||||
"startup_script_behavior": "blocking",
|
||||
"status": "connecting",
|
||||
"subsystems": [
|
||||
"envbox"
|
||||
],
|
||||
"troubleshooting_url": "string",
|
||||
"updated_at": "2019-08-24T14:15:22Z",
|
||||
"version": "string"
|
||||
}
|
||||
],
|
||||
"created_at": "2019-08-24T14:15:22Z",
|
||||
"daily_cost": 0,
|
||||
"hide": true,
|
||||
"icon": "string",
|
||||
"id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
|
||||
"job_id": "453bd7d7-5355-4d6d-a38e-d9e7eb218c3f",
|
||||
"metadata": [
|
||||
{
|
||||
"key": "string",
|
||||
"sensitive": true,
|
||||
"value": "string"
|
||||
}
|
||||
],
|
||||
"name": "string",
|
||||
"type": "string",
|
||||
"workspace_transition": "start"
|
||||
}
|
||||
],
|
||||
"status": "pending",
|
||||
"template_version_id": "0ba39c92-1f1b-4c32-aa3e-9925d7713eb1",
|
||||
"template_version_name": "string",
|
||||
"template_version_preset_id": "512a53a7-30da-446e-a1fc-713c630baff1",
|
||||
"transition": "start",
|
||||
"updated_at": "2019-08-24T14:15:22Z",
|
||||
"workspace_id": "0967198e-ec7b-4c6b-b4d3-f71244cadbe9",
|
||||
"workspace_name": "string",
|
||||
"workspace_owner_avatar_url": "string",
|
||||
"workspace_owner_id": "e7078695-5279-4c86-8774-3ac2367a2fc7",
|
||||
"workspace_owner_name": "string"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Properties
|
||||
|
||||
| Name | Type | Required | Restrictions | Description |
|
||||
|-------------------|----------------------------------------------------|----------|--------------|-------------|
|
||||
| `workspace_build` | [codersdk.WorkspaceBuild](#codersdkworkspacebuild) | false | | |
|
||||
|
||||
## codersdk.RetentionConfig
|
||||
|
||||
```json
|
||||
@@ -8005,6 +8443,7 @@ Only certain features set these fields: - FeatureManagedAgentLimit|
|
||||
"created_by_id": "9377d689-01fb-4abf-8450-3368d2c1924f",
|
||||
"created_by_name": "string",
|
||||
"default_ttl_ms": 0,
|
||||
"deleted": true,
|
||||
"deprecated": true,
|
||||
"deprecation_message": "string",
|
||||
"description": "string",
|
||||
@@ -8046,6 +8485,7 @@ Only certain features set these fields: - FeatureManagedAgentLimit|
|
||||
| `created_by_id` | string | false | | |
|
||||
| `created_by_name` | string | false | | |
|
||||
| `default_ttl_ms` | integer | false | | |
|
||||
| `deleted` | boolean | false | | |
|
||||
| `deprecated` | boolean | false | | |
|
||||
| `deprecation_message` | string | false | | |
|
||||
| `description` | string | false | | |
|
||||
|
||||
Generated
+64
@@ -365,6 +365,70 @@ curl -X GET http://coder-server:8080/api/v2/tasks/{user}/{task}/logs \
|
||||
|
||||
To perform this operation, you must be authenticated. [Learn more](authentication.md).
|
||||
|
||||
## Pause task
|
||||
|
||||
### Code samples
|
||||
|
||||
```shell
|
||||
# Example request using curl
|
||||
curl -X POST http://coder-server:8080/api/v2/tasks/{user}/{task}/pause \
|
||||
-H 'Accept: */*' \
|
||||
-H 'Coder-Session-Token: API_KEY'
|
||||
```
|
||||
|
||||
`POST /tasks/{user}/{task}/pause`
|
||||
|
||||
### Parameters
|
||||
|
||||
| Name | In | Type | Required | Description |
|
||||
|--------|------|--------------|----------|-------------------------------------------------------|
|
||||
| `user` | path | string | true | Username, user ID, or 'me' for the authenticated user |
|
||||
| `task` | path | string(uuid) | true | Task ID |
|
||||
|
||||
### Example responses
|
||||
|
||||
> 202 Response
|
||||
|
||||
### Responses
|
||||
|
||||
| Status | Meaning | Description | Schema |
|
||||
|--------|---------------------------------------------------------------|-------------|--------------------------------------------------------------------|
|
||||
| 202 | [Accepted](https://tools.ietf.org/html/rfc7231#section-6.3.3) | Accepted | [codersdk.PauseTaskResponse](schemas.md#codersdkpausetaskresponse) |
|
||||
|
||||
To perform this operation, you must be authenticated. [Learn more](authentication.md).
|
||||
|
||||
## Resume task
|
||||
|
||||
### Code samples
|
||||
|
||||
```shell
|
||||
# Example request using curl
|
||||
curl -X POST http://coder-server:8080/api/v2/tasks/{user}/{task}/resume \
|
||||
-H 'Accept: */*' \
|
||||
-H 'Coder-Session-Token: API_KEY'
|
||||
```
|
||||
|
||||
`POST /tasks/{user}/{task}/resume`
|
||||
|
||||
### Parameters
|
||||
|
||||
| Name | In | Type | Required | Description |
|
||||
|--------|------|--------------|----------|-------------------------------------------------------|
|
||||
| `user` | path | string | true | Username, user ID, or 'me' for the authenticated user |
|
||||
| `task` | path | string(uuid) | true | Task ID |
|
||||
|
||||
### Example responses
|
||||
|
||||
> 202 Response
|
||||
|
||||
### Responses
|
||||
|
||||
| Status | Meaning | Description | Schema |
|
||||
|--------|---------------------------------------------------------------|-------------|----------------------------------------------------------------------|
|
||||
| 202 | [Accepted](https://tools.ietf.org/html/rfc7231#section-6.3.3) | Accepted | [codersdk.ResumeTaskResponse](schemas.md#codersdkresumetaskresponse) |
|
||||
|
||||
To perform this operation, you must be authenticated. [Learn more](authentication.md).
|
||||
|
||||
## Send input to AI task
|
||||
|
||||
### Code samples
|
||||
|
||||
Generated
+8
@@ -62,6 +62,7 @@ To include deprecated templates, specify `deprecated:true` in the search query.
|
||||
"created_by_id": "9377d689-01fb-4abf-8450-3368d2c1924f",
|
||||
"created_by_name": "string",
|
||||
"default_ttl_ms": 0,
|
||||
"deleted": true,
|
||||
"deprecated": true,
|
||||
"deprecation_message": "string",
|
||||
"description": "string",
|
||||
@@ -120,6 +121,7 @@ Restarts will only happen on weekdays in this list on weeks which line up with W
|
||||
|`» created_by_id`|string(uuid)|false|||
|
||||
|`» created_by_name`|string|false|||
|
||||
|`» default_ttl_ms`|integer|false|||
|
||||
|`» deleted`|boolean|false|||
|
||||
|`» deprecated`|boolean|false|||
|
||||
|`» deprecation_message`|string|false|||
|
||||
|`» description`|string|false|||
|
||||
@@ -246,6 +248,7 @@ curl -X POST http://coder-server:8080/api/v2/organizations/{organization}/templa
|
||||
"created_by_id": "9377d689-01fb-4abf-8450-3368d2c1924f",
|
||||
"created_by_name": "string",
|
||||
"default_ttl_ms": 0,
|
||||
"deleted": true,
|
||||
"deprecated": true,
|
||||
"deprecation_message": "string",
|
||||
"description": "string",
|
||||
@@ -397,6 +400,7 @@ curl -X GET http://coder-server:8080/api/v2/organizations/{organization}/templat
|
||||
"created_by_id": "9377d689-01fb-4abf-8450-3368d2c1924f",
|
||||
"created_by_name": "string",
|
||||
"default_ttl_ms": 0,
|
||||
"deleted": true,
|
||||
"deprecated": true,
|
||||
"deprecation_message": "string",
|
||||
"description": "string",
|
||||
@@ -814,6 +818,7 @@ To include deprecated templates, specify `deprecated:true` in the search query.
|
||||
"created_by_id": "9377d689-01fb-4abf-8450-3368d2c1924f",
|
||||
"created_by_name": "string",
|
||||
"default_ttl_ms": 0,
|
||||
"deleted": true,
|
||||
"deprecated": true,
|
||||
"deprecation_message": "string",
|
||||
"description": "string",
|
||||
@@ -872,6 +877,7 @@ Restarts will only happen on weekdays in this list on weeks which line up with W
|
||||
|`» created_by_id`|string(uuid)|false|||
|
||||
|`» created_by_name`|string|false|||
|
||||
|`» default_ttl_ms`|integer|false|||
|
||||
|`» deleted`|boolean|false|||
|
||||
|`» deprecated`|boolean|false|||
|
||||
|`» deprecation_message`|string|false|||
|
||||
|`» description`|string|false|||
|
||||
@@ -1016,6 +1022,7 @@ curl -X GET http://coder-server:8080/api/v2/templates/{template} \
|
||||
"created_by_id": "9377d689-01fb-4abf-8450-3368d2c1924f",
|
||||
"created_by_name": "string",
|
||||
"default_ttl_ms": 0,
|
||||
"deleted": true,
|
||||
"deprecated": true,
|
||||
"deprecation_message": "string",
|
||||
"description": "string",
|
||||
@@ -1189,6 +1196,7 @@ curl -X PATCH http://coder-server:8080/api/v2/templates/{template} \
|
||||
"created_by_id": "9377d689-01fb-4abf-8450-3368d2c1924f",
|
||||
"created_by_name": "string",
|
||||
"default_ttl_ms": 0,
|
||||
"deleted": true,
|
||||
"deprecated": true,
|
||||
"deprecation_message": "string",
|
||||
"description": "string",
|
||||
|
||||
Generated
+1
@@ -21,5 +21,6 @@ coder task
|
||||
| [<code>delete</code>](./task_delete.md) | Delete tasks |
|
||||
| [<code>list</code>](./task_list.md) | List tasks |
|
||||
| [<code>logs</code>](./task_logs.md) | Show a task's logs |
|
||||
| [<code>pause</code>](./task_pause.md) | Pause a task |
|
||||
| [<code>send</code>](./task_send.md) | Send input to a task |
|
||||
| [<code>status</code>](./task_status.md) | Show the status of a task. |
|
||||
|
||||
Generated
+36
@@ -0,0 +1,36 @@
|
||||
<!-- DO NOT EDIT | GENERATED CONTENT -->
|
||||
# task pause
|
||||
|
||||
Pause a task
|
||||
|
||||
## Usage
|
||||
|
||||
```console
|
||||
coder task pause [flags] <task>
|
||||
```
|
||||
|
||||
## Description
|
||||
|
||||
```console
|
||||
- Pause a task by name:
|
||||
|
||||
$ coder task pause my-task
|
||||
|
||||
- Pause another user's task:
|
||||
|
||||
$ coder task pause alice/my-task
|
||||
|
||||
- Pause a task without confirmation:
|
||||
|
||||
$ coder task pause my-task --yes
|
||||
```
|
||||
|
||||
## Options
|
||||
|
||||
### -y, --yes
|
||||
|
||||
| | |
|
||||
|------|-------------------|
|
||||
| Type | <code>bool</code> |
|
||||
|
||||
Bypass confirmation prompts.
|
||||
@@ -102,11 +102,7 @@ manually updated the workspace.
|
||||
|
||||
## Bulk operations
|
||||
|
||||
> [!NOTE]
|
||||
> Bulk operations are a Premium feature.
|
||||
> [Learn more](https://coder.com/pricing#compare-plans).
|
||||
|
||||
Licensed admins may apply bulk operations (update, delete, start, stop) in the
|
||||
Admins may apply bulk operations (update, delete, start, stop) in the
|
||||
**Workspaces** tab. Select the workspaces you'd like to modify with the
|
||||
checkboxes on the left, then use the top-right **Actions** dropdown to apply the
|
||||
operation.
|
||||
|
||||
@@ -111,7 +111,7 @@ module "slackme" {
|
||||
|
||||
module "dotfiles" {
|
||||
source = "dev.registry.coder.com/coder/dotfiles/coder"
|
||||
version = "1.2.3"
|
||||
version = "1.3.0"
|
||||
agent_id = coder_agent.dev.id
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# 1.86.0
|
||||
FROM rust:slim@sha256:df6ca8f96d338697ccdbe3ccac57a85d2172e03a2429c2d243e74f3bb83ba2f5 AS rust-utils
|
||||
FROM rust:slim@sha256:760ad1d638d70ebbd0c61e06210e1289cbe45ff6425e3ea6e01241de3e14d08e AS rust-utils
|
||||
# Install rust helper programs
|
||||
ENV CARGO_INSTALL_ROOT=/tmp/
|
||||
# Use more reliable mirrors for Debian packages
|
||||
|
||||
@@ -337,7 +337,7 @@ module "slackme" {
|
||||
module "dotfiles" {
|
||||
count = data.coder_workspace.me.start_count
|
||||
source = "dev.registry.coder.com/coder/dotfiles/coder"
|
||||
version = "1.2.3"
|
||||
version = "1.3.0"
|
||||
agent_id = coder_agent.dev.id
|
||||
}
|
||||
|
||||
@@ -375,7 +375,7 @@ module "personalize" {
|
||||
module "mux" {
|
||||
count = data.coder_workspace.me.start_count
|
||||
source = "registry.coder.com/coder/mux/coder"
|
||||
version = "1.0.8"
|
||||
version = "1.1.0"
|
||||
agent_id = coder_agent.dev.id
|
||||
subdomain = true
|
||||
display_name = "Mux"
|
||||
|
||||
@@ -2,8 +2,8 @@ package aibridged_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
_ "embed"
|
||||
"testing"
|
||||
"testing/synctest"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -105,10 +105,65 @@ func TestPool(t *testing.T) {
|
||||
require.EqualValues(t, 2, cacheMetrics.KeysEvicted())
|
||||
require.EqualValues(t, 1, cacheMetrics.Hits())
|
||||
require.EqualValues(t, 3, cacheMetrics.Misses())
|
||||
}
|
||||
|
||||
// TODO: add test for expiry.
|
||||
// This requires Go 1.25's [synctest](https://pkg.go.dev/testing/synctest) since the
|
||||
// internal cache lib cannot be tested using coder/quartz.
|
||||
func TestPool_Expiry(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
synctest.Test(t, func(t *testing.T) {
|
||||
logger := slogtest.Make(t, nil)
|
||||
ctrl := gomock.NewController(t)
|
||||
client := mock.NewMockDRPCClient(ctrl)
|
||||
mcpProxy := mcpmock.NewMockServerProxier(ctrl)
|
||||
mcpProxy.EXPECT().Init(gomock.Any()).AnyTimes().Return(nil)
|
||||
mcpProxy.EXPECT().Shutdown(gomock.Any()).AnyTimes().Return(nil)
|
||||
|
||||
const ttl = time.Second
|
||||
opts := aibridged.PoolOptions{MaxItems: 1, TTL: ttl}
|
||||
pool, err := aibridged.NewCachedBridgePool(opts, nil, logger, nil, testTracer)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { pool.Shutdown(context.Background()) })
|
||||
|
||||
req := aibridged.Request{
|
||||
SessionKey: "key",
|
||||
InitiatorID: uuid.New(),
|
||||
APIKeyID: uuid.New().String(),
|
||||
}
|
||||
clientFn := func() (aibridged.DRPCClient, error) {
|
||||
return client, nil
|
||||
}
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
// First acquire is a cache miss.
|
||||
_, err = pool.Acquire(ctx, req, clientFn, newMockMCPFactory(mcpProxy))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Second acquire is a cache hit.
|
||||
_, err = pool.Acquire(ctx, req, clientFn, newMockMCPFactory(mcpProxy))
|
||||
require.NoError(t, err)
|
||||
|
||||
metrics := pool.CacheMetrics()
|
||||
require.EqualValues(t, 1, metrics.Misses())
|
||||
require.EqualValues(t, 1, metrics.Hits())
|
||||
|
||||
// TTL expires
|
||||
time.Sleep(ttl + time.Millisecond)
|
||||
|
||||
// Third acquire is a cache miss because the entry expired.
|
||||
_, err = pool.Acquire(ctx, req, clientFn, newMockMCPFactory(mcpProxy))
|
||||
require.NoError(t, err)
|
||||
|
||||
metrics = pool.CacheMetrics()
|
||||
require.EqualValues(t, 2, metrics.Misses())
|
||||
require.EqualValues(t, 1, metrics.Hits())
|
||||
|
||||
// Wait for all eviction goroutines to complete before gomock's ctrl.Finish()
|
||||
// runs in test cleanup. ristretto's OnEvict callback spawns goroutines that
|
||||
// need to finish calling mcpProxy.Shutdown() before ctrl.finish clears the
|
||||
// expectations.
|
||||
synctest.Wait()
|
||||
})
|
||||
}
|
||||
|
||||
var _ aibridged.MCPProxyBuilder = &mockMCPFactory{}
|
||||
|
||||
@@ -1,77 +0,0 @@
|
||||
# AI Bridge Proxy
|
||||
|
||||
A MITM (Man-in-the-Middle) proxy server for intercepting and decrypting HTTPS requests to AI providers.
|
||||
|
||||
## Overview
|
||||
|
||||
The AI Bridge Proxy intercepts HTTPS traffic, decrypts it using a configured CA certificate, and forwards requests to AI Bridge for processing.
|
||||
|
||||
## Configuration
|
||||
|
||||
### Certificate Setup
|
||||
|
||||
Generate a CA key pair for MITM:
|
||||
|
||||
#### 1. Generate a new private key
|
||||
|
||||
```sh
|
||||
openssl genrsa -out mitm.key 2048
|
||||
chmod 400 mitm.key
|
||||
```
|
||||
|
||||
#### 2. Create a self-signed CA certificate
|
||||
|
||||
```sh
|
||||
openssl req -new -x509 -days 365 \
|
||||
-key mitm.key \
|
||||
-out mitm.crt \
|
||||
-subj "/CN=Coder AI Bridge Proxy CA"
|
||||
```
|
||||
|
||||
### Configuration options
|
||||
|
||||
| Environment Variable | Description | Default |
|
||||
|------------------------------------|---------------------------------|---------|
|
||||
| `CODER_AIBRIDGE_PROXY_ENABLED` | Enable the AI Bridge Proxy | `false` |
|
||||
| `CODER_AIBRIDGE_PROXY_LISTEN_ADDR` | Address the proxy listens on | `:8888` |
|
||||
| `CODER_AIBRIDGE_PROXY_CERT_FILE` | Path to the CA certificate file | - |
|
||||
| `CODER_AIBRIDGE_PROXY_KEY_FILE` | Path to the CA private key file | - |
|
||||
|
||||
### Client Configuration
|
||||
|
||||
Clients must trust the proxy's CA certificate and authenticate with their Coder session token.
|
||||
|
||||
#### CA Certificate
|
||||
|
||||
Clients need to trust the MITM CA certificate:
|
||||
|
||||
```sh
|
||||
# Node.js
|
||||
export NODE_EXTRA_CA_CERTS="/path/to/mitm.crt"
|
||||
|
||||
# Python (requests, httpx)
|
||||
export REQUESTS_CA_BUNDLE="/path/to/mitm.crt"
|
||||
export SSL_CERT_FILE="/path/to/mitm.crt"
|
||||
|
||||
# Go
|
||||
export SSL_CERT_FILE="/path/to/mitm.crt"
|
||||
```
|
||||
|
||||
#### Proxy Authentication
|
||||
|
||||
Clients authenticate with the proxy using their Coder session token in the `Proxy-Authorization` header via HTTP Basic Auth.
|
||||
The token is passed as the password (username is ignored):
|
||||
|
||||
```sh
|
||||
export HTTP_PROXY="http://ignored:<coder-session-token>@<proxy-host>:<proxy-port>"
|
||||
export HTTPS_PROXY="http://ignored:<coder-session-token>@<proxy-host>:<proxy-port>"
|
||||
```
|
||||
|
||||
For example:
|
||||
|
||||
```sh
|
||||
export HTTP_PROXY="http://coder:${CODER_SESSION_TOKEN}@localhost:8888"
|
||||
export HTTPS_PROXY="http://coder:${CODER_SESSION_TOKEN}@localhost:8888"
|
||||
```
|
||||
|
||||
Most HTTP clients and AI SDKs will automatically use these environment variables.
|
||||
@@ -192,6 +192,41 @@ func TestEnterpriseCreate(t *testing.T) {
|
||||
}
|
||||
})
|
||||
|
||||
// Site-wide admins (Owners) can create workspaces in organizations they
|
||||
// are not a member of by using the --org flag.
|
||||
t.Run("OwnerCanCreateInNonMemberOrg", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const templateName = "ownertemplate"
|
||||
setup := setupMultipleOrganizations(t, setupArgs{
|
||||
secondTemplates: []string{templateName},
|
||||
})
|
||||
|
||||
// Create a new Owner user who is NOT a member of the second org.
|
||||
// The setup.owner created the second org and is auto-added as member,
|
||||
// so we need a different Owner to test the RBAC-only path.
|
||||
newOwner, _ := coderdtest.CreateAnotherUser(t, setup.owner, setup.firstResponse.OrganizationID, rbac.RoleOwner())
|
||||
|
||||
args := []string{
|
||||
"create",
|
||||
"owner-workspace",
|
||||
"-y",
|
||||
"--template", templateName,
|
||||
"--org", setup.second.Name,
|
||||
}
|
||||
inv, root := clitest.New(t, args...)
|
||||
clitest.SetupConfig(t, newOwner, root)
|
||||
_ = ptytest.New(t).Attach(inv)
|
||||
err := inv.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
ws, err := newOwner.WorkspaceByOwnerAndName(context.Background(), codersdk.Me, "owner-workspace", codersdk.WorkspaceOptions{})
|
||||
if assert.NoError(t, err, "expected workspace to be created") {
|
||||
assert.Equal(t, ws.TemplateName, templateName)
|
||||
assert.Equal(t, ws.OrganizationName, setup.second.Name, "workspace in second organization")
|
||||
}
|
||||
})
|
||||
|
||||
// If an organization is specified, but the template is not in that
|
||||
// organization, an error is thrown.
|
||||
t.Run("CreateIncorrectOrg", func(t *testing.T) {
|
||||
@@ -370,6 +405,7 @@ func TestEnterpriseCreateWithPreset(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer()
|
||||
api.AGPL.PrebuildsClaimer.Store(&claimer)
|
||||
@@ -483,6 +519,7 @@ func TestEnterpriseCreateWithPreset(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer()
|
||||
api.AGPL.PrebuildsClaimer.Store(&claimer)
|
||||
|
||||
@@ -64,7 +64,7 @@ func TestRemoveOrganizationMembers(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
inv.Stdout = buf
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.ErrorContains(t, err, "must be an existing uuid or username")
|
||||
require.ErrorContains(t, err, "Resource not found or you do not have access to this resource")
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -1331,6 +1331,7 @@ func (api *API) setupPrebuilds(featureEnabled bool) (agplprebuilds.Reconciliatio
|
||||
api.AGPL.BuildUsageChecker,
|
||||
api.TracerProvider,
|
||||
int(api.DeploymentValues.PostgresConnMaxOpen.Value()),
|
||||
api.AGPL.WorkspaceBuilderMetrics,
|
||||
)
|
||||
return reconciler, prebuilds.NewEnterpriseClaimer()
|
||||
}
|
||||
|
||||
@@ -174,6 +174,7 @@ func TestClaimPrebuild(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer()
|
||||
api.AGPL.PrebuildsClaimer.Store(&claimer)
|
||||
|
||||
@@ -204,6 +204,7 @@ func TestMetricsCollector(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
@@ -344,6 +345,7 @@ func TestMetricsCollector_DuplicateTemplateNames(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
@@ -500,6 +502,7 @@ func TestMetricsCollector_ReconciliationPausedMetric(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
@@ -537,6 +540,7 @@ func TestMetricsCollector_ReconciliationPausedMetric(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
@@ -574,6 +578,7 @@ func TestMetricsCollector_ReconciliationPausedMetric(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
|
||||
@@ -51,9 +51,12 @@ type StoreReconciler struct {
|
||||
buildUsageChecker *atomic.Pointer[wsbuilder.UsageChecker]
|
||||
tracer trace.Tracer
|
||||
|
||||
cancelFn context.CancelCauseFunc
|
||||
running atomic.Bool
|
||||
stopped atomic.Bool
|
||||
// mu protects the reconciler's lifecycle state.
|
||||
mu sync.Mutex
|
||||
running bool
|
||||
stopped bool
|
||||
cancelFn context.CancelCauseFunc
|
||||
|
||||
done chan struct{}
|
||||
provisionNotifyCh chan database.ProvisionerJob
|
||||
|
||||
@@ -62,7 +65,8 @@ type StoreReconciler struct {
|
||||
// Prebuild state metrics
|
||||
metrics *MetricsCollector
|
||||
// Operational metrics
|
||||
reconciliationDuration prometheus.Histogram
|
||||
reconciliationDuration prometheus.Histogram
|
||||
workspaceBuilderMetrics *wsbuilder.Metrics
|
||||
}
|
||||
|
||||
var _ prebuilds.ReconciliationOrchestrator = &StoreReconciler{}
|
||||
@@ -96,6 +100,7 @@ func NewStoreReconciler(store database.Store,
|
||||
buildUsageChecker *atomic.Pointer[wsbuilder.UsageChecker],
|
||||
tracerProvider trace.TracerProvider,
|
||||
maxDBConnections int,
|
||||
workspaceBuilderMetrics *wsbuilder.Metrics,
|
||||
) *StoreReconciler {
|
||||
reconciliationConcurrency := calculateReconciliationConcurrency(maxDBConnections)
|
||||
|
||||
@@ -117,6 +122,7 @@ func NewStoreReconciler(store database.Store,
|
||||
done: make(chan struct{}, 1),
|
||||
provisionNotifyCh: make(chan database.ProvisionerJob, 10),
|
||||
reconciliationConcurrency: reconciliationConcurrency,
|
||||
workspaceBuilderMetrics: workspaceBuilderMetrics,
|
||||
}
|
||||
|
||||
if registerer != nil {
|
||||
@@ -174,18 +180,33 @@ func (c *StoreReconciler) Run(ctx context.Context) {
|
||||
slog.F("backoff_lookback", c.cfg.ReconciliationBackoffLookback.String()),
|
||||
slog.F("preset_concurrency", c.reconciliationConcurrency))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
// Create a child context that will be canceled when:
|
||||
// 1. The parent context is canceled, OR
|
||||
// 2. c.cancelFn() is called to trigger shutdown
|
||||
// nolint:gocritic // Reconciliation Loop needs Prebuilds Orchestrator permissions.
|
||||
ctx, cancel := context.WithCancelCause(dbauthz.AsPrebuildsOrchestrator(ctx))
|
||||
|
||||
// If the reconciler was already stopped, exit early and release the context.
|
||||
// Otherwise, mark it as running and store the cancel function for shutdown.
|
||||
c.mu.Lock()
|
||||
if c.stopped || c.running {
|
||||
c.mu.Unlock()
|
||||
cancel(nil)
|
||||
return
|
||||
}
|
||||
c.running = true
|
||||
c.cancelFn = cancel
|
||||
c.mu.Unlock()
|
||||
|
||||
ticker := c.clock.NewTicker(reconciliationInterval)
|
||||
defer ticker.Stop()
|
||||
// Wait for all background goroutines to exit before signaling completion.
|
||||
var wg sync.WaitGroup
|
||||
defer func() {
|
||||
wg.Wait()
|
||||
c.done <- struct{}{}
|
||||
}()
|
||||
|
||||
// nolint:gocritic // Reconciliation Loop needs Prebuilds Orchestrator permissions.
|
||||
ctx, cancel := context.WithCancelCause(dbauthz.AsPrebuildsOrchestrator(ctx))
|
||||
c.cancelFn = cancel
|
||||
|
||||
// Start updating metrics in the background.
|
||||
if c.metrics != nil {
|
||||
wg.Add(1)
|
||||
@@ -195,11 +216,6 @@ func (c *StoreReconciler) Run(ctx context.Context) {
|
||||
}()
|
||||
}
|
||||
|
||||
// Everything is in place, reconciler can now be considered as running.
|
||||
//
|
||||
// NOTE: without this atomic bool, Stop might race with Run for the c.cancelFn above.
|
||||
c.running.Store(true)
|
||||
|
||||
// Publish provisioning jobs outside of database transactions.
|
||||
// A connection is held while a database transaction is active; PGPubsub also tries to acquire a new connection on
|
||||
// Publish, so we can exhaust available connections.
|
||||
@@ -207,11 +223,11 @@ func (c *StoreReconciler) Run(ctx context.Context) {
|
||||
// A single worker dequeues from the channel, which should be sufficient.
|
||||
// If any messages are missed due to congestion or errors, provisionerdserver has a backup polling mechanism which
|
||||
// will periodically pick up any queued jobs (see poll(time.Duration) in coderd/provisionerdserver/acquirer.go).
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for {
|
||||
select {
|
||||
case <-c.done:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case job := <-c.provisionNotifyCh:
|
||||
@@ -256,21 +272,29 @@ func (c *StoreReconciler) Run(ctx context.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
// Stop triggers reconciler shutdown and waits for it to complete.
|
||||
// The ctx parameter provides a timeout, if cleanup doesn't finish within
|
||||
// this timeout, Stop() logs an error and returns.
|
||||
func (c *StoreReconciler) Stop(ctx context.Context, cause error) {
|
||||
defer c.running.Store(false)
|
||||
|
||||
if cause != nil {
|
||||
c.logger.Info(context.Background(), "stopping reconciler", slog.F("cause", cause.Error()))
|
||||
} else {
|
||||
c.logger.Info(context.Background(), "stopping reconciler")
|
||||
}
|
||||
|
||||
// If previously stopped (Swap returns previous value), then short-circuit.
|
||||
// Mark the reconciler as stopped. If it was already stopped, return early.
|
||||
// If the reconciler is running, we'll proceed to shut it down.
|
||||
//
|
||||
// NOTE: we need to *prospectively* mark this as stopped to prevent Stop being called multiple times and causing problems.
|
||||
if c.stopped.Swap(true) {
|
||||
// NOTE: we need to *prospectively* mark this as stopped to prevent the
|
||||
// reconciler from being stopped multiple times and causing problems.
|
||||
c.mu.Lock()
|
||||
if c.stopped {
|
||||
c.mu.Unlock()
|
||||
return
|
||||
}
|
||||
c.stopped = true
|
||||
running := c.running
|
||||
c.mu.Unlock()
|
||||
|
||||
// Unregister prebuilds state and operational metrics.
|
||||
if c.metrics != nil && c.registerer != nil {
|
||||
@@ -289,16 +313,18 @@ func (c *StoreReconciler) Stop(ctx context.Context, cause error) {
|
||||
}
|
||||
|
||||
// If the reconciler is not running, there's nothing else to do.
|
||||
if !c.running.Load() {
|
||||
if !running {
|
||||
return
|
||||
}
|
||||
|
||||
// Trigger reconciler shutdown by canceling its internal context.
|
||||
if c.cancelFn != nil {
|
||||
c.cancelFn(cause)
|
||||
}
|
||||
|
||||
// Wait for the reconciler to signal that it has fully exited and cleaned up.
|
||||
select {
|
||||
// Give up waiting for control loop to exit.
|
||||
// Timeout: reconciler didn't finish cleanup within the timeout period.
|
||||
case <-ctx.Done():
|
||||
// nolint:gocritic // it's okay to use slog.F() for an error in this case
|
||||
// because we want to differentiate two different types of errors: ctx.Err() and context.Cause()
|
||||
@@ -308,7 +334,7 @@ func (c *StoreReconciler) Stop(ctx context.Context, cause error) {
|
||||
slog.Error(ctx.Err()),
|
||||
slog.F("cause", context.Cause(ctx)),
|
||||
)
|
||||
// Wait for the control loop to exit.
|
||||
// Happy path: reconciler has successfully exited.
|
||||
case <-c.done:
|
||||
c.logger.Info(context.Background(), "reconciler stopped")
|
||||
}
|
||||
@@ -1029,7 +1055,8 @@ func (c *StoreReconciler) provision(
|
||||
builder := wsbuilder.New(workspace, transition, *c.buildUsageChecker.Load()).
|
||||
Reason(database.BuildReasonInitiator).
|
||||
Initiator(database.PrebuildsSystemUserID).
|
||||
MarkPrebuild()
|
||||
MarkPrebuild().
|
||||
BuildMetrics(c.workspaceBuilderMetrics)
|
||||
|
||||
if transition != database.WorkspaceTransitionDelete {
|
||||
// We don't specify the version for a delete transition,
|
||||
|
||||
@@ -61,6 +61,7 @@ func TestNoReconciliationActionsIfNoPresets(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
|
||||
// given a template version with no presets
|
||||
@@ -112,6 +113,7 @@ func TestNoReconciliationActionsIfNoPrebuilds(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
|
||||
// given there are presets, but no prebuilds
|
||||
@@ -450,6 +452,7 @@ func (tc testCase) run(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
|
||||
// Run the reconciliation multiple times to ensure idempotency
|
||||
@@ -527,6 +530,7 @@ func TestMultiplePresetsPerTemplateVersion(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
|
||||
ownerID := uuid.New()
|
||||
@@ -658,6 +662,7 @@ func TestPrebuildScheduling(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
|
||||
ownerID := uuid.New()
|
||||
@@ -767,6 +772,7 @@ func TestInvalidPreset(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
|
||||
ownerID := uuid.New()
|
||||
@@ -837,6 +843,7 @@ func TestDeletionOfPrebuiltWorkspaceWithInvalidPreset(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
|
||||
ownerID := uuid.New()
|
||||
@@ -939,6 +946,7 @@ func TestSkippingHardLimitedPresets(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
|
||||
// Set up test environment with a template, version, and preset.
|
||||
@@ -1090,6 +1098,7 @@ func TestHardLimitedPresetShouldNotBlockDeletion(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
|
||||
// Set up test environment with a template, version, and preset.
|
||||
@@ -1279,9 +1288,8 @@ func TestRunLoop(t *testing.T) {
|
||||
ReconciliationBackoffInterval: serpent.Duration(backoffInterval),
|
||||
ReconciliationInterval: serpent.Duration(time.Second),
|
||||
}
|
||||
logger := slogtest.Make(
|
||||
t, &slogtest.Options{IgnoreErrors: true},
|
||||
).Leveled(slog.LevelDebug)
|
||||
// Do not ignore errors as we want a graceful stop
|
||||
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: false}).Leveled(slog.LevelDebug)
|
||||
db, pubSub := dbtestutil.NewDB(t)
|
||||
cache := files.New(prometheus.NewRegistry(), &coderdtest.FakeAuthorizer{})
|
||||
reconciler := prebuilds.NewStoreReconciler(
|
||||
@@ -1292,6 +1300,7 @@ func TestRunLoop(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
|
||||
ownerID := uuid.New()
|
||||
@@ -1424,6 +1433,7 @@ func TestReconcilerLifecycle(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
|
||||
// When: the reconciler is stopped (simulating the prebuilds feature being disabled)
|
||||
@@ -1439,6 +1449,7 @@ func TestReconcilerLifecycle(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
|
||||
// Gracefully stop the reconciliation loop
|
||||
@@ -1472,6 +1483,7 @@ func TestFailedBuildBackoff(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
|
||||
// Given: an active template version with presets and prebuilds configured.
|
||||
@@ -1596,6 +1608,7 @@ func TestReconciliationLock(t *testing.T) {
|
||||
newNoopEnqueuer(),
|
||||
newNoopUsageCheckerPtr(), noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
reconciler.WithReconciliationLock(ctx, logger, func(_ context.Context, _ database.Store) error {
|
||||
lockObtained := mutex.TryLock()
|
||||
@@ -1634,6 +1647,7 @@ func TestTrackResourceReplacement(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
|
||||
// Given: a template admin to receive a notification.
|
||||
@@ -1794,6 +1808,7 @@ func TestExpiredPrebuildsMultipleActions(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
|
||||
// Set up test environment with a template, version, and preset
|
||||
@@ -2259,6 +2274,7 @@ func TestCancelPendingPrebuilds(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
@@ -2504,6 +2520,7 @@ func TestCancelPendingPrebuilds(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
@@ -2577,6 +2594,7 @@ func TestReconciliationStats(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
@@ -3067,6 +3085,7 @@ func TestReconciliationRespectsPauseSetting(t *testing.T) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
|
||||
// Setup a template with a preset that should create prebuilds
|
||||
@@ -3173,6 +3192,7 @@ func BenchmarkReconcileAll_NoOps(b *testing.B) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
maxOpenConns,
|
||||
nil,
|
||||
)
|
||||
|
||||
org := dbgen.Organization(b, db, database.Organization{})
|
||||
@@ -3284,6 +3304,7 @@ func BenchmarkReconcileAll_ConnectionContention(b *testing.B) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
maxOpenConns,
|
||||
nil,
|
||||
)
|
||||
|
||||
// Create presets from active template versions that need reconciliation actions
|
||||
@@ -3403,6 +3424,7 @@ func BenchmarkReconcileAll_Mix(b *testing.B) {
|
||||
newNoopUsageCheckerPtr(),
|
||||
noop.NewTracerProvider(),
|
||||
maxOpenConns,
|
||||
nil,
|
||||
)
|
||||
|
||||
org := dbgen.Organization(b, db, database.Organization{})
|
||||
|
||||
@@ -356,7 +356,7 @@ func TestGrantSiteRoles(t *testing.T) {
|
||||
AssignToUser: uuid.NewString(),
|
||||
Roles: []string{codersdk.RoleOwner},
|
||||
Error: true,
|
||||
StatusCode: http.StatusBadRequest,
|
||||
StatusCode: http.StatusNotFound,
|
||||
},
|
||||
{
|
||||
Name: "MemberCannotUpdateRoles",
|
||||
@@ -364,7 +364,7 @@ func TestGrantSiteRoles(t *testing.T) {
|
||||
AssignToUser: first.UserID.String(),
|
||||
Roles: []string{},
|
||||
Error: true,
|
||||
StatusCode: http.StatusBadRequest,
|
||||
StatusCode: http.StatusNotFound,
|
||||
},
|
||||
{
|
||||
// Cannot update your own roles
|
||||
|
||||
@@ -1991,6 +1991,7 @@ func TestPrebuildsAutobuild(t *testing.T) {
|
||||
api.AGPL.BuildUsageChecker,
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer()
|
||||
api.AGPL.PrebuildsClaimer.Store(&claimer)
|
||||
@@ -2115,6 +2116,7 @@ func TestPrebuildsAutobuild(t *testing.T) {
|
||||
api.AGPL.BuildUsageChecker,
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer()
|
||||
api.AGPL.PrebuildsClaimer.Store(&claimer)
|
||||
@@ -2239,6 +2241,7 @@ func TestPrebuildsAutobuild(t *testing.T) {
|
||||
api.AGPL.BuildUsageChecker,
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer()
|
||||
api.AGPL.PrebuildsClaimer.Store(&claimer)
|
||||
@@ -2385,6 +2388,7 @@ func TestPrebuildsAutobuild(t *testing.T) {
|
||||
api.AGPL.BuildUsageChecker,
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer()
|
||||
api.AGPL.PrebuildsClaimer.Store(&claimer)
|
||||
@@ -2532,6 +2536,7 @@ func TestPrebuildsAutobuild(t *testing.T) {
|
||||
api.AGPL.BuildUsageChecker,
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer()
|
||||
api.AGPL.PrebuildsClaimer.Store(&claimer)
|
||||
@@ -2979,6 +2984,7 @@ func TestWorkspaceProvisionerdServerMetrics(t *testing.T) {
|
||||
api.AGPL.BuildUsageChecker,
|
||||
noop.NewTracerProvider(),
|
||||
10,
|
||||
nil,
|
||||
)
|
||||
var claimer agplprebuilds.Claimer = prebuilds.NewEnterpriseClaimer()
|
||||
api.AGPL.PrebuildsClaimer.Store(&claimer)
|
||||
|
||||
@@ -152,7 +152,7 @@ func TestEnterpriseMembers(t *testing.T) {
|
||||
require.Error(t, err)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Contains(t, apiErr.Message, "must be an existing")
|
||||
require.Contains(t, apiErr.Message, "Resource not found or you do not have access to this resource")
|
||||
})
|
||||
|
||||
// Calling it from a user without the org access.
|
||||
|
||||
@@ -163,7 +163,7 @@ require (
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e
|
||||
github.com/pkg/sftp v1.13.7
|
||||
github.com/prometheus-community/pro-bing v0.7.0
|
||||
github.com/prometheus-community/pro-bing v0.8.0
|
||||
github.com/prometheus/client_golang v1.23.2
|
||||
github.com/prometheus/client_model v0.6.2
|
||||
github.com/prometheus/common v0.67.4
|
||||
@@ -198,14 +198,14 @@ require (
|
||||
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546
|
||||
golang.org/x/mod v0.32.0
|
||||
golang.org/x/net v0.49.0
|
||||
golang.org/x/oauth2 v0.34.0
|
||||
golang.org/x/oauth2 v0.35.0
|
||||
golang.org/x/sync v0.19.0
|
||||
golang.org/x/sys v0.40.0
|
||||
golang.org/x/sys v0.41.0
|
||||
golang.org/x/term v0.39.0
|
||||
golang.org/x/text v0.33.0
|
||||
golang.org/x/tools v0.41.0
|
||||
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da
|
||||
google.golang.org/api v0.264.0
|
||||
google.golang.org/api v0.265.0
|
||||
google.golang.org/grpc v1.78.0
|
||||
google.golang.org/protobuf v1.36.11
|
||||
gopkg.in/DataDog/dd-trace-go.v1 v1.74.0
|
||||
@@ -450,7 +450,7 @@ require (
|
||||
google.golang.org/appengine v1.6.8 // indirect
|
||||
google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
howett.net/plist v1.0.0 // indirect
|
||||
kernel.org/pub/linux/libs/security/libcap/psx v1.2.77 // indirect
|
||||
@@ -473,7 +473,7 @@ require (
|
||||
github.com/anthropics/anthropic-sdk-go v1.19.0
|
||||
github.com/brianvoe/gofakeit/v7 v7.14.0
|
||||
github.com/coder/agentapi-sdk-go v0.0.0-20250505131810-560d1d88d225
|
||||
github.com/coder/aibridge v1.0.2
|
||||
github.com/coder/aibridge v1.0.3
|
||||
github.com/coder/aisdk-go v0.0.9
|
||||
github.com/coder/boundary v0.8.0
|
||||
github.com/coder/preview v1.0.4
|
||||
@@ -481,7 +481,7 @@ require (
|
||||
github.com/dgraph-io/ristretto/v2 v2.4.0
|
||||
github.com/elazarl/goproxy v1.8.0
|
||||
github.com/fsnotify/fsnotify v1.9.0
|
||||
github.com/go-git/go-git/v5 v5.16.2
|
||||
github.com/go-git/go-git/v5 v5.16.5
|
||||
github.com/icholy/replace v0.6.0
|
||||
github.com/mark3labs/mcp-go v0.38.0
|
||||
gonum.org/v1/gonum v0.17.0
|
||||
|
||||
@@ -927,8 +927,8 @@ github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y
|
||||
github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4=
|
||||
github.com/coder/agentapi-sdk-go v0.0.0-20250505131810-560d1d88d225 h1:tRIViZ5JRmzdOEo5wUWngaGEFBG8OaE1o2GIHN5ujJ8=
|
||||
github.com/coder/agentapi-sdk-go v0.0.0-20250505131810-560d1d88d225/go.mod h1:rNLVpYgEVeu1Zk29K64z6Od8RBP9DwqCu9OfCzh8MR4=
|
||||
github.com/coder/aibridge v1.0.2 h1:cVPr9+TFLIzULpKPGI/1lnL14+DruedR7KnjZHklIEU=
|
||||
github.com/coder/aibridge v1.0.2/go.mod h1:c7Of2xfAksZUrPWN180Eh60fiKgzs7dyOjniTjft6AE=
|
||||
github.com/coder/aibridge v1.0.3 h1:gt3XKbnFBJ/jyls/yanU/iWZO5yhd6LVYuTQbEZ/SxQ=
|
||||
github.com/coder/aibridge v1.0.3/go.mod h1:c7Of2xfAksZUrPWN180Eh60fiKgzs7dyOjniTjft6AE=
|
||||
github.com/coder/aisdk-go v0.0.9 h1:Vzo/k2qwVGLTR10ESDeP2Ecek1SdPfZlEjtTfMveiVo=
|
||||
github.com/coder/aisdk-go v0.0.9/go.mod h1:KF6/Vkono0FJJOtWtveh5j7yfNrSctVTpwgweYWSp5M=
|
||||
github.com/coder/boundary v0.8.0 h1:g/H6VIGY4IoWeKkbvao7zhO1BAQe7upSHfHzoAZxdik=
|
||||
@@ -1149,8 +1149,8 @@ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66D
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
|
||||
github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM=
|
||||
github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU=
|
||||
github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77hM=
|
||||
github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
|
||||
github.com/go-git/go-git/v5 v5.16.5 h1:mdkuqblwr57kVfXri5TTH+nMFLNUxIj9Z7F5ykFbw5s=
|
||||
github.com/go-git/go-git/v5 v5.16.5/go.mod h1:QOMLpNf1qxuSY4StA/ArOdfFR2TrKEjJiye2kel2m+M=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
@@ -1743,8 +1743,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/prometheus-community/pro-bing v0.7.0 h1:KFYFbxC2f2Fp6c+TyxbCOEarf7rbnzr9Gw8eIb0RfZA=
|
||||
github.com/prometheus-community/pro-bing v0.7.0/go.mod h1:Moob9dvlY50Bfq6i88xIwfyw7xLFHH69LUgx9n5zqCE=
|
||||
github.com/prometheus-community/pro-bing v0.8.0 h1:CEY/g1/AgERRDjxw5P32ikcOgmrSuXs7xon7ovx6mNc=
|
||||
github.com/prometheus-community/pro-bing v0.8.0/go.mod h1:Idyxz8raDO6TgkUN6ByiEGvWJNyQd40kN9ZUeho3lN0=
|
||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
@@ -2264,8 +2264,8 @@ golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec
|
||||
golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I=
|
||||
golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
|
||||
golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
|
||||
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
|
||||
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ=
|
||||
golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -2385,8 +2385,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
|
||||
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
||||
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
|
||||
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 h1:O1cMQHRfwNpDfDJerqRoE2oD+AFlyid87D40L/OkkJo=
|
||||
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2/go.mod h1:b7fPSJ0pKZ3ccUh8gnTONJxhn3c/PS6tyzQvyqw4iA8=
|
||||
@@ -2591,8 +2591,8 @@ google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/
|
||||
google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI=
|
||||
google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0=
|
||||
google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg=
|
||||
google.golang.org/api v0.264.0 h1:+Fo3DQXBK8gLdf8rFZ3uLu39JpOnhvzJrLMQSoSYZJM=
|
||||
google.golang.org/api v0.264.0/go.mod h1:fAU1xtNNisHgOF5JooAs8rRaTkl2rT3uaoNGo9NS3R8=
|
||||
google.golang.org/api v0.265.0 h1:FZvfUdI8nfmuNrE34aOWFPmLC+qRBEiNm3JdivTvAAU=
|
||||
google.golang.org/api v0.265.0/go.mod h1:uAvfEl3SLUj/7n6k+lJutcswVojHPp2Sp08jWCu8hLY=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
@@ -2737,8 +2737,8 @@ google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb
|
||||
google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d h1:xXzuihhT3gL/ntduUZwHECzAn57E8dA6l8SOtYWdD8Q=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
|
||||
+13
-3
@@ -17,6 +17,7 @@ import (
|
||||
|
||||
"github.com/acarl005/stripansi"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/atomic"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/pty"
|
||||
@@ -78,7 +79,7 @@ func newExpecter(t *testing.T, r io.Reader, name string) outExpecter {
|
||||
ex := outExpecter{
|
||||
t: t,
|
||||
out: out,
|
||||
name: name,
|
||||
name: atomic.NewString(name),
|
||||
|
||||
runeReader: bufio.NewReaderSize(out, utf8.UTFMax),
|
||||
}
|
||||
@@ -140,7 +141,7 @@ type outExpecter struct {
|
||||
t *testing.T
|
||||
close func(reason string) error
|
||||
out *stdbuf
|
||||
name string
|
||||
name *atomic.String
|
||||
|
||||
runeReader *bufio.Reader
|
||||
}
|
||||
@@ -361,7 +362,7 @@ func (e *outExpecter) logf(format string, args ...interface{}) {
|
||||
|
||||
// Match regular logger timestamp format, we seem to be logging in
|
||||
// UTC in other places as well, so match here.
|
||||
e.t.Logf("%s: %s: %s", time.Now().UTC().Format("2006-01-02 15:04:05.000"), e.name, fmt.Sprintf(format, args...))
|
||||
e.t.Logf("%s: %s: %s", time.Now().UTC().Format("2006-01-02 15:04:05.000"), e.name.Load(), fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (e *outExpecter) fatalf(reason string, format string, args ...interface{}) {
|
||||
@@ -430,6 +431,15 @@ func (p *PTY) WriteLine(str string) {
|
||||
require.NoError(p.t, err, "write line failed")
|
||||
}
|
||||
|
||||
// Named sets the PTY name in the logs. Defaults to "cmd". Make sure you set this before anything starts writing to the
|
||||
// pty, or it may not be named consistently. E.g.
|
||||
//
|
||||
// p := New(t).Named("myCmd")
|
||||
func (p *PTY) Named(name string) *PTY {
|
||||
p.name.Store(name)
|
||||
return p
|
||||
}
|
||||
|
||||
type PTYCmd struct {
|
||||
outExpecter
|
||||
pty.PTYCmd
|
||||
|
||||
@@ -0,0 +1,52 @@
|
||||
# Metrics Documentation Generator
|
||||
|
||||
This tool generates the Prometheus metrics documentation at [`docs/admin/integrations/prometheus.md`](https://coder.com/docs/admin/integrations/prometheus#available-metrics).
|
||||
|
||||
## How It Works
|
||||
|
||||
The documentation is generated from two metrics files:
|
||||
|
||||
1. `metrics` (static, manually maintained)
|
||||
2. `generated_metrics` (auto-generated, do not edit)
|
||||
|
||||
These files are merged and used to produce the final documentation.
|
||||
|
||||
### `metrics` (static)
|
||||
|
||||
Contains metrics that are **not** directly defined in the coder source code:
|
||||
|
||||
- `go_*`: Go runtime metrics
|
||||
- `process_*`: Process metrics from prometheus/client_golang
|
||||
- `promhttp_*`: Prometheus HTTP handler metrics
|
||||
- `coder_aibridged_*`: Metrics from external dependencies
|
||||
|
||||
> [!Note]
|
||||
> This file also contains edge cases where metric metadata cannot be accurately extracted by the scanner (e.g., labels determined by runtime logic).
|
||||
> Static metrics take priority over generated metrics when both files contain the same metric name.
|
||||
|
||||
**Edit this file** to add metrics that should appear in the documentation but are not scanned from the coder codebase,
|
||||
or to manually override metrics where the scanner generates incorrect metadata (e.g., missing runtime-determined labels like in `agent_scripts_executed_total`).
|
||||
|
||||
### `generated_metrics` (auto-generated)
|
||||
|
||||
Contains metrics extracted from the coder source code by the AST scanner (`scanner/scanner.go`).
|
||||
|
||||
**Do not edit this file directly.** It is regenerated by running:
|
||||
|
||||
```bash
|
||||
make scripts/metricsdocgen/generated_metrics
|
||||
```
|
||||
|
||||
## Updating Metrics Documentation
|
||||
|
||||
To regenerate the documentation after code changes:
|
||||
|
||||
```bash
|
||||
make docs/admin/integrations/prometheus.md
|
||||
```
|
||||
|
||||
This will:
|
||||
|
||||
- Run the scanner to update `generated_metrics`
|
||||
- Merge `metrics` and `generated_metrics` metric files
|
||||
- Update the documentation file
|
||||
@@ -0,0 +1,330 @@
|
||||
# HELP coder_pubsub_connected Whether we are connected (1) or not connected (0) to postgres
|
||||
# TYPE coder_pubsub_connected gauge
|
||||
coder_pubsub_connected 0
|
||||
# HELP coder_pubsub_current_events The current number of pubsub event channels listened for
|
||||
# TYPE coder_pubsub_current_events gauge
|
||||
coder_pubsub_current_events 0
|
||||
# HELP coder_pubsub_current_subscribers The current number of active pubsub subscribers
|
||||
# TYPE coder_pubsub_current_subscribers gauge
|
||||
coder_pubsub_current_subscribers 0
|
||||
# HELP coder_pubsub_disconnections_total Total number of times we disconnected unexpectedly from postgres
|
||||
# TYPE coder_pubsub_disconnections_total counter
|
||||
coder_pubsub_disconnections_total 0
|
||||
# HELP coder_pubsub_latency_measure_errs_total The number of pubsub latency measurement failures
|
||||
# TYPE coder_pubsub_latency_measure_errs_total counter
|
||||
coder_pubsub_latency_measure_errs_total 0
|
||||
# HELP coder_pubsub_latency_measures_total The number of pubsub latency measurements
|
||||
# TYPE coder_pubsub_latency_measures_total counter
|
||||
coder_pubsub_latency_measures_total 0
|
||||
# HELP coder_pubsub_messages_total Total number of messages received from postgres
|
||||
# TYPE coder_pubsub_messages_total counter
|
||||
coder_pubsub_messages_total{size=""} 0
|
||||
# HELP coder_pubsub_published_bytes_total Total number of bytes successfully published across all publishes
|
||||
# TYPE coder_pubsub_published_bytes_total counter
|
||||
coder_pubsub_published_bytes_total 0
|
||||
# HELP coder_pubsub_publishes_total Total number of calls to Publish
|
||||
# TYPE coder_pubsub_publishes_total counter
|
||||
coder_pubsub_publishes_total{success=""} 0
|
||||
# HELP coder_pubsub_receive_latency_seconds The time taken to receive a message from a pubsub event channel
|
||||
# TYPE coder_pubsub_receive_latency_seconds gauge
|
||||
coder_pubsub_receive_latency_seconds 0
|
||||
# HELP coder_pubsub_received_bytes_total Total number of bytes received across all messages
|
||||
# TYPE coder_pubsub_received_bytes_total counter
|
||||
coder_pubsub_received_bytes_total 0
|
||||
# HELP coder_pubsub_send_latency_seconds The time taken to send a message into a pubsub event channel
|
||||
# TYPE coder_pubsub_send_latency_seconds gauge
|
||||
coder_pubsub_send_latency_seconds 0
|
||||
# HELP coder_pubsub_subscribes_total Total number of calls to Subscribe/SubscribeWithErr
|
||||
# TYPE coder_pubsub_subscribes_total counter
|
||||
coder_pubsub_subscribes_total{success=""} 0
|
||||
# HELP coder_servertailnet_connections_total Total number of TCP connections made to workspace agents.
|
||||
# TYPE coder_servertailnet_connections_total counter
|
||||
coder_servertailnet_connections_total{network=""} 0
|
||||
# HELP coder_servertailnet_open_connections Total number of TCP connections currently open to workspace agents.
|
||||
# TYPE coder_servertailnet_open_connections gauge
|
||||
coder_servertailnet_open_connections{network=""} 0
|
||||
# HELP coderd_agentapi_metadata_batch_size Total number of metadata entries in each batch, updated before flushes.
|
||||
# TYPE coderd_agentapi_metadata_batch_size histogram
|
||||
coderd_agentapi_metadata_batch_size 0
|
||||
# HELP coderd_agentapi_metadata_batch_utilization Number of metadata keys per agent in each batch, updated before flushes.
|
||||
# TYPE coderd_agentapi_metadata_batch_utilization histogram
|
||||
coderd_agentapi_metadata_batch_utilization 0
|
||||
# HELP coderd_agentapi_metadata_batches_total Total number of metadata batches flushed.
|
||||
# TYPE coderd_agentapi_metadata_batches_total counter
|
||||
coderd_agentapi_metadata_batches_total{reason=""} 0
|
||||
# HELP coderd_agentapi_metadata_dropped_keys_total Total number of metadata keys dropped due to capacity limits.
|
||||
# TYPE coderd_agentapi_metadata_dropped_keys_total counter
|
||||
coderd_agentapi_metadata_dropped_keys_total 0
|
||||
# HELP coderd_agentapi_metadata_flush_duration_seconds Time taken to flush metadata batch to database and pubsub.
|
||||
# TYPE coderd_agentapi_metadata_flush_duration_seconds histogram
|
||||
coderd_agentapi_metadata_flush_duration_seconds{reason=""} 0
|
||||
# HELP coderd_agentapi_metadata_flushed_total Total number of unique metadatas flushed.
|
||||
# TYPE coderd_agentapi_metadata_flushed_total counter
|
||||
coderd_agentapi_metadata_flushed_total 0
|
||||
# HELP coderd_agentapi_metadata_publish_errors_total Total number of metadata batch pubsub publish calls that have resulted in an error.
|
||||
# TYPE coderd_agentapi_metadata_publish_errors_total counter
|
||||
coderd_agentapi_metadata_publish_errors_total 0
|
||||
# HELP coderd_agents_apps Agent applications with statuses.
|
||||
# TYPE coderd_agents_apps gauge
|
||||
coderd_agents_apps{agent_name="",username="",workspace_name="",app_name="",health=""} 0
|
||||
# HELP coderd_agents_connection_latencies_seconds Agent connection latencies in seconds.
|
||||
# TYPE coderd_agents_connection_latencies_seconds gauge
|
||||
coderd_agents_connection_latencies_seconds{agent_name="",username="",workspace_name="",derp_region="",preferred=""} 0
|
||||
# HELP coderd_agents_connections Agent connections with statuses.
|
||||
# TYPE coderd_agents_connections gauge
|
||||
coderd_agents_connections{agent_name="",username="",workspace_name="",status="",lifecycle_state="",tailnet_node=""} 0
|
||||
# HELP coderd_agents_up The number of active agents per workspace.
|
||||
# TYPE coderd_agents_up gauge
|
||||
coderd_agents_up{username="",workspace_name="",template_name="",template_version=""} 0
|
||||
# HELP coderd_agentstats_connection_count The number of established connections by agent
|
||||
# TYPE coderd_agentstats_connection_count gauge
|
||||
coderd_agentstats_connection_count 0
|
||||
# HELP coderd_agentstats_connection_median_latency_seconds The median agent connection latency in seconds
|
||||
# TYPE coderd_agentstats_connection_median_latency_seconds gauge
|
||||
coderd_agentstats_connection_median_latency_seconds 0
|
||||
# HELP coderd_agentstats_currently_reachable_peers The number of peers (e.g. clients) that are currently reachable over the encrypted network.
|
||||
# TYPE coderd_agentstats_currently_reachable_peers gauge
|
||||
coderd_agentstats_currently_reachable_peers{connection_type=""} 0
|
||||
# HELP coderd_agentstats_rx_bytes Agent Rx bytes
|
||||
# TYPE coderd_agentstats_rx_bytes gauge
|
||||
coderd_agentstats_rx_bytes 0
|
||||
# HELP coderd_agentstats_session_count_jetbrains The number of session established by JetBrains
|
||||
# TYPE coderd_agentstats_session_count_jetbrains gauge
|
||||
coderd_agentstats_session_count_jetbrains 0
|
||||
# HELP coderd_agentstats_session_count_reconnecting_pty The number of session established by reconnecting PTY
|
||||
# TYPE coderd_agentstats_session_count_reconnecting_pty gauge
|
||||
coderd_agentstats_session_count_reconnecting_pty 0
|
||||
# HELP coderd_agentstats_session_count_ssh The number of session established by SSH
|
||||
# TYPE coderd_agentstats_session_count_ssh gauge
|
||||
coderd_agentstats_session_count_ssh 0
|
||||
# HELP coderd_agentstats_session_count_vscode The number of session established by VSCode
|
||||
# TYPE coderd_agentstats_session_count_vscode gauge
|
||||
coderd_agentstats_session_count_vscode 0
|
||||
# HELP coderd_agentstats_startup_script_seconds Amount of time taken to run the startup script in seconds.
|
||||
# TYPE coderd_agentstats_startup_script_seconds gauge
|
||||
coderd_agentstats_startup_script_seconds{success=""} 0
|
||||
# HELP coderd_agentstats_tx_bytes Agent Tx bytes
|
||||
# TYPE coderd_agentstats_tx_bytes gauge
|
||||
coderd_agentstats_tx_bytes 0
|
||||
# HELP coderd_api_active_users_duration_hour The number of users that have been active within the last hour.
|
||||
# TYPE coderd_api_active_users_duration_hour gauge
|
||||
coderd_api_active_users_duration_hour 0
|
||||
# HELP coderd_api_concurrent_requests The number of concurrent API requests.
|
||||
# TYPE coderd_api_concurrent_requests gauge
|
||||
coderd_api_concurrent_requests{method="",path=""} 0
|
||||
# HELP coderd_api_concurrent_websockets The total number of concurrent API websockets.
|
||||
# TYPE coderd_api_concurrent_websockets gauge
|
||||
coderd_api_concurrent_websockets{path=""} 0
|
||||
# HELP coderd_api_request_latencies_seconds Latency distribution of requests in seconds.
|
||||
# TYPE coderd_api_request_latencies_seconds histogram
|
||||
coderd_api_request_latencies_seconds{method="",path=""} 0
|
||||
# HELP coderd_api_requests_processed_total The total number of processed API requests
|
||||
# TYPE coderd_api_requests_processed_total counter
|
||||
coderd_api_requests_processed_total{code="",method="",path=""} 0
|
||||
# HELP coderd_api_total_user_count The total number of registered users, partitioned by status.
|
||||
# TYPE coderd_api_total_user_count gauge
|
||||
coderd_api_total_user_count{status=""} 0
|
||||
# HELP coderd_api_websocket_durations_seconds Websocket duration distribution of requests in seconds.
|
||||
# TYPE coderd_api_websocket_durations_seconds histogram
|
||||
coderd_api_websocket_durations_seconds{path=""} 0
|
||||
# HELP coderd_api_workspace_latest_build The current number of workspace builds by status for all non-deleted workspaces.
|
||||
# TYPE coderd_api_workspace_latest_build gauge
|
||||
coderd_api_workspace_latest_build{status=""} 0
|
||||
# HELP coderd_authz_authorize_duration_seconds Duration of the 'Authorize' call in seconds. Only counts calls that succeed.
|
||||
# TYPE coderd_authz_authorize_duration_seconds histogram
|
||||
coderd_authz_authorize_duration_seconds{allowed=""} 0
|
||||
# HELP coderd_authz_prepare_authorize_duration_seconds Duration of the 'PrepareAuthorize' call in seconds.
|
||||
# TYPE coderd_authz_prepare_authorize_duration_seconds histogram
|
||||
coderd_authz_prepare_authorize_duration_seconds 0
|
||||
# HELP coderd_db_query_counts_total Total number of queries labelled by HTTP route, method, and query name.
|
||||
# TYPE coderd_db_query_counts_total counter
|
||||
coderd_db_query_counts_total{route="",method="",query=""} 0
|
||||
# HELP coderd_db_query_latencies_seconds Latency distribution of queries in seconds.
|
||||
# TYPE coderd_db_query_latencies_seconds histogram
|
||||
coderd_db_query_latencies_seconds{query=""} 0
|
||||
# HELP coderd_db_tx_duration_seconds Duration of transactions in seconds.
|
||||
# TYPE coderd_db_tx_duration_seconds histogram
|
||||
coderd_db_tx_duration_seconds{success="",tx_id=""} 0
|
||||
# HELP coderd_db_tx_executions_count Total count of transactions executed. 'retries' is expected to be 0 for a successful transaction.
|
||||
# TYPE coderd_db_tx_executions_count counter
|
||||
coderd_db_tx_executions_count{success="",retries="",tx_id=""} 0
|
||||
# HELP coderd_dbpurge_iteration_duration_seconds Duration of each dbpurge iteration in seconds.
|
||||
# TYPE coderd_dbpurge_iteration_duration_seconds histogram
|
||||
coderd_dbpurge_iteration_duration_seconds{success=""} 0
|
||||
# HELP coderd_dbpurge_records_purged_total Total number of records purged by type.
|
||||
# TYPE coderd_dbpurge_records_purged_total counter
|
||||
coderd_dbpurge_records_purged_total{record_type=""} 0
|
||||
# HELP coderd_experiments Indicates whether each experiment is enabled (1) or not (0)
|
||||
# TYPE coderd_experiments gauge
|
||||
coderd_experiments{experiment=""} 0
|
||||
# HELP coderd_insights_applications_usage_seconds The application usage per template.
|
||||
# TYPE coderd_insights_applications_usage_seconds gauge
|
||||
coderd_insights_applications_usage_seconds{template_name="",application_name="",slug=""} 0
|
||||
# HELP coderd_insights_parameters The parameter usage per template.
|
||||
# TYPE coderd_insights_parameters gauge
|
||||
coderd_insights_parameters{template_name="",parameter_name="",parameter_type="",parameter_value=""} 0
|
||||
# HELP coderd_insights_templates_active_users The number of active users of the template.
|
||||
# TYPE coderd_insights_templates_active_users gauge
|
||||
coderd_insights_templates_active_users{template_name=""} 0
|
||||
# HELP coderd_license_active_users The number of active users.
|
||||
# TYPE coderd_license_active_users gauge
|
||||
coderd_license_active_users 0
|
||||
# HELP coderd_license_errors The number of active license errors.
|
||||
# TYPE coderd_license_errors gauge
|
||||
coderd_license_errors 0
|
||||
# HELP coderd_license_limit_users The user seats limit based on the active Coder license.
|
||||
# TYPE coderd_license_limit_users gauge
|
||||
coderd_license_limit_users 0
|
||||
# HELP coderd_license_user_limit_enabled Returns 1 if the current license enforces the user limit.
|
||||
# TYPE coderd_license_user_limit_enabled gauge
|
||||
coderd_license_user_limit_enabled 0
|
||||
# HELP coderd_license_warnings The number of active license warnings.
|
||||
# TYPE coderd_license_warnings gauge
|
||||
coderd_license_warnings 0
|
||||
# HELP coderd_lifecycle_autobuild_execution_duration_seconds Duration of each autobuild execution.
|
||||
# TYPE coderd_lifecycle_autobuild_execution_duration_seconds histogram
|
||||
coderd_lifecycle_autobuild_execution_duration_seconds 0
|
||||
# HELP coderd_notifications_dispatcher_send_seconds The time taken to dispatch notifications.
|
||||
# TYPE coderd_notifications_dispatcher_send_seconds histogram
|
||||
coderd_notifications_dispatcher_send_seconds{method=""} 0
|
||||
# HELP coderd_notifications_inflight_dispatches The number of dispatch attempts which are currently in progress.
|
||||
# TYPE coderd_notifications_inflight_dispatches gauge
|
||||
coderd_notifications_inflight_dispatches{method="",notification_template_id=""} 0
|
||||
# HELP coderd_notifications_pending_updates The number of dispatch attempt results waiting to be flushed to the store.
|
||||
# TYPE coderd_notifications_pending_updates gauge
|
||||
coderd_notifications_pending_updates 0
|
||||
# HELP coderd_notifications_queued_seconds The time elapsed between a notification being enqueued in the store and retrieved for dispatching (measures the latency of the notifications system). This should generally be within CODER_NOTIFICATIONS_FETCH_INTERVAL seconds; higher values for a sustained period indicates delayed processing and CODER_NOTIFICATIONS_LEASE_COUNT can be increased to accommodate this.
|
||||
# TYPE coderd_notifications_queued_seconds histogram
|
||||
coderd_notifications_queued_seconds{method=""} 0
|
||||
# HELP coderd_notifications_retry_count The count of notification dispatch retry attempts.
|
||||
# TYPE coderd_notifications_retry_count counter
|
||||
coderd_notifications_retry_count{method="",notification_template_id=""} 0
|
||||
# HELP coderd_notifications_synced_updates_total The number of dispatch attempt results flushed to the store.
|
||||
# TYPE coderd_notifications_synced_updates_total counter
|
||||
coderd_notifications_synced_updates_total 0
|
||||
# HELP coderd_oauth2_external_requests_rate_limit The total number of allowed requests per interval.
|
||||
# TYPE coderd_oauth2_external_requests_rate_limit gauge
|
||||
coderd_oauth2_external_requests_rate_limit{name="",resource=""} 0
|
||||
# HELP coderd_oauth2_external_requests_rate_limit_next_reset_unix Unix timestamp for when the next interval starts
|
||||
# TYPE coderd_oauth2_external_requests_rate_limit_next_reset_unix gauge
|
||||
coderd_oauth2_external_requests_rate_limit_next_reset_unix{name="",resource=""} 0
|
||||
# HELP coderd_oauth2_external_requests_rate_limit_remaining The remaining number of allowed requests in this interval.
|
||||
# TYPE coderd_oauth2_external_requests_rate_limit_remaining gauge
|
||||
coderd_oauth2_external_requests_rate_limit_remaining{name="",resource=""} 0
|
||||
# HELP coderd_oauth2_external_requests_rate_limit_reset_in_seconds Seconds until the next interval
|
||||
# TYPE coderd_oauth2_external_requests_rate_limit_reset_in_seconds gauge
|
||||
coderd_oauth2_external_requests_rate_limit_reset_in_seconds{name="",resource=""} 0
|
||||
# HELP coderd_oauth2_external_requests_rate_limit_used The number of requests made in this interval.
|
||||
# TYPE coderd_oauth2_external_requests_rate_limit_used gauge
|
||||
coderd_oauth2_external_requests_rate_limit_used{name="",resource=""} 0
|
||||
# HELP coderd_oauth2_external_requests_total The total number of api calls made to external oauth2 providers. 'status_code' will be 0 if the request failed with no response.
|
||||
# TYPE coderd_oauth2_external_requests_total counter
|
||||
coderd_oauth2_external_requests_total{name="",source="",status_code=""} 0
|
||||
# HELP coderd_open_file_refs_current The count of file references currently open in the file cache. Multiple references can be held for the same file.
|
||||
# TYPE coderd_open_file_refs_current gauge
|
||||
coderd_open_file_refs_current 0
|
||||
# HELP coderd_open_file_refs_total The total number of file references ever opened in the file cache. The 'hit' label indicates if the file was loaded from the cache.
|
||||
# TYPE coderd_open_file_refs_total counter
|
||||
coderd_open_file_refs_total{hit=""} 0
|
||||
# HELP coderd_open_files_current The count of unique files currently open in the file cache.
|
||||
# TYPE coderd_open_files_current gauge
|
||||
coderd_open_files_current 0
|
||||
# HELP coderd_open_files_size_bytes_current The current amount of memory of all files currently open in the file cache.
|
||||
# TYPE coderd_open_files_size_bytes_current gauge
|
||||
coderd_open_files_size_bytes_current 0
|
||||
# HELP coderd_open_files_size_bytes_total The total amount of memory ever opened in the file cache. This number never decrements.
|
||||
# TYPE coderd_open_files_size_bytes_total counter
|
||||
coderd_open_files_size_bytes_total 0
|
||||
# HELP coderd_open_files_total The total count of unique files ever opened in the file cache.
|
||||
# TYPE coderd_open_files_total counter
|
||||
coderd_open_files_total 0
|
||||
# HELP coderd_prebuilds_reconciliation_duration_seconds Duration of each prebuilds reconciliation cycle.
|
||||
# TYPE coderd_prebuilds_reconciliation_duration_seconds histogram
|
||||
coderd_prebuilds_reconciliation_duration_seconds 0
|
||||
# HELP coderd_prebuilt_workspace_claim_duration_seconds Time to claim a prebuilt workspace by organization, template, and preset.
|
||||
# TYPE coderd_prebuilt_workspace_claim_duration_seconds histogram
|
||||
coderd_prebuilt_workspace_claim_duration_seconds{organization_name="",template_name="",preset_name=""} 0
|
||||
# HELP coderd_prebuilt_workspaces_claimed_total Total number of prebuilt workspaces which were claimed by users. Claiming refers to creating a workspace with a preset selected for which eligible prebuilt workspaces are available and one is reassigned to a user.
|
||||
# TYPE coderd_prebuilt_workspaces_claimed_total counter
|
||||
coderd_prebuilt_workspaces_claimed_total{template_name="",preset_name="",organization_name=""} 0
|
||||
# HELP coderd_prebuilt_workspaces_created_total Total number of prebuilt workspaces that have been created to meet the desired instance count of each template preset.
|
||||
# TYPE coderd_prebuilt_workspaces_created_total counter
|
||||
coderd_prebuilt_workspaces_created_total{template_name="",preset_name="",organization_name=""} 0
|
||||
# HELP coderd_prebuilt_workspaces_desired Target number of prebuilt workspaces that should be available for each template preset.
|
||||
# TYPE coderd_prebuilt_workspaces_desired gauge
|
||||
coderd_prebuilt_workspaces_desired{template_name="",preset_name="",organization_name=""} 0
|
||||
# HELP coderd_prebuilt_workspaces_eligible Current number of prebuilt workspaces that are eligible to be claimed by users. These are workspaces that have completed their build process with their agent reporting 'ready' status.
|
||||
# TYPE coderd_prebuilt_workspaces_eligible gauge
|
||||
coderd_prebuilt_workspaces_eligible{template_name="",preset_name="",organization_name=""} 0
|
||||
# HELP coderd_prebuilt_workspaces_failed_total Total number of prebuilt workspaces that failed to build.
|
||||
# TYPE coderd_prebuilt_workspaces_failed_total counter
|
||||
coderd_prebuilt_workspaces_failed_total{template_name="",preset_name="",organization_name=""} 0
|
||||
# HELP coderd_prebuilt_workspaces_metrics_last_updated The unix timestamp when the metrics related to prebuilt workspaces were last updated; these metrics are cached.
|
||||
# TYPE coderd_prebuilt_workspaces_metrics_last_updated gauge
|
||||
coderd_prebuilt_workspaces_metrics_last_updated 0
|
||||
# HELP coderd_prebuilt_workspaces_preset_hard_limited Indicates whether a given preset has reached the hard failure limit (1 = hard-limited). Metric is omitted otherwise.
|
||||
# TYPE coderd_prebuilt_workspaces_preset_hard_limited gauge
|
||||
coderd_prebuilt_workspaces_preset_hard_limited{template_name="",preset_name="",organization_name=""} 0
|
||||
# HELP coderd_prebuilt_workspaces_reconciliation_paused Indicates whether prebuilds reconciliation is currently paused (1 = paused, 0 = not paused).
|
||||
# TYPE coderd_prebuilt_workspaces_reconciliation_paused gauge
|
||||
coderd_prebuilt_workspaces_reconciliation_paused 0
|
||||
# HELP coderd_prebuilt_workspaces_resource_replacements_total Total number of prebuilt workspaces whose resource(s) got replaced upon being claimed. In Terraform, drift on immutable attributes results in resource replacement. This represents a worst-case scenario for prebuilt workspaces because the pre-provisioned resource would have been recreated when claiming, thus obviating the point of pre-provisioning. See https://coder.com/docs/admin/templates/extending-templates/prebuilt-workspaces#preventing-resource-replacement
|
||||
# TYPE coderd_prebuilt_workspaces_resource_replacements_total counter
|
||||
coderd_prebuilt_workspaces_resource_replacements_total{template_name="",preset_name="",organization_name=""} 0
|
||||
# HELP coderd_prebuilt_workspaces_running Current number of prebuilt workspaces that are in a running state. These workspaces have started successfully but may not yet be claimable by users (see coderd_prebuilt_workspaces_eligible).
|
||||
# TYPE coderd_prebuilt_workspaces_running gauge
|
||||
coderd_prebuilt_workspaces_running{template_name="",preset_name="",organization_name=""} 0
|
||||
# HELP coderd_prometheusmetrics_agents_execution_seconds Histogram for duration of agents metrics collection in seconds.
|
||||
# TYPE coderd_prometheusmetrics_agents_execution_seconds histogram
|
||||
coderd_prometheusmetrics_agents_execution_seconds 0
|
||||
# HELP coderd_prometheusmetrics_agentstats_execution_seconds Histogram for duration of agent stats metrics collection in seconds.
|
||||
# TYPE coderd_prometheusmetrics_agentstats_execution_seconds histogram
|
||||
coderd_prometheusmetrics_agentstats_execution_seconds 0
|
||||
# HELP coderd_prometheusmetrics_metrics_aggregator_execution_cleanup_seconds Histogram for duration of metrics aggregator cleanup in seconds.
|
||||
# TYPE coderd_prometheusmetrics_metrics_aggregator_execution_cleanup_seconds histogram
|
||||
coderd_prometheusmetrics_metrics_aggregator_execution_cleanup_seconds 0
|
||||
# HELP coderd_prometheusmetrics_metrics_aggregator_execution_update_seconds Histogram for duration of metrics aggregator update in seconds.
|
||||
# TYPE coderd_prometheusmetrics_metrics_aggregator_execution_update_seconds histogram
|
||||
coderd_prometheusmetrics_metrics_aggregator_execution_update_seconds 0
|
||||
# HELP coderd_prometheusmetrics_metrics_aggregator_store_size The number of metrics stored in the aggregator
|
||||
# TYPE coderd_prometheusmetrics_metrics_aggregator_store_size gauge
|
||||
coderd_prometheusmetrics_metrics_aggregator_store_size 0
|
||||
# HELP coderd_provisioner_job_queue_wait_seconds Time from job creation to acquisition by a provisioner daemon.
|
||||
# TYPE coderd_provisioner_job_queue_wait_seconds histogram
|
||||
coderd_provisioner_job_queue_wait_seconds{provisioner_type="",job_type="",transition="",build_reason=""} 0
|
||||
# HELP coderd_provisionerd_job_timings_seconds The provisioner job time duration in seconds.
|
||||
# TYPE coderd_provisionerd_job_timings_seconds histogram
|
||||
coderd_provisionerd_job_timings_seconds{provisioner="",status=""} 0
|
||||
# HELP coderd_provisionerd_jobs_current The number of currently running provisioner jobs.
|
||||
# TYPE coderd_provisionerd_jobs_current gauge
|
||||
coderd_provisionerd_jobs_current{provisioner=""} 0
|
||||
# HELP coderd_provisionerd_num_daemons The number of provisioner daemons.
|
||||
# TYPE coderd_provisionerd_num_daemons gauge
|
||||
coderd_provisionerd_num_daemons 0
|
||||
# HELP coderd_provisionerd_workspace_build_timings_seconds The time taken for a workspace to build.
|
||||
# TYPE coderd_provisionerd_workspace_build_timings_seconds histogram
|
||||
coderd_provisionerd_workspace_build_timings_seconds{template_name="",template_version="",workspace_transition="",status=""} 0
|
||||
# HELP coderd_proxyhealth_health_check_duration_seconds Histogram for duration of proxy health collection in seconds.
|
||||
# TYPE coderd_proxyhealth_health_check_duration_seconds histogram
|
||||
coderd_proxyhealth_health_check_duration_seconds 0
|
||||
# HELP coderd_proxyhealth_health_check_results This endpoint returns a number to indicate the health status. -3 (unknown), -2 (Unreachable), -1 (Unhealthy), 0 (Unregistered), 1 (Healthy)
|
||||
# TYPE coderd_proxyhealth_health_check_results gauge
|
||||
coderd_proxyhealth_health_check_results{proxy_id=""} 0
|
||||
# HELP coderd_template_workspace_build_duration_seconds Duration from workspace build creation to agent ready, by template.
|
||||
# TYPE coderd_template_workspace_build_duration_seconds histogram
|
||||
coderd_template_workspace_build_duration_seconds{template_name="",organization_name="",transition="",status="",is_prebuild=""} 0
|
||||
# HELP coderd_workspace_builds_enqueued_total Total number of workspace build enqueue attempts.
|
||||
# TYPE coderd_workspace_builds_enqueued_total counter
|
||||
coderd_workspace_builds_enqueued_total{provisioner_type="",build_reason="",transition="",status=""} 0
|
||||
# HELP coderd_workspace_builds_total The number of workspaces started, updated, or deleted.
|
||||
# TYPE coderd_workspace_builds_total counter
|
||||
coderd_workspace_builds_total{workspace_owner="",workspace_name="",template_name="",template_version="",workspace_transition="",status=""} 0
|
||||
# HELP coderd_workspace_creation_duration_seconds Time to create a workspace by organization, template, preset, and type (regular or prebuild).
|
||||
# TYPE coderd_workspace_creation_duration_seconds histogram
|
||||
coderd_workspace_creation_duration_seconds{organization_name="",template_name="",preset_name="",type=""} 0
|
||||
# HELP coderd_workspace_creation_total Total regular (non-prebuilt) workspace creations by organization, template, and preset.
|
||||
# TYPE coderd_workspace_creation_total counter
|
||||
coderd_workspace_creation_total{organization_name="",template_name="",preset_name=""} 0
|
||||
# HELP coderd_workspace_latest_build_status The current workspace statuses by template, transition, and owner for all non-deleted workspaces.
|
||||
# TYPE coderd_workspace_latest_build_status gauge
|
||||
coderd_workspace_latest_build_status{status="",template_name="",template_version="",workspace_owner="",workspace_transition=""} 0
|
||||
@@ -16,21 +16,23 @@ import (
|
||||
)
|
||||
|
||||
var (
	// Input/output paths, configurable via flags in main.
	staticMetricsFile    string
	prometheusDocFile    string
	generatedMetricsFile string
	dryRun               bool

	// generatorPrefix and generatorSuffix delimit the generated region
	// inside the Prometheus documentation file.
	generatorPrefix = []byte("<!-- Code generated by 'make docs/admin/integrations/prometheus.md'. DO NOT EDIT -->")
	generatorSuffix = []byte("<!-- End generated by 'make docs/admin/integrations/prometheus.md'. -->")
)
|
||||
|
||||
func main() {
|
||||
flag.StringVar(&metricsFile, "metrics-file", "scripts/metricsdocgen/metrics", "Path to Prometheus metrics file")
|
||||
flag.StringVar(&staticMetricsFile, "static-metrics", "scripts/metricsdocgen/metrics", "Path to static metrics file (manually maintained)")
|
||||
flag.StringVar(&generatedMetricsFile, "generated-metrics", "scripts/metricsdocgen/generated_metrics", "Path to generated metrics file (from scanner)")
|
||||
flag.StringVar(&prometheusDocFile, "prometheus-doc-file", "docs/admin/integrations/prometheus.md", "Path to Prometheus doc file")
|
||||
flag.BoolVar(&dryRun, "dry-run", false, "Dry run")
|
||||
flag.Parse()
|
||||
|
||||
metrics, err := readMetrics()
|
||||
metrics, err := readAndMergeMetrics()
|
||||
if err != nil {
|
||||
log.Fatal("can't read metrics: ", err)
|
||||
}
|
||||
@@ -56,11 +58,13 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
func readMetrics() ([]*dto.MetricFamily, error) {
|
||||
f, err := os.Open(metricsFile)
|
||||
// readMetricsFromFile reads metrics from a single Prometheus text format file.
|
||||
func readMetricsFromFile(path string) ([]*dto.MetricFamily, error) {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, xerrors.New("can't open metrics file")
|
||||
return nil, xerrors.Errorf("can't open metrics file %s: %w", path, err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var metrics []*dto.MetricFamily
|
||||
|
||||
@@ -71,14 +75,55 @@ func readMetrics() ([]*dto.MetricFamily, error) {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
return nil, xerrors.Errorf("decoding metrics from %s: %w", path, err)
|
||||
}
|
||||
metrics = append(metrics, &m)
|
||||
}
|
||||
|
||||
return metrics, nil
|
||||
}
|
||||
|
||||
// readAndMergeMetrics reads metrics from both generated and static files,
|
||||
// merges them, and returns a sorted list. Generated metrics are produced
|
||||
// by the AST scanner that extracts metric definitions from the coder source
|
||||
// code while static metrics are manually maintained (e.g., go_*, process_*,
|
||||
// external dependencies).
|
||||
// Note: Static metrics take priority over generated metrics, allowing manual
|
||||
// overrides for metrics that can't be accurately extracted by the scanner.
|
||||
func readAndMergeMetrics() ([]*dto.MetricFamily, error) {
|
||||
generatedMetrics, err := readMetricsFromFile(generatedMetricsFile)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("reading generated metrics: %w", err)
|
||||
}
|
||||
|
||||
staticMetrics, err := readMetricsFromFile(staticMetricsFile)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("reading static metrics: %w", err)
|
||||
}
|
||||
|
||||
// Merge metrics, using a map to deduplicate by name.
|
||||
metricsByName := make(map[string]*dto.MetricFamily)
|
||||
|
||||
// Add generated metrics first.
|
||||
for _, m := range generatedMetrics {
|
||||
metricsByName[*m.Name] = m
|
||||
}
|
||||
|
||||
// Static metrics overwrite generated metrics if they exist.
|
||||
for _, m := range staticMetrics {
|
||||
metricsByName[*m.Name] = m
|
||||
}
|
||||
|
||||
// Convert back to slice and sort.
|
||||
var metrics []*dto.MetricFamily
|
||||
for _, m := range metricsByName {
|
||||
metrics = append(metrics, m)
|
||||
}
|
||||
|
||||
sort.Slice(metrics, func(i, j int) bool {
|
||||
return sort.StringsAreSorted([]string{*metrics[i].Name, *metrics[j].Name})
|
||||
return *metrics[i].Name < *metrics[j].Name
|
||||
})
|
||||
|
||||
return metrics, nil
|
||||
}
|
||||
|
||||
|
||||
+27
-809
@@ -1,58 +1,9 @@
|
||||
# HELP coderd_oauth2_external_requests_rate_limit_next_reset_unix Unix timestamp of the next interval
|
||||
# TYPE coderd_oauth2_external_requests_rate_limit_next_reset_unix gauge
|
||||
coderd_oauth2_external_requests_rate_limit_next_reset_unix{name="primary-github",resource="core"} 1.704835507e+09
|
||||
coderd_oauth2_external_requests_rate_limit_next_reset_unix{name="secondary-github",resource="core"} 1.704835507e+09
|
||||
# HELP coderd_oauth2_external_requests_rate_limit_remaining The remaining number of allowed requests in this interval.
|
||||
# TYPE coderd_oauth2_external_requests_rate_limit_remaining gauge
|
||||
coderd_oauth2_external_requests_rate_limit_remaining{name="primary-github",resource="core"} 4852
|
||||
coderd_oauth2_external_requests_rate_limit_remaining{name="secondary-github",resource="core"} 4867
|
||||
# HELP coderd_oauth2_external_requests_rate_limit_reset_in_seconds Seconds until the next interval
|
||||
# TYPE coderd_oauth2_external_requests_rate_limit_reset_in_seconds gauge
|
||||
coderd_oauth2_external_requests_rate_limit_reset_in_seconds{name="primary-github",resource="core"} 63.617162731
|
||||
coderd_oauth2_external_requests_rate_limit_reset_in_seconds{name="secondary-github",resource="core"} 121.82186601
|
||||
# HELP coderd_oauth2_external_requests_rate_limit The total number of allowed requests per interval.
|
||||
# TYPE coderd_oauth2_external_requests_rate_limit gauge
|
||||
coderd_oauth2_external_requests_rate_limit{name="primary-github",resource="core-unauthorized"} 5000
|
||||
coderd_oauth2_external_requests_rate_limit{name="secondary-github",resource="core-unauthorized"} 5000
|
||||
# HELP coderd_oauth2_external_requests_rate_limit_used The number of requests made in this interval.
|
||||
# TYPE coderd_oauth2_external_requests_rate_limit_used gauge
|
||||
coderd_oauth2_external_requests_rate_limit_used{name="primary-github",resource="core"} 148
|
||||
coderd_oauth2_external_requests_rate_limit_used{name="secondary-github",resource="core"} 133
|
||||
# HELP coderd_oauth2_external_requests_total The total number of api calls made to external oauth2 providers. 'status_code' will be 0 if the request failed with no response.
|
||||
# TYPE coderd_oauth2_external_requests_total counter
|
||||
coderd_oauth2_external_requests_total{name="primary-github",source="AppInstallations",status_code="200"} 12
|
||||
coderd_oauth2_external_requests_total{name="primary-github",source="Exchange",status_code="200"} 1
|
||||
coderd_oauth2_external_requests_total{name="primary-github",source="TokenSource",status_code="200"} 1
|
||||
coderd_oauth2_external_requests_total{name="primary-github",source="ValidateToken",status_code="200"} 16
|
||||
coderd_oauth2_external_requests_total{name="secondary-github",source="AppInstallations",status_code="403"} 4
|
||||
coderd_oauth2_external_requests_total{name="secondary-github",source="Exchange",status_code="200"} 2
|
||||
coderd_oauth2_external_requests_total{name="secondary-github",source="ValidateToken",status_code="200"} 5
|
||||
# HELP coderd_agents_apps Agent applications with statuses.
|
||||
# TYPE coderd_agents_apps gauge
|
||||
coderd_agents_apps{agent_name="main",app_name="code-server",health="healthy",username="admin",workspace_name="workspace-1"} 1
|
||||
coderd_agents_apps{agent_name="main",app_name="code-server",health="healthy",username="admin",workspace_name="workspace-2"} 1
|
||||
coderd_agents_apps{agent_name="main",app_name="code-server",health="healthy",username="admin",workspace_name="workspace-3"} 1
|
||||
# HELP coderd_agents_connection_latencies_seconds Agent connection latencies in seconds.
|
||||
# TYPE coderd_agents_connection_latencies_seconds gauge
|
||||
coderd_agents_connection_latencies_seconds{agent_name="main",derp_region="Coder Embedded Relay",preferred="true",username="admin",workspace_name="workspace-1"} 0.03018125
|
||||
coderd_agents_connection_latencies_seconds{agent_name="main",derp_region="Coder Embedded Relay",preferred="true",username="admin",workspace_name="workspace-2"} 0.028658416
|
||||
coderd_agents_connection_latencies_seconds{agent_name="main",derp_region="Coder Embedded Relay",preferred="true",username="admin",workspace_name="workspace-3"} 0.028041416
|
||||
# HELP coderd_agents_connections Agent connections with statuses.
|
||||
# TYPE coderd_agents_connections gauge
|
||||
coderd_agents_connections{agent_name="main",lifecycle_state="ready",status="connected",tailnet_node="nodeid:16966f7df70d8cc5",username="admin",workspace_name="workspace-3"} 1
|
||||
coderd_agents_connections{agent_name="main",lifecycle_state="start_timeout",status="connected",tailnet_node="nodeid:3237d00938be23e3",username="admin",workspace_name="workspace-2"} 1
|
||||
coderd_agents_connections{agent_name="main",lifecycle_state="start_timeout",status="connected",tailnet_node="nodeid:3779bd45d00be0eb",username="admin",workspace_name="workspace-1"} 1
|
||||
# HELP coderd_agents_up The number of active agents per workspace.
|
||||
# TYPE coderd_agents_up gauge
|
||||
coderd_agents_up{template_name="docker", username="admin",workspace_name="workspace-1"} 1
|
||||
coderd_agents_up{template_name="docker", username="admin",workspace_name="workspace-2"} 1
|
||||
coderd_agents_up{template_name="gcp", username="admin",workspace_name="workspace-3"} 1
|
||||
# HELP coderd_agentstats_startup_script_seconds The number of seconds the startup script took to execute.
|
||||
# TYPE coderd_agentstats_startup_script_seconds gauge
|
||||
coderd_agentstats_startup_script_seconds{agent_name="main",success="true",template_name="docker",username="admin",workspace_name="workspace-1"} 1.969900304
|
||||
# HELP agent_scripts_executed_total Total number of scripts executed by the Coder agent. Includes cron scheduled scripts.
|
||||
# TYPE agent_scripts_executed_total counter
|
||||
agent_scripts_executed_total{agent_name="main",success="true",template_name="docker",username="admin",workspace_name="workspace-1"} 1
|
||||
# HELP coderd_agentstats_startup_script_seconds The number of seconds the startup script took to execute.
|
||||
# TYPE coderd_agentstats_startup_script_seconds gauge
|
||||
coderd_agentstats_startup_script_seconds{agent_name="main",success="true",template_name="docker",username="admin",workspace_name="workspace-1"} 1.969900304
|
||||
# HELP coderd_agentstats_connection_count The number of established connections by agent
|
||||
# TYPE coderd_agentstats_connection_count gauge
|
||||
coderd_agentstats_connection_count{agent_name="main",username="admin",workspace_name="workspace1"} 2
|
||||
@@ -80,694 +31,6 @@ coderd_agentstats_session_count_vscode{agent_name="main",username="admin",worksp
|
||||
# HELP coderd_agentstats_tx_bytes Agent Tx bytes
|
||||
# TYPE coderd_agentstats_tx_bytes gauge
|
||||
coderd_agentstats_tx_bytes{agent_name="main",username="admin",workspace_name="workspace1"} 6643
|
||||
# HELP coderd_api_websocket_durations_seconds Websocket duration distribution of requests in seconds.
|
||||
# TYPE coderd_api_websocket_durations_seconds histogram
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspaceagents/me/coordinate",le="0.001"} 0
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspaceagents/me/coordinate",le="1"} 3
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspaceagents/me/coordinate",le="60"} 3
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspaceagents/me/coordinate",le="3600"} 4
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspaceagents/me/coordinate",le="54000"} 4
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspaceagents/me/coordinate",le="108000"} 4
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspaceagents/me/coordinate",le="+Inf"} 4
|
||||
coderd_api_websocket_durations_seconds_sum{path="/api/v2/workspaceagents/me/coordinate"} 156.042058706
|
||||
coderd_api_websocket_durations_seconds_count{path="/api/v2/workspaceagents/me/coordinate"} 4
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspaceagents/{workspaceagent}/pty",le="0.001"} 0
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspaceagents/{workspaceagent}/pty",le="1"} 0
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspaceagents/{workspaceagent}/pty",le="60"} 0
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspaceagents/{workspaceagent}/pty",le="3600"} 1
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspaceagents/{workspaceagent}/pty",le="54000"} 1
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspaceagents/{workspaceagent}/pty",le="108000"} 1
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspaceagents/{workspaceagent}/pty",le="+Inf"} 1
|
||||
coderd_api_websocket_durations_seconds_sum{path="/api/v2/workspaceagents/{workspaceagent}/pty"} 119.810027963
|
||||
coderd_api_websocket_durations_seconds_count{path="/api/v2/workspaceagents/{workspaceagent}/pty"} 1
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspacebuilds/{workspacebuild}/logs",le="0.001"} 0
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspacebuilds/{workspacebuild}/logs",le="1"} 1
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspacebuilds/{workspacebuild}/logs",le="60"} 1
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspacebuilds/{workspacebuild}/logs",le="3600"} 1
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspacebuilds/{workspacebuild}/logs",le="54000"} 1
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspacebuilds/{workspacebuild}/logs",le="108000"} 1
|
||||
coderd_api_websocket_durations_seconds_bucket{path="/api/v2/workspacebuilds/{workspacebuild}/logs",le="+Inf"} 1
|
||||
coderd_api_websocket_durations_seconds_sum{path="/api/v2/workspacebuilds/{workspacebuild}/logs"} 0.015562347
|
||||
coderd_api_websocket_durations_seconds_count{path="/api/v2/workspacebuilds/{workspacebuild}/logs"} 1
|
||||
# HELP coderd_api_active_users_duration_hour The number of users that have been active within the last hour.
|
||||
# TYPE coderd_api_active_users_duration_hour gauge
|
||||
coderd_api_active_users_duration_hour 0
|
||||
# HELP coderd_api_concurrent_requests The number of concurrent API requests.
|
||||
# TYPE coderd_api_concurrent_requests gauge
|
||||
coderd_api_concurrent_requests 3
|
||||
# HELP coderd_api_concurrent_websockets The total number of concurrent API websockets.
|
||||
# TYPE coderd_api_concurrent_websockets gauge
|
||||
coderd_api_concurrent_websockets 2
|
||||
# HELP coderd_api_request_latencies_seconds Latency distribution of requests in seconds.
|
||||
# TYPE coderd_api_request_latencies_seconds histogram
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="",le="0.005"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="",le="0.01"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="",le="0.025"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="",le="0.05"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="",le="0.1"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="",le="0.5"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="",le="1"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="",le="5"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path=""} 6.687792526
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path=""} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/appearance/",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/appearance/",le="0.005"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/appearance/",le="0.01"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/appearance/",le="0.025"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/appearance/",le="0.05"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/appearance/",le="0.1"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/appearance/",le="0.5"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/appearance/",le="1"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/appearance/",le="5"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/appearance/",le="10"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/appearance/",le="30"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/appearance/",le="+Inf"} 2
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/appearance/"} 0.005080632
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/appearance/"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/applications/host/",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/applications/host/",le="0.005"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/applications/host/",le="0.01"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/applications/host/",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/applications/host/",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/applications/host/",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/applications/host/",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/applications/host/",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/applications/host/",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/applications/host/",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/applications/host/",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/applications/host/",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/applications/host/"} 0.001333428
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/applications/host/"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/buildinfo",le="0.001"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/buildinfo",le="0.005"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/buildinfo",le="0.01"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/buildinfo",le="0.025"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/buildinfo",le="0.05"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/buildinfo",le="0.1"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/buildinfo",le="0.5"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/buildinfo",le="1"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/buildinfo",le="5"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/buildinfo",le="10"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/buildinfo",le="30"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/buildinfo",le="+Inf"} 5
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/buildinfo"} 0.000471086
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/buildinfo"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/entitlements",le="0.001"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/entitlements",le="0.005"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/entitlements",le="0.01"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/entitlements",le="0.025"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/entitlements",le="0.05"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/entitlements",le="0.1"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/entitlements",le="0.5"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/entitlements",le="1"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/entitlements",le="5"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/entitlements",le="10"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/entitlements",le="30"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/entitlements",le="+Inf"} 5
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/entitlements"} 0.0007040899999999999
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/entitlements"} 5
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/*",le="0.001"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/*",le="0.005"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/*",le="0.01"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/*",le="0.025"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/*",le="0.05"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/*",le="0.1"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/*",le="0.5"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/*",le="1"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/*",le="5"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/*",le="10"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/*",le="30"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/*",le="+Inf"} 2
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/organizations/*"} 0.000904424
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/organizations/*"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/",le="0.005"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/",le="0.01"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/",le="0.05"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/",le="0.1"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/",le="0.5"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/",le="1"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/",le="5"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/",le="10"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/",le="30"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/",le="+Inf"} 2
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/organizations/{organization}/templates/"} 0.045776814
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/organizations/{organization}/templates/"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/examples",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/examples",le="0.005"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/examples",le="0.01"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/examples",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/examples",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/examples",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/examples",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/examples",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/examples",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/examples",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/examples",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/examples",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/organizations/{organization}/templates/examples"} 0.015829003
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/organizations/{organization}/templates/examples"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/{templatename}",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/{templatename}",le="0.005"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/{templatename}",le="0.01"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/{templatename}",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/{templatename}",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/{templatename}",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/{templatename}",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/{templatename}",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/{templatename}",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/{templatename}",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/{templatename}",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/organizations/{organization}/templates/{templatename}",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/organizations/{organization}/templates/{templatename}"} 0.004708487
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/organizations/{organization}/templates/{templatename}"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/",le="0.005"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/",le="0.01"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/templates/{template}/"} 0.004230499
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/templates/{template}/"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/daus",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/daus",le="0.005"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/daus",le="0.01"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/daus",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/daus",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/daus",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/daus",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/daus",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/daus",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/daus",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/daus",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/daus",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/templates/{template}/daus"} 0.004370203
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/templates/{template}/daus"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/versions/",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/versions/",le="0.005"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/versions/",le="0.01"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/versions/",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/versions/",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/versions/",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/versions/",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/versions/",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/versions/",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/versions/",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/versions/",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templates/{template}/versions/",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/templates/{template}/versions/"} 0.00656286
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/templates/{template}/versions/"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/",le="0.005"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/",le="0.01"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/templateversions/{templateversion}/"} 0.010606176
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/templateversions/{templateversion}/"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/resources",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/resources",le="0.005"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/resources",le="0.01"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/resources",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/resources",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/resources",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/resources",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/resources",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/resources",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/resources",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/resources",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/resources",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/templateversions/{templateversion}/resources"} 0.007596192
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/templateversions/{templateversion}/resources"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/schema",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/schema",le="0.005"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/schema",le="0.01"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/schema",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/schema",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/schema",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/schema",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/schema",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/schema",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/schema",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/schema",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/templateversions/{templateversion}/schema",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/templateversions/{templateversion}/schema"} 0.00339007
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/templateversions/{templateversion}/schema"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/updatecheck",le="0.001"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/updatecheck",le="0.005"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/updatecheck",le="0.01"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/updatecheck",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/updatecheck",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/updatecheck",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/updatecheck",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/updatecheck",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/updatecheck",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/updatecheck",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/updatecheck",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/updatecheck",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/updatecheck"} 0.000390431
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/updatecheck"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/",le="0.005"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/",le="0.01"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/users/"} 0.003569641
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/users/"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/authmethods",le="0.001"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/authmethods",le="0.005"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/authmethods",le="0.01"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/authmethods",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/authmethods",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/authmethods",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/authmethods",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/authmethods",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/authmethods",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/authmethods",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/authmethods",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/authmethods",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/users/authmethods"} 0.000148719
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/users/authmethods"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/first",le="0.001"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/first",le="0.005"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/first",le="0.01"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/first",le="0.025"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/first",le="0.05"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/first",le="0.1"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/first",le="0.5"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/first",le="1"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/first",le="5"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/first",le="10"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/first",le="30"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/first",le="+Inf"} 2
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/users/first"} 0.002299768
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/users/first"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}",le="0.001"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}",le="0.005"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}",le="0.01"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/users/{user}"} 0.000131803
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/users/{user}"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/",le="0.005"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/",le="0.01"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/",le="0.025"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/",le="0.05"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/",le="0.1"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/",le="0.5"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/",le="1"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/",le="5"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/",le="10"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/",le="30"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/",le="+Inf"} 2
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/users/{user}/"} 0.012900051
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/users/{user}/"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/*",le="0.001"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/*",le="0.005"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/*",le="0.01"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/*",le="0.025"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/*",le="0.05"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/*",le="0.1"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/*",le="0.5"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/*",le="1"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/*",le="5"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/*",le="10"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/*",le="30"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/*",le="+Inf"} 2
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/users/{user}/*"} 0.0017976070000000001
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/users/{user}/*"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="0.005"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="0.01"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="0.025"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="0.05"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="0.1"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="0.5"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="1"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="5"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="10"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="30"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/",le="+Inf"} 2
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/"} 0.014837208000000001
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="0.005"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="0.01"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspace-quota/{user}/",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/workspace-quota/{user}/"} 0.01856146
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/workspace-quota/{user}/"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="0.005"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="0.01"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaceagents/me/metadata",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/workspaceagents/me/metadata"} 0.005921315
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/workspaceagents/me/metadata"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces",le="0.001"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces",le="0.005"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces",le="0.01"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/workspaces"} 0.000824226
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/workspaces"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/",le="0.005"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/",le="0.01"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/workspaces/"} 0.016112682
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/workspaces/"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="0.005"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="0.01"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="0.025"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="0.05"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="0.1"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="0.5"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="1"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="5"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="10"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="30"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/api/v2/workspaces/{workspace}/builds/",le="+Inf"} 2
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/api/v2/workspaces/{workspace}/builds/"} 0.022512011000000002
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/api/v2/workspaces/{workspace}/builds/"} 2
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/healthz",le="0.001"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/healthz",le="0.005"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/healthz",le="0.01"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/healthz",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/healthz",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/healthz",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/healthz",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/healthz",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/healthz",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/healthz",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/healthz",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="GET",path="/healthz",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="GET",path="/healthz"} 0.000109226
|
||||
coderd_api_request_latencies_seconds_count{method="GET",path="/healthz"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/authcheck/",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/authcheck/",le="0.005"} 4
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/authcheck/",le="0.01"} 6
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/authcheck/",le="0.025"} 6
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/authcheck/",le="0.05"} 6
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/authcheck/",le="0.1"} 6
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/authcheck/",le="0.5"} 6
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/authcheck/",le="1"} 6
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/authcheck/",le="5"} 6
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/authcheck/",le="10"} 6
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/authcheck/",le="30"} 6
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/authcheck/",le="+Inf"} 6
|
||||
coderd_api_request_latencies_seconds_sum{method="POST",path="/api/v2/authcheck/"} 0.027684736
|
||||
coderd_api_request_latencies_seconds_count{method="POST",path="/api/v2/authcheck/"} 6
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/files",le="0.001"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/files",le="0.005"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/files",le="0.01"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/files",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/files",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/files",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/files",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/files",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/files",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/files",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/files",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/files",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="POST",path="/api/v2/files"} 0.000426037
|
||||
coderd_api_request_latencies_seconds_count{method="POST",path="/api/v2/files"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/organizations/{organization}/members/{user}/workspaces",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/organizations/{organization}/members/{user}/workspaces",le="0.005"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/organizations/{organization}/members/{user}/workspaces",le="0.01"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/organizations/{organization}/members/{user}/workspaces",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/organizations/{organization}/members/{user}/workspaces",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/organizations/{organization}/members/{user}/workspaces",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/organizations/{organization}/members/{user}/workspaces",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/organizations/{organization}/members/{user}/workspaces",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/organizations/{organization}/members/{user}/workspaces",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/organizations/{organization}/members/{user}/workspaces",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/organizations/{organization}/members/{user}/workspaces",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/organizations/{organization}/members/{user}/workspaces",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="POST",path="/api/v2/organizations/{organization}/members/{user}/workspaces"} 0.014369701
|
||||
coderd_api_request_latencies_seconds_count{method="POST",path="/api/v2/organizations/{organization}/members/{user}/workspaces"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/users/login",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/users/login",le="0.005"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/users/login",le="0.01"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/users/login",le="0.025"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/users/login",le="0.05"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/users/login",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/users/login",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/users/login",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/users/login",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/users/login",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/users/login",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/users/login",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="POST",path="/api/v2/users/login"} 0.079973393
|
||||
coderd_api_request_latencies_seconds_count{method="POST",path="/api/v2/users/login"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="0.005"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="0.01"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/report-stats",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="POST",path="/api/v2/workspaceagents/me/report-stats"} 0.001123106
|
||||
coderd_api_request_latencies_seconds_count{method="POST",path="/api/v2/workspaceagents/me/report-stats"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="0.001"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="0.005"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="0.01"} 0
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="0.025"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="0.05"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="0.1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="0.5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="1"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="5"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="10"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="30"} 1
|
||||
coderd_api_request_latencies_seconds_bucket{method="POST",path="/api/v2/workspaceagents/me/version",le="+Inf"} 1
|
||||
coderd_api_request_latencies_seconds_sum{method="POST",path="/api/v2/workspaceagents/me/version"} 0.012078959
|
||||
coderd_api_request_latencies_seconds_count{method="POST",path="/api/v2/workspaceagents/me/version"} 1
|
||||
# HELP coderd_api_requests_processed_total The total number of processed API requests
|
||||
# TYPE coderd_api_requests_processed_total counter
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path=""} 1
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/appearance/"} 2
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/applications/host/"} 1
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/buildinfo"} 5
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/entitlements"} 5
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/organizations/{organization}/templates/"} 2
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/organizations/{organization}/templates/examples"} 1
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/organizations/{organization}/templates/{templatename}"} 1
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/templates/{template}/"} 1
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/templates/{template}/daus"} 1
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/templates/{template}/versions/"} 1
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/templateversions/{templateversion}/"} 1
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/templateversions/{templateversion}/resources"} 1
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/templateversions/{templateversion}/schema"} 1
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/updatecheck"} 1
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/users/"} 1
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/users/authmethods"} 1
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/users/first"} 2
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/users/{user}/"} 2
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/users/{user}/workspace/{workspacename}/"} 2
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/workspace-quota/{user}/"} 1
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/workspaceagents/me/metadata"} 1
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/workspaces/"} 1
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/api/v2/workspaces/{workspace}/builds/"} 2
|
||||
coderd_api_requests_processed_total{code="200",method="GET",path="/healthz"} 1
|
||||
coderd_api_requests_processed_total{code="200",method="POST",path="/api/v2/authcheck/"} 6
|
||||
coderd_api_requests_processed_total{code="200",method="POST",path="/api/v2/workspaceagents/me/report-stats"} 1
|
||||
coderd_api_requests_processed_total{code="200",method="POST",path="/api/v2/workspaceagents/me/version"} 1
|
||||
coderd_api_requests_processed_total{code="201",method="POST",path="/api/v2/organizations/{organization}/members/{user}/workspaces"} 1
|
||||
coderd_api_requests_processed_total{code="201",method="POST",path="/api/v2/users/login"} 1
|
||||
coderd_api_requests_processed_total{code="401",method="GET",path="/api/v2/organizations/*"} 2
|
||||
coderd_api_requests_processed_total{code="401",method="GET",path="/api/v2/users/{user}"} 1
|
||||
coderd_api_requests_processed_total{code="401",method="GET",path="/api/v2/users/{user}/*"} 2
|
||||
coderd_api_requests_processed_total{code="401",method="GET",path="/api/v2/workspaces"} 1
|
||||
coderd_api_requests_processed_total{code="401",method="POST",path="/api/v2/files"} 1
|
||||
# HELP coderd_api_workspace_latest_build The latest workspace builds with a status.
|
||||
# TYPE coderd_api_workspace_latest_build gauge
|
||||
coderd_api_workspace_latest_build{status="succeeded"} 1
|
||||
# HELP coderd_insights_applications_usage_seconds The application usage per template.
|
||||
# TYPE coderd_insights_applications_usage_seconds gauge
|
||||
coderd_insights_applications_usage_seconds{application_name="JetBrains",slug="",template_name="code-server-pod"} 1
|
||||
# HELP coderd_insights_parameters The parameter usage per template.
|
||||
# TYPE coderd_insights_parameters gauge
|
||||
coderd_insights_parameters{parameter_name="cpu",parameter_type="string",parameter_value="8",template_name="code-server-pod"} 1
|
||||
# HELP coderd_insights_templates_active_users The number of active users of the template.
|
||||
# TYPE coderd_insights_templates_active_users gauge
|
||||
coderd_insights_templates_active_users{template_name="code-server-pod"} 1
|
||||
# HELP coderd_license_active_users The number of active users.
|
||||
# TYPE coderd_license_active_users gauge
|
||||
coderd_license_active_users 1
|
||||
# HELP coderd_license_limit_users The user seats limit based on the active Coder license.
|
||||
# TYPE coderd_license_limit_users gauge
|
||||
coderd_license_limit_users 25
|
||||
# HELP coderd_license_user_limit_enabled Returns 1 if the current license enforces the user limit.
|
||||
# TYPE coderd_license_user_limit_enabled gauge
|
||||
coderd_license_user_limit_enabled 1
|
||||
# HELP coderd_metrics_collector_agents_execution_seconds Histogram for duration of agents metrics collection in seconds.
|
||||
# TYPE coderd_metrics_collector_agents_execution_seconds histogram
|
||||
coderd_metrics_collector_agents_execution_seconds_bucket{le="0.001"} 0
|
||||
coderd_metrics_collector_agents_execution_seconds_bucket{le="0.005"} 0
|
||||
coderd_metrics_collector_agents_execution_seconds_bucket{le="0.01"} 0
|
||||
coderd_metrics_collector_agents_execution_seconds_bucket{le="0.025"} 0
|
||||
coderd_metrics_collector_agents_execution_seconds_bucket{le="0.05"} 2
|
||||
coderd_metrics_collector_agents_execution_seconds_bucket{le="0.1"} 2
|
||||
coderd_metrics_collector_agents_execution_seconds_bucket{le="0.5"} 2
|
||||
coderd_metrics_collector_agents_execution_seconds_bucket{le="1"} 2
|
||||
coderd_metrics_collector_agents_execution_seconds_bucket{le="5"} 2
|
||||
coderd_metrics_collector_agents_execution_seconds_bucket{le="10"} 2
|
||||
coderd_metrics_collector_agents_execution_seconds_bucket{le="30"} 2
|
||||
coderd_metrics_collector_agents_execution_seconds_bucket{le="+Inf"} 2
|
||||
coderd_metrics_collector_agents_execution_seconds_sum 0.0592915
|
||||
coderd_metrics_collector_agents_execution_seconds_count 2
|
||||
# HELP coderd_provisionerd_job_timings_seconds The provisioner job time duration in seconds.
|
||||
# TYPE coderd_provisionerd_job_timings_seconds histogram
|
||||
coderd_provisionerd_job_timings_seconds_bucket{provisioner="terraform",status="success",le="1"} 0
|
||||
coderd_provisionerd_job_timings_seconds_bucket{provisioner="terraform",status="success",le="10"} 0
|
||||
coderd_provisionerd_job_timings_seconds_bucket{provisioner="terraform",status="success",le="30"} 1
|
||||
coderd_provisionerd_job_timings_seconds_bucket{provisioner="terraform",status="success",le="60"} 1
|
||||
coderd_provisionerd_job_timings_seconds_bucket{provisioner="terraform",status="success",le="300"} 1
|
||||
coderd_provisionerd_job_timings_seconds_bucket{provisioner="terraform",status="success",le="600"} 1
|
||||
coderd_provisionerd_job_timings_seconds_bucket{provisioner="terraform",status="success",le="1800"} 1
|
||||
coderd_provisionerd_job_timings_seconds_bucket{provisioner="terraform",status="success",le="3600"} 1
|
||||
coderd_provisionerd_job_timings_seconds_bucket{provisioner="terraform",status="success",le="+Inf"} 1
|
||||
coderd_provisionerd_job_timings_seconds_sum{provisioner="terraform",status="success"} 14.739479476
|
||||
coderd_provisionerd_job_timings_seconds_count{provisioner="terraform",status="success"} 1
|
||||
# HELP coderd_provisionerd_jobs_current The number of currently running provisioner jobs.
|
||||
# TYPE coderd_provisionerd_jobs_current gauge
|
||||
coderd_provisionerd_jobs_current{provisioner="terraform"} 0
|
||||
# HELP coderd_provisionerd_num_daemons The number of provisioner daemons.
|
||||
# TYPE coderd_provisionerd_num_daemons gauge
|
||||
coderd_provisionerd_num_daemons 3
|
||||
# HELP coderd_provisionerd_workspace_build_timings_seconds The time taken for a workspace to build.
|
||||
# TYPE coderd_provisionerd_workspace_build_timings_seconds histogram
|
||||
coderd_provisionerd_workspace_build_timings_seconds_bucket{status="success",template_name="docker",template_version="gallant_wright0",workspace_transition="START",le="1"} 0
|
||||
coderd_provisionerd_workspace_build_timings_seconds_bucket{status="success",template_name="docker",template_version="gallant_wright0",workspace_transition="START",le="10"} 0
|
||||
coderd_provisionerd_workspace_build_timings_seconds_bucket{status="success",template_name="docker",template_version="gallant_wright0",workspace_transition="START",le="30"} 0
|
||||
coderd_provisionerd_workspace_build_timings_seconds_bucket{status="success",template_name="docker",template_version="gallant_wright0",workspace_transition="START",le="60"} 1
|
||||
coderd_provisionerd_workspace_build_timings_seconds_bucket{status="success",template_name="docker",template_version="gallant_wright0",workspace_transition="START",le="300"} 1
|
||||
coderd_provisionerd_workspace_build_timings_seconds_bucket{status="success",template_name="docker",template_version="gallant_wright0",workspace_transition="START",le="600"} 1
|
||||
coderd_provisionerd_workspace_build_timings_seconds_bucket{status="success",template_name="docker",template_version="gallant_wright0",workspace_transition="START",le="1800"} 1
|
||||
coderd_provisionerd_workspace_build_timings_seconds_bucket{status="success",template_name="docker",template_version="gallant_wright0",workspace_transition="START",le="3600"} 1
|
||||
coderd_provisionerd_workspace_build_timings_seconds_bucket{status="success",template_name="docker",template_version="gallant_wright0",workspace_transition="START",le="+Inf"} 1
|
||||
coderd_provisionerd_workspace_build_timings_seconds_sum{status="success",template_name="docker",template_version="gallant_wright0",workspace_transition="START"} 31.042659852
|
||||
coderd_provisionerd_workspace_build_timings_seconds_count{status="success",template_name="docker",template_version="gallant_wright0",workspace_transition="START"} 1
|
||||
# HELP coderd_workspace_latest_build_status The current workspace statuses by template, transition, and owner.
|
||||
# TYPE coderd_workspace_latest_build_status gauge
|
||||
coderd_workspace_latest_build_status{status="failed",template_name="docker",template_version="sweet_gould9",workspace_owner="admin",workspace_transition="stop"} 1
|
||||
# HELP coderd_workspace_builds_total The number of workspaces started, updated, or deleted.
|
||||
# TYPE coderd_workspace_builds_total counter
|
||||
coderd_workspace_builds_total{action="START",owner_email="admin@coder.com",status="failed",template_name="docker",template_version="gallant_wright0",workspace_name="test1"} 1
|
||||
coderd_workspace_builds_total{action="START",owner_email="admin@coder.com",status="success",template_name="docker",template_version="gallant_wright0",workspace_name="test1"} 1
|
||||
coderd_workspace_builds_total{action="STOP",owner_email="admin@coder.com",status="success",template_name="docker",template_version="gallant_wright0",workspace_name="test1"} 1
|
||||
# HELP coderd_workspace_creation_total Total regular (non-prebuilt) workspace creations by organization, template, and preset.
|
||||
# TYPE coderd_workspace_creation_total counter
|
||||
coderd_workspace_creation_total{organization_name="{organization}",preset_name="",template_name="docker"} 1
|
||||
# HELP coderd_workspace_creation_duration_seconds Time to create a workspace by organization, template, preset, and type (regular or prebuild).
|
||||
# TYPE coderd_workspace_creation_duration_seconds histogram
|
||||
coderd_workspace_creation_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild",le="1"} 0
|
||||
coderd_workspace_creation_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild",le="10"} 1
|
||||
coderd_workspace_creation_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild",le="30"} 1
|
||||
coderd_workspace_creation_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild",le="60"} 1
|
||||
coderd_workspace_creation_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild",le="300"} 1
|
||||
coderd_workspace_creation_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild",le="600"} 1
|
||||
coderd_workspace_creation_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild",le="1800"} 1
|
||||
coderd_workspace_creation_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild",le="3600"} 1
|
||||
coderd_workspace_creation_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild",le="+Inf"} 1
|
||||
coderd_workspace_creation_duration_seconds_sum{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild"} 4.406214
|
||||
coderd_workspace_creation_duration_seconds_count{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",type="prebuild"} 1
|
||||
# HELP coderd_template_workspace_build_duration_seconds Duration from workspace build creation to agent ready, by template.
|
||||
# TYPE coderd_template_workspace_build_duration_seconds histogram
|
||||
coderd_template_workspace_build_duration_seconds_bucket{is_prebuild="false",organization_name="{organization}",status="success",template_name="docker",transition="start",le="1"} 0
|
||||
coderd_template_workspace_build_duration_seconds_bucket{is_prebuild="false",organization_name="{organization}",status="success",template_name="docker",transition="start",le="10"} 1
|
||||
coderd_template_workspace_build_duration_seconds_bucket{is_prebuild="false",organization_name="{organization}",status="success",template_name="docker",transition="start",le="30"} 1
|
||||
coderd_template_workspace_build_duration_seconds_bucket{is_prebuild="false",organization_name="{organization}",status="success",template_name="docker",transition="start",le="60"} 1
|
||||
coderd_template_workspace_build_duration_seconds_bucket{is_prebuild="false",organization_name="{organization}",status="success",template_name="docker",transition="start",le="300"} 1
|
||||
coderd_template_workspace_build_duration_seconds_bucket{is_prebuild="false",organization_name="{organization}",status="success",template_name="docker",transition="start",le="600"} 1
|
||||
coderd_template_workspace_build_duration_seconds_bucket{is_prebuild="false",organization_name="{organization}",status="success",template_name="docker",transition="start",le="1800"} 1
|
||||
coderd_template_workspace_build_duration_seconds_bucket{is_prebuild="false",organization_name="{organization}",status="success",template_name="docker",transition="start",le="3600"} 1
|
||||
coderd_template_workspace_build_duration_seconds_bucket{is_prebuild="false",organization_name="{organization}",status="success",template_name="docker",transition="start",le="+Inf"} 1
|
||||
coderd_template_workspace_build_duration_seconds_sum{is_prebuild="false",organization_name="{organization}",status="success",template_name="docker",transition="start"} 7.241532
|
||||
coderd_template_workspace_build_duration_seconds_count{is_prebuild="false",organization_name="{organization}",status="success",template_name="docker",transition="start"} 1
|
||||
# HELP coderd_prebuilt_workspace_claim_duration_seconds Time to claim a prebuilt workspace by organization, template, and preset.
|
||||
# TYPE coderd_prebuilt_workspace_claim_duration_seconds histogram
|
||||
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="1"} 0
|
||||
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="5"} 1
|
||||
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="10"} 1
|
||||
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="20"} 1
|
||||
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="30"} 1
|
||||
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="60"} 1
|
||||
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="120"} 1
|
||||
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="180"} 1
|
||||
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="240"} 1
|
||||
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="300"} 1
|
||||
coderd_prebuilt_workspace_claim_duration_seconds_bucket{organization_name="{organization}",preset_name="Falkenstein",template_name="docker",le="+Inf"} 1
|
||||
coderd_prebuilt_workspace_claim_duration_seconds_sum{organization_name="{organization}",preset_name="Falkenstein",template_name="docker"} 4.860075
|
||||
coderd_prebuilt_workspace_claim_duration_seconds_count{organization_name="{organization}",preset_name="Falkenstein",template_name="docker"} 1
|
||||
# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
|
||||
# TYPE go_gc_duration_seconds summary
|
||||
go_gc_duration_seconds{quantile="0"} 2.4056e-05
|
||||
@@ -921,72 +184,27 @@ coder_aibridged_tokens_total{initiator_id="95f6752b-08cc-4cf1-97f7-c2165e3519c5"
|
||||
coder_aibridged_tokens_total{initiator_id="95f6752b-08cc-4cf1-97f7-c2165e3519c5",model="gpt-5-nano",provider="openai",type="output"} 2014
|
||||
coder_aibridged_tokens_total{initiator_id="95f6752b-08cc-4cf1-97f7-c2165e3519c5",model="gpt-5-nano",provider="openai",type="prompt_audio"} 0
|
||||
coder_aibridged_tokens_total{initiator_id="95f6752b-08cc-4cf1-97f7-c2165e3519c5",model="gpt-5-nano",provider="openai",type="prompt_cached"} 31872
|
||||
# HELP coderd_agentapi_metadata_batch_size Total number of metadata entries in each batch, updated before flushes.
|
||||
# TYPE coderd_agentapi_metadata_batch_size histogram
|
||||
coderd_agentapi_metadata_batch_size_bucket{le="10"} 11
|
||||
coderd_agentapi_metadata_batch_size_bucket{le="25"} 12
|
||||
coderd_agentapi_metadata_batch_size_bucket{le="50"} 12
|
||||
coderd_agentapi_metadata_batch_size_bucket{le="100"} 12
|
||||
coderd_agentapi_metadata_batch_size_bucket{le="150"} 12
|
||||
coderd_agentapi_metadata_batch_size_bucket{le="200"} 12
|
||||
coderd_agentapi_metadata_batch_size_bucket{le="250"} 12
|
||||
coderd_agentapi_metadata_batch_size_bucket{le="300"} 12
|
||||
coderd_agentapi_metadata_batch_size_bucket{le="350"} 12
|
||||
coderd_agentapi_metadata_batch_size_bucket{le="400"} 12
|
||||
coderd_agentapi_metadata_batch_size_bucket{le="450"} 12
|
||||
coderd_agentapi_metadata_batch_size_bucket{le="500"} 12
|
||||
coderd_agentapi_metadata_batch_size_bucket{le="+Inf"} 12
|
||||
coderd_agentapi_metadata_batch_size_sum 71
|
||||
coderd_agentapi_metadata_batch_size_count 12
|
||||
# HELP coderd_agentapi_metadata_batch_utilization Number of metadata keys per agent in each batch, updated before flushes.
|
||||
# TYPE coderd_agentapi_metadata_batch_utilization histogram
|
||||
coderd_agentapi_metadata_batch_utilization_bucket{le="1"} 0
|
||||
coderd_agentapi_metadata_batch_utilization_bucket{le="2"} 0
|
||||
coderd_agentapi_metadata_batch_utilization_bucket{le="3"} 0
|
||||
coderd_agentapi_metadata_batch_utilization_bucket{le="4"} 0
|
||||
coderd_agentapi_metadata_batch_utilization_bucket{le="5"} 10
|
||||
coderd_agentapi_metadata_batch_utilization_bucket{le="6"} 10
|
||||
coderd_agentapi_metadata_batch_utilization_bucket{le="7"} 13
|
||||
coderd_agentapi_metadata_batch_utilization_bucket{le="8"} 13
|
||||
coderd_agentapi_metadata_batch_utilization_bucket{le="9"} 13
|
||||
coderd_agentapi_metadata_batch_utilization_bucket{le="10"} 13
|
||||
coderd_agentapi_metadata_batch_utilization_bucket{le="15"} 13
|
||||
coderd_agentapi_metadata_batch_utilization_bucket{le="20"} 13
|
||||
coderd_agentapi_metadata_batch_utilization_bucket{le="40"} 13
|
||||
coderd_agentapi_metadata_batch_utilization_bucket{le="80"} 13
|
||||
coderd_agentapi_metadata_batch_utilization_bucket{le="160"} 13
|
||||
coderd_agentapi_metadata_batch_utilization_bucket{le="+Inf"} 13
|
||||
coderd_agentapi_metadata_batch_utilization_sum 71
|
||||
coderd_agentapi_metadata_batch_utilization_count 13
|
||||
# HELP coderd_agentapi_metadata_batches_total Total number of metadata batches flushed.
|
||||
# TYPE coderd_agentapi_metadata_batches_total counter
|
||||
coderd_agentapi_metadata_batches_total{reason="scheduled"} 12
|
||||
# HELP coderd_agentapi_metadata_dropped_keys_total Total number of metadata keys dropped due to capacity limits.
|
||||
# TYPE coderd_agentapi_metadata_dropped_keys_total counter
|
||||
coderd_agentapi_metadata_dropped_keys_total 0
|
||||
# HELP coderd_agentapi_metadata_flush_duration_seconds Time taken to flush metadata batch to database and pubsub.
|
||||
# TYPE coderd_agentapi_metadata_flush_duration_seconds histogram
|
||||
coderd_agentapi_metadata_flush_duration_seconds_bucket{reason="scheduled",le="0.01"} 12
|
||||
coderd_agentapi_metadata_flush_duration_seconds_bucket{reason="scheduled",le="0.025"} 12
|
||||
coderd_agentapi_metadata_flush_duration_seconds_bucket{reason="scheduled",le="0.05"} 12
|
||||
coderd_agentapi_metadata_flush_duration_seconds_bucket{reason="scheduled",le="0.1"} 12
|
||||
coderd_agentapi_metadata_flush_duration_seconds_bucket{reason="scheduled",le="0.25"} 12
|
||||
coderd_agentapi_metadata_flush_duration_seconds_bucket{reason="scheduled",le="0.5"} 12
|
||||
coderd_agentapi_metadata_flush_duration_seconds_bucket{reason="scheduled",le="1"} 12
|
||||
coderd_agentapi_metadata_flush_duration_seconds_bucket{reason="scheduled",le="2.5"} 12
|
||||
coderd_agentapi_metadata_flush_duration_seconds_bucket{reason="scheduled",le="5"} 12
|
||||
coderd_agentapi_metadata_flush_duration_seconds_bucket{reason="scheduled",le="+Inf"} 12
|
||||
coderd_agentapi_metadata_flush_duration_seconds_sum{reason="scheduled"} 0.008704553
|
||||
coderd_agentapi_metadata_flush_duration_seconds_count{reason="scheduled"} 12
|
||||
# HELP coderd_agentapi_metadata_flushed_total Total number of unique metadatas flushed.
|
||||
# TYPE coderd_agentapi_metadata_flushed_total counter
|
||||
coderd_agentapi_metadata_flushed_total 71
|
||||
# HELP coderd_agentapi_metadata_publish_errors_total Total number of metadata batch pubsub publish calls that have resulted in an error.
|
||||
# TYPE coderd_agentapi_metadata_publish_errors_total counter
|
||||
coderd_agentapi_metadata_publish_errors_total 0
|
||||
# HELP coderd_license_warnings The number of active license warnings.
|
||||
# TYPE coderd_license_warnings gauge
|
||||
coderd_license_warnings 0
|
||||
# HELP coderd_license_errors The number of active license errors.
|
||||
# TYPE coderd_license_errors gauge
|
||||
coderd_license_errors 0
|
||||
# HELP coder_aibridged_circuit_breaker_rejects_total Total number of requests rejected due to open circuit breaker.
|
||||
# TYPE coder_aibridged_circuit_breaker_rejects_total counter
|
||||
coder_aibridged_circuit_breaker_rejects_total{provider="",endpoint="",model=""} 0
|
||||
# HELP coder_aibridged_circuit_breaker_state Current state of the circuit breaker (0=closed, 0.5=half-open, 1=open).
|
||||
# TYPE coder_aibridged_circuit_breaker_state gauge
|
||||
coder_aibridged_circuit_breaker_state{provider="",endpoint="",model=""} 0
|
||||
# HELP coder_aibridged_circuit_breaker_trips_total Total number of times the circuit breaker transitioned to open state.
|
||||
# TYPE coder_aibridged_circuit_breaker_trips_total counter
|
||||
coder_aibridged_circuit_breaker_trips_total{provider="",endpoint="",model=""} 0
|
||||
# HELP coder_aibridged_passthrough_total The count of requests which were not intercepted but passed through to the upstream.
|
||||
# TYPE coder_aibridged_passthrough_total counter
|
||||
coder_aibridged_passthrough_total{provider="",route="",method=""} 0
|
||||
# HELP coder_aibridgeproxyd_connect_sessions_total Total number of CONNECT sessions established.
|
||||
# TYPE coder_aibridgeproxyd_connect_sessions_total counter
|
||||
coder_aibridgeproxyd_connect_sessions_total{type=""} 0
|
||||
# HELP coder_aibridgeproxyd_inflight_mitm_requests Number of MITM requests currently being processed.
|
||||
# TYPE coder_aibridgeproxyd_inflight_mitm_requests gauge
|
||||
coder_aibridgeproxyd_inflight_mitm_requests{provider=""} 0
|
||||
# HELP coder_aibridgeproxyd_mitm_requests_total Total number of MITM requests handled by the proxy.
|
||||
# TYPE coder_aibridgeproxyd_mitm_requests_total counter
|
||||
coder_aibridgeproxyd_mitm_requests_total{provider=""} 0
|
||||
# HELP coder_aibridgeproxyd_mitm_responses_total Total number of MITM responses by HTTP status code class.
|
||||
# TYPE coder_aibridgeproxyd_mitm_responses_total counter
|
||||
coder_aibridgeproxyd_mitm_responses_total{code="",provider=""} 0
|
||||
|
||||
@@ -0,0 +1,712 @@
|
||||
// Package main provides a tool to scan Go source files and extract Prometheus
|
||||
// metric definitions. It outputs metrics in Prometheus text exposition format
|
||||
// to stdout for use by the documentation generator.
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// go run ./scripts/metricsdocgen/scanner > scripts/metricsdocgen/generated_metrics
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"io"
|
||||
"io/fs"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// Directories to scan for metric definitions, relative to the repository root.
// Add or remove directories here to control the scanner's scope.
//
// NOTE(review): order matters here — packageDeclarations is filled in as
// directories are scanned, so string constants from later directories are not
// visible while earlier ones are being processed.
var scanDirs = []string{
	"agent",
	"coderd",
	"enterprise",
	"provisionerd",
}
|
||||
|
||||
// skipPaths lists files that should be excluded from scanning. Their metrics
// must be maintained in the static metrics file instead.
//
// TODO(ssncferreira): Add support for resolving WrapRegistererWithPrefix to
// eliminate the need for this skip list.
var skipPaths = []string{
	"enterprise/aibridgeproxyd/metrics.go",
}
|
||||
|
||||
// MetricType represents the type of Prometheus metric.
type MetricType string

// The four standard Prometheus metric types. The string values match the
// `# TYPE` lines of the text exposition format.
const (
	MetricTypeCounter   MetricType = "counter"
	MetricTypeGauge     MetricType = "gauge"
	MetricTypeHistogram MetricType = "histogram"
	MetricTypeSummary   MetricType = "summary"
)
|
||||
|
||||
// Metric represents a single Prometheus metric definition extracted from
// source code.
type Metric struct {
	Name   string     // Full metric name (namespace_subsystem_name)
	Type   MetricType // counter, gauge, histogram, or summary
	Help   string     // Description of the metric
	Labels []string   // Label names for this metric
}
|
||||
|
||||
// metricOpts holds the fields extracted from a prometheus.*Opts struct
// (CounterOpts, GaugeOpts, HistogramOpts, ...). The full metric name is
// assembled from Namespace, Subsystem, and Name.
type metricOpts struct {
	Namespace string // Opts.Namespace — first component of the full name
	Subsystem string // Opts.Subsystem — second component of the full name
	Name      string // Opts.Name — base metric name
	Help      string // Opts.Help — metric description
}
|
||||
|
||||
// declarations holds const/var values collected from a file for resolving
// references to them inside metric option structs.
type declarations struct {
	strings      map[string]string   // string constants/variables, by identifier
	stringSlices map[string][]string // []string variables, by identifier
}
|
||||
|
||||
// packageDeclarations holds exported string constants collected from all
// scanned files, keyed by package name. This allows resolving cross-file
// references.
//
// Note: resolution depends on directory scan order in scanDirs, i.e.,
// constants from later directories won't be available when scanning earlier
// ones.
var packageDeclarations = make(map[string]map[string]string)
|
||||
|
||||
func main() {
|
||||
metrics, err := scanAllDirs()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to scan directories: %v", err)
|
||||
}
|
||||
|
||||
// Duplicates are not expected since Prometheus enforces unique metric names at registration.
|
||||
uniqueMetrics := make(map[string]Metric)
|
||||
for _, m := range metrics {
|
||||
uniqueMetrics[m.Name] = m
|
||||
}
|
||||
metrics = make([]Metric, 0, len(uniqueMetrics))
|
||||
for _, m := range uniqueMetrics {
|
||||
metrics = append(metrics, m)
|
||||
}
|
||||
|
||||
// Sort metrics by name for consistent output across runs.
|
||||
sort.Slice(metrics, func(i, j int) bool {
|
||||
return metrics[i].Name < metrics[j].Name
|
||||
})
|
||||
|
||||
writeMetrics(metrics, os.Stdout)
|
||||
|
||||
log.Printf("Successfully parsed %d metrics", len(metrics))
|
||||
}
|
||||
|
||||
// scanAllDirs scans all configured directories for metric definitions.
|
||||
func scanAllDirs() ([]Metric, error) {
|
||||
var allMetrics []Metric
|
||||
|
||||
for _, dir := range scanDirs {
|
||||
metrics, err := scanDirectory(dir)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("scanning %s: %w", dir, err)
|
||||
}
|
||||
|
||||
log.Printf("scanning %s: found %d metrics", dir, len(metrics))
|
||||
allMetrics = append(allMetrics, metrics...)
|
||||
}
|
||||
|
||||
return allMetrics, nil
|
||||
}
|
||||
|
||||
// scanDirectory recursively walks a directory and extracts metrics from all Go files.
|
||||
func scanDirectory(root string) ([]Metric, error) {
|
||||
var metrics []Metric
|
||||
|
||||
err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Skip non-Go files.
|
||||
if d.IsDir() || !strings.HasSuffix(path, ".go") {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Skip test files.
|
||||
if strings.HasSuffix(path, "_test.go") {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Skip files listed in skipPaths.
|
||||
for _, sp := range skipPaths {
|
||||
if path == sp {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
fileMetrics, err := scanFile(path)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("scanning %s: %w", path, err)
|
||||
}
|
||||
|
||||
if len(fileMetrics) > 0 {
|
||||
log.Printf("scanning %s: found %d metrics", path, len(fileMetrics))
|
||||
}
|
||||
metrics = append(metrics, fileMetrics...)
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return metrics, err
|
||||
}
|
||||
|
||||
// scanFile parses a single Go file and extracts all Prometheus metric definitions.
|
||||
func scanFile(path string) ([]Metric, error) {
|
||||
fset := token.NewFileSet()
|
||||
file, err := parser.ParseFile(fset, path, nil, parser.SkipObjectResolution)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parsing file: %w", err)
|
||||
}
|
||||
|
||||
// Collect exported constants into the global package declarations map.
|
||||
collectPackageConsts(file)
|
||||
|
||||
// Collect file-local const and var declarations for resolving references.
|
||||
decls := collectDecls(file)
|
||||
|
||||
var metrics []Metric
|
||||
|
||||
// Walk the AST looking for metric registration calls.
|
||||
ast.Inspect(file, func(n ast.Node) bool {
|
||||
call, ok := n.(*ast.CallExpr)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
|
||||
metric, ok := extractMetricFromCall(call, decls)
|
||||
if ok {
|
||||
if metric.Help == "" {
|
||||
log.Printf("WARNING: metric %q has no HELP description, skipping", metric.Name)
|
||||
// Skip metrics without descriptions, they should be fixed in the source code
|
||||
// or added to the static metrics file with a manual description.
|
||||
return true
|
||||
}
|
||||
metrics = append(metrics, metric)
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
return metrics, nil
|
||||
}
|
||||
|
||||
// collectPackageConsts collects exported string constants from a file into
|
||||
// the global packageDeclarations map, keyed by package name.
|
||||
func collectPackageConsts(file *ast.File) {
|
||||
pkgName := file.Name.Name
|
||||
|
||||
if packageDeclarations[pkgName] == nil {
|
||||
packageDeclarations[pkgName] = make(map[string]string)
|
||||
}
|
||||
|
||||
for _, decl := range file.Decls {
|
||||
genDecl, ok := decl.(*ast.GenDecl)
|
||||
if !ok || genDecl.Tok != token.CONST {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, spec := range genDecl.Specs {
|
||||
valueSpec, ok := spec.(*ast.ValueSpec)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
for i, name := range valueSpec.Names {
|
||||
if !ast.IsExported(name.Name) {
|
||||
continue
|
||||
}
|
||||
|
||||
if i >= len(valueSpec.Values) {
|
||||
continue
|
||||
}
|
||||
|
||||
if lit, ok := valueSpec.Values[i].(*ast.BasicLit); ok {
|
||||
if lit.Kind == token.STRING {
|
||||
packageDeclarations[pkgName][name.Name] = strings.Trim(lit.Value, `"`)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// resolveStringExpr attempts to resolve an expression to a string value.
|
||||
// Examples:
|
||||
// - "my_metric": "my_metric" (string literal)
|
||||
// - metricName: resolved value of metricName constant (identifier)
|
||||
// - agentmetrics.LabelUsername: resolved from package constants (selector)
|
||||
func resolveStringExpr(expr ast.Expr, decls declarations) string {
|
||||
switch e := expr.(type) {
|
||||
case *ast.BasicLit:
|
||||
return strings.Trim(e.Value, `"`)
|
||||
case *ast.Ident:
|
||||
return decls.strings[e.Name]
|
||||
case *ast.BinaryExpr:
|
||||
return resolveBinaryExpr(e, decls)
|
||||
case *ast.SelectorExpr:
|
||||
// Handle pkg.Const syntax.
|
||||
if ident, ok := e.X.(*ast.Ident); ok {
|
||||
if pkgConsts, ok := packageDeclarations[ident.Name]; ok {
|
||||
return pkgConsts[e.Sel.Name]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// resolveBinaryExpr resolves a binary expression (string concatenation) to a string.
|
||||
// It recursively resolves the left and right operands.
|
||||
// Example:
|
||||
// - "coderd_" + "api_" + "requests": "coderd_api_requests"
|
||||
// - namespace + "_" + metricName: resolved concatenation
|
||||
func resolveBinaryExpr(expr *ast.BinaryExpr, decls declarations) string {
|
||||
left := resolveStringExpr(expr.X, decls)
|
||||
right := resolveStringExpr(expr.Y, decls)
|
||||
if left != "" && right != "" {
|
||||
return left + right
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// extractStringSlice extracts a []string from a composite literal.
|
||||
// Example:
|
||||
// - []string{"a", "b", myConst}: ["a", "b", <resolved value of myConst>]
|
||||
func extractStringSlice(lit *ast.CompositeLit, decls declarations) []string {
|
||||
var labels []string
|
||||
for _, elt := range lit.Elts {
|
||||
if label := resolveStringExpr(elt, decls); label != "" {
|
||||
labels = append(labels, label)
|
||||
}
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
// collectDecls collects const and var declarations from a file.
|
||||
// This is used to resolve constant and variable references in metric definitions.
|
||||
func collectDecls(file *ast.File) declarations {
|
||||
decls := declarations{
|
||||
strings: make(map[string]string),
|
||||
stringSlices: make(map[string][]string),
|
||||
}
|
||||
|
||||
for _, decl := range file.Decls {
|
||||
genDecl, ok := decl.(*ast.GenDecl)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, spec := range genDecl.Specs {
|
||||
valueSpec, ok := spec.(*ast.ValueSpec)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
for i, name := range valueSpec.Names {
|
||||
if i >= len(valueSpec.Values) {
|
||||
continue
|
||||
}
|
||||
|
||||
switch v := valueSpec.Values[i].(type) {
|
||||
case *ast.BasicLit:
|
||||
// String literal: const name = "value"
|
||||
decls.strings[name.Name] = strings.Trim(v.Value, `"`)
|
||||
case *ast.BinaryExpr:
|
||||
// Concatenation: const name = prefix + "suffix"
|
||||
if resolved := resolveBinaryExpr(v, decls); resolved != "" {
|
||||
decls.strings[name.Name] = resolved
|
||||
}
|
||||
case *ast.CompositeLit:
|
||||
// Slice literal: var labels = []string{"a", "b"}
|
||||
if resolved := extractStringSlice(v, decls); resolved != nil {
|
||||
decls.stringSlices[name.Name] = resolved
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return decls
|
||||
}
|
||||
|
||||
// extractLabels extracts label names from an expression passed as an argument
|
||||
// to a metric constructor. Handles both inline []string literals and
|
||||
// variable references from decls.
|
||||
// Examples:
|
||||
// - []string{"label1", "label2"}: ["label1", "label2"] (inline literal)
|
||||
// - myLabels: resolved value of myLabels variable (variable reference)
|
||||
func extractLabels(expr ast.Expr, decls declarations) []string {
|
||||
switch e := expr.(type) {
|
||||
case *ast.CompositeLit:
|
||||
// []string{"label1", "label2"}
|
||||
return extractStringSlice(e, decls)
|
||||
case *ast.Ident:
|
||||
// Variable reference like 'labels'.
|
||||
if labels, ok := decls.stringSlices[e.Name]; ok {
|
||||
return labels
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractNewDescMetric extracts a metric from a prometheus.NewDesc() call.
|
||||
// Pattern: prometheus.NewDesc(name, help, variableLabels, constLabels)
|
||||
// Currently, coder only uses MustNewConstMetric with NewDesc.
|
||||
// TODO(ssncferreira): Add support for other MustNewConst* functions if needed.
|
||||
func extractNewDescMetric(call *ast.CallExpr, decls declarations) (Metric, bool) {
|
||||
// Check if this is a prometheus.NewDesc call.
|
||||
sel, ok := call.Fun.(*ast.SelectorExpr)
|
||||
if !ok {
|
||||
return Metric{}, false
|
||||
}
|
||||
|
||||
// Match calls that are exactly "prometheus.NewDesc()". This checks the local
|
||||
// package identifier, not the resolved import path. If the prometheus package
|
||||
// is imported with an alias, this will not match.
|
||||
ident, ok := sel.X.(*ast.Ident)
|
||||
if !ok || ident.Name != "prometheus" || sel.Sel.Name != "NewDesc" {
|
||||
return Metric{}, false
|
||||
}
|
||||
|
||||
// NewDesc requires at least 4 arguments: name, help, variableLabels, constLabels
|
||||
if len(call.Args) < 4 {
|
||||
return Metric{}, false
|
||||
}
|
||||
|
||||
// Extract name (first argument).
|
||||
name := resolveStringExpr(call.Args[0], decls)
|
||||
if name == "" {
|
||||
log.Printf("extractNewDescMetric: skipping prometheus.NewDesc() call: could not resolve metric name")
|
||||
return Metric{}, false
|
||||
}
|
||||
|
||||
// Extract help (second argument).
|
||||
help := resolveStringExpr(call.Args[1], decls)
|
||||
|
||||
// Extract labels (third argument).
|
||||
labels := extractLabels(call.Args[2], decls)
|
||||
|
||||
// Infer metric type from name suffix.
|
||||
// TODO(ssncferreira): The actual type is determined by the MustNewConst* function
|
||||
// that uses this descriptor (e.g., MustNewConstMetric with prometheus.CounterValue or
|
||||
// prometheus.GaugeValue). Currently, coder only uses MustNewConstMetric, so we
|
||||
// infer the type from naming conventions.
|
||||
metricType := MetricTypeGauge
|
||||
if strings.HasSuffix(name, "_total") || strings.HasSuffix(name, "_count") {
|
||||
metricType = MetricTypeCounter
|
||||
}
|
||||
|
||||
return Metric{
|
||||
Name: name,
|
||||
Type: metricType,
|
||||
Help: help,
|
||||
Labels: labels,
|
||||
}, true
|
||||
}
|
||||
|
||||
// parseMetricFuncName parses a prometheus function name and returns the metric type
|
||||
// and whether it's a Vec type. Returns empty string if not a recognized metric function.
|
||||
func parseMetricFuncName(funcName string) (MetricType, bool) {
|
||||
isVec := strings.HasSuffix(funcName, "Vec")
|
||||
baseName := strings.TrimSuffix(funcName, "Vec")
|
||||
|
||||
switch baseName {
|
||||
case "NewGauge":
|
||||
return MetricTypeGauge, isVec
|
||||
case "NewCounter":
|
||||
return MetricTypeCounter, isVec
|
||||
case "NewHistogram":
|
||||
return MetricTypeHistogram, isVec
|
||||
case "NewSummary":
|
||||
return MetricTypeSummary, isVec
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// extractOpts extracts fields from a prometheus.*Opts composite literal.
|
||||
func extractOpts(expr ast.Expr, decls declarations) (metricOpts, bool) {
|
||||
// Handle both direct composite literals and calls that return opts.
|
||||
var lit *ast.CompositeLit
|
||||
|
||||
switch e := expr.(type) {
|
||||
case *ast.CompositeLit:
|
||||
lit = e
|
||||
case *ast.UnaryExpr:
|
||||
// Handle &prometheus.GaugeOpts{...}
|
||||
if l, ok := e.X.(*ast.CompositeLit); ok {
|
||||
lit = l
|
||||
}
|
||||
}
|
||||
|
||||
if lit == nil {
|
||||
return metricOpts{}, false
|
||||
}
|
||||
|
||||
var opts metricOpts
|
||||
for _, elt := range lit.Elts {
|
||||
kv, ok := elt.(*ast.KeyValueExpr)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
key, ok := kv.Key.(*ast.Ident)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
value := resolveStringExpr(kv.Value, decls)
|
||||
|
||||
switch key.Name {
|
||||
case "Namespace":
|
||||
opts.Namespace = value
|
||||
case "Subsystem":
|
||||
opts.Subsystem = value
|
||||
case "Name":
|
||||
opts.Name = value
|
||||
case "Help":
|
||||
opts.Help = value
|
||||
}
|
||||
}
|
||||
|
||||
return opts, opts.Name != ""
|
||||
}
|
||||
|
||||
// buildMetricName constructs the full metric name from namespace, subsystem,
// and name, joining only the non-empty parts with "_" to handle optional
// namespace/subsystem.
// e.g., ("coderd", "", "agents_up"): "coderd_agents_up"
func buildMetricName(namespace, subsystem, name string) string {
	parts := make([]string, 0, 3)
	for _, part := range []string{namespace, subsystem, name} {
		if part != "" {
			parts = append(parts, part)
		}
	}
	return strings.Join(parts, "_")
}
|
||||
|
||||
// extractOptsMetric extracts a metric from prometheus.New*() or prometheus.New*Vec() calls.
|
||||
// Supported patterns:
|
||||
// - prometheus.NewGauge(prometheus.GaugeOpts{...})
|
||||
// - prometheus.NewCounter(prometheus.CounterOpts{...})
|
||||
// - prometheus.NewHistogram(prometheus.HistogramOpts{...})
|
||||
// - prometheus.NewSummary(prometheus.SummaryOpts{...})
|
||||
// - prometheus.NewGaugeVec(prometheus.GaugeOpts{...}, labels)
|
||||
// - prometheus.NewCounterVec(prometheus.CounterOpts{...}, labels)
|
||||
// - prometheus.NewHistogramVec(prometheus.HistogramOpts{...}, labels)
|
||||
// - prometheus.NewSummaryVec(prometheus.SummaryOpts{...}, labels)
|
||||
func extractOptsMetric(call *ast.CallExpr, decls declarations) (Metric, bool) {
|
||||
sel, ok := call.Fun.(*ast.SelectorExpr)
|
||||
if !ok {
|
||||
return Metric{}, false
|
||||
}
|
||||
|
||||
// Match calls that are exactly "prometheus.New*(...)". This checks the local
|
||||
// package identifier, not the resolved import path. If the prometheus package
|
||||
// is imported with an alias, this will not match.
|
||||
ident, ok := sel.X.(*ast.Ident)
|
||||
if !ok || ident.Name != "prometheus" {
|
||||
return Metric{}, false
|
||||
}
|
||||
|
||||
funcName := sel.Sel.Name
|
||||
metricType, isVec := parseMetricFuncName(funcName)
|
||||
if metricType == "" {
|
||||
return Metric{}, false
|
||||
}
|
||||
|
||||
// Need at least one argument (the Opts struct).
|
||||
if len(call.Args) < 1 {
|
||||
return Metric{}, false
|
||||
}
|
||||
|
||||
// Extract metric info from the Opts struct.
|
||||
opts, ok := extractOpts(call.Args[0], decls)
|
||||
if !ok {
|
||||
log.Printf("extractOptsMetric: skipping prometheus.%s() call: could not extract opts", funcName)
|
||||
return Metric{}, false
|
||||
}
|
||||
|
||||
// Extract labels for Vec types.
|
||||
var labels []string
|
||||
if isVec && len(call.Args) >= 2 {
|
||||
labels = extractLabels(call.Args[1], decls)
|
||||
}
|
||||
|
||||
// Build the full metric name.
|
||||
name := buildMetricName(opts.Namespace, opts.Subsystem, opts.Name)
|
||||
if name == "" {
|
||||
log.Printf("extractOptsMetric: skipping prometheus.%s() call: could not build metric name", funcName)
|
||||
return Metric{}, false
|
||||
}
|
||||
|
||||
return Metric{
|
||||
Name: name,
|
||||
Type: metricType,
|
||||
Help: opts.Help,
|
||||
Labels: labels,
|
||||
}, true
|
||||
}
|
||||
|
||||
// isPromautoCall checks if an expression is a promauto factory call.
|
||||
// Matches:
|
||||
// - promauto.With(reg): direct chained call
|
||||
// - factory: variable that was assigned from promauto.With()
|
||||
func isPromautoCall(expr ast.Expr) bool {
|
||||
switch e := expr.(type) {
|
||||
case *ast.CallExpr:
|
||||
// Check for promauto.With(reg).New*()
|
||||
sel, ok := e.Fun.(*ast.SelectorExpr)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
ident, ok := sel.X.(*ast.Ident)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
// Match calls that are exactly "promauto.With(...)". This checks the local
|
||||
// package identifier, not the resolved import path. If the promauto package
|
||||
// is imported with an alias, this will not match.
|
||||
return ident.Name == "promauto" && sel.Sel.Name == "With"
|
||||
case *ast.Ident:
|
||||
// Heuristic: assume any identifier that isn't "prometheus" used as a
|
||||
// receiver for New*() methods is a promauto factory variable.
|
||||
// This works for the codebase patterns (e.g., factory.NewGaugeVec(...))
|
||||
// but could false-positive on other receivers. Downstream extractOpts
|
||||
// validation prevents incorrect metrics from being emitted.
|
||||
return e.Name != "prometheus"
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// extractPromautoMetric extracts a metric from promauto.With().New*() or factory.New*() calls.
|
||||
// Supported patterns:
|
||||
// - promauto.With(reg).NewCounterVec(prometheus.CounterOpts{...}, labels)
|
||||
// - factory.NewGaugeVec(prometheus.GaugeOpts{...}, labels) where factory := promauto.With(reg)
|
||||
func extractPromautoMetric(call *ast.CallExpr, decls declarations) (Metric, bool) {
|
||||
sel, ok := call.Fun.(*ast.SelectorExpr)
|
||||
if !ok {
|
||||
return Metric{}, false
|
||||
}
|
||||
|
||||
funcName := sel.Sel.Name
|
||||
metricType, isVec := parseMetricFuncName(funcName)
|
||||
if metricType == "" {
|
||||
return Metric{}, false
|
||||
}
|
||||
|
||||
// Check if this is a promauto call by examining the receiver.
|
||||
if !isPromautoCall(sel.X) {
|
||||
return Metric{}, false
|
||||
}
|
||||
|
||||
// Need at least one argument (the Opts struct).
|
||||
if len(call.Args) < 1 {
|
||||
return Metric{}, false
|
||||
}
|
||||
|
||||
// Extract metric info from the Opts struct.
|
||||
opts, ok := extractOpts(call.Args[0], decls)
|
||||
if !ok {
|
||||
log.Printf("extractPromautoMetric: skipping promauto.%s() call: could not extract opts", funcName)
|
||||
return Metric{}, false
|
||||
}
|
||||
|
||||
// Extract labels for Vec types.
|
||||
var labels []string
|
||||
if isVec && len(call.Args) >= 2 {
|
||||
labels = extractLabels(call.Args[1], decls)
|
||||
}
|
||||
|
||||
// Build the full metric name.
|
||||
name := buildMetricName(opts.Namespace, opts.Subsystem, opts.Name)
|
||||
if name == "" {
|
||||
log.Printf("extractPromautoMetric: skipping promauto.%s() call: could not build metric name", funcName)
|
||||
return Metric{}, false
|
||||
}
|
||||
|
||||
return Metric{
|
||||
Name: name,
|
||||
Type: metricType,
|
||||
Help: opts.Help,
|
||||
Labels: labels,
|
||||
}, true
|
||||
}
|
||||
|
||||
// extractMetricFromCall attempts to extract a Metric from a function call expression.
|
||||
// It returns the metric and true if successful, or an empty metric and false if
|
||||
// the call is not a metric registration.
|
||||
//
|
||||
// Supported patterns:
|
||||
// - prometheus.NewDesc() calls
|
||||
// - prometheus.New*() and prometheus.New*Vec() with *Opts{}
|
||||
// - promauto.With(reg).New*() and factory.New*() patterns
|
||||
func extractMetricFromCall(call *ast.CallExpr, decls declarations) (Metric, bool) {
|
||||
// Check for prometheus.NewDesc() pattern.
|
||||
if metric, ok := extractNewDescMetric(call, decls); ok {
|
||||
return metric, true
|
||||
}
|
||||
|
||||
// Check for prometheus.New*() and prometheus.New*Vec() patterns.
|
||||
if metric, ok := extractOptsMetric(call, decls); ok {
|
||||
return metric, true
|
||||
}
|
||||
|
||||
// Check for promauto.With(reg).New*() pattern.
|
||||
if metric, ok := extractPromautoMetric(call, decls); ok {
|
||||
return metric, true
|
||||
}
|
||||
|
||||
return Metric{}, false
|
||||
}
|
||||
|
||||
// String returns the metric in Prometheus text exposition format.
|
||||
// Label values are empty strings and metric values are 0 since only
|
||||
// metadata (name, type, help, label names) is used for documentation generation.
|
||||
func (m Metric) String() string {
|
||||
var buf strings.Builder
|
||||
|
||||
// Write HELP line.
|
||||
_, _ = fmt.Fprintf(&buf, "# HELP %s %s\n", m.Name, m.Help)
|
||||
|
||||
// Write TYPE line.
|
||||
_, _ = fmt.Fprintf(&buf, "# TYPE %s %s\n", m.Name, m.Type)
|
||||
|
||||
// Write a sample metric line with empty label values and zero metric value.
|
||||
if len(m.Labels) > 0 {
|
||||
labelPairs := make([]string, len(m.Labels))
|
||||
for i, l := range m.Labels {
|
||||
labelPairs[i] = fmt.Sprintf("%s=\"\"", l)
|
||||
}
|
||||
_, _ = fmt.Fprintf(&buf, "%s{%s} 0\n", m.Name, strings.Join(labelPairs, ","))
|
||||
} else {
|
||||
_, _ = fmt.Fprintf(&buf, "%s 0\n", m.Name)
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// writeMetrics writes all metrics in Prometheus text exposition format.
|
||||
func writeMetrics(metrics []Metric, w io.Writer) {
|
||||
for _, m := range metrics {
|
||||
_, _ = fmt.Fprint(w, m.String())
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
<link rel="preload" href="/node_modules/@fontsource-variable/inter/files/inter-latin-wght-normal.woff2" as="font" type="font/woff2" crossorigin />
|
||||
<link rel="preload" href="/node_modules/@fontsource-variable/geist/files/geist-latin-wght-normal.woff2" as="font" type="font/woff2" crossorigin />
|
||||
|
||||
<!-- Web terminal fonts -->
|
||||
<link rel="preload" href="/node_modules/@fontsource/ibm-plex-mono/files/ibm-plex-mono-latin-400-normal.woff2" as="font" type="font/woff2" crossorigin />
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user