Compare commits
65 Commits
| SHA1 |
|---|
| d4c7cb2021 |
| 25400fedca |
| 82bb833099 |
| 61beb7bfa8 |
| b4be5bcfed |
| ceaba0778e |
| e24cc5e6da |
| 259dee2ea8 |
| 8e0516a19c |
| 770fdb377c |
| 83dbf73dde |
| 0ab23abb19 |
| c4bf5a2d81 |
| 5cb02a6cc0 |
| cfdd4a9b88 |
| d9159103cd |
| 532a1f3054 |
| 6aeb144a98 |
| f94d8fc019 |
| e93a917c2f |
| 0f054096e4 |
| 2f829286f2 |
| 6acfcd5736 |
| 9e021f7b57 |
| aa306f2262 |
| fcd64ea7f5 |
| 9d7509aeb3 |
| d5bb1361e2 |
| e17b47f9ff |
| 40df21ed62 |
| 65ef6df1df |
| f1b2715555 |
| ad93262d07 |
| c750695d83 |
| 3c05cb6255 |
| 18ef78604f |
| db5ccda1ec |
| 0873d9af6d |
| c3c059fbc4 |
| ff46917e62 |
| d9888ced11 |
| 9ec90cf2e7 |
| 929db243cb |
| c85d79bcdb |
| fa7bbe2f55 |
| 4e2af837b7 |
| 9ebcca5b0d |
| 56e7858570 |
| 74d0c39cb3 |
| bf40d678ec |
| a47b3a4cb5 |
| 645da33767 |
| ab4366f5c6 |
| afbe9ea154 |
| cf6bb40cf8 |
| 60ac382ae6 |
| dcb4251849 |
| dc3b11e545 |
| a110c98040 |
| bbf3f763fd |
| 59866d9f52 |
| e00578cf99 |
| fe850752dd |
| d786bc400c |
| cbb0952e5a |
@@ -6,6 +6,8 @@ updates:
      interval: "weekly"
      time: "06:00"
      timezone: "America/Chicago"
    cooldown:
      default-days: 7
    labels: []
    commit-message:
      prefix: "ci"
@@ -68,8 +70,8 @@ updates:
      interval: "monthly"
      time: "06:00"
      timezone: "America/Chicago"
    reviewers:
      - "coder/ts"
    cooldown:
      default-days: 7
    commit-message:
      prefix: "chore"
    labels: []
@@ -28,6 +28,7 @@ jobs:
          github-token: "${{ secrets.GITHUB_TOKEN }}"

      - name: Approve the PR
+       if: steps.metadata.outputs.package-ecosystem != 'github-actions'
        run: |
          echo "Approving $PR_URL"
          gh pr review --approve "$PR_URL"
@@ -36,6 +37,7 @@ jobs:
          GH_TOKEN: ${{secrets.GITHUB_TOKEN}}

      - name: Enable auto-merge
+       if: steps.metadata.outputs.package-ecosystem != 'github-actions'
        run: |
          echo "Enabling auto-merge for $PR_URL"
          gh pr merge --auto --squash "$PR_URL"
@@ -45,6 +47,11 @@ jobs:

      - name: Send Slack notification
        run: |
+         if [ "$PACKAGE_ECOSYSTEM" = "github-actions" ]; then
+           STATUS_TEXT=":pr-opened: Dependabot opened PR #${PR_NUMBER} (GitHub Actions changes are not auto-merged)"
+         else
+           STATUS_TEXT=":pr-merged: Auto merge enabled for Dependabot PR #${PR_NUMBER}"
+         fi
          curl -X POST -H 'Content-type: application/json' \
            --data '{
              "username": "dependabot",
@@ -54,7 +61,7 @@ jobs:
              "type": "header",
              "text": {
                "type": "plain_text",
-               "text": ":pr-merged: Auto merge enabled for Dependabot PR #'"${PR_NUMBER}"'",
+               "text": "'"${STATUS_TEXT}"'",
                "emoji": true
              }
            },
@@ -84,6 +91,7 @@ jobs:
            }' "${{ secrets.DEPENDABOT_PRS_SLACK_WEBHOOK }}"
        env:
          SLACK_WEBHOOK: ${{ secrets.DEPENDABOT_PRS_SLACK_WEBHOOK }}
+         PACKAGE_ECOSYSTEM: ${{ steps.metadata.outputs.package-ecosystem }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
          PR_TITLE: ${{ github.event.pull_request.title }}
          PR_URL: ${{ github.event.pull_request.html_url }}
@@ -90,6 +90,9 @@ __debug_bin*

**/.claude/settings.local.json

+# Local agent configuration
+AGENTS.local.md
+
/.env

# Ignore plans written by AI agents.
@@ -0,0 +1,3 @@
{
  "ignores": ["PLAN.md"],
}
@@ -173,6 +173,23 @@ ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
```

### Avoid Unnecessary Changes

When fixing a bug or adding a feature, don't modify code unrelated to your
task. Unnecessary changes make PRs harder to review and can introduce
regressions.

**Don't reword existing comments or code** unless the change is directly
motivated by your task. Rewording comments to be shorter or "cleaner" wastes
reviewer time and clutters the diff.

**Don't delete existing comments** that explain non-obvious behavior. These
comments preserve important context about why code works a certain way.

**When adding tests for new behavior**, add new test cases instead of modifying
existing ones. This preserves coverage for the original behavior and makes it
clear what the new test covers.

## Detailed Development Guides

@.claude/docs/ARCHITECTURE.md
@@ -181,6 +198,12 @@ ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
@.claude/docs/TROUBLESHOOTING.md
@.claude/docs/DATABASE.md

## Local Configuration

These files may be gitignored, read manually if not auto-loaded.

@AGENTS.local.md

## Common Pitfalls

1. **Audit table errors** → Update `enterprise/audit/table.go`
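The test-addition guidance above is easiest to see in code. A minimal, self-contained sketch (the `parsePort` helper is hypothetical, not from this repo): new behavior gets a new table entry, and the existing entries stay untouched.

```go
package example

import (
	"strconv"
	"strings"
	"testing"
)

// parsePort is a hypothetical helper used only for this illustration.
func parsePort(s string) (int, error) {
	return strconv.Atoi(strings.TrimPrefix(s, "http:"))
}

func TestParsePort(t *testing.T) {
	cases := []struct {
		name string
		in   string
		want int
		ok   bool
	}{
		{name: "valid", in: "8080", want: 8080, ok: true},
		{name: "empty", in: "", ok: false},
		// New behavior gets a new case; the cases above are untouched,
		// so coverage of the original behavior is preserved.
		{name: "with scheme", in: "http:8080", want: 8080, ok: true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := parsePort(tc.in)
			if (err == nil) != tc.ok {
				t.Fatalf("parsePort(%q) error = %v, ok = %v", tc.in, err, tc.ok)
			}
			if tc.ok && got != tc.want {
				t.Fatalf("parsePort(%q) = %d, want %d", tc.in, got, tc.want)
			}
		})
	}
}
```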
@@ -679,7 +679,7 @@ gen/db: $(DB_GEN_FILES)
gen/golden-files: \
	agent/unit/testdata/.gen-golden \
	cli/testdata/.gen-golden \
	coderd/insightsapi/.gen-golden \
	coderd/.gen-golden \
	coderd/notifications/.gen-golden \
	enterprise/cli/testdata/.gen-golden \
	enterprise/tailnet/testdata/.gen-golden \
@@ -953,7 +953,7 @@ clean/golden-files:
	find \
		cli/testdata \
		coderd/notifications/testdata \
		coderd/insightsapi/testdata \
		coderd/testdata \
		enterprise/cli/testdata \
		enterprise/tailnet/testdata \
		helm/coder/tests/testdata \
@@ -991,8 +991,8 @@ helm/provisioner/tests/testdata/.gen-golden: $(wildcard helm/provisioner/tests/t
	TZ=UTC go test ./helm/provisioner/tests -run=TestUpdateGoldenFiles -update
	touch "$@"

coderd/insightsapi/.gen-golden: $(wildcard coderd/insightsapi/testdata/*/*.golden) $(GO_SRC_FILES) $(wildcard coderd/insightsapi/*_test.go)
	TZ=UTC go test ./coderd/insightsapi -run="Test.*Golden$$" -update
coderd/.gen-golden: $(wildcard coderd/testdata/*/*.golden) $(GO_SRC_FILES) $(wildcard coderd/*_test.go)
	TZ=UTC go test ./coderd -run="Test.*Golden$$" -update
	touch "$@"

coderd/notifications/.gen-golden: $(wildcard coderd/notifications/testdata/*/*.golden) $(GO_SRC_FILES) $(wildcard coderd/notifications/*_test.go)
+2 -2
@@ -1576,8 +1576,8 @@ func (a *agent) createTailnet(
			break
		}
		clog := a.logger.Named("speedtest").With(
-			slog.F("remote", conn.RemoteAddr().String()),
-			slog.F("local", conn.LocalAddr().String()))
+			slog.F("remote", conn.RemoteAddr()),
+			slog.F("local", conn.LocalAddr()))
		clog.Info(ctx, "accepted conn")
		wg.Add(1)
		closed := make(chan struct{})
@@ -0,0 +1,45 @@
package agent

import (
	"testing"

	"github.com/google/uuid"
	"github.com/stretchr/testify/require"

	"cdr.dev/slog"
	"cdr.dev/slog/sloggers/slogtest"

	"github.com/coder/coder/v2/agent/proto"
	"github.com/coder/coder/v2/testutil"
)

// TestReportConnectionEmpty tests that reportConnection() doesn't choke if given an empty IP string, which is what we
// send if we cannot get the remote address.
func TestReportConnectionEmpty(t *testing.T) {
	t.Parallel()
	connID := uuid.UUID{1}
	logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
	ctx := testutil.Context(t, testutil.WaitShort)

	uut := &agent{
		hardCtx: ctx,
		logger:  logger,
	}
	disconnected := uut.reportConnection(connID, proto.Connection_TYPE_UNSPECIFIED, "")

	require.Len(t, uut.reportConnections, 1)
	req0 := uut.reportConnections[0]
	require.Equal(t, proto.Connection_TYPE_UNSPECIFIED, req0.GetConnection().GetType())
	require.Equal(t, "", req0.GetConnection().Ip)
	require.Equal(t, connID[:], req0.GetConnection().GetId())
	require.Equal(t, proto.Connection_CONNECT, req0.GetConnection().GetAction())

	disconnected(0, "because")
	require.Len(t, uut.reportConnections, 2)
	req1 := uut.reportConnections[1]
	require.Equal(t, proto.Connection_TYPE_UNSPECIFIED, req1.GetConnection().GetType())
	require.Equal(t, "", req1.GetConnection().Ip)
	require.Equal(t, connID[:], req1.GetConnection().GetId())
	require.Equal(t, proto.Connection_DISCONNECT, req1.GetConnection().GetAction())
	require.Equal(t, "because", req1.GetConnection().GetReason())
}
@@ -1039,6 +1039,10 @@ func (api *API) processUpdatedContainersLocked(ctx context.Context, updated code
			logger.Error(ctx, "inject subagent into container failed", slog.Error(err))
			dc.Error = err.Error()
		} else {
+			// TODO(mafredri): Preserve the error from devcontainer
+			// up if it was a lifecycle script error. Currently
+			// this results in a brief flicker for the user if
+			// injection is fast, as the error is shown then erased.
			dc.Error = ""
		}
	}
@@ -1347,27 +1351,41 @@ func (api *API) CreateDevcontainer(workspaceFolder, configPath string, opts ...D
	upOptions := []DevcontainerCLIUpOptions{WithUpOutput(infoW, errW)}
	upOptions = append(upOptions, opts...)

-	_, err := api.dccli.Up(ctx, dc.WorkspaceFolder, configPath, upOptions...)
-	if err != nil {
+	containerID, upErr := api.dccli.Up(ctx, dc.WorkspaceFolder, configPath, upOptions...)
+	if upErr != nil {
		// No need to log if the API is closing (context canceled), as this
		// is expected behavior when the API is shutting down.
-		if !errors.Is(err, context.Canceled) {
-			logger.Error(ctx, "devcontainer creation failed", slog.Error(err))
+		if !errors.Is(upErr, context.Canceled) {
+			logger.Error(ctx, "devcontainer creation failed", slog.Error(upErr))
		}

-		api.mu.Lock()
-		dc = api.knownDevcontainers[dc.WorkspaceFolder]
-		dc.Status = codersdk.WorkspaceAgentDevcontainerStatusError
-		dc.Error = err.Error()
-		api.knownDevcontainers[dc.WorkspaceFolder] = dc
-		api.recreateErrorTimes[dc.WorkspaceFolder] = api.clock.Now("agentcontainers", "recreate", "errorTimes")
-		api.mu.Unlock()
+		// If we don't have a container ID, the error is fatal, so we
+		// should mark the devcontainer as errored and return.
+		if containerID == "" {
+			api.mu.Lock()
+			dc = api.knownDevcontainers[dc.WorkspaceFolder]
+			dc.Status = codersdk.WorkspaceAgentDevcontainerStatusError
+			dc.Error = upErr.Error()
+			api.knownDevcontainers[dc.WorkspaceFolder] = dc
+			api.recreateErrorTimes[dc.WorkspaceFolder] = api.clock.Now("agentcontainers", "recreate", "errorTimes")
+			api.broadcastUpdatesLocked()
+			api.mu.Unlock()

-		return xerrors.Errorf("start devcontainer: %w", err)
+			return xerrors.Errorf("start devcontainer: %w", upErr)
+		}

+		// If we have a container ID, it means the container was created
+		// but a lifecycle script (e.g. postCreateCommand) failed. In this
+		// case, we still want to refresh containers to pick up the new
+		// container, inject the agent, and allow the user to debug the
+		// issue. We store the error to surface it to the user.
+		logger.Warn(ctx, "devcontainer created with errors (e.g. lifecycle script failure), container is available",
+			slog.F("container_id", containerID),
+		)
+	} else {
+		logger.Info(ctx, "devcontainer created successfully")
	}

-	logger.Info(ctx, "devcontainer created successfully")
-
	api.mu.Lock()
	dc = api.knownDevcontainers[dc.WorkspaceFolder]
	// Update the devcontainer status to Running or Stopped based on the
@@ -1376,13 +1394,18 @@ func (api *API) CreateDevcontainer(workspaceFolder, configPath string, opts ...D
	// to minimize the time between API consistency, we guess the status
	// based on the container state.
	dc.Status = codersdk.WorkspaceAgentDevcontainerStatusStopped
-	if dc.Container != nil {
-		if dc.Container.Running {
-			dc.Status = codersdk.WorkspaceAgentDevcontainerStatusRunning
-		}
+	if dc.Container != nil && dc.Container.Running {
+		dc.Status = codersdk.WorkspaceAgentDevcontainerStatusRunning
	}
	dc.Dirty = false
-	dc.Error = ""
+	if upErr != nil {
+		// If there was a lifecycle script error but we have a container ID,
+		// the container is running so we should set the status to Running.
+		dc.Status = codersdk.WorkspaceAgentDevcontainerStatusRunning
+		dc.Error = upErr.Error()
+	} else {
+		dc.Error = ""
+	}
	api.recreateSuccessTimes[dc.WorkspaceFolder] = api.clock.Now("agentcontainers", "recreate", "successTimes")
	api.knownDevcontainers[dc.WorkspaceFolder] = dc
	api.broadcastUpdatesLocked()
@@ -1434,6 +1457,8 @@ func (api *API) markDevcontainerDirty(configPath string, modifiedAt time.Time) {

		api.knownDevcontainers[dc.WorkspaceFolder] = dc
	}
+
+	api.broadcastUpdatesLocked()
}

// cleanupSubAgents removes subagents that are no longer managed by
@@ -234,6 +234,8 @@ func (w *fakeWatcher) sendEventWaitNextCalled(ctx context.Context, event fsnotif
// fakeSubAgentClient implements SubAgentClient for testing purposes.
type fakeSubAgentClient struct {
	logger slog.Logger

+	mu     sync.Mutex // Protects following.
	agents map[uuid.UUID]agentcontainers.SubAgent

	listErrC chan error // If set, send to return error, close to return nil.
@@ -254,6 +256,8 @@ func (m *fakeSubAgentClient) List(ctx context.Context) ([]agentcontainers.SubAge
			}
		}
	}
+	m.mu.Lock()
+	defer m.mu.Unlock()
	var agents []agentcontainers.SubAgent
	for _, agent := range m.agents {
		agents = append(agents, agent)
@@ -283,6 +287,9 @@ func (m *fakeSubAgentClient) Create(ctx context.Context, agent agentcontainers.S
		return agentcontainers.SubAgent{}, xerrors.New("operating system must be set")
	}

+	m.mu.Lock()
+	defer m.mu.Unlock()
+
	for _, a := range m.agents {
		if a.Name == agent.Name {
			return agentcontainers.SubAgent{}, &pq.Error{
@@ -314,6 +321,8 @@ func (m *fakeSubAgentClient) Delete(ctx context.Context, id uuid.UUID) error {
			}
		}
	}
+	m.mu.Lock()
+	defer m.mu.Unlock()
	if m.agents == nil {
		m.agents = make(map[uuid.UUID]agentcontainers.SubAgent)
	}
@@ -1632,6 +1641,77 @@ func TestAPI(t *testing.T) {
		require.NotNil(t, response.Devcontainers[0].Container, "container should not be nil")
	})

	// Verify that modifying a config file broadcasts the dirty status
	// over websocket immediately.
	t.Run("FileWatcherDirtyBroadcast", func(t *testing.T) {
		t.Parallel()

		ctx := testutil.Context(t, testutil.WaitShort)
		configPath := "/workspace/project/.devcontainer/devcontainer.json"
		fWatcher := newFakeWatcher(t)
		fLister := &fakeContainerCLI{
			containers: codersdk.WorkspaceAgentListContainersResponse{
				Containers: []codersdk.WorkspaceAgentContainer{
					{
						ID:           "container-id",
						FriendlyName: "container-name",
						Running:      true,
						Labels: map[string]string{
							agentcontainers.DevcontainerLocalFolderLabel: "/workspace/project",
							agentcontainers.DevcontainerConfigFileLabel:  configPath,
						},
					},
				},
			},
		}

		mClock := quartz.NewMock(t)
		tickerTrap := mClock.Trap().TickerFunc("updaterLoop")

		api := agentcontainers.NewAPI(
			slogtest.Make(t, nil).Leveled(slog.LevelDebug),
			agentcontainers.WithContainerCLI(fLister),
			agentcontainers.WithWatcher(fWatcher),
			agentcontainers.WithClock(mClock),
		)
		api.Start()
		defer api.Close()

		srv := httptest.NewServer(api.Routes())
		defer srv.Close()

		tickerTrap.MustWait(ctx).MustRelease(ctx)
		tickerTrap.Close()

		wsConn, resp, err := websocket.Dial(ctx, "ws"+strings.TrimPrefix(srv.URL, "http")+"/watch", nil)
		require.NoError(t, err)
		if resp != nil && resp.Body != nil {
			defer resp.Body.Close()
		}
		defer wsConn.Close(websocket.StatusNormalClosure, "")

		// Read and discard initial state.
		_, _, err = wsConn.Read(ctx)
		require.NoError(t, err)

		fWatcher.waitNext(ctx)
		fWatcher.sendEventWaitNextCalled(ctx, fsnotify.Event{
			Name: configPath,
			Op:   fsnotify.Write,
		})

		// Verify dirty status is broadcast without advancing the clock.
		_, msg, err := wsConn.Read(ctx)
		require.NoError(t, err)

		var response codersdk.WorkspaceAgentListContainersResponse
		err = json.Unmarshal(msg, &response)
		require.NoError(t, err)
		require.Len(t, response.Devcontainers, 1)
		assert.True(t, response.Devcontainers[0].Dirty,
			"devcontainer should be marked as dirty after config file modification")
	})

	t.Run("SubAgentLifecycle", func(t *testing.T) {
		t.Parallel()
@@ -2070,6 +2150,122 @@ func TestAPI(t *testing.T) {
		require.Equal(t, "", response.Devcontainers[0].Error)
	})

	// This test verifies that when devcontainer up fails due to a
	// lifecycle script error (such as postCreateCommand failing) but the
	// container was successfully created, we still proceed with the
	// devcontainer. The container should be available for use and the
	// agent should be injected.
	t.Run("DuringUpWithContainerID", func(t *testing.T) {
		t.Parallel()

		var (
			ctx    = testutil.Context(t, testutil.WaitMedium)
			logger = slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
			mClock = quartz.NewMock(t)

			testContainer = codersdk.WorkspaceAgentContainer{
				ID:           "test-container-id",
				FriendlyName: "test-container",
				Image:        "test-image",
				Running:      true,
				CreatedAt:    time.Now(),
				Labels: map[string]string{
					agentcontainers.DevcontainerLocalFolderLabel: "/workspaces/project",
					agentcontainers.DevcontainerConfigFileLabel:  "/workspaces/project/.devcontainer/devcontainer.json",
				},
			}
			fCCLI = &fakeContainerCLI{
				containers: codersdk.WorkspaceAgentListContainersResponse{
					Containers: []codersdk.WorkspaceAgentContainer{testContainer},
				},
				arch: "amd64",
			}
			fDCCLI = &fakeDevcontainerCLI{
				upID:   testContainer.ID,
				upErrC: make(chan func() error, 1),
			}
			fSAC = &fakeSubAgentClient{
				logger: logger.Named("fakeSubAgentClient"),
			}

			testDevcontainer = codersdk.WorkspaceAgentDevcontainer{
				ID:              uuid.New(),
				Name:            "test-devcontainer",
				WorkspaceFolder: "/workspaces/project",
				ConfigPath:      "/workspaces/project/.devcontainer/devcontainer.json",
				Status:          codersdk.WorkspaceAgentDevcontainerStatusStopped,
			}
		)

		mClock.Set(time.Now()).MustWait(ctx)
		tickerTrap := mClock.Trap().TickerFunc("updaterLoop")
		nowRecreateSuccessTrap := mClock.Trap().Now("recreate", "successTimes")

		api := agentcontainers.NewAPI(logger,
			agentcontainers.WithClock(mClock),
			agentcontainers.WithContainerCLI(fCCLI),
			agentcontainers.WithDevcontainerCLI(fDCCLI),
			agentcontainers.WithDevcontainers(
				[]codersdk.WorkspaceAgentDevcontainer{testDevcontainer},
				[]codersdk.WorkspaceAgentScript{{ID: testDevcontainer.ID, LogSourceID: uuid.New()}},
			),
			agentcontainers.WithSubAgentClient(fSAC),
			agentcontainers.WithSubAgentURL("test-subagent-url"),
			agentcontainers.WithWatcher(watcher.NewNoop()),
		)
		api.Start()
		defer func() {
			close(fDCCLI.upErrC)
			api.Close()
		}()

		r := chi.NewRouter()
		r.Mount("/", api.Routes())

		tickerTrap.MustWait(ctx).MustRelease(ctx)
		tickerTrap.Close()

		// Send a recreate request to trigger devcontainer up.
		req := httptest.NewRequest(http.MethodPost, "/devcontainers/"+testDevcontainer.ID.String()+"/recreate", nil)
		rec := httptest.NewRecorder()
		r.ServeHTTP(rec, req)
		require.Equal(t, http.StatusAccepted, rec.Code)

		// Simulate a lifecycle script failure. The devcontainer CLI
		// will return an error but also provide a container ID since
		// the container was created before the script failed.
		simulatedError := xerrors.New("postCreateCommand failed with exit code 1")
		testutil.RequireSend(ctx, t, fDCCLI.upErrC, func() error { return simulatedError })

		// Wait for the recreate operation to complete. We expect it to
		// record a success time because the container was created.
		nowRecreateSuccessTrap.MustWait(ctx).MustRelease(ctx)
		nowRecreateSuccessTrap.Close()

		// Advance the clock to run the devcontainer state update routine.
		_, aw := mClock.AdvanceNext()
		aw.MustWait(ctx)

		req = httptest.NewRequest(http.MethodGet, "/", nil)
		rec = httptest.NewRecorder()
		r.ServeHTTP(rec, req)
		require.Equal(t, http.StatusOK, rec.Code)

		var response codersdk.WorkspaceAgentListContainersResponse
		err := json.NewDecoder(rec.Body).Decode(&response)
		require.NoError(t, err)

		// Verify that the devcontainer is running and has the container
		// associated with it despite the lifecycle script error. The
		// error may be cleared during refresh if agent injection
		// succeeds, but the important thing is that the container is
		// available for use.
		require.Len(t, response.Devcontainers, 1)
		assert.Equal(t, codersdk.WorkspaceAgentDevcontainerStatusRunning, response.Devcontainers[0].Status)
		require.NotNil(t, response.Devcontainers[0].Container)
		assert.Equal(t, testContainer.ID, response.Devcontainers[0].Container.ID)
	})

	t.Run("DuringInjection", func(t *testing.T) {
		t.Parallel()
@@ -263,11 +263,14 @@ func (d *devcontainerCLI) Up(ctx context.Context, workspaceFolder, configPath st
	}

	if err := cmd.Run(); err != nil {
-		_, err2 := parseDevcontainerCLILastLine[devcontainerCLIResult](ctx, logger, stdoutBuf.Bytes())
+		result, err2 := parseDevcontainerCLILastLine[devcontainerCLIResult](ctx, logger, stdoutBuf.Bytes())
		if err2 != nil {
			err = errors.Join(err, err2)
		}
-		return "", err
+		// Return the container ID if available, even if there was an error.
+		// This can happen if the container was created successfully but a
+		// lifecycle script (e.g. postCreateCommand) failed.
+		return result.ContainerID, err
	}

	result, err := parseDevcontainerCLILastLine[devcontainerCLIResult](ctx, logger, stdoutBuf.Bytes())
@@ -275,6 +278,13 @@ func (d *devcontainerCLI) Up(ctx context.Context, workspaceFolder, configPath st
		return "", err
	}

+	// Check if the result indicates an error (e.g. lifecycle script failure)
+	// but still has a container ID, allowing the caller to potentially
+	// continue with the container that was created.
+	if err := result.Err(); err != nil {
+		return result.ContainerID, err
+	}
+
	return result.ContainerID, nil
}

@@ -394,7 +404,10 @@ func parseDevcontainerCLILastLine[T any](ctx context.Context, logger slog.Logger
type devcontainerCLIResult struct {
	Outcome string `json:"outcome"` // "error", "success".

-	// The following fields are set if outcome is success.
+	// The following fields are typically set if outcome is success, but
+	// ContainerID may also be present when outcome is error if the
+	// container was created but a lifecycle script (e.g. postCreateCommand)
+	// failed.
	ContainerID           string `json:"containerId"`
	RemoteUser            string `json:"remoteUser"`
	RemoteWorkspaceFolder string `json:"remoteWorkspaceFolder"`
@@ -404,18 +417,6 @@ type devcontainerCLIResult struct {
	Description string `json:"description"`
}

-func (r *devcontainerCLIResult) UnmarshalJSON(data []byte) error {
-	type wrapperResult devcontainerCLIResult
-
-	var wrappedResult wrapperResult
-	if err := json.Unmarshal(data, &wrappedResult); err != nil {
-		return err
-	}
-
-	*r = devcontainerCLIResult(wrappedResult)
-	return r.Err()
-}
-
func (r devcontainerCLIResult) Err() error {
	if r.Outcome == "success" {
		return nil
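For orientation: the devcontainer CLI writes JSON log lines to stdout, and the final line carries the result object. A minimal sketch of that parse-the-last-line idea (our own simplified version; the real `parseDevcontainerCLILastLine` lives in this package and differs):

```go
package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
)

// lastJSONLine scans CLI output backwards and unmarshals the last
// non-empty line that parses as JSON into T.
func lastJSONLine[T any](output []byte) (T, error) {
	var result T
	lines := bytes.Split(bytes.TrimSpace(output), []byte("\n"))
	for i := len(lines) - 1; i >= 0; i-- {
		line := bytes.TrimSpace(lines[i])
		if len(line) == 0 {
			continue
		}
		if err := json.Unmarshal(line, &result); err == nil {
			return result, nil
		}
	}
	return result, errors.New("no parsable JSON line found")
}

type upResult struct {
	Outcome     string `json:"outcome"`
	ContainerID string `json:"containerId"`
}

func main() {
	out := []byte("{\"progress\":\"building\"}\n{\"outcome\":\"error\",\"containerId\":\"abc123\"}")
	r, err := lastJSONLine[upResult](out)
	// Even with outcome "error", the container ID survives, which is
	// exactly what the Up changes above rely on.
	fmt.Println(r.Outcome, r.ContainerID, err) // error abc123 <nil>
}
```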
@@ -42,56 +42,63 @@ func TestDevcontainerCLI_ArgsAndParsing(t *testing.T) {
	t.Parallel()

	tests := []struct {
-		name      string
-		logFile   string
-		workspace string
-		config    string
-		opts      []agentcontainers.DevcontainerCLIUpOptions
-		wantArgs  string
-		wantError bool
+		name            string
+		logFile         string
+		workspace       string
+		config          string
+		opts            []agentcontainers.DevcontainerCLIUpOptions
+		wantArgs        string
+		wantError       bool
+		wantContainerID bool // If true, expect a container ID even when wantError is true.
	}{
		{
-			name:      "success",
-			logFile:   "up.log",
-			workspace: "/test/workspace",
-			wantArgs:  "up --log-format json --workspace-folder /test/workspace",
-			wantError: false,
+			name:            "success",
+			logFile:         "up.log",
+			workspace:       "/test/workspace",
+			wantArgs:        "up --log-format json --workspace-folder /test/workspace",
+			wantError:       false,
+			wantContainerID: true,
		},
		{
-			name:      "success with config",
-			logFile:   "up.log",
-			workspace: "/test/workspace",
-			config:    "/test/config.json",
-			wantArgs:  "up --log-format json --workspace-folder /test/workspace --config /test/config.json",
-			wantError: false,
+			name:            "success with config",
+			logFile:         "up.log",
+			workspace:       "/test/workspace",
+			config:          "/test/config.json",
+			wantArgs:        "up --log-format json --workspace-folder /test/workspace --config /test/config.json",
+			wantError:       false,
+			wantContainerID: true,
		},
		{
-			name:      "already exists",
-			logFile:   "up-already-exists.log",
-			workspace: "/test/workspace",
-			wantArgs:  "up --log-format json --workspace-folder /test/workspace",
-			wantError: false,
+			name:            "already exists",
+			logFile:         "up-already-exists.log",
+			workspace:       "/test/workspace",
+			wantArgs:        "up --log-format json --workspace-folder /test/workspace",
+			wantError:       false,
+			wantContainerID: true,
		},
		{
-			name:      "docker error",
-			logFile:   "up-error-docker.log",
-			workspace: "/test/workspace",
-			wantArgs:  "up --log-format json --workspace-folder /test/workspace",
-			wantError: true,
+			name:            "docker error",
+			logFile:         "up-error-docker.log",
+			workspace:       "/test/workspace",
+			wantArgs:        "up --log-format json --workspace-folder /test/workspace",
+			wantError:       true,
+			wantContainerID: false,
		},
		{
-			name:      "bad outcome",
-			logFile:   "up-error-bad-outcome.log",
-			workspace: "/test/workspace",
-			wantArgs:  "up --log-format json --workspace-folder /test/workspace",
-			wantError: true,
+			name:            "bad outcome",
+			logFile:         "up-error-bad-outcome.log",
+			workspace:       "/test/workspace",
+			wantArgs:        "up --log-format json --workspace-folder /test/workspace",
+			wantError:       true,
+			wantContainerID: false,
		},
		{
-			name:      "does not exist",
-			logFile:   "up-error-does-not-exist.log",
-			workspace: "/test/workspace",
-			wantArgs:  "up --log-format json --workspace-folder /test/workspace",
-			wantError: true,
+			name:            "does not exist",
+			logFile:         "up-error-does-not-exist.log",
+			workspace:       "/test/workspace",
+			wantArgs:        "up --log-format json --workspace-folder /test/workspace",
+			wantError:       true,
+			wantContainerID: false,
		},
		{
			name: "with remove existing container",
@@ -100,8 +107,21 @@ func TestDevcontainerCLI_ArgsAndParsing(t *testing.T) {
			opts: []agentcontainers.DevcontainerCLIUpOptions{
				agentcontainers.WithRemoveExistingContainer(),
			},
-			wantArgs:  "up --log-format json --workspace-folder /test/workspace --remove-existing-container",
-			wantError: false,
+			wantArgs:        "up --log-format json --workspace-folder /test/workspace --remove-existing-container",
+			wantError:       false,
+			wantContainerID: true,
		},
+		{
+			// This test verifies that when a lifecycle script like
+			// postCreateCommand fails, the CLI returns both an error
+			// and a container ID. The caller can then proceed with
+			// agent injection into the created container.
+			name:            "lifecycle script failure with container",
+			logFile:         "up-error-lifecycle-script.log",
+			workspace:       "/test/workspace",
+			wantArgs:        "up --log-format json --workspace-folder /test/workspace",
+			wantError:       true,
+			wantContainerID: true,
+		},
	}

@@ -122,10 +142,13 @@ func TestDevcontainerCLI_ArgsAndParsing(t *testing.T) {
			containerID, err := dccli.Up(ctx, tt.workspace, tt.config, tt.opts...)
			if tt.wantError {
				assert.Error(t, err, "want error")
-				assert.Empty(t, containerID, "expected empty container ID")
			} else {
				assert.NoError(t, err, "want no error")
			}
+			if tt.wantContainerID {
+				assert.NotEmpty(t, containerID, "expected non-empty container ID")
+			} else {
+				assert.Empty(t, containerID, "expected empty container ID")
+			}
		})
	}
Generated · Vendored · +147
File diff suppressed because one or more lines are too long
@@ -391,10 +391,19 @@ func (s *Server) sessionHandler(session ssh.Session) {
	env := session.Environ()
	magicType, magicTypeRaw, env := extractMagicSessionType(env)

+	// It's not safe to assume RemoteAddr() returns a non-nil value. slog.F usage is fine because it correctly
+	// handles nil.
+	// c.f. https://github.com/coder/internal/issues/1143
+	remoteAddr := session.RemoteAddr()
+	remoteAddrString := ""
+	if remoteAddr != nil {
+		remoteAddrString = remoteAddr.String()
+	}
+
	if !s.trackSession(session, true) {
		reason := "unable to accept new session, server is closing"
		// Report connection attempt even if we couldn't accept it.
-		disconnected := s.config.ReportConnection(id, magicType, session.RemoteAddr().String())
+		disconnected := s.config.ReportConnection(id, magicType, remoteAddrString)
		defer disconnected(1, reason)

		logger.Info(ctx, reason)
@@ -429,7 +438,7 @@ func (s *Server) sessionHandler(session ssh.Session) {
	scr := &sessionCloseTracker{Session: session}
	session = scr

-	disconnected := s.config.ReportConnection(id, magicType, session.RemoteAddr().String())
+	disconnected := s.config.ReportConnection(id, magicType, remoteAddrString)
	defer func() {
		disconnected(scr.exitCode(), reason)
	}()
@@ -176,7 +176,7 @@ func (x *x11Forwarder) listenForConnections(
	var originPort uint32

	if tcpConn, ok := conn.(*net.TCPConn); ok {
-		if tcpAddr, ok := tcpConn.LocalAddr().(*net.TCPAddr); ok {
+		if tcpAddr, ok := tcpConn.LocalAddr().(*net.TCPAddr); ok && tcpAddr != nil {
			originAddr = tcpAddr.IP.String()
			// #nosec G115 - Safe conversion as TCP port numbers are within uint32 range (0-65535)
			originPort = uint32(tcpAddr.Port)
@@ -74,11 +74,21 @@ func (s *Server) Serve(ctx, hardCtx context.Context, l net.Listener) (retErr err
			break
		}
		clog := s.logger.With(
-			slog.F("remote", conn.RemoteAddr().String()),
-			slog.F("local", conn.LocalAddr().String()))
+			slog.F("remote", conn.RemoteAddr()),
+			slog.F("local", conn.LocalAddr()))
		clog.Info(ctx, "accepted conn")

+		// It's not safe to assume RemoteAddr() returns a non-nil value. slog.F usage is fine because it correctly
+		// handles nil.
+		// c.f. https://github.com/coder/internal/issues/1143
+		remoteAddr := conn.RemoteAddr()
+		remoteAddrString := ""
+		if remoteAddr != nil {
+			remoteAddrString = remoteAddr.String()
+		}
+
		wg.Add(1)
-		disconnected := s.reportConnection(uuid.New(), conn.RemoteAddr().String())
+		disconnected := s.reportConnection(uuid.New(), remoteAddrString)
		closed := make(chan struct{})
		go func() {
			defer wg.Done()
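The recurring pattern in these hunks is: guard a possibly-nil net.Addr before calling .String(). A minimal sketch of that guard in isolation (the helper name is ours, not from the diff):

```go
package main

import (
	"fmt"
	"net"
)

// addrString returns addr.String(), or "" when addr is nil. Calling a
// method on a nil net.Addr interface value would panic, and some
// net.Conn implementations can return nil from RemoteAddr().
func addrString(addr net.Addr) string {
	if addr == nil {
		return ""
	}
	return addr.String()
}

func main() {
	var nilAddr net.Addr
	fmt.Printf("%q\n", addrString(nilAddr)) // "" instead of a panic

	tcp := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 22}
	fmt.Printf("%q\n", addrString(tcp)) // "127.0.0.1:22"
}
```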
@@ -106,6 +106,9 @@ var _ OutputFormat = &tableFormat{}
//
// defaultColumns is optional and specifies the default columns to display. If
// not specified, all columns are displayed by default.
+//
+// If the data is empty, an empty string is returned. Callers should check for
+// this and provide an appropriate message to the user.
func TableFormat(out any, defaultColumns []string) OutputFormat {
	v := reflect.Indirect(reflect.ValueOf(out))
	if v.Kind() != reflect.Slice {
@@ -180,6 +180,12 @@ func DisplayTable(out any, sort string, filterColumns []string) (string, error)
func renderTable(out any, sort string, headers table.Row, filterColumns []string) (string, error) {
	v := reflect.Indirect(reflect.ValueOf(out))

+	// Return empty string for empty data. Callers should check for this
+	// and provide an appropriate message to the user.
+	if v.Kind() == reflect.Slice && v.Len() == 0 {
+		return "", nil
+	}
+
	headers = filterHeaders(headers, filterColumns)
	columnConfigs := createColumnConfigs(headers, filterColumns)
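renderTable now returns an empty string for empty data, and the CLI hunks that follow all consume it the same way: format first, then treat "" as "no rows". A minimal, self-contained sketch of that caller pattern (names simplified; not the actual coder CLI code):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

type row struct{ Name string }

// format mimics cliui table formatting: it returns "" for empty data,
// mirroring the renderTable change above.
func format(rows []row) string {
	if len(rows) == 0 {
		return ""
	}
	var b strings.Builder
	b.WriteString("NAME")
	for _, r := range rows {
		b.WriteString("\n" + r.Name)
	}
	return b.String()
}

func list(stdout, stderr io.Writer, rows []row) error {
	out := format(rows)
	// An empty string means no rows: print a friendly message to
	// stderr instead of an empty table on stdout.
	if out == "" {
		fmt.Fprintln(stderr, "No things found.")
		return nil
	}
	_, err := fmt.Fprintln(stdout, out)
	return err
}

func main() {
	_ = list(os.Stdout, os.Stderr, nil)                  // -> "No things found."
	_ = list(os.Stdout, os.Stderr, []row{{Name: "dev"}}) // -> table
}
```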
@@ -472,6 +472,15 @@ alice 1
		require.NoError(t, err)
		compareTables(t, expected, out)
	})

+	t.Run("Empty", func(t *testing.T) {
+		t.Parallel()
+
+		var in []tableTest4
+		out, err := cliui.DisplayTable(in, "", nil)
+		require.NoError(t, err)
+		require.Empty(t, out)
+	})
}

// compareTables normalizes the incoming table lines
@@ -90,7 +90,6 @@ func TestExpRpty(t *testing.T) {
	wantLabel := "coder.devcontainers.TestExpRpty.Container"

	client, workspace, agentToken := setupWorkspaceForAgent(t)
-	ctx := testutil.Context(t, testutil.WaitLong)
	pool, err := dockertest.NewPool("")
	require.NoError(t, err, "Could not connect to docker")
	ct, err := pool.RunWithOptions(&dockertest.RunOptions{
@@ -128,14 +127,15 @@ func TestExpRpty(t *testing.T) {
	clitest.SetupConfig(t, client, root)
	pty := ptytest.New(t).Attach(inv)

+	ctx := testutil.Context(t, testutil.WaitLong)
	cmdDone := tGo(t, func() {
		err := inv.WithContext(ctx).Run()
		assert.NoError(t, err)
	})

-	pty.ExpectMatch(" #")
+	pty.ExpectMatchContext(ctx, " #")
	pty.WriteLine("hostname")
-	pty.ExpectMatch(ct.Container.Config.Hostname)
+	pty.ExpectMatchContext(ctx, ct.Container.Config.Hostname)
	pty.WriteLine("exit")
	<-cmdDone
})
+10 -8
@@ -1559,6 +1559,15 @@ func (r *RootCmd) scaletestDashboard() *serpent.Command {
	if err != nil {
		return xerrors.Errorf("create tracer provider: %w", err)
	}
+	tracer := tracerProvider.Tracer(scaletestTracerName)
+	outputs, err := output.parse()
+	if err != nil {
+		return xerrors.Errorf("could not parse --output flags")
+	}
+	reg := prometheus.NewRegistry()
+	prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus")
+	defer prometheusSrvClose()
+
	defer func() {
		// Allow time for traces to flush even if command context is
		// canceled. This is a no-op if tracing is not enabled.
@@ -1570,14 +1579,7 @@ func (r *RootCmd) scaletestDashboard() *serpent.Command {
		_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", prometheusFlags.Wait)
		<-time.After(prometheusFlags.Wait)
	}()
-	tracer := tracerProvider.Tracer(scaletestTracerName)
-	outputs, err := output.parse()
-	if err != nil {
-		return xerrors.Errorf("could not parse --output flags")
-	}
-	reg := prometheus.NewRegistry()
-	prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus")
-	defer prometheusSrvClose()

	metrics := dashboard.NewMetrics(reg)

	th := harness.NewTestHarness(strategy.toStrategy(), cleanupStrategy.toStrategy())
@@ -84,14 +84,6 @@ func (r *RootCmd) scaletestPrebuilds() *serpent.Command {
	if err != nil {
		return xerrors.Errorf("create tracer provider: %w", err)
	}
-	defer func() {
-		_, _ = fmt.Fprintln(inv.Stderr, "\nUploading traces...")
-		if err := closeTracing(ctx); err != nil {
-			_, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err)
-		}
-		_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", prometheusFlags.Wait)
-		<-time.After(prometheusFlags.Wait)
-	}()
	tracer := tracerProvider.Tracer(scaletestTracerName)

	reg := prometheus.NewRegistry()
@@ -101,6 +93,15 @@ func (r *RootCmd) scaletestPrebuilds() *serpent.Command {
	prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus")
	defer prometheusSrvClose()

+	defer func() {
+		_, _ = fmt.Fprintln(inv.Stderr, "\nUploading traces...")
+		if err := closeTracing(ctx); err != nil {
+			_, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err)
+		}
+		_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", prometheusFlags.Wait)
+		<-time.After(prometheusFlags.Wait)
+	}()
+
	err = client.PutPrebuildsSettings(ctx, codersdk.PrebuildsSettings{
		ReconciliationPaused: true,
	})
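A plausible reading of the reordering in the two scaletest hunks above: Go defers run last-in, first-out, so registering the trace-flush/scrape-wait defer after `defer prometheusSrvClose()` lets the wait complete while the Prometheus endpoint is still serving. A minimal sketch of that ordering:

```go
package main

import "fmt"

func main() {
	// Registered first, runs last: the metrics server stays up
	// until everything registered after it has finished.
	defer fmt.Println("3: prometheus server closes")
	// Registered second, runs first among the defers.
	defer fmt.Println("2: flush traces, wait for final scrape")
	fmt.Println("1: command body runs")
}
```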
+6 -6
@@ -139,7 +139,12 @@ func (r *RootCmd) list() *serpent.Command {
		return err
	}

-	if len(res) == 0 && formatter.FormatID() != cliui.JSONFormat().ID() {
+	out, err := formatter.Format(inv.Context(), res)
+	if err != nil {
+		return err
+	}
+
+	if out == "" {
		pretty.Fprintf(inv.Stderr, cliui.DefaultStyles.Prompt, "No workspaces found! Create one:\n")
		_, _ = fmt.Fprintln(inv.Stderr)
		_, _ = fmt.Fprintln(inv.Stderr, "  "+pretty.Sprint(cliui.DefaultStyles.Code, "coder create <name>"))
@@ -147,11 +152,6 @@ func (r *RootCmd) list() *serpent.Command {
		return nil
	}

-	out, err := formatter.Format(inv.Context(), res)
-	if err != nil {
-		return err
-	}
-
	_, err = fmt.Fprintln(inv.Stdout, out)
	return err
},
@@ -170,6 +170,11 @@ func (r *RootCmd) listOrganizationMembers(orgContext *OrganizationContext) *serp
		return err
	}

+	if out == "" {
+		cliui.Infof(inv.Stderr, "No organization members found.")
+		return nil
+	}
+
	_, err = fmt.Fprintln(inv.Stdout, out)
	return err
},
@@ -92,6 +92,11 @@ func (r *RootCmd) showOrganizationRoles(orgContext *OrganizationContext) *serpen
		return err
	}

+	if out == "" {
+		cliui.Infof(inv.Stderr, "No organization roles found.")
+		return nil
+	}
+
	_, err = fmt.Fprintln(inv.Stdout, out)
	return err
},
@@ -110,6 +110,11 @@ func (r *RootCmd) provisionerJobsList() *serpent.Command {
		return xerrors.Errorf("display provisioner daemons: %w", err)
	}

+	if out == "" {
+		cliui.Infof(inv.Stderr, "No provisioner jobs found.")
+		return nil
+	}
+
	_, _ = fmt.Fprintln(inv.Stdout, out)

	return nil
+5 -5
@@ -74,11 +74,6 @@ func (r *RootCmd) provisionerList() *serpent.Command {
		return xerrors.Errorf("list provisioner daemons: %w", err)
	}

-	if len(daemons) == 0 {
-		_, _ = fmt.Fprintln(inv.Stdout, "No provisioner daemons found")
-		return nil
-	}
-
	var rows []provisionerDaemonRow
	for _, daemon := range daemons {
		rows = append(rows, provisionerDaemonRow{
@@ -92,6 +87,11 @@ func (r *RootCmd) provisionerList() *serpent.Command {
		return xerrors.Errorf("display provisioner daemons: %w", err)
	}

+	if out == "" {
+		cliui.Infof(inv.Stderr, "No provisioner daemons found.")
+		return nil
+	}
+
	_, _ = fmt.Fprintln(inv.Stdout, out)

	return nil
@@ -129,6 +129,11 @@ func (r *RootCmd) scheduleShow() *serpent.Command {
		return err
	}

+	if out == "" {
+		cliui.Infof(inv.Stderr, "No schedules found.")
+		return nil
+	}
+
	_, err = fmt.Fprintln(inv.Stdout, out)
	return err
},
+3 -3
@@ -2052,7 +2052,6 @@ func TestSSH_Container(t *testing.T) {
	t.Parallel()

	client, workspace, agentToken := setupWorkspaceForAgent(t)
-	ctx := testutil.Context(t, testutil.WaitLong)
	pool, err := dockertest.NewPool("")
	require.NoError(t, err, "Could not connect to docker")
	ct, err := pool.RunWithOptions(&dockertest.RunOptions{
@@ -2087,14 +2086,15 @@ func TestSSH_Container(t *testing.T) {
	clitest.SetupConfig(t, client, root)
	ptty := ptytest.New(t).Attach(inv)

+	ctx := testutil.Context(t, testutil.WaitLong)
	cmdDone := tGo(t, func() {
		err := inv.WithContext(ctx).Run()
		assert.NoError(t, err)
	})

-	ptty.ExpectMatch(" #")
+	ptty.ExpectMatchContext(ctx, " #")
	ptty.WriteLine("hostname")
-	ptty.ExpectMatch(ct.Container.Config.Hostname)
+	ptty.ExpectMatchContext(ctx, ct.Container.Config.Hostname)
	ptty.WriteLine("exit")
	<-cmdDone
})
+4 -6
@@ -157,12 +157,6 @@ func (r *RootCmd) taskList() *serpent.Command {
		return nil
	}

-	// If no rows and not JSON, show a friendly message.
-	if len(tasks) == 0 && formatter.FormatID() != cliui.JSONFormat().ID() {
-		_, _ = fmt.Fprintln(inv.Stderr, "No tasks found.")
-		return nil
-	}
-
	rows := make([]taskListRow, len(tasks))
	now := time.Now()
	for i := range tasks {
@@ -173,6 +167,10 @@ func (r *RootCmd) taskList() *serpent.Command {
	if err != nil {
		return xerrors.Errorf("format tasks: %w", err)
	}
+	if out == "" {
+		cliui.Infof(inv.Stderr, "No tasks found.")
+		return nil
+	}
	_, _ = fmt.Fprintln(inv.Stdout, out)
	return nil
},
@@ -59,6 +59,11 @@ func (r *RootCmd) taskLogs() *serpent.Command {
		return xerrors.Errorf("format task logs: %w", err)
	}

+	if out == "" {
+		cliui.Infof(inv.Stderr, "No task logs found.")
+		return nil
+	}
+
	_, _ = fmt.Fprintln(inv.Stdout, out)
	return nil
},
+6 -6
@@ -30,18 +30,18 @@ func (r *RootCmd) templateList() *serpent.Command {
		return err
	}

-	if len(templates) == 0 {
-		_, _ = fmt.Fprintf(inv.Stderr, "%s No templates found! Create one:\n\n", Caret)
-		_, _ = fmt.Fprintln(inv.Stderr, color.HiMagentaString("  $ coder templates push <directory>\n"))
-		return nil
-	}
-
	rows := templatesToRows(templates...)
	out, err := formatter.Format(inv.Context(), rows)
	if err != nil {
		return err
	}

+	if out == "" {
+		_, _ = fmt.Fprintf(inv.Stderr, "%s No templates found! Create one:\n\n", Caret)
+		_, _ = fmt.Fprintln(inv.Stderr, color.HiMagentaString("  $ coder templates push <directory>\n"))
+		return nil
+	}
+
	_, err = fmt.Fprintln(inv.Stdout, out)
	return err
},
@@ -106,7 +106,7 @@ func (r *RootCmd) templatePresetsList() *serpent.Command {
	if len(presets) == 0 {
		cliui.Infof(
			inv.Stdout,
-			"No presets found for template %q and template-version %q.\n", template.Name, version.Name,
+			"No presets found for template %q and template-version %q.", template.Name, version.Name,
		)
		return nil
	}
@@ -115,7 +115,7 @@ func (r *RootCmd) templatePresetsList() *serpent.Command {
	if formatter.FormatID() == "table" {
		cliui.Infof(
			inv.Stdout,
-			"Showing presets for template %q and template version %q.\n", template.Name, version.Name,
+			"Showing presets for template %q and template version %q.", template.Name, version.Name,
		)
	}
	rows := templatePresetsToRows(presets...)
@@ -124,6 +124,11 @@ func (r *RootCmd) templatePresetsList() *serpent.Command {
		return xerrors.Errorf("render table: %w", err)
	}

+	if out == "" {
+		cliui.Infof(inv.Stderr, "No template presets found.")
+		return nil
+	}
+
	_, err = fmt.Fprintln(inv.Stdout, out)
	return err
},
@@ -121,6 +121,11 @@ func (r *RootCmd) templateVersionsList() *serpent.Command {
		return xerrors.Errorf("render table: %w", err)
	}

+	if out == "" {
+		cliui.Infof(inv.Stderr, "No template versions found.")
+		return nil
+	}
+
	_, err = fmt.Fprintln(inv.Stdout, out)
	return err
},
+38
@@ -118,12 +118,23 @@ AI BRIDGE OPTIONS:
          requests (requires the "oauth2" and "mcp-server-http" experiments to
          be enabled).

+     --aibridge-max-concurrency int, $CODER_AIBRIDGE_MAX_CONCURRENCY (default: 0)
+         Maximum number of concurrent AI Bridge requests. Set to 0 to disable
+         (unlimited).
+
      --aibridge-openai-base-url string, $CODER_AIBRIDGE_OPENAI_BASE_URL (default: https://api.openai.com/v1/)
          The base URL of the OpenAI API.

      --aibridge-openai-key string, $CODER_AIBRIDGE_OPENAI_KEY
          The key to authenticate against the OpenAI API.

+     --aibridge-rate-limit int, $CODER_AIBRIDGE_RATE_LIMIT (default: 0)
+         Maximum number of AI Bridge requests per rate window. Set to 0 to
+         disable rate limiting.
+
+     --aibridge-rate-window duration, $CODER_AIBRIDGE_RATE_WINDOW (default: 1m)
+         Duration of the rate limiting window for AI Bridge requests.
+
CLIENT OPTIONS:
These options change the behavior of how clients interact with the Coder.
Clients include the Coder CLI, Coder Desktop, IDE extensions, and the web UI.
@@ -696,6 +707,33 @@ updating, and deleting workspace resources.
      Number of provisioner daemons to create on start. If builds are stuck
      in queued state for a long time, consider increasing this.

+RETENTION OPTIONS:
+Configure data retention policies for various database tables. Retention
+policies automatically purge old data to reduce database size and improve
+performance. Setting a retention duration to 0 disables automatic purging for
+that data type.
+
+     --api-keys-retention duration, $CODER_API_KEYS_RETENTION (default: 7d)
+         How long expired API keys are retained before being deleted. Keeping
+         expired keys allows the backend to return a more helpful error when a
+         user tries to use an expired key. Set to 0 to disable automatic
+         deletion of expired keys.
+
+     --audit-logs-retention duration, $CODER_AUDIT_LOGS_RETENTION (default: 0)
+         How long audit log entries are retained. Set to 0 to disable (keep
+         indefinitely). We advise keeping audit logs for at least a year, and
+         in accordance with your compliance requirements.
+
+     --connection-logs-retention duration, $CODER_CONNECTION_LOGS_RETENTION (default: 0)
+         How long connection log entries are retained. Set to 0 to disable
+         (keep indefinitely).
+
+     --workspace-agent-logs-retention duration, $CODER_WORKSPACE_AGENT_LOGS_RETENTION (default: 7d)
+         How long workspace agent logs are retained. Logs from non-latest
+         builds are deleted if the agent hasn't connected within this period.
+         Logs from the latest build are always retained. Set to 0 to disable
+         automatic deletion.
+
TELEMETRY OPTIONS:
Telemetry is critical to our ability to improve Coder. We strip all personal
information before sending data to our servers. Please only disable telemetry
+35 -13
@@ -720,25 +720,12 @@ aibridge:
  # The base URL of the OpenAI API.
  # (default: https://api.openai.com/v1/, type: string)
  openai_base_url: https://api.openai.com/v1/
  # The key to authenticate against the OpenAI API.
  # (default: <unset>, type: string)
  openai_key: ""
  # The base URL of the Anthropic API.
  # (default: https://api.anthropic.com/, type: string)
  anthropic_base_url: https://api.anthropic.com/
  # The key to authenticate against the Anthropic API.
  # (default: <unset>, type: string)
  anthropic_key: ""
- # The AWS Bedrock API region.
- # (default: <unset>, type: string)
- bedrock_region: ""
- # The access key to authenticate against the AWS Bedrock API.
- # (default: <unset>, type: string)
- bedrock_access_key: ""
- # The access key secret to use with the access key to authenticate against the AWS
- # Bedrock API.
- # (default: <unset>, type: string)
- bedrock_access_key_secret: ""
- # The model to use when making requests to the AWS Bedrock API.
- # (default: global.anthropic.claude-sonnet-4-5-20250929-v1:0, type: string)
- bedrock_model: global.anthropic.claude-sonnet-4-5-20250929-v1:0
@@ -755,3 +742,38 @@ aibridge:
  # (token, prompt, tool use).
  # (default: 60d, type: duration)
  retention: 1440h0m0s
+ # Maximum number of concurrent AI Bridge requests. Set to 0 to disable
+ # (unlimited).
+ # (default: 0, type: int)
+ max_concurrency: 0
+ # Maximum number of AI Bridge requests per rate window. Set to 0 to disable rate
+ # limiting.
+ # (default: 0, type: int)
+ rate_limit: 0
+ # Duration of the rate limiting window for AI Bridge requests.
+ # (default: 1m, type: duration)
+ rate_window: 1m0s
+# Configure data retention policies for various database tables. Retention
+# policies automatically purge old data to reduce database size and improve
+# performance. Setting a retention duration to 0 disables automatic purging for
+# that data type.
+retention:
+  # How long audit log entries are retained. Set to 0 to disable (keep
+  # indefinitely). We advise keeping audit logs for at least a year, and in
+  # accordance with your compliance requirements.
+  # (default: 0, type: duration)
+  audit_logs: 0s
+  # How long connection log entries are retained. Set to 0 to disable (keep
+  # indefinitely).
+  # (default: 0, type: duration)
+  connection_logs: 0s
+  # How long expired API keys are retained before being deleted. Keeping expired
+  # keys allows the backend to return a more helpful error when a user tries to use
+  # an expired key. Set to 0 to disable automatic deletion of expired keys.
+  # (default: 7d, type: duration)
+  api_keys: 168h0m0s
+  # How long workspace agent logs are retained. Logs from non-latest builds are
+  # deleted if the agent hasn't connected within this period. Logs from the latest
+  # build are always retained. Set to 0 to disable automatic deletion.
+  # (default: 7d, type: duration)
+  workspace_agent_logs: 168h0m0s
+5 -7
@@ -246,13 +246,6 @@ func (r *RootCmd) listTokens() *serpent.Command {
		return xerrors.Errorf("list tokens: %w", err)
	}

-	if len(tokens) == 0 {
-		cliui.Infof(
-			inv.Stdout,
-			"No tokens found.\n",
-		)
-	}
-
	displayTokens = make([]tokenListRow, len(tokens))

	for i, token := range tokens {
@@ -264,6 +257,11 @@ func (r *RootCmd) listTokens() *serpent.Command {
		return err
	}

+	if out == "" {
+		cliui.Info(inv.Stderr, "No tokens found.")
+		return nil
+	}
+
	_, err = fmt.Fprintln(inv.Stdout, out)
	return err
},
@@ -34,6 +34,7 @@ func TestTokens(t *testing.T) {
	clitest.SetupConfig(t, client, root)
	buf := new(bytes.Buffer)
	inv.Stdout = buf
+	inv.Stderr = buf
	err := inv.WithContext(ctx).Run()
	require.NoError(t, err)
	res := buf.String()
@@ -58,6 +58,11 @@ func (r *RootCmd) userList() *serpent.Command {
		return err
	}

+	if out == "" {
+		cliui.Infof(inv.Stderr, "No users found.")
+		return nil
+	}
+
	_, err = fmt.Fprintln(inv.Stdout, out)
	return err
},
@@ -1680,8 +1680,8 @@ func TestTasksNotification(t *testing.T) {
	require.NoError(t, err)
	require.Len(t, workspaceAgent.Apps, 1)
	require.GreaterOrEqual(t, len(workspaceAgent.Apps[0].Statuses), 1)
-	latestStatusIndex := len(workspaceAgent.Apps[0].Statuses) - 1
-	require.Equal(t, tc.newAppStatus, workspaceAgent.Apps[0].Statuses[latestStatusIndex].State)
+	// Statuses are ordered by created_at DESC, so the first element is the latest.
+	require.Equal(t, tc.newAppStatus, workspaceAgent.Apps[0].Statuses[0].State)

	if tc.isNotificationSent {
		// Then: A notification is sent to the workspace owner (memberUser)
Generated · +36 -2
@@ -1800,7 +1800,7 @@ const docTemplate = `{
	"application/json"
],
"tags": [
-	"Organizations"
+	"Enterprise"
],
"summary": "Add new license",
"operationId": "add-new-license",
@@ -1836,7 +1836,7 @@ const docTemplate = `{
	"application/json"
],
"tags": [
-	"Organizations"
+	"Enterprise"
],
"summary": "Update license entitlements",
"operationId": "update-license-entitlements",
@@ -11877,9 +11877,19 @@ const docTemplate = `{
"inject_coder_mcp_tools": {
	"type": "boolean"
},
+"max_concurrency": {
+	"description": "Overload protection settings.",
+	"type": "integer"
+},
"openai": {
	"$ref": "#/definitions/codersdk.AIBridgeOpenAIConfig"
},
+"rate_limit": {
+	"type": "integer"
+},
+"rate_window": {
+	"type": "integer"
+},
"retention": {
	"type": "integer"
}
@@ -14302,6 +14312,9 @@ const docTemplate = `{
"redirect_to_access_url": {
	"type": "boolean"
},
+"retention": {
+	"$ref": "#/definitions/codersdk.RetentionConfig"
+},
"scim_api_key": {
	"type": "string"
},
@@ -17728,6 +17741,27 @@ const docTemplate = `{
		}
	}
},
+"codersdk.RetentionConfig": {
+	"type": "object",
+	"properties": {
+		"api_keys": {
+			"description": "APIKeys controls how long expired API keys are retained before being deleted.\nKeys are only deleted if they have been expired for at least this duration.\nDefaults to 7 days to preserve existing behavior.",
+			"type": "integer"
+		},
+		"audit_logs": {
+			"description": "AuditLogs controls how long audit log entries are retained.\nSet to 0 to disable (keep indefinitely).",
+			"type": "integer"
+		},
+		"connection_logs": {
+			"description": "ConnectionLogs controls how long connection log entries are retained.\nSet to 0 to disable (keep indefinitely).",
+			"type": "integer"
+		},
+		"workspace_agent_logs": {
+			"description": "WorkspaceAgentLogs controls how long workspace agent logs are retained.\nLogs are deleted if the agent hasn't connected within this period.\nLogs from the latest build are always retained regardless of age.\nDefaults to 7 days to preserve existing behavior.",
+			"type": "integer"
+		}
+	}
+},
"codersdk.Role": {
	"type": "object",
	"properties": {
Generated  +36 -2
@@ -1570,7 +1570,7 @@
        ],
        "consumes": ["application/json"],
        "produces": ["application/json"],
-       "tags": ["Organizations"],
+       "tags": ["Enterprise"],
        "summary": "Add new license",
        "operationId": "add-new-license",
        "parameters": [
@@ -1602,7 +1602,7 @@
            }
        ],
        "produces": ["application/json"],
-       "tags": ["Organizations"],
+       "tags": ["Enterprise"],
        "summary": "Update license entitlements",
        "operationId": "update-license-entitlements",
        "responses": {
@@ -10543,9 +10543,19 @@
        "inject_coder_mcp_tools": {
            "type": "boolean"
        },
+       "max_concurrency": {
+           "description": "Overload protection settings.",
+           "type": "integer"
+       },
        "openai": {
            "$ref": "#/definitions/codersdk.AIBridgeOpenAIConfig"
        },
+       "rate_limit": {
+           "type": "integer"
+       },
+       "rate_window": {
+           "type": "integer"
+       },
        "retention": {
            "type": "integer"
        }
@@ -12886,6 +12896,9 @@
        "redirect_to_access_url": {
            "type": "boolean"
        },
+       "retention": {
+           "$ref": "#/definitions/codersdk.RetentionConfig"
+       },
        "scim_api_key": {
            "type": "string"
        },
@@ -16190,6 +16203,27 @@
            }
        }
    },
+   "codersdk.RetentionConfig": {
+       "type": "object",
+       "properties": {
+           "api_keys": {
+               "description": "APIKeys controls how long expired API keys are retained before being deleted.\nKeys are only deleted if they have been expired for at least this duration.\nDefaults to 7 days to preserve existing behavior.",
+               "type": "integer"
+           },
+           "audit_logs": {
+               "description": "AuditLogs controls how long audit log entries are retained.\nSet to 0 to disable (keep indefinitely).",
+               "type": "integer"
+           },
+           "connection_logs": {
+               "description": "ConnectionLogs controls how long connection log entries are retained.\nSet to 0 to disable (keep indefinitely).",
+               "type": "integer"
+           },
+           "workspace_agent_logs": {
+               "description": "WorkspaceAgentLogs controls how long workspace agent logs are retained.\nLogs are deleted if the agent hasn't connected within this period.\nLogs from the latest build are always retained regardless of age.\nDefaults to 7 days to preserve existing behavior.",
+               "type": "integer"
+           }
+       }
+   },
    "codersdk.Role": {
        "type": "object",
        "properties": {
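For reference, the `codersdk.RetentionConfig` schema added above boils down to four duration knobs with a shared "zero disables" convention. A minimal standalone sketch of that convention (this local `RetentionConfig` type only mirrors the swagger field set; it is not the actual codersdk struct):

```go
package main

import (
	"fmt"
	"time"
)

// RetentionConfig mirrors the field set of the schema above, for illustration.
type RetentionConfig struct {
	APIKeys            time.Duration // expired API keys; default 7 days
	AuditLogs          time.Duration // 0 disables (keep indefinitely)
	ConnectionLogs     time.Duration // 0 disables (keep indefinitely)
	WorkspaceAgentLogs time.Duration // default 7 days; latest build always kept
}

// cutoff applies the "zero disables" convention used by the purge loop.
func cutoff(now time.Time, retention time.Duration) (time.Time, bool) {
	if retention <= 0 {
		return time.Time{}, false // retention disabled, purge nothing
	}
	return now.Add(-retention), true
}

func main() {
	cfg := RetentionConfig{AuditLogs: 90 * 24 * time.Hour}
	now := time.Now()
	if before, ok := cutoff(now, cfg.AuditLogs); ok {
		fmt.Println("delete audit logs older than", before.Format(time.RFC3339))
	}
	if _, ok := cutoff(now, cfg.ConnectionLogs); !ok {
		fmt.Println("connection log retention disabled; keeping logs indefinitely")
	}
}
```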
+1 -2
@@ -19,7 +19,6 @@ import (
    "github.com/coder/coder/v2/coderd/database"
    "github.com/coder/coder/v2/coderd/database/dbtime"
    "github.com/coder/coder/v2/coderd/httpapi"
-   "github.com/coder/coder/v2/coderd/httpauthz"
    "github.com/coder/coder/v2/coderd/httpmw"
    "github.com/coder/coder/v2/coderd/rbac"
    "github.com/coder/coder/v2/coderd/rbac/policy"
@@ -342,7 +341,7 @@ func (api *API) tokens(rw http.ResponseWriter, r *http.Request) {
        }
    }

-   keys, err = httpauthz.AuthorizationFilter(api.HTTPAuth, r, policy.ActionRead, keys)
+   keys, err = AuthorizeFilter(api.HTTPAuth, r, policy.ActionRead, keys)
    if err != nil {
        httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
            Message: "Internal error fetching keys.",
@@ -5,6 +5,7 @@ import (
    "net/http"

    "github.com/google/uuid"
+   "golang.org/x/xerrors"

    "cdr.dev/slog"
    "github.com/coder/coder/v2/coderd/httpapi"
@@ -14,6 +15,33 @@ import (
    "github.com/coder/coder/v2/codersdk"
)

+// AuthorizeFilter takes a list of objects and returns the filtered list of
+// objects that the user is authorized to perform the given action on.
+// This is faster than calling Authorize() on each object.
+func AuthorizeFilter[O rbac.Objecter](h *HTTPAuthorizer, r *http.Request, action policy.Action, objects []O) ([]O, error) {
+   roles := httpmw.UserAuthorization(r.Context())
+   objects, err := rbac.Filter(r.Context(), h.Authorizer, roles, action, objects)
+   if err != nil {
+       // Log the error as Filter should not be erroring.
+       h.Logger.Error(r.Context(), "authorization filter failed",
+           slog.Error(err),
+           slog.F("user_id", roles.ID),
+           slog.F("username", roles),
+           slog.F("roles", roles.SafeRoleNames()),
+           slog.F("scope", roles.SafeScopeName()),
+           slog.F("route", r.URL.Path),
+           slog.F("action", action),
+       )
+       return nil, err
+   }
+   return objects, nil
+}
+
+type HTTPAuthorizer struct {
+   Authorizer rbac.Authorizer
+   Logger     slog.Logger
+}
+
// Authorize will return false if the user is not authorized to do the action.
// This function will log appropriately, but the caller must return an
// error to the api client.
@@ -27,6 +55,57 @@ func (api *API) Authorize(r *http.Request, action policy.Action, object rbac.Obj
    return api.HTTPAuth.Authorize(r, action, object)
}

+// Authorize will return false if the user is not authorized to do the action.
+// This function will log appropriately, but the caller must return an
+// error to the api client.
+// Eg:
+//
+//  if !h.Authorize(...) {
+//      httpapi.Forbidden(rw)
+//      return
+//  }
+func (h *HTTPAuthorizer) Authorize(r *http.Request, action policy.Action, object rbac.Objecter) bool {
+   roles := httpmw.UserAuthorization(r.Context())
+   err := h.Authorizer.Authorize(r.Context(), roles, action, object.RBACObject())
+   if err != nil {
+       // Log the errors for debugging
+       internalError := new(rbac.UnauthorizedError)
+       logger := h.Logger
+       if xerrors.As(err, internalError) {
+           logger = h.Logger.With(slog.F("internal_error", internalError.Internal()))
+       }
+       // Log information for debugging. This will be very helpful
+       // in the early days
+       logger.Warn(r.Context(), "requester is not authorized to access the object",
+           slog.F("roles", roles.SafeRoleNames()),
+           slog.F("actor_id", roles.ID),
+           slog.F("actor_name", roles),
+           slog.F("scope", roles.SafeScopeName()),
+           slog.F("route", r.URL.Path),
+           slog.F("action", action),
+           slog.F("object", object),
+       )
+
+       return false
+   }
+   return true
+}
+
+// AuthorizeSQLFilter returns an authorization filter that can used in a
+// SQL 'WHERE' clause. If the filter is used, the resulting rows returned
+// from postgres are already authorized, and the caller does not need to
+// call 'Authorize()' on the returned objects.
+// Note the authorization is only for the given action and object type.
+func (h *HTTPAuthorizer) AuthorizeSQLFilter(r *http.Request, action policy.Action, objectType string) (rbac.PreparedAuthorized, error) {
+   roles := httpmw.UserAuthorization(r.Context())
+   prepared, err := h.Authorizer.Prepare(r.Context(), roles, action, objectType)
+   if err != nil {
+       return nil, xerrors.Errorf("prepare filter: %w", err)
+   }
+
+   return prepared, nil
+}
+
// checkAuthorization returns if the current API key can use the given
// permissions, factoring in the current user's roles and the API key scopes.
//
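The `AuthorizeFilter` generic added above filters a whole slice through `rbac.Filter` in a single authorizer call, which callers like `api.tokens` use as `keys, err = AuthorizeFilter(api.HTTPAuth, r, policy.ActionRead, keys)`. A toy standalone sketch of the shape of such a generic filter (the `objecter` interface and the ownership "policy" here are invented stand-ins, not the real rbac types):

```go
package main

import "fmt"

// objecter mirrors the spirit of rbac.Objecter: anything that can describe
// itself for an authorization check. Illustration only.
type objecter interface{ object() string }

type apiKey struct{ owner string }

func (k apiKey) object() string { return "api_key:" + k.owner }

// filter keeps only the objects the subject may act on, mirroring how
// AuthorizeFilter evaluates an entire slice at once instead of authorizing
// each element with a separate call.
func filter[O objecter](subject string, objects []O) []O {
	out := make([]O, 0, len(objects))
	for _, o := range objects {
		// A real authorizer evaluates roles and scopes; here the "policy"
		// is simply that a subject may read their own keys.
		if o.object() == "api_key:"+subject {
			out = append(out, o)
		}
	}
	return out
}

func main() {
	keys := []apiKey{{owner: "alice"}, {owner: "bob"}}
	fmt.Println(filter("alice", keys)) // only alice's key survives
}
```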
+7 -13
@@ -21,8 +21,6 @@ import (
    "sync/atomic"
    "time"

-   "github.com/coder/coder/v2/coderd/httpauthz"
-   "github.com/coder/coder/v2/coderd/insightsapi"
    "github.com/coder/coder/v2/coderd/oauth2provider"
    "github.com/coder/coder/v2/coderd/pproflabel"
    "github.com/coder/coder/v2/coderd/prebuilds"
@@ -586,7 +584,7 @@ func New(options *Options) *API {
        ID:          uuid.New(),
        Options:     options,
        RootHandler: r,
-       HTTPAuth: &httpauthz.HTTPAuthorizer{
+       HTTPAuth: &HTTPAuthorizer{
            Authorizer: options.Authorizer,
            Logger:     options.Logger,
        },
@@ -802,8 +800,6 @@ func New(options *Options) *API {
        APIKeyEncryptionKeycache: options.AppEncryptionKeyCache,
    })

-   api.insightsAPI = insightsapi.NewAPI(api.Logger, api.Database, api.HTTPAuth)
-
    apiKeyMiddleware := httpmw.ExtractAPIKeyMW(httpmw.ExtractAPIKeyConfig{
        DB:                  options.Database,
        ActivateDormantUser: ActivateDormantUser(options.Logger, &api.Auditor, options.Database),
@@ -1528,11 +1524,11 @@ func New(options *Options) *API {
    })
    r.Route("/insights", func(r chi.Router) {
        r.Use(apiKeyMiddleware)
-       r.Get("/daus", api.insightsAPI.DeploymentDAUs)
-       r.Get("/user-activity", api.insightsAPI.UserActivity)
-       r.Get("/user-status-counts", api.insightsAPI.UserStatusCounts)
-       r.Get("/user-latency", api.insightsAPI.UserLatency)
-       r.Get("/templates", api.insightsAPI.Templates)
+       r.Get("/daus", api.deploymentDAUs)
+       r.Get("/user-activity", api.insightsUserActivity)
+       r.Get("/user-status-counts", api.insightsUserStatusCounts)
+       r.Get("/user-latency", api.insightsUserLatency)
+       r.Get("/templates", api.insightsTemplates)
    })
    r.Route("/debug", func(r chi.Router) {
        r.Use(
@@ -1806,7 +1802,7 @@ type API struct {

    UpdatesProvider tailnet.WorkspaceUpdatesProvider

-   HTTPAuth *httpauthz.HTTPAuthorizer
+   HTTPAuth *HTTPAuthorizer

    // APIHandler serves "/api/v2"
    APIHandler chi.Router
@@ -1841,8 +1837,6 @@ type API struct {
    // dbRolluper rolls up template usage stats from raw agent and app
    // stats. This is used to provide insights in the WebUI.
    dbRolluper *dbrollup.Rolluper
-
-   insightsAPI *insightsapi.API
}

// Close waits for all WebSocket connections to drain before returning.
@@ -2,6 +2,7 @@ package coderd_test

import (
    "context"
+   "flag"
    "fmt"
    "io"
    "net/http"
@@ -34,6 +35,9 @@ import (
    "github.com/coder/coder/v2/testutil"
)

+// updateGoldenFiles is a flag that can be set to update golden files.
+var updateGoldenFiles = flag.Bool("update", false, "Update golden files")
+
func TestMain(m *testing.M) {
    goleak.VerifyTestMain(m, testutil.GoleakOptions...)
}
@@ -1,29 +0,0 @@
-package coderdtest
-
-import (
-   "sync/atomic"
-   "testing"
-
-   "github.com/prometheus/client_golang/prometheus"
-   "go.uber.org/mock/gomock"
-
-   "cdr.dev/slog"
-
-   "github.com/coder/coder/v2/coderd/database"
-   "github.com/coder/coder/v2/coderd/database/dbauthz"
-   "github.com/coder/coder/v2/coderd/database/dbmock"
-   "github.com/coder/coder/v2/coderd/rbac"
-)
-
-func MockedDatabaseWithAuthz(t testing.TB, logger slog.Logger) (*gomock.Controller, *dbmock.MockStore, database.Store, rbac.Authorizer) {
-   ctrl := gomock.NewController(t)
-   mDB := dbmock.NewMockStore(ctrl)
-   auth := rbac.NewStrictCachingAuthorizer(prometheus.NewRegistry())
-   accessControlStore := &atomic.Pointer[dbauthz.AccessControlStore]{}
-   var acs dbauthz.AccessControlStore = dbauthz.AGPLTemplateAccessControlStore{}
-   accessControlStore.Store(&acs)
-   // dbauthz will call Wrappers() to check for wrapped databases
-   mDB.EXPECT().Wrappers().Return([]string{}).AnyTimes()
-   authDB := dbauthz.New(mDB, auth, logger, accessControlStore)
-   return ctrl, mDB, authDB, auth
-}
@@ -1,14 +0,0 @@
-package coderdtest
-
-import "github.com/coder/coder/v2/coderd/rbac"
-
-func OwnerSubject() rbac.Subject {
-   return rbac.Subject{
-       FriendlyName: "coderdtest-owner",
-       Email:        "owner@coderd.test",
-       Type:         rbac.SubjectTypeUser,
-       ID:           "coderdtest-owner-id",
-       Roles:        rbac.RoleIdentifiers{rbac.RoleOwner()},
-       Scope:        rbac.ScopeAll,
-   }.WithCachedASTValue()
-}
@@ -16,7 +16,7 @@ import (
func TestEndpointsDocumented(t *testing.T) {
    t.Parallel()

-   swaggerComments, err := coderdtest.ParseSwaggerComments("..", "../insightsapi")
+   swaggerComments, err := coderdtest.ParseSwaggerComments("..")
    require.NoError(t, err, "can't parse swagger comments")
    require.NotEmpty(t, swaggerComments, "swagger comments must be present")
@@ -1732,7 +1732,7 @@ func (q *querier) DeleteOAuth2ProviderAppTokensByAppAndUserID(ctx context.Contex
    return q.db.DeleteOAuth2ProviderAppTokensByAppAndUserID(ctx, arg)
}

-func (q *querier) DeleteOldAIBridgeRecords(ctx context.Context, beforeTime time.Time) (int32, error) {
+func (q *querier) DeleteOldAIBridgeRecords(ctx context.Context, beforeTime time.Time) (int64, error) {
    if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceAibridgeInterception); err != nil {
        return -1, err
    }
@@ -1749,6 +1749,20 @@ func (q *querier) DeleteOldAuditLogConnectionEvents(ctx context.Context, thresho
    return q.db.DeleteOldAuditLogConnectionEvents(ctx, threshold)
}

+func (q *querier) DeleteOldAuditLogs(ctx context.Context, arg database.DeleteOldAuditLogsParams) (int64, error) {
+   if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceSystem); err != nil {
+       return 0, err
+   }
+   return q.db.DeleteOldAuditLogs(ctx, arg)
+}
+
+func (q *querier) DeleteOldConnectionLogs(ctx context.Context, arg database.DeleteOldConnectionLogsParams) (int64, error) {
+   if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceSystem); err != nil {
+       return 0, err
+   }
+   return q.db.DeleteOldConnectionLogs(ctx, arg)
+}
+
func (q *querier) DeleteOldNotificationMessages(ctx context.Context) error {
    if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceNotificationMessage); err != nil {
        return err
@@ -1770,9 +1784,9 @@ func (q *querier) DeleteOldTelemetryLocks(ctx context.Context, beforeTime time.T
    return q.db.DeleteOldTelemetryLocks(ctx, beforeTime)
}

-func (q *querier) DeleteOldWorkspaceAgentLogs(ctx context.Context, threshold time.Time) error {
+func (q *querier) DeleteOldWorkspaceAgentLogs(ctx context.Context, threshold time.Time) (int64, error) {
    if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceSystem); err != nil {
-       return err
+       return 0, err
    }
    return q.db.DeleteOldWorkspaceAgentLogs(ctx, threshold)
}
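The two querier methods added above follow the dbauthz convention: authorize the action against a resource first, then delegate to the wrapped store. A standalone sketch of that authorize-then-delegate shape (the `store` interface and the `allowed` flag are illustrative stand-ins for the generated querier and the rbac authorizer):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// store stands in for the generated querier interface; the dbauthz layer
// wraps each of its methods with an authorization check.
type store interface {
	DeleteOldAuditLogs(ctx context.Context, before int64) (int64, error)
}

type rawStore struct{}

func (rawStore) DeleteOldAuditLogs(ctx context.Context, before int64) (int64, error) {
	return 42, nil // pretend 42 rows were deleted
}

type authzStore struct {
	db      store
	allowed bool // a real implementation consults an rbac.Authorizer
}

// DeleteOldAuditLogs mirrors the querier pattern above: authorize the
// system-level delete action first, and only then delegate to the inner db.
func (q authzStore) DeleteOldAuditLogs(ctx context.Context, before int64) (int64, error) {
	if !q.allowed {
		return 0, errors.New("unauthorized: delete on system resource denied")
	}
	return q.db.DeleteOldAuditLogs(ctx, before)
}

func main() {
	q := authzStore{db: rawStore{}, allowed: true}
	n, err := q.DeleteOldAuditLogs(context.Background(), 0)
	fmt.Println(n, err) // 42 <nil>
}
```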
@@ -324,6 +324,10 @@ func (s *MethodTestSuite) TestAuditLogs() {
        dbm.EXPECT().DeleteOldAuditLogConnectionEvents(gomock.Any(), database.DeleteOldAuditLogConnectionEventsParams{}).Return(nil).AnyTimes()
        check.Args(database.DeleteOldAuditLogConnectionEventsParams{}).Asserts(rbac.ResourceSystem, policy.ActionDelete)
    }))
+   s.Run("DeleteOldAuditLogs", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
+       dbm.EXPECT().DeleteOldAuditLogs(gomock.Any(), database.DeleteOldAuditLogsParams{}).Return(int64(0), nil).AnyTimes()
+       check.Args(database.DeleteOldAuditLogsParams{}).Asserts(rbac.ResourceSystem, policy.ActionDelete)
+   }))
}

func (s *MethodTestSuite) TestConnectionLogs() {
@@ -355,6 +359,10 @@ func (s *MethodTestSuite) TestConnectionLogs() {
        dbm.EXPECT().CountConnectionLogs(gomock.Any(), database.CountConnectionLogsParams{}).Return(int64(0), nil).AnyTimes()
        check.Args(database.CountConnectionLogsParams{}, emptyPreparedAuthorized{}).Asserts(rbac.ResourceConnectionLog, policy.ActionRead)
    }))
+   s.Run("DeleteOldConnectionLogs", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
+       dbm.EXPECT().DeleteOldConnectionLogs(gomock.Any(), database.DeleteOldConnectionLogsParams{}).Return(int64(0), nil).AnyTimes()
+       check.Args(database.DeleteOldConnectionLogsParams{}).Asserts(rbac.ResourceSystem, policy.ActionDelete)
+   }))
}

func (s *MethodTestSuite) TestFile() {
@@ -3219,7 +3227,7 @@ func (s *MethodTestSuite) TestSystemFunctions() {
    }))
    s.Run("DeleteOldWorkspaceAgentLogs", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
        t := time.Time{}
-       dbm.EXPECT().DeleteOldWorkspaceAgentLogs(gomock.Any(), t).Return(nil).AnyTimes()
+       dbm.EXPECT().DeleteOldWorkspaceAgentLogs(gomock.Any(), t).Return(int64(0), nil).AnyTimes()
        check.Args(t).Asserts(rbac.ResourceSystem, policy.ActionDelete)
    }))
    s.Run("InsertWorkspaceAgentStats", s.Mocked(func(dbm *dbmock.MockStore, _ *gofakeit.Faker, check *expects) {
@@ -4697,7 +4705,7 @@ func (s *MethodTestSuite) TestAIBridge() {

    s.Run("DeleteOldAIBridgeRecords", s.Mocked(func(db *dbmock.MockStore, faker *gofakeit.Faker, check *expects) {
        t := dbtime.Now()
-       db.EXPECT().DeleteOldAIBridgeRecords(gomock.Any(), t).Return(int32(0), nil).AnyTimes()
+       db.EXPECT().DeleteOldAIBridgeRecords(gomock.Any(), t).Return(int64(0), nil).AnyTimes()
        check.Args(t).Asserts(rbac.ResourceAibridgeInterception, policy.ActionDelete)
    }))
}
@@ -396,7 +396,7 @@ func (m queryMetricsStore) DeleteOAuth2ProviderAppTokensByAppAndUserID(ctx conte
    return r0
}

-func (m queryMetricsStore) DeleteOldAIBridgeRecords(ctx context.Context, beforeTime time.Time) (int32, error) {
+func (m queryMetricsStore) DeleteOldAIBridgeRecords(ctx context.Context, beforeTime time.Time) (int64, error) {
    start := time.Now()
    r0, r1 := m.s.DeleteOldAIBridgeRecords(ctx, beforeTime)
    m.queryLatencies.WithLabelValues("DeleteOldAIBridgeRecords").Observe(time.Since(start).Seconds())
@@ -410,6 +410,20 @@ func (m queryMetricsStore) DeleteOldAuditLogConnectionEvents(ctx context.Context
    return r0
}

+func (m queryMetricsStore) DeleteOldAuditLogs(ctx context.Context, arg database.DeleteOldAuditLogsParams) (int64, error) {
+   start := time.Now()
+   r0, r1 := m.s.DeleteOldAuditLogs(ctx, arg)
+   m.queryLatencies.WithLabelValues("DeleteOldAuditLogs").Observe(time.Since(start).Seconds())
+   return r0, r1
+}
+
+func (m queryMetricsStore) DeleteOldConnectionLogs(ctx context.Context, arg database.DeleteOldConnectionLogsParams) (int64, error) {
+   start := time.Now()
+   r0, r1 := m.s.DeleteOldConnectionLogs(ctx, arg)
+   m.queryLatencies.WithLabelValues("DeleteOldConnectionLogs").Observe(time.Since(start).Seconds())
+   return r0, r1
+}
+
func (m queryMetricsStore) DeleteOldNotificationMessages(ctx context.Context) error {
    start := time.Now()
    r0 := m.s.DeleteOldNotificationMessages(ctx)
@@ -431,11 +445,11 @@ func (m queryMetricsStore) DeleteOldTelemetryLocks(ctx context.Context, periodEn
    return r0
}

-func (m queryMetricsStore) DeleteOldWorkspaceAgentLogs(ctx context.Context, arg time.Time) error {
+func (m queryMetricsStore) DeleteOldWorkspaceAgentLogs(ctx context.Context, arg time.Time) (int64, error) {
    start := time.Now()
-   r0 := m.s.DeleteOldWorkspaceAgentLogs(ctx, arg)
+   r0, r1 := m.s.DeleteOldWorkspaceAgentLogs(ctx, arg)
    m.queryLatencies.WithLabelValues("DeleteOldWorkspaceAgentLogs").Observe(time.Since(start).Seconds())
-   return r0
+   return r0, r1
}

func (m queryMetricsStore) DeleteOldWorkspaceAgentStats(ctx context.Context) error {
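Every method in `queryMetricsStore` has the same measure-then-delegate shape: record a start time, call the inner store, observe the elapsed seconds under the query's label, and return the results unchanged. A standalone sketch of that wrapper, generalized with a type parameter (the `observe` helper stands in for the Prometheus histogram the real code uses):

```go
package main

import (
	"fmt"
	"time"
)

// observe stands in for the Prometheus histogram; the real code calls
// m.queryLatencies.WithLabelValues(name).Observe(seconds).
func observe(name string, seconds float64) {
	fmt.Printf("%s took %.6fs\n", name, seconds)
}

// timed wraps any query with the same shape used above: record the start
// time, run the query, observe the elapsed seconds, pass results through.
func timed[T any](name string, query func() (T, error)) (T, error) {
	start := time.Now()
	v, err := query()
	observe(name, time.Since(start).Seconds())
	return v, err
}

func main() {
	n, err := timed("DeleteOldConnectionLogs", func() (int64, error) {
		time.Sleep(10 * time.Millisecond) // pretend to hit the database
		return 7, nil
	})
	fmt.Println(n, err)
}
```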
@@ -725,10 +725,10 @@ func (mr *MockStoreMockRecorder) DeleteOAuth2ProviderAppTokensByAppAndUserID(ctx
}

// DeleteOldAIBridgeRecords mocks base method.
-func (m *MockStore) DeleteOldAIBridgeRecords(ctx context.Context, beforeTime time.Time) (int32, error) {
+func (m *MockStore) DeleteOldAIBridgeRecords(ctx context.Context, beforeTime time.Time) (int64, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "DeleteOldAIBridgeRecords", ctx, beforeTime)
-   ret0, _ := ret[0].(int32)
+   ret0, _ := ret[0].(int64)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}
@@ -753,6 +753,36 @@ func (mr *MockStoreMockRecorder) DeleteOldAuditLogConnectionEvents(ctx, arg any)
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOldAuditLogConnectionEvents", reflect.TypeOf((*MockStore)(nil).DeleteOldAuditLogConnectionEvents), ctx, arg)
}

+// DeleteOldAuditLogs mocks base method.
+func (m *MockStore) DeleteOldAuditLogs(ctx context.Context, arg database.DeleteOldAuditLogsParams) (int64, error) {
+   m.ctrl.T.Helper()
+   ret := m.ctrl.Call(m, "DeleteOldAuditLogs", ctx, arg)
+   ret0, _ := ret[0].(int64)
+   ret1, _ := ret[1].(error)
+   return ret0, ret1
+}
+
+// DeleteOldAuditLogs indicates an expected call of DeleteOldAuditLogs.
+func (mr *MockStoreMockRecorder) DeleteOldAuditLogs(ctx, arg any) *gomock.Call {
+   mr.mock.ctrl.T.Helper()
+   return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOldAuditLogs", reflect.TypeOf((*MockStore)(nil).DeleteOldAuditLogs), ctx, arg)
+}
+
+// DeleteOldConnectionLogs mocks base method.
+func (m *MockStore) DeleteOldConnectionLogs(ctx context.Context, arg database.DeleteOldConnectionLogsParams) (int64, error) {
+   m.ctrl.T.Helper()
+   ret := m.ctrl.Call(m, "DeleteOldConnectionLogs", ctx, arg)
+   ret0, _ := ret[0].(int64)
+   ret1, _ := ret[1].(error)
+   return ret0, ret1
+}
+
+// DeleteOldConnectionLogs indicates an expected call of DeleteOldConnectionLogs.
+func (mr *MockStoreMockRecorder) DeleteOldConnectionLogs(ctx, arg any) *gomock.Call {
+   mr.mock.ctrl.T.Helper()
+   return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOldConnectionLogs", reflect.TypeOf((*MockStore)(nil).DeleteOldConnectionLogs), ctx, arg)
+}
+
// DeleteOldNotificationMessages mocks base method.
func (m *MockStore) DeleteOldNotificationMessages(ctx context.Context) error {
    m.ctrl.T.Helper()
@@ -796,11 +826,12 @@ func (mr *MockStoreMockRecorder) DeleteOldTelemetryLocks(ctx, periodEndingAtBefo
}

// DeleteOldWorkspaceAgentLogs mocks base method.
-func (m *MockStore) DeleteOldWorkspaceAgentLogs(ctx context.Context, threshold time.Time) error {
+func (m *MockStore) DeleteOldWorkspaceAgentLogs(ctx context.Context, threshold time.Time) (int64, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "DeleteOldWorkspaceAgentLogs", ctx, threshold)
-   ret0, _ := ret[0].(error)
-   return ret0
+   ret0, _ := ret[0].(int64)
+   ret1, _ := ret[1].(error)
+   return ret0, ret1
}

// DeleteOldWorkspaceAgentLogs indicates an expected call of DeleteOldWorkspaceAgentLogs.
@@ -18,13 +18,16 @@ import (
)

const (
-   delay          = 10 * time.Minute
-   maxAgentLogAge = 7 * 24 * time.Hour
+   delay = 10 * time.Minute
    // Connection events are now inserted into the `connection_logs` table.
-   // We'll slowly remove old connection events from the `audit_logs` table,
-   // but we won't touch the `connection_logs` table.
+   // We'll slowly remove old connection events from the `audit_logs` table.
+   // The `connection_logs` table is purged based on the configured retention.
    maxAuditLogConnectionEventAge    = 90 * 24 * time.Hour // 90 days
    auditLogConnectionEventBatchSize = 1000
+   // Batch size for connection log deletion.
+   connectionLogsBatchSize = 10000
+   // Batch size for audit log deletion.
+   auditLogsBatchSize = 10000
    // Telemetry heartbeats are used to deduplicate events across replicas. We
    // don't need to persist heartbeat rows for longer than 24 hours, as they
    // are only used for deduplication across replicas. The time needs to be
@@ -62,9 +65,14 @@ func New(ctx context.Context, logger slog.Logger, db database.Store, vals *coder
            return nil
        }

-       deleteOldWorkspaceAgentLogsBefore := start.Add(-maxAgentLogAge)
-       if err := tx.DeleteOldWorkspaceAgentLogs(ctx, deleteOldWorkspaceAgentLogsBefore); err != nil {
-           return xerrors.Errorf("failed to delete old workspace agent logs: %w", err)
+       var purgedWorkspaceAgentLogs int64
+       workspaceAgentLogsRetention := vals.Retention.WorkspaceAgentLogs.Value()
+       if workspaceAgentLogsRetention > 0 {
+           deleteOldWorkspaceAgentLogsBefore := start.Add(-workspaceAgentLogsRetention)
+           purgedWorkspaceAgentLogs, err = tx.DeleteOldWorkspaceAgentLogs(ctx, deleteOldWorkspaceAgentLogsBefore)
+           if err != nil {
+               return xerrors.Errorf("failed to delete old workspace agent logs: %w", err)
+           }
        }
        if err := tx.DeleteOldWorkspaceAgentStats(ctx); err != nil {
            return xerrors.Errorf("failed to delete old workspace agent stats: %w", err)
@@ -78,18 +86,24 @@ func New(ctx context.Context, logger slog.Logger, db database.Store, vals *coder
        if err := tx.ExpirePrebuildsAPIKeys(ctx, dbtime.Time(start)); err != nil {
            return xerrors.Errorf("failed to expire prebuilds user api keys: %w", err)
        }
-       expiredAPIKeys, err := tx.DeleteExpiredAPIKeys(ctx, database.DeleteExpiredAPIKeysParams{
-           // Leave expired keys for a week to allow the backend to know the difference
-           // between a 404 and an expired key. This purge code is just to bound the size of
-           // the table to something more reasonable.
-           Before: dbtime.Time(start.Add(time.Hour * 24 * 7 * -1)),
-           // There could be a lot of expired keys here, so set a limit to prevent this
-           // taking too long.
-           // This runs every 10 minutes, so it deletes ~1.5m keys per day at most.
-           LimitCount: 10000,
-       })
-       if err != nil {
-           return xerrors.Errorf("failed to delete expired api keys: %w", err)
+       var expiredAPIKeys int64
+       apiKeysRetention := vals.Retention.APIKeys.Value()
+       if apiKeysRetention > 0 {
+           // Delete keys that have been expired for at least the retention period.
+           // A higher retention period allows the backend to return a more helpful
+           // error message when a user tries to use an expired key.
+           deleteExpiredKeysBefore := start.Add(-apiKeysRetention)
+           expiredAPIKeys, err = tx.DeleteExpiredAPIKeys(ctx, database.DeleteExpiredAPIKeysParams{
+               Before: dbtime.Time(deleteExpiredKeysBefore),
+               // There could be a lot of expired keys here, so set a limit to prevent
+               // this taking too long. This runs every 10 minutes, so it deletes
+               // ~1.5m keys per day at most.
+               LimitCount: 10000,
+           })
+           if err != nil {
+               return xerrors.Errorf("failed to delete expired api keys: %w", err)
+           }
+       }
        deleteOldTelemetryLocksBefore := start.Add(-maxTelemetryHeartbeatAge)
        if err := tx.DeleteOldTelemetryLocks(ctx, deleteOldTelemetryLocksBefore); err != nil {
@@ -104,16 +118,49 @@ func New(ctx context.Context, logger slog.Logger, db database.Store, vals *coder
            return xerrors.Errorf("failed to delete old audit log connection events: %w", err)
        }

-       deleteAIBridgeRecordsBefore := start.Add(-vals.AI.BridgeConfig.Retention.Value())
-       // nolint:gocritic // Needs to run as aibridge context.
-       purgedAIBridgeRecords, err := tx.DeleteOldAIBridgeRecords(dbauthz.AsAIBridged(ctx), deleteAIBridgeRecordsBefore)
-       if err != nil {
-           return xerrors.Errorf("failed to delete old aibridge records: %w", err)
+       var purgedAIBridgeRecords int64
+       aibridgeRetention := vals.AI.BridgeConfig.Retention.Value()
+       if aibridgeRetention > 0 {
+           deleteAIBridgeRecordsBefore := start.Add(-aibridgeRetention)
+           // nolint:gocritic // Needs to run as aibridge context.
+           purgedAIBridgeRecords, err = tx.DeleteOldAIBridgeRecords(dbauthz.AsAIBridged(ctx), deleteAIBridgeRecordsBefore)
+           if err != nil {
+               return xerrors.Errorf("failed to delete old aibridge records: %w", err)
+           }
        }

+       var purgedConnectionLogs int64
+       connectionLogsRetention := vals.Retention.ConnectionLogs.Value()
+       if connectionLogsRetention > 0 {
+           deleteConnectionLogsBefore := start.Add(-connectionLogsRetention)
+           purgedConnectionLogs, err = tx.DeleteOldConnectionLogs(ctx, database.DeleteOldConnectionLogsParams{
+               BeforeTime: deleteConnectionLogsBefore,
+               LimitCount: connectionLogsBatchSize,
+           })
+           if err != nil {
+               return xerrors.Errorf("failed to delete old connection logs: %w", err)
+           }
+       }
+
+       var purgedAuditLogs int64
+       auditLogsRetention := vals.Retention.AuditLogs.Value()
+       if auditLogsRetention > 0 {
+           deleteAuditLogsBefore := start.Add(-auditLogsRetention)
+           purgedAuditLogs, err = tx.DeleteOldAuditLogs(ctx, database.DeleteOldAuditLogsParams{
+               BeforeTime: deleteAuditLogsBefore,
+               LimitCount: auditLogsBatchSize,
+           })
+           if err != nil {
+               return xerrors.Errorf("failed to delete old audit logs: %w", err)
+           }
+       }
+
        logger.Debug(ctx, "purged old database entries",
+           slog.F("workspace_agent_logs", purgedWorkspaceAgentLogs),
+           slog.F("expired_api_keys", expiredAPIKeys),
            slog.F("aibridge_records", purgedAIBridgeRecords),
+           slog.F("connection_logs", purgedConnectionLogs),
+           slog.F("audit_logs", purgedAuditLogs),
            slog.F("duration", clk.Since(start)),
        )
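The purge loop above gates every deleter behind its retention value, with 0 meaning "keep indefinitely", and collects the deleted-row counts for the debug log. A standalone sketch of that guard (`purgeFn` is an illustrative stand-in for the `tx.DeleteOld*` queries; the real code also deletes in batches):

```go
package main

import (
	"fmt"
	"time"
)

// purgeFn stands in for one of the tx.DeleteOld* queries; it returns how
// many rows were deleted so the purge loop can log the counts.
type purgeFn func(before time.Time) (int64, error)

// purgeWithRetention applies the convention used by the new dbpurge code:
// a retention of 0 disables the purge entirely, otherwise everything older
// than now-retention is deleted.
func purgeWithRetention(now time.Time, retention time.Duration, purge purgeFn) (int64, error) {
	if retention <= 0 {
		return 0, nil // disabled: keep rows indefinitely
	}
	return purge(now.Add(-retention))
}

func main() {
	deleted, err := purgeWithRetention(time.Now(), 30*24*time.Hour,
		func(before time.Time) (int64, error) {
			fmt.Println("deleting rows older than", before.Format(time.RFC3339))
			return 123, nil
		})
	fmt.Println(deleted, err)
}
```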
@@ -246,7 +246,11 @@ func TestDeleteOldWorkspaceAgentLogs(t *testing.T) {
    // After dbpurge completes, the ticker is reset. Trap this call.
    done := awaitDoTick(ctx, t, clk)
-   closer := dbpurge.New(ctx, logger, db, &codersdk.DeploymentValues{}, clk)
+   closer := dbpurge.New(ctx, logger, db, &codersdk.DeploymentValues{
+       Retention: codersdk.RetentionConfig{
+           WorkspaceAgentLogs: serpent.Duration(7 * 24 * time.Hour),
+       },
+   }, clk)
    defer closer.Close()
    <-done // doTick() has now run.
@@ -392,6 +396,90 @@ func mustCreateAgentLogs(ctx context.Context, t *testing.T, db database.Store, a
    require.NotEmpty(t, agentLogs, "agent logs must be present")
}

+func TestDeleteOldWorkspaceAgentLogsRetention(t *testing.T) {
+   t.Parallel()
+
+   now := time.Date(2025, 1, 15, 7, 30, 0, 0, time.UTC)
+
+   testCases := []struct {
+       name            string
+       retentionConfig codersdk.RetentionConfig
+       logsAge         time.Duration
+       expectDeleted   bool
+   }{
+       {
+           name: "RetentionEnabled",
+           retentionConfig: codersdk.RetentionConfig{
+               WorkspaceAgentLogs: serpent.Duration(7 * 24 * time.Hour), // 7 days
+           },
+           logsAge:       8 * 24 * time.Hour, // 8 days ago
+           expectDeleted: true,
+       },
+       {
+           name: "RetentionDisabled",
+           retentionConfig: codersdk.RetentionConfig{
+               WorkspaceAgentLogs: serpent.Duration(0),
+           },
+           logsAge:       60 * 24 * time.Hour, // 60 days ago
+           expectDeleted: false,
+       },
+       {
+           name: "CustomRetention30Days",
+           retentionConfig: codersdk.RetentionConfig{
+               WorkspaceAgentLogs: serpent.Duration(30 * 24 * time.Hour), // 30 days
+           },
+           logsAge:       31 * 24 * time.Hour, // 31 days ago
+           expectDeleted: true,
+       },
+   }
+
+   for _, tc := range testCases {
+       t.Run(tc.name, func(t *testing.T) {
+           t.Parallel()
+
+           ctx := testutil.Context(t, testutil.WaitShort)
+           clk := quartz.NewMock(t)
+           clk.Set(now).MustWait(ctx)
+
+           oldTime := now.Add(-tc.logsAge)
+
+           db, _ := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure())
+           logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true})
+           org := dbgen.Organization(t, db, database.Organization{})
+           user := dbgen.User(t, db, database.User{})
+           _ = dbgen.OrganizationMember(t, db, database.OrganizationMember{UserID: user.ID, OrganizationID: org.ID})
+           tv := dbgen.TemplateVersion(t, db, database.TemplateVersion{OrganizationID: org.ID, CreatedBy: user.ID})
+           tmpl := dbgen.Template(t, db, database.Template{OrganizationID: org.ID, ActiveVersionID: tv.ID, CreatedBy: user.ID})
+
+           ws := dbgen.Workspace(t, db, database.WorkspaceTable{Name: "test-ws", OwnerID: user.ID, OrganizationID: org.ID, TemplateID: tmpl.ID})
+           wb1 := mustCreateWorkspaceBuild(t, db, org, tv, ws.ID, oldTime, 1)
+           wb2 := mustCreateWorkspaceBuild(t, db, org, tv, ws.ID, oldTime, 2)
+           agent1 := mustCreateAgent(t, db, wb1)
+           agent2 := mustCreateAgent(t, db, wb2)
+           mustCreateAgentLogs(ctx, t, db, agent1, &oldTime, "agent 1 logs")
+           mustCreateAgentLogs(ctx, t, db, agent2, &oldTime, "agent 2 logs")
+
+           // Run the purge.
+           done := awaitDoTick(ctx, t, clk)
+           closer := dbpurge.New(ctx, logger, db, &codersdk.DeploymentValues{
+               Retention: tc.retentionConfig,
+           }, clk)
+           defer closer.Close()
+           testutil.TryReceive(ctx, t, done)
+
+           // Verify results.
+           if tc.expectDeleted {
+               assertNoWorkspaceAgentLogs(ctx, t, db, agent1.ID)
+           } else {
+               assertWorkspaceAgentLogs(ctx, t, db, agent1.ID, "agent 1 logs")
+           }
+           // Latest build logs are always retained.
+           assertWorkspaceAgentLogs(ctx, t, db, agent2.ID, "agent 2 logs")
+       })
+   }
+}
+
//nolint:paralleltest // It uses LockIDDBPurge.
func TestDeleteOldProvisionerDaemons(t *testing.T) {
    // TODO: must refactor DeleteOldProvisionerDaemons to allow passing in cutoff
@@ -759,171 +847,684 @@ func TestDeleteOldTelemetryHeartbeats(t *testing.T) {
    }, testutil.WaitShort, testutil.IntervalFast, "it should delete old telemetry heartbeats")
}

+func TestDeleteOldConnectionLogs(t *testing.T) {
+   t.Parallel()
+
+   now := time.Date(2025, 1, 15, 7, 30, 0, 0, time.UTC)
+   retentionPeriod := 30 * 24 * time.Hour
+   afterThreshold := now.Add(-retentionPeriod).Add(-24 * time.Hour) // 31 days ago (older than threshold)
+   beforeThreshold := now.Add(-15 * 24 * time.Hour) // 15 days ago (newer than threshold)
+
+   testCases := []struct {
+       name                  string
+       retentionConfig       codersdk.RetentionConfig
+       oldLogTime            time.Time
+       recentLogTime         *time.Time // nil means no recent log created
+       expectOldDeleted      bool
+       expectedLogsRemaining int
+   }{
+       {
+           name: "RetentionEnabled",
+           retentionConfig: codersdk.RetentionConfig{
+               ConnectionLogs: serpent.Duration(retentionPeriod),
+           },
+           oldLogTime:            afterThreshold,
+           recentLogTime:         &beforeThreshold,
+           expectOldDeleted:      true,
+           expectedLogsRemaining: 1, // only recent log remains
+       },
+       {
+           name: "RetentionDisabled",
+           retentionConfig: codersdk.RetentionConfig{
+               ConnectionLogs: serpent.Duration(0),
+           },
+           oldLogTime:            now.Add(-365 * 24 * time.Hour), // 1 year ago
+           recentLogTime:         nil,
+           expectOldDeleted:      false,
+           expectedLogsRemaining: 1, // old log is kept
+       },
+   }
+
+   for _, tc := range testCases {
+       t.Run(tc.name, func(t *testing.T) {
+           t.Parallel()
+
+           ctx := testutil.Context(t, testutil.WaitShort)
+           clk := quartz.NewMock(t)
+           clk.Set(now).MustWait(ctx)
+
+           db, _ := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure())
+           logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true})
+
+           // Setup test fixtures.
+           user := dbgen.User(t, db, database.User{})
+           org := dbgen.Organization(t, db, database.Organization{})
+           _ = dbgen.OrganizationMember(t, db, database.OrganizationMember{UserID: user.ID, OrganizationID: org.ID})
+           tv := dbgen.TemplateVersion(t, db, database.TemplateVersion{OrganizationID: org.ID, CreatedBy: user.ID})
+           tmpl := dbgen.Template(t, db, database.Template{OrganizationID: org.ID, ActiveVersionID: tv.ID, CreatedBy: user.ID})
+           workspace := dbgen.Workspace(t, db, database.WorkspaceTable{
+               OwnerID:        user.ID,
+               OrganizationID: org.ID,
+               TemplateID:     tmpl.ID,
+           })
+
+           // Create old connection log.
+           oldLog := dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{
+               ID:               uuid.New(),
+               Time:             tc.oldLogTime,
+               OrganizationID:   org.ID,
+               WorkspaceOwnerID: user.ID,
+               WorkspaceID:      workspace.ID,
+               WorkspaceName:    workspace.Name,
+               AgentName:        "agent1",
+               Type:             database.ConnectionTypeSsh,
+               ConnectionStatus: database.ConnectionStatusConnected,
+           })
+
+           // Create recent connection log if specified.
+           var recentLog database.ConnectionLog
+           if tc.recentLogTime != nil {
+               recentLog = dbgen.ConnectionLog(t, db, database.UpsertConnectionLogParams{
+                   ID:               uuid.New(),
+                   Time:             *tc.recentLogTime,
+                   OrganizationID:   org.ID,
+                   WorkspaceOwnerID: user.ID,
+                   WorkspaceID:      workspace.ID,
+                   WorkspaceName:    workspace.Name,
+                   AgentName:        "agent2",
+                   Type:             database.ConnectionTypeSsh,
+                   ConnectionStatus: database.ConnectionStatusConnected,
+               })
+           }
+
+           // Run the purge.
+           done := awaitDoTick(ctx, t, clk)
+           closer := dbpurge.New(ctx, logger, db, &codersdk.DeploymentValues{
+               Retention: tc.retentionConfig,
+           }, clk)
+           defer closer.Close()
+           testutil.TryReceive(ctx, t, done)
+
+           // Verify results.
+           logs, err := db.GetConnectionLogsOffset(ctx, database.GetConnectionLogsOffsetParams{
+               LimitOpt: 100,
+           })
+           require.NoError(t, err)
+           require.Len(t, logs, tc.expectedLogsRemaining, "unexpected number of logs remaining")
+
+           logIDs := make([]uuid.UUID, len(logs))
+           for i, log := range logs {
+               logIDs[i] = log.ConnectionLog.ID
+           }
+
+           if tc.expectOldDeleted {
+               require.NotContains(t, logIDs, oldLog.ID, "old connection log should be deleted")
+           } else {
+               require.Contains(t, logIDs, oldLog.ID, "old connection log should NOT be deleted")
+           }
+
+           if tc.recentLogTime != nil {
+               require.Contains(t, logIDs, recentLog.ID, "recent connection log should be kept")
+           }
+       })
+   }
+}
+
func TestDeleteOldAIBridgeRecords(t *testing.T) {
    t.Parallel()

-   ctx := testutil.Context(t, testutil.WaitShort)
-
-   clk := quartz.NewMock(t)
    now := time.Date(2025, 1, 15, 7, 30, 0, 0, time.UTC)
    retentionPeriod := 30 * 24 * time.Hour // 30 days
    afterThreshold := now.Add(-retentionPeriod).Add(-24 * time.Hour) // 31 days ago (older than threshold)
    beforeThreshold := now.Add(-15 * 24 * time.Hour) // 15 days ago (newer than threshold)
    closeBeforeThreshold := now.Add(-retentionPeriod).Add(24 * time.Hour) // 29 days ago
-   clk.Set(now).MustWait(ctx)
-
-   db, _ := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure())
-   logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true})
-   user := dbgen.User(t, db, database.User{})
-
-   // Create old AI Bridge interception (should be deleted)
-   oldInterception := dbgen.AIBridgeInterception(t, db, database.InsertAIBridgeInterceptionParams{
-       ID:          uuid.New(),
-       APIKeyID:    sql.NullString{},
-       InitiatorID: user.ID,
-       Provider:    "anthropic",
-       Model:       "claude-3-5-sonnet",
-       StartedAt:   afterThreshold,
-   }, &afterThreshold)
-
-   // Create old interception with related records (should all be deleted)
-   oldInterceptionWithRelated := dbgen.AIBridgeInterception(t, db, database.InsertAIBridgeInterceptionParams{
-       ID:          uuid.New(),
-       APIKeyID:    sql.NullString{},
-       InitiatorID: user.ID,
-       Provider:    "openai",
-       Model:       "gpt-4",
-       StartedAt:   afterThreshold,
-   }, &afterThreshold)
-
-   _ = dbgen.AIBridgeTokenUsage(t, db, database.InsertAIBridgeTokenUsageParams{
-       ID:                 uuid.New(),
-       InterceptionID:     oldInterceptionWithRelated.ID,
-       ProviderResponseID: "resp-1",
-       InputTokens:        100,
-       OutputTokens:       50,
-       CreatedAt:          afterThreshold,
-   })
-
-   _ = dbgen.AIBridgeUserPrompt(t, db, database.InsertAIBridgeUserPromptParams{
-       ID:                 uuid.New(),
-       InterceptionID:     oldInterceptionWithRelated.ID,
-       ProviderResponseID: "resp-1",
-       Prompt:             "test prompt",
-       CreatedAt:          afterThreshold,
-   })
-
-   _ = dbgen.AIBridgeToolUsage(t, db, database.InsertAIBridgeToolUsageParams{
-       ID:                 uuid.New(),
-       InterceptionID:     oldInterceptionWithRelated.ID,
-       ProviderResponseID: "resp-1",
-       Tool:               "test-tool",
-       ServerUrl:          sql.NullString{String: "http://test", Valid: true},
-       Input:              "{}",
-       Injected:           true,
-       CreatedAt:          afterThreshold,
-   })
-
-   // Create recent AI Bridge interception (should be kept)
-   recentInterception := dbgen.AIBridgeInterception(t, db, database.InsertAIBridgeInterceptionParams{
-       ID:          uuid.New(),
-       APIKeyID:    sql.NullString{},
-       InitiatorID: user.ID,
-       Provider:    "anthropic",
-       Model:       "claude-3-5-sonnet",
-       StartedAt:   beforeThreshold,
-   }, &beforeThreshold)
-
-   // Create interception close to threshold (should be kept)
-   nearThresholdInterception := dbgen.AIBridgeInterception(t, db, database.InsertAIBridgeInterceptionParams{
-       ID:          uuid.New(),
-       APIKeyID:    sql.NullString{},
-       InitiatorID: user.ID,
-       Provider:    "anthropic",
-       Model:       "claude-3-5-sonnet",
-       StartedAt:   closeBeforeThreshold,
-   }, &closeBeforeThreshold)
-
-   _ = dbgen.AIBridgeTokenUsage(t, db, database.InsertAIBridgeTokenUsageParams{
-       ID:                 uuid.New(),
-       InterceptionID:     nearThresholdInterception.ID,
-       ProviderResponseID: "resp-1",
-       InputTokens:        100,
-       OutputTokens:       50,
-       CreatedAt:          closeBeforeThreshold,
-   })
-
-   _ = dbgen.AIBridgeUserPrompt(t, db, database.InsertAIBridgeUserPromptParams{
-       ID:                 uuid.New(),
-       InterceptionID:     nearThresholdInterception.ID,
-       ProviderResponseID: "resp-1",
-       Prompt:             "test prompt",
-       CreatedAt:          closeBeforeThreshold,
-   })
-
-   _ = dbgen.AIBridgeToolUsage(t, db, database.InsertAIBridgeToolUsageParams{
-       ID:                 uuid.New(),
-       InterceptionID:     nearThresholdInterception.ID,
-       ProviderResponseID: "resp-1",
-       Tool:               "test-tool",
-       ServerUrl:          sql.NullString{String: "http://test", Valid: true},
-       Input:              "{}",
-       Injected:           true,
-       CreatedAt:          closeBeforeThreshold,
-   })
-
-   // Run the purge with configured retention period
-   done := awaitDoTick(ctx, t, clk)
-   closer := dbpurge.New(ctx, logger, db, &codersdk.DeploymentValues{
-       AI: codersdk.AIConfig{
-           BridgeConfig: codersdk.AIBridgeConfig{
-               Retention: serpent.Duration(retentionPeriod),
-           },
-       },
-   }, clk)
-   defer closer.Close()
-   // Wait for tick
-   testutil.TryReceive(ctx, t, done)
-
-   // Verify results by querying all AI Bridge records
-   interceptions, err := db.GetAIBridgeInterceptions(ctx)
-   require.NoError(t, err)
-
-   // Extract interception IDs for comparison
-   interceptionIDs := make([]uuid.UUID, len(interceptions))
-   for i, interception := range interceptions {
-       interceptionIDs[i] = interception.ID
-   }
+   type testFixtures struct {
+       oldInterception            database.AIBridgeInterception
+       oldInterceptionWithRelated database.AIBridgeInterception
+       recentInterception         database.AIBridgeInterception
+       nearThresholdInterception  database.AIBridgeInterception
+   }

-   require.NotContains(t, interceptionIDs, oldInterception.ID, "old interception should be deleted")
-   require.NotContains(t, interceptionIDs, oldInterceptionWithRelated.ID, "old interception with related records should be deleted")
+   testCases := []struct {
+       name      string
+       retention time.Duration
+       verify    func(t *testing.T, ctx context.Context, db database.Store, fixtures testFixtures)
+   }{
+       {
+           name:      "RetentionEnabled",
+           retention: retentionPeriod,
+           verify: func(t *testing.T, ctx context.Context, db database.Store, fixtures testFixtures) {
+               t.Helper()
+
-   // Verify related records were also deleted
-   oldTokenUsages, err := db.GetAIBridgeTokenUsagesByInterceptionID(ctx, oldInterceptionWithRelated.ID)
-   require.NoError(t, err)
-   require.Empty(t, oldTokenUsages, "old token usages should be deleted")
+               interceptions, err := db.GetAIBridgeInterceptions(ctx)
+               require.NoError(t, err)
+               require.Len(t, interceptions, 2, "expected 2 interceptions remaining")
+
-   oldUserPrompts, err := db.GetAIBridgeUserPromptsByInterceptionID(ctx, oldInterceptionWithRelated.ID)
-   require.NoError(t, err)
-   require.Empty(t, oldUserPrompts, "old user prompts should be deleted")
+               interceptionIDs := make([]uuid.UUID, len(interceptions))
+               for i, interception := range interceptions {
+                   interceptionIDs[i] = interception.ID
+               }
+
-   oldToolUsages, err := db.GetAIBridgeToolUsagesByInterceptionID(ctx, oldInterceptionWithRelated.ID)
-   require.NoError(t, err)
-   require.Empty(t, oldToolUsages, "old tool usages should be deleted")
+               require.NotContains(t, interceptionIDs, fixtures.oldInterception.ID, "old interception should be deleted")
+               require.NotContains(t, interceptionIDs, fixtures.oldInterceptionWithRelated.ID, "old interception with related records should be deleted")
+               require.Contains(t, interceptionIDs, fixtures.recentInterception.ID, "recent interception should be kept")
+               require.Contains(t, interceptionIDs, fixtures.nearThresholdInterception.ID, "near threshold interception should be kept")
+
-   require.Contains(t, interceptionIDs, recentInterception.ID, "recent interception should be kept")
-   require.Contains(t, interceptionIDs, nearThresholdInterception.ID, "near threshold interception should be kept")
+               // Verify related records were deleted for old interception.
+               oldTokenUsages, err := db.GetAIBridgeTokenUsagesByInterceptionID(ctx, fixtures.oldInterceptionWithRelated.ID)
+               require.NoError(t, err)
+               require.Empty(t, oldTokenUsages, "old token usages should be deleted")

-   // Verify related records were NOT deleted
-   newTokenUsages, err := db.GetAIBridgeTokenUsagesByInterceptionID(ctx, nearThresholdInterception.ID)
-   require.NoError(t, err)
-   require.Len(t, newTokenUsages, 1, "near threshold token usages should not be deleted")
+               oldUserPrompts, err := db.GetAIBridgeUserPromptsByInterceptionID(ctx, fixtures.oldInterceptionWithRelated.ID)
+               require.NoError(t, err)
+               require.Empty(t, oldUserPrompts, "old user prompts should be deleted")

-   newUserPrompts, err := db.GetAIBridgeUserPromptsByInterceptionID(ctx, nearThresholdInterception.ID)
-   require.NoError(t, err)
-   require.Len(t, newUserPrompts, 1, "near threshold user prompts should not be deleted")
+               oldToolUsages, err := db.GetAIBridgeToolUsagesByInterceptionID(ctx, fixtures.oldInterceptionWithRelated.ID)
+               require.NoError(t, err)
+               require.Empty(t, oldToolUsages, "old tool usages should be deleted")

-   newToolUsages, err := db.GetAIBridgeToolUsagesByInterceptionID(ctx, nearThresholdInterception.ID)
-   require.NoError(t, err)
-   require.Len(t, newToolUsages, 1, "near threshold tool usages should not be deleted")
+               // Verify related records were NOT deleted for near-threshold interception.
+               newTokenUsages, err := db.GetAIBridgeTokenUsagesByInterceptionID(ctx, fixtures.nearThresholdInterception.ID)
+               require.NoError(t, err)
+               require.Len(t, newTokenUsages, 1, "near threshold token usages should not be deleted")
+
+               newUserPrompts, err := db.GetAIBridgeUserPromptsByInterceptionID(ctx, fixtures.nearThresholdInterception.ID)
+               require.NoError(t, err)
+               require.Len(t, newUserPrompts, 1, "near threshold user prompts should not be deleted")
+
+               newToolUsages, err := db.GetAIBridgeToolUsagesByInterceptionID(ctx, fixtures.nearThresholdInterception.ID)
+               require.NoError(t, err)
+               require.Len(t, newToolUsages, 1, "near threshold tool usages should not be deleted")
+           },
+       },
+       {
+           name:      "RetentionDisabled",
+           retention: 0,
+           verify: func(t *testing.T, ctx context.Context, db database.Store, fixtures testFixtures) {
+               t.Helper()
+
+               interceptions, err := db.GetAIBridgeInterceptions(ctx)
+               require.NoError(t, err)
+               require.Len(t, interceptions, 4, "expected all 4 interceptions to be retained")
+
+               interceptionIDs := make([]uuid.UUID, len(interceptions))
+               for i, interception := range interceptions {
+                   interceptionIDs[i] = interception.ID
+               }
+
+               require.Contains(t, interceptionIDs, fixtures.oldInterception.ID, "old interception should be kept")
+               require.Contains(t, interceptionIDs, fixtures.oldInterceptionWithRelated.ID, "old interception with related records should be kept")
+               require.Contains(t, interceptionIDs, fixtures.recentInterception.ID, "recent interception should be kept")
+               require.Contains(t, interceptionIDs, fixtures.nearThresholdInterception.ID, "near threshold interception should be kept")
+
+               // Verify all related records were kept.
+               oldTokenUsages, err := db.GetAIBridgeTokenUsagesByInterceptionID(ctx, fixtures.oldInterceptionWithRelated.ID)
+               require.NoError(t, err)
+               require.Len(t, oldTokenUsages, 1, "old token usages should be kept")
+
+               oldUserPrompts, err := db.GetAIBridgeUserPromptsByInterceptionID(ctx, fixtures.oldInterceptionWithRelated.ID)
+               require.NoError(t, err)
+               require.Len(t, oldUserPrompts, 1, "old user prompts should be kept")
+
+               oldToolUsages, err := db.GetAIBridgeToolUsagesByInterceptionID(ctx, fixtures.oldInterceptionWithRelated.ID)
+               require.NoError(t, err)
+               require.Len(t, oldToolUsages, 1, "old tool usages should be kept")
+           },
+       },
+   }
+
+   for _, tc := range testCases {
+       t.Run(tc.name, func(t *testing.T) {
+           t.Parallel()
+
+           ctx := testutil.Context(t, testutil.WaitShort)
+           clk := quartz.NewMock(t)
+           clk.Set(now).MustWait(ctx)
+
+           db, _ := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure())
+           logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true})
+           user := dbgen.User(t, db, database.User{})
+
+           // Create old AI Bridge interception (should be deleted when retention enabled).
+           oldInterception := dbgen.AIBridgeInterception(t, db, database.InsertAIBridgeInterceptionParams{
+               ID:          uuid.New(),
+               APIKeyID:    sql.NullString{},
+               InitiatorID: user.ID,
+               Provider:    "anthropic",
+               Model:       "claude-3-5-sonnet",
+               StartedAt:   afterThreshold,
+           }, &afterThreshold)
+
+           // Create old interception with related records (should all be deleted when retention enabled).
+           oldInterceptionWithRelated := dbgen.AIBridgeInterception(t, db, database.InsertAIBridgeInterceptionParams{
+               ID:          uuid.New(),
+               APIKeyID:    sql.NullString{},
+               InitiatorID: user.ID,
+               Provider:    "openai",
+               Model:       "gpt-4",
+               StartedAt:   afterThreshold,
+           }, &afterThreshold)
+
+           _ = dbgen.AIBridgeTokenUsage(t, db, database.InsertAIBridgeTokenUsageParams{
+               ID:                 uuid.New(),
+               InterceptionID:     oldInterceptionWithRelated.ID,
+               ProviderResponseID: "resp-1",
+               InputTokens:        100,
+               OutputTokens:       50,
+               CreatedAt:          afterThreshold,
+           })
+
+           _ = dbgen.AIBridgeUserPrompt(t, db, database.InsertAIBridgeUserPromptParams{
+               ID:                 uuid.New(),
+               InterceptionID:     oldInterceptionWithRelated.ID,
+               ProviderResponseID: "resp-1",
+               Prompt:             "test prompt",
+               CreatedAt:          afterThreshold,
+           })
+
+           _ = dbgen.AIBridgeToolUsage(t, db, database.InsertAIBridgeToolUsageParams{
+               ID:                 uuid.New(),
+               InterceptionID:     oldInterceptionWithRelated.ID,
+               ProviderResponseID: "resp-1",
+               Tool:               "test-tool",
+               ServerUrl:          sql.NullString{String: "http://test", Valid: true},
+               Input:              "{}",
+               Injected:           true,
+               CreatedAt:          afterThreshold,
+           })
+
+           // Create recent AI Bridge interception (should be kept).
+           recentInterception := dbgen.AIBridgeInterception(t, db, database.InsertAIBridgeInterceptionParams{
+               ID:          uuid.New(),
+               APIKeyID:    sql.NullString{},
+               InitiatorID: user.ID,
+               Provider:    "anthropic",
+               Model:       "claude-3-5-sonnet",
+               StartedAt:   beforeThreshold,
+           }, &beforeThreshold)
+
+           // Create interception close to threshold (should be kept).
+           nearThresholdInterception := dbgen.AIBridgeInterception(t, db, database.InsertAIBridgeInterceptionParams{
+               ID:          uuid.New(),
+               APIKeyID:    sql.NullString{},
+               InitiatorID: user.ID,
+               Provider:    "anthropic",
+               Model:       "claude-3-5-sonnet",
+               StartedAt:   closeBeforeThreshold,
+           }, &closeBeforeThreshold)
+
+           _ = dbgen.AIBridgeTokenUsage(t, db, database.InsertAIBridgeTokenUsageParams{
+               ID:                 uuid.New(),
+               InterceptionID:     nearThresholdInterception.ID,
+               ProviderResponseID: "resp-1",
+               InputTokens:        100,
+               OutputTokens:       50,
+               CreatedAt:          closeBeforeThreshold,
+           })
+
+           _ = dbgen.AIBridgeUserPrompt(t, db, database.InsertAIBridgeUserPromptParams{
+               ID:                 uuid.New(),
+               InterceptionID:     nearThresholdInterception.ID,
+               ProviderResponseID: "resp-1",
+               Prompt:             "test prompt",
+               CreatedAt:          closeBeforeThreshold,
+           })
+
+           _ = dbgen.AIBridgeToolUsage(t, db, database.InsertAIBridgeToolUsageParams{
+               ID:                 uuid.New(),
+               InterceptionID:     nearThresholdInterception.ID,
+               ProviderResponseID: "resp-1",
+               Tool:               "test-tool",
+               ServerUrl:          sql.NullString{String: "http://test", Valid: true},
+               Input:              "{}",
+               Injected:           true,
+               CreatedAt:          closeBeforeThreshold,
+           })
+
+           fixtures := testFixtures{
+               oldInterception:            oldInterception,
+               oldInterceptionWithRelated: oldInterceptionWithRelated,
+               recentInterception:         recentInterception,
+               nearThresholdInterception:  nearThresholdInterception,
+           }
+
+           // Run the purge with configured retention period.
+           done := awaitDoTick(ctx, t, clk)
+           closer := dbpurge.New(ctx, logger, db, &codersdk.DeploymentValues{
+               AI: codersdk.AIConfig{
+                   BridgeConfig: codersdk.AIBridgeConfig{
+                       Retention: serpent.Duration(tc.retention),
+                   },
+               },
+           }, clk)
+           defer closer.Close()
+           testutil.TryReceive(ctx, t, done)
+
+           tc.verify(t, ctx, db, fixtures)
+       })
+   }
}

+func TestDeleteOldAuditLogs(t *testing.T) {
+   t.Parallel()
+
+   now := time.Date(2025, 1, 15, 7, 30, 0, 0, time.UTC)
+   retentionPeriod := 30 * 24 * time.Hour
+   afterThreshold := now.Add(-retentionPeriod).Add(-24 * time.Hour) // 31 days ago (older than threshold)
+   beforeThreshold := now.Add(-15 * 24 * time.Hour) // 15 days ago (newer than threshold)
+
+   testCases := []struct {
+       name                  string
+       retentionConfig       codersdk.RetentionConfig
+       oldLogTime            time.Time
+       recentLogTime         *time.Time // nil means no recent log created
+       expectOldDeleted      bool
+       expectedLogsRemaining int
+   }{
+       {
+           name: "RetentionEnabled",
+           retentionConfig: codersdk.RetentionConfig{
+               AuditLogs: serpent.Duration(retentionPeriod),
+           },
+           oldLogTime:            afterThreshold,
+           recentLogTime:         &beforeThreshold,
+           expectOldDeleted:      true,
+           expectedLogsRemaining: 1, // only recent log remains
+       },
+       {
+           name: "RetentionDisabled",
+           retentionConfig: codersdk.RetentionConfig{
+               AuditLogs: serpent.Duration(0),
+           },
+           oldLogTime:            now.Add(-365 * 24 * time.Hour), // 1 year ago
+           recentLogTime:         nil,
+           expectOldDeleted:      false,
+           expectedLogsRemaining: 1, // old log is kept
+       },
+   }
+
+   for _, tc := range testCases {
+       t.Run(tc.name, func(t *testing.T) {
+           t.Parallel()
+
+           ctx := testutil.Context(t, testutil.WaitShort)
+           clk := quartz.NewMock(t)
+           clk.Set(now).MustWait(ctx)
+
+           db, _ := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure())
+           logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true})
+
+           // Setup test fixtures.
+           user := dbgen.User(t, db, database.User{})
+           org := dbgen.Organization(t, db, database.Organization{})
+
+           // Create old audit log.
+           oldLog := dbgen.AuditLog(t, db, database.AuditLog{
+               UserID:         user.ID,
+               OrganizationID: org.ID,
+               Time:           tc.oldLogTime,
+               Action:         database.AuditActionCreate,
+               ResourceType:   database.ResourceTypeWorkspace,
+           })
+
+           // Create recent audit log if specified.
+           var recentLog database.AuditLog
+           if tc.recentLogTime != nil {
+               recentLog = dbgen.AuditLog(t, db, database.AuditLog{
+                   UserID:         user.ID,
+                   OrganizationID: org.ID,
+                   Time:           *tc.recentLogTime,
+                   Action:         database.AuditActionCreate,
+                   ResourceType:   database.ResourceTypeWorkspace,
+               })
+           }
+
+           // Run the purge.
+           done := awaitDoTick(ctx, t, clk)
+           closer := dbpurge.New(ctx, logger, db, &codersdk.DeploymentValues{
+               Retention: tc.retentionConfig,
+           }, clk)
+           defer closer.Close()
+           testutil.TryReceive(ctx, t, done)
+
+           // Verify results.
+           logs, err := db.GetAuditLogsOffset(ctx, database.GetAuditLogsOffsetParams{
+               LimitOpt: 100,
+           })
+           require.NoError(t, err)
+           require.Len(t, logs, tc.expectedLogsRemaining, "unexpected number of logs remaining")
+
+           logIDs := make([]uuid.UUID, len(logs))
+           for i, log := range logs {
+               logIDs[i] = log.AuditLog.ID
+           }
+
+           if tc.expectOldDeleted {
+               require.NotContains(t, logIDs, oldLog.ID, "old audit log should be deleted")
+           } else {
+               require.Contains(t, logIDs, oldLog.ID, "old audit log should NOT be deleted")
+           }
+
+           if tc.recentLogTime != nil {
+               require.Contains(t, logIDs, recentLog.ID, "recent audit log should be kept")
+           }
+       })
+   }
+
+   // ConnectionEventsNotDeleted is a special case that tests multiple audit
+   // action types, so it's kept as a separate subtest.
+   t.Run("ConnectionEventsNotDeleted", func(t *testing.T) {
+       t.Parallel()
+
+       ctx := testutil.Context(t, testutil.WaitShort)
+       clk := quartz.NewMock(t)
+       clk.Set(now).MustWait(ctx)
|
||||
|
||||
db, _ := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure())
|
||||
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true})
|
||||
user := dbgen.User(t, db, database.User{})
|
||||
org := dbgen.Organization(t, db, database.Organization{})
|
||||
|
||||
// Create old connection events (should NOT be deleted by audit logs retention).
|
||||
oldConnectLog := dbgen.AuditLog(t, db, database.AuditLog{
|
||||
UserID: user.ID,
|
||||
OrganizationID: org.ID,
|
||||
Time: afterThreshold,
|
||||
Action: database.AuditActionConnect,
|
||||
ResourceType: database.ResourceTypeWorkspace,
|
||||
})
|
||||
|
||||
oldDisconnectLog := dbgen.AuditLog(t, db, database.AuditLog{
|
||||
UserID: user.ID,
|
||||
OrganizationID: org.ID,
|
||||
Time: afterThreshold,
|
||||
Action: database.AuditActionDisconnect,
|
||||
ResourceType: database.ResourceTypeWorkspace,
|
||||
})
|
||||
|
||||
oldOpenLog := dbgen.AuditLog(t, db, database.AuditLog{
|
||||
UserID: user.ID,
|
||||
OrganizationID: org.ID,
|
||||
Time: afterThreshold,
|
||||
Action: database.AuditActionOpen,
|
||||
ResourceType: database.ResourceTypeWorkspace,
|
||||
})
|
||||
|
||||
oldCloseLog := dbgen.AuditLog(t, db, database.AuditLog{
|
||||
UserID: user.ID,
|
||||
OrganizationID: org.ID,
|
||||
Time: afterThreshold,
|
||||
Action: database.AuditActionClose,
|
||||
ResourceType: database.ResourceTypeWorkspace,
|
||||
})
|
||||
|
||||
// Create old non-connection audit log (should be deleted).
|
||||
oldCreateLog := dbgen.AuditLog(t, db, database.AuditLog{
|
||||
UserID: user.ID,
|
||||
OrganizationID: org.ID,
|
||||
Time: afterThreshold,
|
||||
Action: database.AuditActionCreate,
|
||||
ResourceType: database.ResourceTypeWorkspace,
|
||||
})
|
||||
|
||||
// Run the purge with audit logs retention enabled.
|
||||
done := awaitDoTick(ctx, t, clk)
|
||||
closer := dbpurge.New(ctx, logger, db, &codersdk.DeploymentValues{
|
||||
Retention: codersdk.RetentionConfig{
|
||||
AuditLogs: serpent.Duration(retentionPeriod),
|
||||
},
|
||||
}, clk)
|
||||
defer closer.Close()
|
||||
testutil.TryReceive(ctx, t, done)
|
||||
|
||||
// Verify results.
|
||||
logs, err := db.GetAuditLogsOffset(ctx, database.GetAuditLogsOffsetParams{
|
||||
LimitOpt: 100,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, logs, 4, "should have 4 connection event logs remaining")
|
||||
|
||||
logIDs := make([]uuid.UUID, len(logs))
|
||||
for i, log := range logs {
|
||||
logIDs[i] = log.AuditLog.ID
|
||||
}
|
||||
|
||||
// Connection events should NOT be deleted by audit logs retention.
|
||||
require.Contains(t, logIDs, oldConnectLog.ID, "old connect log should NOT be deleted by audit logs retention")
|
||||
require.Contains(t, logIDs, oldDisconnectLog.ID, "old disconnect log should NOT be deleted by audit logs retention")
|
||||
require.Contains(t, logIDs, oldOpenLog.ID, "old open log should NOT be deleted by audit logs retention")
|
||||
require.Contains(t, logIDs, oldCloseLog.ID, "old close log should NOT be deleted by audit logs retention")
|
||||
|
||||
// Non-connection event should be deleted.
|
||||
require.NotContains(t, logIDs, oldCreateLog.ID, "old create log should be deleted by audit logs retention")
|
||||
})
|
||||
}
|
||||
|
||||
func TestDeleteExpiredAPIKeys(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
now := time.Date(2025, 1, 15, 7, 30, 0, 0, time.UTC)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
retentionConfig codersdk.RetentionConfig
|
||||
oldExpiredTime time.Time
|
||||
recentExpiredTime *time.Time // nil means no recent expired key created
|
||||
activeTime *time.Time // nil means no active key created
|
||||
expectOldExpiredDeleted bool
|
||||
expectedKeysRemaining int
|
||||
}{
|
||||
{
|
||||
name: "RetentionEnabled",
|
||||
retentionConfig: codersdk.RetentionConfig{
|
||||
APIKeys: serpent.Duration(7 * 24 * time.Hour), // 7 days
|
||||
},
|
||||
oldExpiredTime: now.Add(-8 * 24 * time.Hour), // Expired 8 days ago
|
||||
recentExpiredTime: ptr(now.Add(-6 * 24 * time.Hour)), // Expired 6 days ago
|
||||
activeTime: ptr(now.Add(24 * time.Hour)), // Expires tomorrow
|
||||
expectOldExpiredDeleted: true,
|
||||
expectedKeysRemaining: 2, // recent expired + active
|
||||
},
|
||||
{
|
||||
name: "RetentionDisabled",
|
||||
retentionConfig: codersdk.RetentionConfig{
|
||||
APIKeys: serpent.Duration(0),
|
||||
},
|
||||
oldExpiredTime: now.Add(-365 * 24 * time.Hour), // Expired 1 year ago
|
||||
recentExpiredTime: nil,
|
||||
activeTime: nil,
|
||||
expectOldExpiredDeleted: false,
|
||||
expectedKeysRemaining: 1, // old expired is kept
|
||||
},
|
||||
|
||||
{
|
||||
name: "CustomRetention30Days",
|
||||
retentionConfig: codersdk.RetentionConfig{
|
||||
APIKeys: serpent.Duration(30 * 24 * time.Hour), // 30 days
|
||||
},
|
||||
oldExpiredTime: now.Add(-31 * 24 * time.Hour), // Expired 31 days ago
|
||||
recentExpiredTime: ptr(now.Add(-29 * 24 * time.Hour)), // Expired 29 days ago
|
||||
activeTime: nil,
|
||||
expectOldExpiredDeleted: true,
|
||||
expectedKeysRemaining: 1, // only recent expired remains
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
clk := quartz.NewMock(t)
|
||||
clk.Set(now).MustWait(ctx)
|
||||
|
||||
db, _ := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure())
|
||||
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true})
|
||||
user := dbgen.User(t, db, database.User{})
|
||||
|
||||
// Create API key that expired long ago.
|
||||
oldExpiredKey, _ := dbgen.APIKey(t, db, database.APIKey{
|
||||
UserID: user.ID,
|
||||
ExpiresAt: tc.oldExpiredTime,
|
||||
TokenName: "old-expired-key",
|
||||
})
|
||||
|
||||
// Create API key that expired recently if specified.
|
||||
var recentExpiredKey database.APIKey
|
||||
if tc.recentExpiredTime != nil {
|
||||
recentExpiredKey, _ = dbgen.APIKey(t, db, database.APIKey{
|
||||
UserID: user.ID,
|
||||
ExpiresAt: *tc.recentExpiredTime,
|
||||
TokenName: "recent-expired-key",
|
||||
})
|
||||
}
|
||||
|
||||
// Create API key that hasn't expired yet if specified.
|
||||
var activeKey database.APIKey
|
||||
if tc.activeTime != nil {
|
||||
activeKey, _ = dbgen.APIKey(t, db, database.APIKey{
|
||||
UserID: user.ID,
|
||||
ExpiresAt: *tc.activeTime,
|
||||
TokenName: "active-key",
|
||||
})
|
||||
}
|
||||
|
||||
// Run the purge.
|
||||
done := awaitDoTick(ctx, t, clk)
|
||||
closer := dbpurge.New(ctx, logger, db, &codersdk.DeploymentValues{
|
||||
Retention: tc.retentionConfig,
|
||||
}, clk)
|
||||
defer closer.Close()
|
||||
testutil.TryReceive(ctx, t, done)
|
||||
|
||||
// Verify total keys remaining.
|
||||
keys, err := db.GetAPIKeysLastUsedAfter(ctx, time.Time{})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, keys, tc.expectedKeysRemaining, "unexpected number of keys remaining")
|
||||
|
||||
// Verify results.
|
||||
_, err = db.GetAPIKeyByID(ctx, oldExpiredKey.ID)
|
||||
if tc.expectOldExpiredDeleted {
|
||||
require.Error(t, err, "old expired key should be deleted")
|
||||
} else {
|
||||
require.NoError(t, err, "old expired key should NOT be deleted")
|
||||
}
|
||||
|
||||
if tc.recentExpiredTime != nil {
|
||||
_, err = db.GetAPIKeyByID(ctx, recentExpiredKey.ID)
|
||||
require.NoError(t, err, "recently expired key should be kept")
|
||||
}
|
||||
|
||||
if tc.activeTime != nil {
|
||||
_, err = db.GetAPIKeyByID(ctx, activeKey.ID)
|
||||
require.NoError(t, err, "active key should be kept")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ptr is a helper to create a pointer to a value.
|
||||
func ptr[T any](v T) *T {
|
||||
return &v
|
||||
}
@@ -3449,6 +3449,8 @@ COMMENT ON INDEX workspace_app_audit_sessions_unique_index IS 'Unique index to e
 CREATE INDEX workspace_app_stats_workspace_id_idx ON workspace_app_stats USING btree (workspace_id);

+CREATE INDEX workspace_app_statuses_app_id_idx ON workspace_app_statuses USING btree (app_id, created_at DESC);
+
 CREATE INDEX workspace_modules_created_at_idx ON workspace_modules USING btree (created_at);

 CREATE INDEX workspace_next_start_at_idx ON workspaces USING btree (next_start_at) WHERE (deleted = false);

@@ -0,0 +1 @@
+DROP INDEX IF EXISTS workspace_app_statuses_app_id_idx;

@@ -0,0 +1 @@
+CREATE INDEX workspace_app_statuses_app_id_idx ON workspace_app_statuses (app_id, created_at DESC);
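Note: the new composite index lines up with the ORDER BY added to GetWorkspaceAppStatusesByAppIDs later in this diff (equality on app_id, then the index's created_at DESC order), so Postgres can return the newest statuses per app without a separate sort step. A hedged illustration of the query shape it serves, written as a Go constant in the style of the generated queries (the LIMIT 1 variant is hypothetical, not part of this change):

package example

// Illustrative only: equality on the leading index column, then the
// index's DESC order, so the planner needs no extra sort node.
const latestStatusForAppExample = `
SELECT id, created_at
FROM workspace_app_statuses
WHERE app_id = $1
ORDER BY created_at DESC
LIMIT 1;`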
@@ -104,8 +104,13 @@ type sqlcQuerier interface {
 	DeleteOAuth2ProviderAppSecretByID(ctx context.Context, id uuid.UUID) error
 	DeleteOAuth2ProviderAppTokensByAppAndUserID(ctx context.Context, arg DeleteOAuth2ProviderAppTokensByAppAndUserIDParams) error
 	// Cumulative count.
-	DeleteOldAIBridgeRecords(ctx context.Context, beforeTime time.Time) (int32, error)
+	DeleteOldAIBridgeRecords(ctx context.Context, beforeTime time.Time) (int64, error)
 	DeleteOldAuditLogConnectionEvents(ctx context.Context, arg DeleteOldAuditLogConnectionEventsParams) error
+	// Deletes old audit logs based on retention policy, excluding deprecated
+	// connection events (connect, disconnect, open, close) which are handled
+	// separately by DeleteOldAuditLogConnectionEvents.
+	DeleteOldAuditLogs(ctx context.Context, arg DeleteOldAuditLogsParams) (int64, error)
+	DeleteOldConnectionLogs(ctx context.Context, arg DeleteOldConnectionLogsParams) (int64, error)
 	// Delete all notification messages which have not been updated for over a week.
 	DeleteOldNotificationMessages(ctx context.Context) error
 	// Delete provisioner daemons that have been created at least a week ago

@@ -115,10 +120,10 @@ type sqlcQuerier interface {
 	DeleteOldProvisionerDaemons(ctx context.Context) error
 	// Deletes old telemetry locks from the telemetry_locks table.
 	DeleteOldTelemetryLocks(ctx context.Context, periodEndingAtBefore time.Time) error
-	// If an agent hasn't connected in the last 7 days, we purge it's logs.
+	// If an agent hasn't connected within the retention period, we purge its logs.
 	// Exception: if the logs are related to the latest build, we keep those around.
 	// Logs can take up a lot of space, so it's important we clean up frequently.
-	DeleteOldWorkspaceAgentLogs(ctx context.Context, threshold time.Time) error
+	DeleteOldWorkspaceAgentLogs(ctx context.Context, threshold time.Time) (int64, error)
 	DeleteOldWorkspaceAgentStats(ctx context.Context) error
 	DeleteOrganizationMember(ctx context.Context, arg DeleteOrganizationMemberParams) error
 	DeleteProvisionerKey(ctx context.Context, id uuid.UUID) error
@@ -354,17 +354,18 @@ WITH
 	WHERE id IN (SELECT id FROM to_delete)
 	RETURNING 1
 )
-SELECT
+SELECT (
 	(SELECT COUNT(*) FROM tool_usages) +
 	(SELECT COUNT(*) FROM token_usages) +
 	(SELECT COUNT(*) FROM user_prompts) +
-	(SELECT COUNT(*) FROM interceptions) as total_deleted
+	(SELECT COUNT(*) FROM interceptions)
+)::bigint as total_deleted
 `

 // Cumulative count.
-func (q *sqlQuerier) DeleteOldAIBridgeRecords(ctx context.Context, beforeTime time.Time) (int32, error) {
+func (q *sqlQuerier) DeleteOldAIBridgeRecords(ctx context.Context, beforeTime time.Time) (int64, error) {
 	row := q.db.QueryRowContext(ctx, deleteOldAIBridgeRecords, beforeTime)
-	var total_deleted int32
+	var total_deleted int64
 	err := row.Scan(&total_deleted)
 	return total_deleted, err
 }
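Note: the int32-to-int64 change is not cosmetic. COUNT(*) in PostgreSQL returns bigint, and the query now casts the summed counts to ::bigint explicitly, so int64 is the matching Go scan target; an int32 destination could overflow on very large purges. A minimal sketch of the same scan pattern, with a hypothetical events table standing in for the AI Bridge tables:

package example

import (
	"context"
	"database/sql"
	"time"
)

// countDeleted removes rows older than before and reports how many went
// away. The ::bigint cast keeps the wire type explicit, and int64 is the
// Go type that matches a Postgres bigint.
func countDeleted(ctx context.Context, db *sql.DB, before time.Time) (int64, error) {
	const q = `
WITH deleted AS (
	DELETE FROM events WHERE created_at < $1 RETURNING 1
)
SELECT COUNT(*)::bigint FROM deleted`
	var total int64
	err := db.QueryRowContext(ctx, q, before).Scan(&total)
	return total, err
}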
@@ -1107,24 +1108,20 @@ func (q *sqlQuerier) DeleteApplicationConnectAPIKeysByUserID(ctx context.Context
 	return err
 }

-const deleteExpiredAPIKeys = `-- name: DeleteExpiredAPIKeys :one
+const deleteExpiredAPIKeys = `-- name: DeleteExpiredAPIKeys :execrows
 WITH expired_keys AS (
 	SELECT id
 	FROM api_keys
 	-- expired keys only
 	WHERE expires_at < $1::timestamptz
 	LIMIT $2
-),
-deleted_rows AS (
-	DELETE FROM
-		api_keys
-	USING
-		expired_keys
-	WHERE
-		api_keys.id = expired_keys.id
-	RETURNING api_keys.id
-)
-SELECT COUNT(deleted_rows.id) AS deleted_count FROM deleted_rows
+)
+DELETE FROM
+	api_keys
+USING
+	expired_keys
+WHERE
+	api_keys.id = expired_keys.id
 `

 type DeleteExpiredAPIKeysParams struct {

@@ -1133,10 +1130,11 @@ type DeleteExpiredAPIKeysParams struct {
 }

 func (q *sqlQuerier) DeleteExpiredAPIKeys(ctx context.Context, arg DeleteExpiredAPIKeysParams) (int64, error) {
-	row := q.db.QueryRowContext(ctx, deleteExpiredAPIKeys, arg.Before, arg.LimitCount)
-	var deleted_count int64
-	err := row.Scan(&deleted_count)
-	return deleted_count, err
+	result, err := q.db.ExecContext(ctx, deleteExpiredAPIKeys, arg.Before, arg.LimitCount)
+	if err != nil {
+		return 0, err
+	}
+	return result.RowsAffected()
 }

 const expirePrebuildsAPIKeys = `-- name: ExpirePrebuildsAPIKeys :exec
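Note: switching the sqlc directive from :one to :execrows changes the generated code from QueryRowContext plus a counting CTE to a plain ExecContext whose count comes from the driver's RowsAffected, which is why the deleted_rows CTE and SELECT COUNT disappear above. A standalone sketch of the same pattern (hypothetical sessions table, not the generated code itself):

package example

import (
	"context"
	"database/sql"
	"time"
)

// deleteExpired deletes a bounded batch of expired rows and reports the
// count via sql.Result.RowsAffected; no RETURNING or COUNT is needed.
func deleteExpired(ctx context.Context, db *sql.DB, before time.Time, limit int32) (int64, error) {
	const q = `
WITH expired AS (
	SELECT id FROM sessions WHERE expires_at < $1 LIMIT $2
)
DELETE FROM sessions USING expired WHERE sessions.id = expired.id`
	result, err := db.ExecContext(ctx, q, before, limit)
	if err != nil {
		return 0, err
	}
	return result.RowsAffected()
}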
@@ -1637,6 +1635,37 @@ func (q *sqlQuerier) DeleteOldAuditLogConnectionEvents(ctx context.Context, arg
 	return err
 }

+const deleteOldAuditLogs = `-- name: DeleteOldAuditLogs :execrows
+WITH old_logs AS (
+	SELECT id
+	FROM audit_logs
+	WHERE
+		"time" < $1::timestamp with time zone
+		AND action NOT IN ('connect', 'disconnect', 'open', 'close')
+	ORDER BY "time" ASC
+	LIMIT $2
+)
+DELETE FROM audit_logs
+USING old_logs
+WHERE audit_logs.id = old_logs.id
+`
+
+type DeleteOldAuditLogsParams struct {
+	BeforeTime time.Time `db:"before_time" json:"before_time"`
+	LimitCount int32     `db:"limit_count" json:"limit_count"`
+}
+
+// Deletes old audit logs based on retention policy, excluding deprecated
+// connection events (connect, disconnect, open, close) which are handled
+// separately by DeleteOldAuditLogConnectionEvents.
+func (q *sqlQuerier) DeleteOldAuditLogs(ctx context.Context, arg DeleteOldAuditLogsParams) (int64, error) {
+	result, err := q.db.ExecContext(ctx, deleteOldAuditLogs, arg.BeforeTime, arg.LimitCount)
+	if err != nil {
+		return 0, err
+	}
+	return result.RowsAffected()
+}
+
 const getAuditLogsOffset = `-- name: GetAuditLogsOffset :many
 SELECT audit_logs.id, audit_logs.time, audit_logs.user_id, audit_logs.organization_id, audit_logs.ip, audit_logs.user_agent, audit_logs.resource_type, audit_logs.resource_id, audit_logs.resource_target, audit_logs.action, audit_logs.diff, audit_logs.status_code, audit_logs.additional_fields, audit_logs.request_id, audit_logs.resource_icon,
 -- sqlc.embed(users) would be nice but it does not seem to play well with
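Note: both new delete queries take a LIMIT and select the oldest rows first, which strongly suggests the purger is meant to work in bounded batches rather than delete everything in one long transaction. A sketch of that driving loop, under the assumption (not shown in this diff) that the caller repeats until a batch comes back short:

package example

import (
	"context"
	"time"
)

// purgeInBatches repeatedly applies a bounded :execrows-style delete until
// a pass removes fewer rows than the batch size, keeping each transaction
// short. deleteBatch stands in for any of the generated queries above.
func purgeInBatches(ctx context.Context, deleteBatch func(context.Context, time.Time, int32) (int64, error), before time.Time) (int64, error) {
	const batchSize = 1000 // illustrative; the real limit is caller-defined
	var total int64
	for {
		n, err := deleteBatch(ctx, before, batchSize)
		if err != nil {
			return total, err
		}
		total += n
		if n < batchSize {
			return total, nil
		}
	}
}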
@@ -2095,6 +2124,32 @@ func (q *sqlQuerier) CountConnectionLogs(ctx context.Context, arg CountConnectio
 	return count, err
 }

+const deleteOldConnectionLogs = `-- name: DeleteOldConnectionLogs :execrows
+WITH old_logs AS (
+	SELECT id
+	FROM connection_logs
+	WHERE connect_time < $1::timestamp with time zone
+	ORDER BY connect_time ASC
+	LIMIT $2
+)
+DELETE FROM connection_logs
+USING old_logs
+WHERE connection_logs.id = old_logs.id
+`
+
+type DeleteOldConnectionLogsParams struct {
+	BeforeTime time.Time `db:"before_time" json:"before_time"`
+	LimitCount int32     `db:"limit_count" json:"limit_count"`
+}
+
+func (q *sqlQuerier) DeleteOldConnectionLogs(ctx context.Context, arg DeleteOldConnectionLogsParams) (int64, error) {
+	result, err := q.db.ExecContext(ctx, deleteOldConnectionLogs, arg.BeforeTime, arg.LimitCount)
+	if err != nil {
+		return 0, err
+	}
+	return result.RowsAffected()
+}
+
 const getConnectionLogsOffset = `-- name: GetConnectionLogsOffset :many
 SELECT
 	connection_logs.id, connection_logs.connect_time, connection_logs.organization_id, connection_logs.workspace_owner_id, connection_logs.workspace_id, connection_logs.workspace_name, connection_logs.agent_name, connection_logs.type, connection_logs.ip, connection_logs.code, connection_logs.user_agent, connection_logs.user_id, connection_logs.slug_or_port, connection_logs.connection_id, connection_logs.disconnect_time, connection_logs.disconnect_reason,
@@ -17792,7 +17847,7 @@ func (q *sqlQuerier) UpdateVolumeResourceMonitor(ctx context.Context, arg Update
 	return err
 }

-const deleteOldWorkspaceAgentLogs = `-- name: DeleteOldWorkspaceAgentLogs :exec
+const deleteOldWorkspaceAgentLogs = `-- name: DeleteOldWorkspaceAgentLogs :execrows
 WITH
 	latest_builds AS (
 		SELECT

@@ -17835,12 +17890,15 @@ WITH
 DELETE FROM workspace_agent_logs WHERE agent_id IN (SELECT id FROM old_agents)
 `

-// If an agent hasn't connected in the last 7 days, we purge it's logs.
+// If an agent hasn't connected within the retention period, we purge its logs.
 // Exception: if the logs are related to the latest build, we keep those around.
 // Logs can take up a lot of space, so it's important we clean up frequently.
-func (q *sqlQuerier) DeleteOldWorkspaceAgentLogs(ctx context.Context, threshold time.Time) error {
-	_, err := q.db.ExecContext(ctx, deleteOldWorkspaceAgentLogs, threshold)
-	return err
-}
+func (q *sqlQuerier) DeleteOldWorkspaceAgentLogs(ctx context.Context, threshold time.Time) (int64, error) {
+	result, err := q.db.ExecContext(ctx, deleteOldWorkspaceAgentLogs, threshold)
+	if err != nil {
+		return 0, err
+	}
+	return result.RowsAffected()
+}

 const deleteWorkspaceSubAgentByID = `-- name: DeleteWorkspaceSubAgentByID :exec
@@ -20188,6 +20246,7 @@ func (q *sqlQuerier) GetWorkspaceAppByAgentIDAndSlug(ctx context.Context, arg Ge

 const getWorkspaceAppStatusesByAppIDs = `-- name: GetWorkspaceAppStatusesByAppIDs :many
 SELECT id, created_at, agent_id, app_id, workspace_id, state, message, uri FROM workspace_app_statuses WHERE app_id = ANY($1 :: uuid [ ])
+ORDER BY created_at DESC, id DESC
 `

 func (q *sqlQuerier) GetWorkspaceAppStatusesByAppIDs(ctx context.Context, ids []uuid.UUID) ([]WorkspaceAppStatus, error) {
@@ -360,8 +360,9 @@ WITH
 	RETURNING 1
 )
 -- Cumulative count.
-SELECT
+SELECT (
 	(SELECT COUNT(*) FROM tool_usages) +
 	(SELECT COUNT(*) FROM token_usages) +
 	(SELECT COUNT(*) FROM user_prompts) +
-	(SELECT COUNT(*) FROM interceptions) as total_deleted;
+	(SELECT COUNT(*) FROM interceptions)
+)::bigint as total_deleted;
@@ -85,25 +85,20 @@ DELETE FROM
 WHERE
 	user_id = $1;

--- name: DeleteExpiredAPIKeys :one
+-- name: DeleteExpiredAPIKeys :execrows
 WITH expired_keys AS (
 	SELECT id
 	FROM api_keys
 	-- expired keys only
 	WHERE expires_at < @before::timestamptz
 	LIMIT @limit_count
-),
-deleted_rows AS (
-	DELETE FROM
-		api_keys
-	USING
-		expired_keys
-	WHERE
-		api_keys.id = expired_keys.id
-	RETURNING api_keys.id
-)
-SELECT COUNT(deleted_rows.id) AS deleted_count FROM deleted_rows
-;
+)
+DELETE FROM
+	api_keys
+USING
+	expired_keys
+WHERE
+	api_keys.id = expired_keys.id;

 -- name: ExpirePrebuildsAPIKeys :exec
 -- Firstly, collect api_keys owned by the prebuilds user that correlate
@@ -253,3 +253,20 @@ WHERE id IN (
 	ORDER BY "time" ASC
 	LIMIT @limit_count
 );
+
+-- name: DeleteOldAuditLogs :execrows
+-- Deletes old audit logs based on retention policy, excluding deprecated
+-- connection events (connect, disconnect, open, close) which are handled
+-- separately by DeleteOldAuditLogConnectionEvents.
+WITH old_logs AS (
+	SELECT id
+	FROM audit_logs
+	WHERE
+		"time" < @before_time::timestamp with time zone
+		AND action NOT IN ('connect', 'disconnect', 'open', 'close')
+	ORDER BY "time" ASC
+	LIMIT @limit_count
+)
+DELETE FROM audit_logs
+USING old_logs
+WHERE audit_logs.id = old_logs.id;
@@ -239,6 +239,18 @@ WHERE
 	-- @authorize_filter
 ;

+-- name: DeleteOldConnectionLogs :execrows
+WITH old_logs AS (
+	SELECT id
+	FROM connection_logs
+	WHERE connect_time < @before_time::timestamp with time zone
+	ORDER BY connect_time ASC
+	LIMIT @limit_count
+)
+DELETE FROM connection_logs
+USING old_logs
+WHERE connection_logs.id = old_logs.id;
+
 -- name: UpsertConnectionLog :one
 INSERT INTO connection_logs (
 	id,
@@ -199,10 +199,10 @@ INSERT INTO
 -- name: GetWorkspaceAgentLogSourcesByAgentIDs :many
 SELECT * FROM workspace_agent_log_sources WHERE workspace_agent_id = ANY(@ids :: uuid [ ]);

--- If an agent hasn't connected in the last 7 days, we purge it's logs.
+-- If an agent hasn't connected within the retention period, we purge its logs.
 -- Exception: if the logs are related to the latest build, we keep those around.
 -- Logs can take up a lot of space, so it's important we clean up frequently.
--- name: DeleteOldWorkspaceAgentLogs :exec
+-- name: DeleteOldWorkspaceAgentLogs :execrows
 WITH
 	latest_builds AS (
 		SELECT
@@ -71,7 +71,8 @@ VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
 RETURNING *;

 -- name: GetWorkspaceAppStatusesByAppIDs :many
-SELECT * FROM workspace_app_statuses WHERE app_id = ANY(@ids :: uuid [ ]);
+SELECT * FROM workspace_app_statuses WHERE app_id = ANY(@ids :: uuid [ ])
+ORDER BY created_at DESC, id DESC;

 -- name: GetLatestWorkspaceAppStatusByAppID :one
 SELECT *
@@ -153,7 +153,9 @@ func freeUDPPort(t *testing.T) uint16 {
 	})
 	require.NoError(t, err, "listen on random UDP port")

-	_, port, err := net.SplitHostPort(l.LocalAddr().String())
+	localAddr := l.LocalAddr()
+	require.NotNil(t, localAddr, "local address is nil")
+	_, port, err := net.SplitHostPort(localAddr.String())
 	require.NoError(t, err, "split host port")

 	portUint, err := strconv.ParseUint(port, 10, 16)
@@ -1,91 +0,0 @@
-package httpauthz
-
-import (
-	"net/http"
-
-	"golang.org/x/xerrors"
-
-	"cdr.dev/slog"
-
-	"github.com/coder/coder/v2/coderd/httpmw"
-	"github.com/coder/coder/v2/coderd/rbac"
-	"github.com/coder/coder/v2/coderd/rbac/policy"
-)
-
-// AuthorizationFilter takes a list of objects and returns the filtered list of
-// objects that the user is authorized to perform the given action on.
-// This is faster than calling Authorize() on each object.
-func AuthorizationFilter[O rbac.Objecter](h *HTTPAuthorizer, r *http.Request, action policy.Action, objects []O) ([]O, error) {
-	roles := httpmw.UserAuthorization(r.Context())
-	objects, err := rbac.Filter(r.Context(), h.Authorizer, roles, action, objects)
-	if err != nil {
-		// Log the error as Filter should not be erroring.
-		h.Logger.Error(r.Context(), "authorization filter failed",
-			slog.Error(err),
-			slog.F("user_id", roles.ID),
-			slog.F("username", roles),
-			slog.F("roles", roles.SafeRoleNames()),
-			slog.F("scope", roles.SafeScopeName()),
-			slog.F("route", r.URL.Path),
-			slog.F("action", action),
-		)
-		return nil, err
-	}
-	return objects, nil
-}
-
-type HTTPAuthorizer struct {
-	Authorizer rbac.Authorizer
-	Logger     slog.Logger
-}
-
-// AuthorizeSQLFilter returns an authorization filter that can used in a
-// SQL 'WHERE' clause. If the filter is used, the resulting rows returned
-// from postgres are already authorized, and the caller does not need to
-// call 'Authorize()' on the returned objects.
-// Note the authorization is only for the given action and object type.
-func (h *HTTPAuthorizer) AuthorizeSQLFilter(r *http.Request, action policy.Action, objectType string) (rbac.PreparedAuthorized, error) {
-	roles := httpmw.UserAuthorization(r.Context())
-	prepared, err := h.Authorizer.Prepare(r.Context(), roles, action, objectType)
-	if err != nil {
-		return nil, xerrors.Errorf("prepare filter: %w", err)
-	}
-
-	return prepared, nil
-}
-
-// Authorize will return false if the user is not authorized to do the action.
-// This function will log appropriately, but the caller must return an
-// error to the api client.
-// Eg:
-//
-//	if !h.Authorize(...) {
-//		httpapi.Forbidden(rw)
-//		return
-//	}
-func (h *HTTPAuthorizer) Authorize(r *http.Request, action policy.Action, object rbac.Objecter) bool {
-	roles := httpmw.UserAuthorization(r.Context())
-	err := h.Authorizer.Authorize(r.Context(), roles, action, object.RBACObject())
-	if err != nil {
-		// Log the errors for debugging
-		internalError := new(rbac.UnauthorizedError)
-		logger := h.Logger
-		if xerrors.As(err, internalError) {
-			logger = h.Logger.With(slog.F("internal_error", internalError.Internal()))
-		}
-		// Log information for debugging. This will be very helpful
-		// in the early days
-		logger.Warn(r.Context(), "requester is not authorized to access the object",
-			slog.F("roles", roles.SafeRoleNames()),
-			slog.F("actor_id", roles.ID),
-			slog.F("actor_name", roles),
-			slog.F("scope", roles.SafeScopeName()),
-			slog.F("route", r.URL.Path),
-			slog.F("action", action),
-			slog.F("object", object),
-		)
-
-		return false
-	}
-	return true
-}
@@ -1,4 +1,4 @@
-package insightsapi
+package coderd

 import (
 	"context"
@@ -34,16 +34,16 @@ const insightsTimeLayout = time.RFC3339
 // @Param tz_offset query int true "Time-zone offset (e.g. -2)"
 // @Success 200 {object} codersdk.DAUsResponse
 // @Router /insights/daus [get]
-func (a *API) DeploymentDAUs(rw http.ResponseWriter, r *http.Request) {
-	if !a.authorizer.Authorize(r, policy.ActionRead, rbac.ResourceDeploymentConfig) {
+func (api *API) deploymentDAUs(rw http.ResponseWriter, r *http.Request) {
+	if !api.Authorize(r, policy.ActionRead, rbac.ResourceDeploymentConfig) {
 		httpapi.Forbidden(rw)
 		return
 	}

-	a.DAUsForTemplates(rw, r, nil)
+	api.returnDAUsInternal(rw, r, nil)
 }

-func (a *API) DAUsForTemplates(rw http.ResponseWriter, r *http.Request, templateIDs []uuid.UUID) {
+func (api *API) returnDAUsInternal(rw http.ResponseWriter, r *http.Request, templateIDs []uuid.UUID) {
 	ctx := r.Context()

 	p := httpapi.NewQueryParamParser()
@@ -66,7 +66,7 @@ func (a *API) DAUsForTemplates(rw http.ResponseWriter, r *http.Request, template
 	// Always return 60 days of data (2 months).
 	sixtyDaysAgo := nextHourInLoc.In(loc).Truncate(24*time.Hour).AddDate(0, 0, -60)

-	rows, err := a.database.GetTemplateInsightsByInterval(ctx, database.GetTemplateInsightsByIntervalParams{
+	rows, err := api.Database.GetTemplateInsightsByInterval(ctx, database.GetTemplateInsightsByIntervalParams{
 		StartTime:    sixtyDaysAgo,
 		EndTime:      nextHourInLoc,
 		IntervalDays: 1,
@@ -107,7 +107,7 @@ func (a *API) DAUsForTemplates(rw http.ResponseWriter, r *http.Request, template
 // @Param template_ids query []string false "Template IDs" collectionFormat(csv)
 // @Success 200 {object} codersdk.UserActivityInsightsResponse
 // @Router /insights/user-activity [get]
-func (a *API) UserActivity(rw http.ResponseWriter, r *http.Request) {
+func (api *API) insightsUserActivity(rw http.ResponseWriter, r *http.Request) {
 	ctx := r.Context()

 	p := httpapi.NewQueryParamParser().
@@ -135,7 +135,7 @@ func (a *API) UserActivity(rw http.ResponseWriter, r *http.Request) {
 		return
 	}

-	rows, err := a.database.GetUserActivityInsights(ctx, database.GetUserActivityInsightsParams{
+	rows, err := api.Database.GetUserActivityInsights(ctx, database.GetUserActivityInsightsParams{
 		StartTime:   startTime,
 		EndTime:     endTime,
 		TemplateIDs: templateIDs,
@@ -210,7 +210,7 @@ func (a *API) UserActivity(rw http.ResponseWriter, r *http.Request) {
 // @Param template_ids query []string false "Template IDs" collectionFormat(csv)
 // @Success 200 {object} codersdk.UserLatencyInsightsResponse
 // @Router /insights/user-latency [get]
-func (a *API) UserLatency(rw http.ResponseWriter, r *http.Request) {
+func (api *API) insightsUserLatency(rw http.ResponseWriter, r *http.Request) {
 	ctx := r.Context()

 	p := httpapi.NewQueryParamParser().
@@ -238,7 +238,7 @@ func (a *API) UserLatency(rw http.ResponseWriter, r *http.Request) {
 		return
 	}

-	rows, err := a.database.GetUserLatencyInsights(ctx, database.GetUserLatencyInsightsParams{
+	rows, err := api.Database.GetUserLatencyInsights(ctx, database.GetUserLatencyInsightsParams{
 		StartTime:   startTime,
 		EndTime:     endTime,
 		TemplateIDs: templateIDs,
@@ -301,7 +301,7 @@ func (a *API) UserLatency(rw http.ResponseWriter, r *http.Request) {
 // @Param tz_offset query int true "Time-zone offset (e.g. -2)"
 // @Success 200 {object} codersdk.GetUserStatusCountsResponse
 // @Router /insights/user-status-counts [get]
-func (a *API) UserStatusCounts(rw http.ResponseWriter, r *http.Request) {
+func (api *API) insightsUserStatusCounts(rw http.ResponseWriter, r *http.Request) {
 	ctx := r.Context()

 	p := httpapi.NewQueryParamParser()
@@ -322,7 +322,7 @@ func (a *API) UserStatusCounts(rw http.ResponseWriter, r *http.Request) {
 	nextHourInLoc := dbtime.Now().Truncate(time.Hour).Add(time.Hour).In(loc)
 	sixtyDaysAgo := dbtime.StartOfDay(nextHourInLoc).AddDate(0, 0, -60)

-	rows, err := a.database.GetUserStatusCounts(ctx, database.GetUserStatusCountsParams{
+	rows, err := api.Database.GetUserStatusCounts(ctx, database.GetUserStatusCountsParams{
 		StartTime: sixtyDaysAgo,
 		EndTime:   nextHourInLoc,
 		// #nosec G115 - Interval value is small and fits in int32 (typically days or hours)
@@ -366,7 +366,7 @@ func (a *API) UserStatusCounts(rw http.ResponseWriter, r *http.Request) {
 // @Param template_ids query []string false "Template IDs" collectionFormat(csv)
 // @Success 200 {object} codersdk.TemplateInsightsResponse
 // @Router /insights/templates [get]
-func (a *API) Templates(rw http.ResponseWriter, r *http.Request) {
+func (api *API) insightsTemplates(rw http.ResponseWriter, r *http.Request) {
 	ctx := r.Context()

 	p := httpapi.NewQueryParamParser().
@@ -418,7 +418,7 @@ func (a *API) Templates(rw http.ResponseWriter, r *http.Request) {
 	eg.Go(func() error {
 		var err error
 		if interval != "" && slices.Contains(sections, codersdk.TemplateInsightsSectionIntervalReports) {
-			dailyUsage, err = a.database.GetTemplateInsightsByInterval(egCtx, database.GetTemplateInsightsByIntervalParams{
+			dailyUsage, err = api.Database.GetTemplateInsightsByInterval(egCtx, database.GetTemplateInsightsByIntervalParams{
 				StartTime:   startTime,
 				EndTime:     endTime,
 				TemplateIDs: templateIDs,
@@ -436,7 +436,7 @@ func (a *API) Templates(rw http.ResponseWriter, r *http.Request) {
 		}

 		var err error
-		usage, err = a.database.GetTemplateInsights(egCtx, database.GetTemplateInsightsParams{
+		usage, err = api.Database.GetTemplateInsights(egCtx, database.GetTemplateInsightsParams{
 			StartTime:   startTime,
 			EndTime:     endTime,
 			TemplateIDs: templateIDs,
@@ -452,7 +452,7 @@ func (a *API) Templates(rw http.ResponseWriter, r *http.Request) {
 		}

 		var err error
-		appUsage, err = a.database.GetTemplateAppInsights(egCtx, database.GetTemplateAppInsightsParams{
+		appUsage, err = api.Database.GetTemplateAppInsights(egCtx, database.GetTemplateAppInsightsParams{
 			StartTime:   startTime,
 			EndTime:     endTime,
 			TemplateIDs: templateIDs,
@@ -471,7 +471,7 @@ func (a *API) Templates(rw http.ResponseWriter, r *http.Request) {
 		}

 		var err error
-		parameterRows, err = a.database.GetTemplateParameterInsights(ctx, database.GetTemplateParameterInsightsParams{
+		parameterRows, err = api.Database.GetTemplateParameterInsights(ctx, database.GetTemplateParameterInsightsParams{
 			StartTime:   startTime,
 			EndTime:     endTime,
 			TemplateIDs: templateIDs,
@@ -1,4 +1,4 @@
-package insightsapi
+package coderd

 import (
 	"context"
@@ -1,13 +1,11 @@
-package insightsapi_test
+package coderd_test

 import (
 	"context"
 	"encoding/json"
-	"flag"
 	"fmt"
 	"io"
 	"net/http"
 	"net/http/httptest"
-	"os"
 	"path/filepath"
 	"strings"
@@ -18,12 +16,9 @@ import (
 	"github.com/google/uuid"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	"go.uber.org/goleak"
-	"go.uber.org/mock/gomock"

-	"cdr.dev/slog"
 	"cdr.dev/slog/sloggers/slogtest"

 	"github.com/coder/coder/v2/agent/agenttest"
 	agentproto "github.com/coder/coder/v2/agent/proto"
 	"github.com/coder/coder/v2/coderd/coderdtest"
@@ -33,8 +28,6 @@ import (
 	"github.com/coder/coder/v2/coderd/database/dbgen"
 	"github.com/coder/coder/v2/coderd/database/dbrollup"
 	"github.com/coder/coder/v2/coderd/database/dbtestutil"
-	"github.com/coder/coder/v2/coderd/httpauthz"
-	"github.com/coder/coder/v2/coderd/insightsapi"
 	"github.com/coder/coder/v2/coderd/rbac"
 	"github.com/coder/coder/v2/coderd/workspaceapps"
 	"github.com/coder/coder/v2/coderd/workspacestats"
@@ -46,13 +39,6 @@
 	"github.com/coder/coder/v2/testutil"
 )

-// updateGoldenFiles is a flag that can be set to update golden files.
-var updateGoldenFiles = flag.Bool("update", false, "Update golden files")
-
-func TestMain(m *testing.M) {
-	goleak.VerifyTestMain(m, testutil.GoleakOptions...)
-}
-
 func TestDeploymentInsights(t *testing.T) {
 	t.Parallel()
@@ -156,52 +142,6 @@ func TestDeploymentInsights(t *testing.T) {
 	}
 }

-func TestDeploymentDAUs(t *testing.T) {
-	t.Parallel()
-	ctx := testutil.Context(t, testutil.WaitShort)
-	logger := testutil.Logger(t).Leveled(slog.LevelDebug)
-	tzOffset := 4
-	loc := time.FixedZone("", tzOffset*3600)
-
-	_, mDB, authDB, auth := coderdtest.MockedDatabaseWithAuthz(t, logger)
-	mDB.EXPECT().GetTemplateInsightsByInterval(gomock.Any(), gomock.Cond(func(arg database.GetTemplateInsightsByIntervalParams) bool {
-		return len(arg.TemplateIDs) == 0 &&
-			arg.IntervalDays == 1
-	})).
-		Times(1).
-		Return([]database.GetTemplateInsightsByIntervalRow{
-			{
-				StartTime:   time.Date(2025, 12, 1, 0, 0, 0, 0, loc),
-				EndTime:     time.Date(2025, 12, 1, 23, 59, 59, 0, loc),
-				ActiveUsers: 3,
-			},
-			{
-				StartTime:   time.Date(2025, 12, 2, 0, 0, 0, 0, loc),
-				EndTime:     time.Date(2025, 12, 2, 23, 59, 59, 0, loc),
-				ActiveUsers: 30,
-			},
-		}, nil)
-
-	uut := insightsapi.NewAPI(logger, authDB, &httpauthz.HTTPAuthorizer{
-		Authorizer: auth,
-		Logger:     logger.Named("httpauth"),
-	})
-
-	rw := httptest.NewRecorder()
-	reqCtx := dbauthz.As(ctx, coderdtest.OwnerSubject())
-	req := httptest.NewRequestWithContext(reqCtx, http.MethodGet, "/api/v2/insights/daus?tz_offset=4", nil)
-	uut.DeploymentDAUs(rw, req)
-	daus, err := codersdk.DecodeResponse[codersdk.DAUsResponse](rw.Result())
-	require.NoError(t, err)
-	require.Equal(t, codersdk.DAUsResponse{
-		Entries: []codersdk.DAUEntry{
-			{Date: "2025-12-01", Amount: 3},
-			{Date: "2025-12-02", Amount: 30},
-		},
-		TZHourOffset: 4,
-	}, daus)
-}
-
 func TestUserActivityInsights_SanityCheck(t *testing.T) {
 	t.Parallel()
@@ -1,27 +0,0 @@
-package insightsapi
-
-import (
-	"cdr.dev/slog"
-
-	"github.com/coder/coder/v2/coderd/database"
-	"github.com/coder/coder/v2/coderd/httpauthz"
-)
-
-type API struct {
-	logger     slog.Logger
-	authorizer *httpauthz.HTTPAuthorizer
-	database   database.Store
-}
-
-func NewAPI(
-	logger slog.Logger,
-	db database.Store,
-	authorizer *httpauthz.HTTPAuthorizer,
-) *API {
-	a := &API{
-		logger:     logger.Named("insightsapi"),
-		authorizer: authorizer,
-		database:   db,
-	}
-	return a
-}
@@ -87,7 +87,9 @@ func (c *Cache) refreshTemplateBuildTimes(ctx context.Context) error {
 	//nolint:gocritic // This is a system service.
 	ctx = dbauthz.AsSystemRestricted(ctx)

-	templates, err := c.database.GetTemplates(ctx)
+	templates, err := c.database.GetTemplatesWithFilter(ctx, database.GetTemplatesWithFilterParams{
+		Deleted: false,
+	})
 	if err != nil {
 		return err
 	}
@@ -16,6 +16,8 @@ import (
 	agentproto "github.com/coder/coder/v2/agent/proto"
 	"github.com/coder/coder/v2/coderd/agentmetrics"
 	"github.com/coder/coder/v2/coderd/pproflabel"
+
+	"github.com/coder/quartz"
 )

 const (

@@ -47,6 +49,7 @@ type MetricsAggregator struct {

 	log                    slog.Logger
 	metricsCleanupInterval time.Duration
+	clock                  quartz.Clock

 	collectCh chan (chan []prometheus.Metric)
 	updateCh  chan updateRequest

@@ -151,7 +154,7 @@ func (am *annotatedMetric) shallowCopy() annotatedMetric {
 	}
 }

-func NewMetricsAggregator(logger slog.Logger, registerer prometheus.Registerer, duration time.Duration, aggregateByLabels []string) (*MetricsAggregator, error) {
+func NewMetricsAggregator(logger slog.Logger, registerer prometheus.Registerer, duration time.Duration, aggregateByLabels []string, options ...func(*MetricsAggregator)) (*MetricsAggregator, error) {
 	metricsCleanupInterval := defaultMetricsCleanupInterval
 	if duration > 0 {
 		metricsCleanupInterval = duration

@@ -192,9 +195,10 @@ func NewMetricsAggregator(logger slog.Logger, registerer prometheus.Registerer,
 		return nil, err
 	}

-	return &MetricsAggregator{
+	ma := &MetricsAggregator{
 		log:                    logger.Named(loggerName),
 		metricsCleanupInterval: metricsCleanupInterval,
+		clock:                  quartz.NewReal(),

 		store: map[metricKey]annotatedMetric{},

@@ -206,7 +210,19 @@ func NewMetricsAggregator(logger slog.Logger, registerer prometheus.Registerer,
 		cleanupHistogram: cleanupHistogram,

 		aggregateByLabels: aggregateByLabels,
-	}, nil
+	}
+
+	for _, option := range options {
+		option(ma)
+	}
+
+	return ma, nil
 }
+
+func WithClock(clock quartz.Clock) func(*MetricsAggregator) {
+	return func(ma *MetricsAggregator) {
+		ma.clock = clock
+	}
+}

 // labelAggregator is used to control cardinality of collected Prometheus metrics by pre-aggregating series based on given labels.
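Note: WithClock is the standard functional-options pattern: NewMetricsAggregator now takes variadic option funcs that run after defaults are set, so production callers keep quartz.NewReal() while tests inject a mock. A generic sketch of the shape (names are illustrative, not the coder API):

package example

import "github.com/coder/quartz"

// Aggregator mirrors the option-taking constructor above: defaults are
// assigned first, then each option is applied in order, so the last
// writer wins.
type Aggregator struct {
	clock quartz.Clock
}

type Option func(*Aggregator)

func WithAggregatorClock(clock quartz.Clock) Option {
	return func(a *Aggregator) { a.clock = clock }
}

func NewAggregator(opts ...Option) *Aggregator {
	a := &Aggregator{clock: quartz.NewReal()} // sensible default
	for _, opt := range opts {
		opt(a)
	}
	return a
}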
@@ -349,7 +365,7 @@ func (ma *MetricsAggregator) Run(ctx context.Context) func() {
 			ma.log.Debug(ctx, "clean expired metrics")

 			timer := prometheus.NewTimer(ma.cleanupHistogram)
-			now := time.Now()
+			now := ma.clock.Now()

 			for key, val := range ma.store {
 				if now.After(val.expiryDate) {

@@ -407,7 +423,7 @@ func (ma *MetricsAggregator) getOrCreateDesc(name string, help string, baseLabel
 	}
 	key := cacheKeyForDesc(name, baseLabelNames, extraLabels)
 	if d, ok := ma.descCache[key]; ok {
-		d.lastUsed = time.Now()
+		d.lastUsed = ma.clock.Now()
 		ma.descCache[key] = d
 		return d.desc
 	}

@@ -419,7 +435,7 @@ func (ma *MetricsAggregator) getOrCreateDesc(name string, help string, baseLabel
 		labels[nBase+i] = l.Name
 	}
 	d := prometheus.NewDesc(name, help, labels, nil)
-	ma.descCache[key] = descCacheEntry{d, time.Now()}
+	ma.descCache[key] = descCacheEntry{d, ma.clock.Now()}
 	return d
 }

@@ -497,7 +513,7 @@ func (ma *MetricsAggregator) Update(ctx context.Context, labels AgentMetricLabel
 		templateName: labels.TemplateName,
 		metrics:      metrics,

-		timestamp: time.Now(),
+		timestamp: ma.clock.Now(),
 	}:
 	case <-ctx.Done():
 		ma.log.Debug(ctx, "update request is canceled")

@@ -508,7 +524,7 @@ func (ma *MetricsAggregator) Update(ctx context.Context, labels AgentMetricLabel

 // Move to a function for testability
 func (ma *MetricsAggregator) cleanupDescCache() {
-	now := time.Now()
+	now := ma.clock.Now()
 	for key, entry := range ma.descCache {
 		if now.Sub(entry.lastUsed) > ma.metricsCleanupInterval {
 			delete(ma.descCache, key)
@@ -11,6 +11,7 @@ import (
 	agentproto "github.com/coder/coder/v2/agent/proto"
 	"github.com/coder/coder/v2/coderd/agentmetrics"
 	"github.com/coder/coder/v2/testutil"
+	"github.com/coder/quartz"
 )

 func TestDescCache_DescExpire(t *testing.T) {

@@ -62,8 +63,9 @@ func TestDescCache_DescExpire(t *testing.T) {
 func TestDescCacheTimestampUpdate(t *testing.T) {
 	t.Parallel()

+	mClock := quartz.NewMock(t)
 	registry := prometheus.NewRegistry()
-	ma, err := NewMetricsAggregator(slogtest.Make(t, nil), registry, time.Hour, nil)
+	ma, err := NewMetricsAggregator(slogtest.Make(t, nil), registry, time.Hour, nil, WithClock(mClock))
 	require.NoError(t, err)

 	baseLabelNames := []string{"label1", "label2"}

@@ -78,6 +80,9 @@ func TestDescCacheTimestampUpdate(t *testing.T) {
 	initialEntry := ma.descCache[key]
 	initialTime := initialEntry.lastUsed

+	// Advance the mock clock to ensure a different timestamp
+	mClock.Advance(time.Second)
+
 	desc2 := ma.getOrCreateDesc("test_metric", "help text", baseLabelNames, extraLabels)
 	require.NotNil(t, desc2)
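Note: this test no longer depends on wall-clock time: the time.Now() calls inside the aggregator were replaced with ma.clock.Now(), so advancing the injected mock is enough to observe a later lastUsed timestamp without sleeping. A minimal self-contained sketch using only the quartz calls already visible in this diff:

package example

import (
	"testing"
	"time"

	"github.com/coder/quartz"
)

// TestMockClockAdvance shows the deterministic-time idiom: pin a mock
// clock, read it, advance it, and assert a strictly later reading, with
// no real sleeping involved.
func TestMockClockAdvance(t *testing.T) {
	t.Parallel()

	mClock := quartz.NewMock(t)
	first := mClock.Now()
	mClock.Advance(time.Second)
	if !mClock.Now().After(first) {
		t.Fatal("mock clock did not advance")
	}
}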
@@ -648,9 +648,9 @@ func (a RegoAuthorizer) newPartialAuthorizer(ctx context.Context, subject Subjec
 	return pAuth, nil
 }

-// SQLAuthorizeFilter is a compiled partial query that can be converted to SQL.
+// AuthorizeFilter is a compiled partial query that can be converted to SQL.
 // This allows enforcing the policy on the database side in a WHERE clause.
-type SQLAuthorizeFilter interface {
+type AuthorizeFilter interface {
 	SQLString() string
 }

@@ -681,7 +681,7 @@ func ConfigWorkspaces() regosql.ConvertConfig {
 	}
 }

-func Compile(cfg regosql.ConvertConfig, pa *PartialAuthorizer) (SQLAuthorizeFilter, error) {
+func Compile(cfg regosql.ConvertConfig, pa *PartialAuthorizer) (AuthorizeFilter, error) {
 	root, err := regosql.ConvertRegoAst(cfg, pa.partialQueries)
 	if err != nil {
 		return nil, xerrors.Errorf("convert rego ast: %w", err)

@@ -1 +0,0 @@
-package rbac
@@ -994,7 +994,7 @@ func (api *API) notifyUsersOfTemplateDeprecation(ctx context.Context, template d
 func (api *API) templateDAUs(rw http.ResponseWriter, r *http.Request) {
 	template := httpmw.TemplateParam(r)

-	api.insightsAPI.DAUsForTemplates(rw, r, []uuid.UUID{template.ID})
+	api.returnDAUsInternal(rw, r, []uuid.UUID{template.ID})
 }

 // @Summary Get template examples by organization
Some files were not shown because too many files have changed in this diff.