Compare commits

...

4 Commits

Author SHA1 Message Date
Cian Johnston 31c4bf39f9 fix: address copilot review feedback on testutil.Eventually usage
- Wrap testutil.Eventually return with require.True in Await* helpers
  (coderdtest.go) so tests fail immediately on timeout instead of
  returning zero-value structs.
- Replace testutil.Context(t, ...) inside polling loops with
  context.WithTimeout(ctx, ...) to avoid unbounded t.Cleanup
  accumulation (integration.go, workspaceproxy_test.go).
- Wrap testutil.Eventually return with require.True where variables
  populated inside the condition are used unconditionally afterward
  (workspaceagents_test.go, notifications_test.go).
2026-03-26 09:16:01 +00:00
Cian Johnston c74a5718bb fix(cli): use fresh context in TestCloserStack_Context
The test cancels ctx to trigger the closer stack's context handler,
but then passed that already-canceled ctx to testutil.Eventually.
Unlike the old require.Eventually, testutil.Eventually respects
context cancellation and fails immediately on a canceled context.

Use a separate waitCtx for the Eventually call.
2026-03-25 23:32:58 +00:00
Cian Johnston ac7a760e06 refactor: add lint rule to detect require/assert.Eventually usage
Add a useTestutilEventually ruleguard rule to scripts/rules.go that
flags any usage of require.Eventually, require.Eventuallyf,
assert.Eventually, or assert.Eventuallyf and directs developers to
use testutil.Eventually instead.

Also clean up the now-redundant require/assert.Eventually magic
number checks from useStandardTimeoutsAndDelaysInTests since the
new rule catches all usage of those functions.
2026-03-25 23:16:14 +00:00
Cian Johnston 42b5e8d257 refactor: banish require.Eventually to the shadow realm
Replace all 286 occurrences of require.Eventually, assert.Eventually,
require.Eventuallyf, and assert.Eventuallyf with the context-aware
testutil.Eventually across 83 files.

testutil.Eventually is superior because it:
- Takes a context.Context with a deadline instead of a bare timeout
- Runs the condition function inline (not in a goroutine) so
  require.* calls inside don't cause data races
- Passes the context to the condition so it can be used for
  cancellation-aware operations

Also updates stale comments referencing the old function names and
fixes the doWithRetries/requestWithRetries signatures in apptest to
accept context properly.
2026-03-25 22:57:22 +00:00
84 changed files with 801 additions and 666 deletions
+71 -68
View File
@@ -152,7 +152,7 @@ func TestAgent_Stats_SSH(t *testing.T) {
// We are looking for four different stats to be reported. They might not all
// arrive at the same time, so we loop until we've seen them all.
var connectionCountSeen, rxBytesSeen, txBytesSeen, sessionCountSSHSeen bool
require.Eventuallyf(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
var ok bool
s, ok = <-stats
if !ok {
@@ -171,10 +171,7 @@ func TestAgent_Stats_SSH(t *testing.T) {
sessionCountSSHSeen = true
}
return connectionCountSeen && rxBytesSeen && txBytesSeen && sessionCountSSHSeen
}, testutil.WaitLong, testutil.IntervalFast,
"never saw all stats: %+v, saw connectionCount: %t, rxBytes: %t, txBytes: %t, sessionCountSsh: %t",
s, connectionCountSeen, rxBytesSeen, txBytesSeen, sessionCountSSHSeen,
)
}, testutil.IntervalFast, "never saw all stats: %+v, saw connectionCount: %t, rxBytes: %t, txBytes: %t, sessionCountSsh: %t", s, connectionCountSeen, rxBytesSeen, txBytesSeen, sessionCountSSHSeen)
_, err = stdin.Write([]byte("exit 0\n"))
require.NoError(t, err, "writing exit to stdin")
_ = stdin.Close()
@@ -208,7 +205,7 @@ func TestAgent_Stats_ReconnectingPTY(t *testing.T) {
// We are looking for four different stats to be reported. They might not all
// arrive at the same time, so we loop until we've seen them all.
var connectionCountSeen, rxBytesSeen, txBytesSeen, sessionCountReconnectingPTYSeen bool
require.Eventuallyf(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
var ok bool
s, ok = <-stats
if !ok {
@@ -227,10 +224,7 @@ func TestAgent_Stats_ReconnectingPTY(t *testing.T) {
sessionCountReconnectingPTYSeen = true
}
return connectionCountSeen && rxBytesSeen && txBytesSeen && sessionCountReconnectingPTYSeen
}, testutil.WaitLong, testutil.IntervalFast,
"never saw all stats: %+v, saw connectionCount: %t, rxBytes: %t, txBytes: %t, sessionCountReconnectingPTY: %t",
s, connectionCountSeen, rxBytesSeen, txBytesSeen, sessionCountReconnectingPTYSeen,
)
}, testutil.IntervalFast, "never saw all stats: %+v, saw connectionCount: %t, rxBytes: %t, txBytes: %t, sessionCountReconnectingPTY: %t", s, connectionCountSeen, rxBytesSeen, txBytesSeen, sessionCountReconnectingPTYSeen)
}
func TestAgent_Stats_Magic(t *testing.T) {
@@ -280,7 +274,7 @@ func TestAgent_Stats_Magic(t *testing.T) {
require.NoError(t, err)
err = session.Shell()
require.NoError(t, err)
require.Eventuallyf(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
s, ok := <-stats
t.Logf("got stats: ok=%t, ConnectionCount=%d, RxBytes=%d, TxBytes=%d, SessionCountVSCode=%d, ConnectionMedianLatencyMS=%f",
ok, s.ConnectionCount, s.RxBytes, s.TxBytes, s.SessionCountVscode, s.ConnectionMedianLatencyMs)
@@ -291,9 +285,7 @@ func TestAgent_Stats_Magic(t *testing.T) {
// Ensure that connection latency is being counted!
// If it isn't, it's set to -1.
s.ConnectionMedianLatencyMs >= 0
}, testutil.WaitLong, testutil.IntervalFast,
"never saw stats",
)
}, testutil.IntervalFast, "never saw stats")
_, err = stdin.Write([]byte("exit 0\n"))
require.NoError(t, err, "writing exit to stdin")
@@ -350,29 +342,25 @@ func TestAgent_Stats_Magic(t *testing.T) {
_ = tunneledConn.Close()
})
require.Eventuallyf(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
s, ok := <-stats
t.Logf("got stats with conn open: ok=%t, ConnectionCount=%d, SessionCountJetBrains=%d",
ok, s.ConnectionCount, s.SessionCountJetbrains)
return ok && s.SessionCountJetbrains == 1
}, testutil.WaitLong, testutil.IntervalFast,
"never saw stats with conn open",
)
}, testutil.IntervalFast, "never saw stats with conn open")
// Kill the server and connection after checking for the echo.
requireEcho(t, tunneledConn)
_ = echoServerCmd.Process.Kill()
_ = tunneledConn.Close()
require.Eventuallyf(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
s, ok := <-stats
t.Logf("got stats after disconnect %t, %d",
ok, s.SessionCountJetbrains)
return ok &&
s.SessionCountJetbrains == 0
}, testutil.WaitLong, testutil.IntervalFast,
"never saw stats after conn closes",
)
}, testutil.IntervalFast, "never saw stats after conn closes")
assertConnectionReport(t, agentClient, proto.Connection_JETBRAINS, 0, "")
})
@@ -1387,21 +1375,23 @@ func TestAgent_Metadata(t *testing.T) {
})
var gotMd map[string]agentsdk.Metadata
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
gotMd = client.GetMetadata()
return len(gotMd) == 2
}, testutil.WaitShort, testutil.IntervalFast/2)
}, testutil.IntervalFast/2)
collectedAt1 := gotMd["greeting1"].CollectedAt
collectedAt2 := gotMd["greeting2"].CollectedAt
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
gotMd = client.GetMetadata()
if len(gotMd) != 2 {
panic("unexpected number of metadata")
}
return !gotMd["greeting2"].CollectedAt.Equal(collectedAt2)
}, testutil.WaitShort, testutil.IntervalFast/2)
}, testutil.IntervalFast/2)
require.Equal(t, gotMd["greeting1"].CollectedAt, collectedAt1, "metadata should not be collected again")
})
@@ -1423,18 +1413,20 @@ func TestAgent_Metadata(t *testing.T) {
})
var gotMd map[string]agentsdk.Metadata
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
gotMd = client.GetMetadata()
return len(gotMd) == 1
}, testutil.WaitShort, testutil.IntervalFast/2)
}, testutil.IntervalFast/2)
collectedAt1 := gotMd["greeting"].CollectedAt
require.Equal(t, "hello", strings.TrimSpace(gotMd["greeting"].Value))
if !assert.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitShort)
if !testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
gotMd = client.GetMetadata()
return gotMd["greeting"].CollectedAt.After(collectedAt1)
}, testutil.WaitShort, testutil.IntervalFast/2) {
}, testutil.IntervalFast/2) {
t.Fatalf("expected metadata to be collected again")
}
})
@@ -1475,9 +1467,10 @@ func TestAgentMetadata_Timing(t *testing.T) {
opts.ReportMetadataInterval = intervalUnit
})
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return len(client.GetMetadata()) == 2
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
for start := time.Now(); time.Since(start) < testutil.WaitMedium; time.Sleep(testutil.IntervalMedium) {
md := client.GetMetadata()
@@ -1536,10 +1529,11 @@ func TestAgent_Lifecycle(t *testing.T) {
}
var got []codersdk.WorkspaceAgentLifecycle
assert.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
got = client.GetLifecycleStates()
return slices.Contains(got, want[len(want)-1])
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
require.Equal(t, want, got[:len(want)])
})
@@ -1561,10 +1555,11 @@ func TestAgent_Lifecycle(t *testing.T) {
}
var got []codersdk.WorkspaceAgentLifecycle
assert.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
got = client.GetLifecycleStates()
return slices.Contains(got, want[len(want)-1])
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
require.Equal(t, want, got[:len(want)])
})
@@ -1586,10 +1581,11 @@ func TestAgent_Lifecycle(t *testing.T) {
}
var got []codersdk.WorkspaceAgentLifecycle
assert.Eventually(t, func() bool {
ctx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
got = client.GetLifecycleStates()
return len(got) > 0 && got[len(got)-1] == want[len(want)-1]
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
require.Equal(t, want, got)
})
@@ -1605,9 +1601,10 @@ func TestAgent_Lifecycle(t *testing.T) {
}},
}, 0)
assert.Eventually(t, func() bool {
ctx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
return slices.Contains(client.GetLifecycleStates(), codersdk.WorkspaceAgentLifecycleReady)
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
// Start close asynchronously so that we an inspect the state.
done := make(chan struct{})
@@ -1627,11 +1624,11 @@ func TestAgent_Lifecycle(t *testing.T) {
}
var got []codersdk.WorkspaceAgentLifecycle
assert.Eventually(t, func() bool {
ctx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
got = client.GetLifecycleStates()
return slices.Contains(got, want[len(want)-1])
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
require.Equal(t, want, got[:len(want)])
})
@@ -1646,9 +1643,10 @@ func TestAgent_Lifecycle(t *testing.T) {
}},
}, 0)
assert.Eventually(t, func() bool {
ctx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
return slices.Contains(client.GetLifecycleStates(), codersdk.WorkspaceAgentLifecycleReady)
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
// Start close asynchronously so that we an inspect the state.
done := make(chan struct{})
@@ -1669,10 +1667,11 @@ func TestAgent_Lifecycle(t *testing.T) {
}
var got []codersdk.WorkspaceAgentLifecycle
assert.Eventually(t, func() bool {
ctx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
got = client.GetLifecycleStates()
return slices.Contains(got, want[len(want)-1])
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
require.Equal(t, want, got[:len(want)])
})
@@ -1688,9 +1687,10 @@ func TestAgent_Lifecycle(t *testing.T) {
}},
}, 0)
assert.Eventually(t, func() bool {
ctx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
return slices.Contains(client.GetLifecycleStates(), codersdk.WorkspaceAgentLifecycleReady)
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
// Start close asynchronously so that we an inspect the state.
done := make(chan struct{})
@@ -1711,10 +1711,11 @@ func TestAgent_Lifecycle(t *testing.T) {
}
var got []codersdk.WorkspaceAgentLifecycle
assert.Eventually(t, func() bool {
ctx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
got = client.GetLifecycleStates()
return slices.Contains(got, want[len(want)-1])
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
require.Equal(t, want, got[:len(want)])
})
@@ -1759,7 +1760,8 @@ func TestAgent_Lifecycle(t *testing.T) {
// agent.Close() loads the shutdown script from the agent metadata.
// The metadata is populated just before execution of the startup script, so it's mandatory to wait
// until the startup starts.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
outputPath := filepath.Join(os.TempDir(), "coder-startup-script.log")
content, err := afero.ReadFile(fs, outputPath)
if err != nil {
@@ -1767,7 +1769,7 @@ func TestAgent_Lifecycle(t *testing.T) {
return false
}
return len(content) > 0 // something is in the startup log file
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
// In order to avoid shutting down the agent before it is fully started and triggering
// errors, we'll wait until the agent is fully up. It's a bit hokey, but among the last things the agent starts
@@ -2026,11 +2028,10 @@ func TestAgent_ReconnectingPTYContainer(t *testing.T) {
require.NoError(t, err, "Could not stop container")
}()
// Wait for container to start
require.Eventually(t, func() bool {
testutil.Eventually(testutil.Context(t, testutil.WaitShort), t, func(ctx context.Context) bool {
ct, ok := pool.ContainerByName(ct.Container.Name)
return ok && ct.Container.State.Running
}, testutil.WaitShort, testutil.IntervalSlow, "Container did not start in time")
}, testutil.IntervalSlow, "Container did not start in time")
// nolint: dogsled
conn, _, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
o.Devcontainers = true
@@ -2259,7 +2260,8 @@ func TestAgent_DevcontainerAutostart(t *testing.T) {
t.Logf("Waiting for container with label: devcontainer.local_folder=%s", tempWorkspaceFolder)
var container docker.APIContainers
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitSuperLong)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
containers, err := pool.Client.ListContainers(docker.ListContainersOptions{All: true})
if err != nil {
t.Logf("Error listing containers: %v", err)
@@ -2278,7 +2280,7 @@ func TestAgent_DevcontainerAutostart(t *testing.T) {
}
return false
}, testutil.WaitSuperLong, testutil.IntervalMedium, "no container with workspace folder label found")
}, testutil.IntervalMedium, "no container with workspace folder label found")
defer func() {
// We can't rely on pool here because the container is not
// managed by it (it is managed by @devcontainer/cli).
@@ -2958,7 +2960,7 @@ func TestAgent_UpdatedDERP(t *testing.T) {
require.NoError(t, err)
t.Log("pushed DERPMap update to agent")
require.Eventually(t, func() bool {
testutil.Eventually(testutil.Context(t, testutil.WaitShort), t, func(ctx context.Context) bool {
conn := uut.TailnetConn()
if conn == nil {
return false
@@ -2967,7 +2969,7 @@ func TestAgent_UpdatedDERP(t *testing.T) {
preferredDERP := conn.Node().PreferredDERP
t.Logf("agent Conn DERPMap with regionIDs %v, PreferredDERP %d", regionIDs, preferredDERP)
return len(regionIDs) == 1 && regionIDs[0] == 2 && preferredDERP == 2
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
t.Log("agent got the new DERPMap")
// Connect from a second client and make sure it uses the new DERP map.
@@ -3076,9 +3078,9 @@ func TestAgent_ReconnectNoLifecycleReemit(t *testing.T) {
defer closer.Close()
// Wait for the agent to reach Ready state.
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
return slices.Contains(client.GetLifecycleStates(), codersdk.WorkspaceAgentLifecycleReady)
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
statesBefore := slices.Clone(client.GetLifecycleStates())
@@ -3127,10 +3129,10 @@ func TestAgent_WriteVSCodeConfigs(t *testing.T) {
home, err := os.UserHomeDir()
require.NoError(t, err)
name := filepath.Join(home, ".vscode-server", "data", "Machine", "settings.json")
require.Eventually(t, func() bool {
testutil.Eventually(testutil.Context(t, testutil.WaitShort), t, func(ctx context.Context) bool {
_, err := filesystem.Stat(name)
return err == nil
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
}
func TestAgent_DebugServer(t *testing.T) {
@@ -3713,7 +3715,7 @@ func TestAgent_Metrics_SSH(t *testing.T) {
}
var actual []*promgo.MetricFamily
assert.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
actual, err = registry.Gather()
if err != nil {
return false
@@ -3723,7 +3725,7 @@ func TestAgent_Metrics_SSH(t *testing.T) {
count += len(m.GetMetric())
}
return count == len(expected)
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
i := 0
for _, mf := range actual {
@@ -3786,10 +3788,11 @@ func assertConnectionReport(t testing.TB, agentClient *agenttest.Client, connect
t.Helper()
var reports []*proto.ReportConnectionRequest
if !assert.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
if !testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
reports = agentClient.GetConnectionReports()
return len(reports) >= 2
}, testutil.WaitMedium, testutil.IntervalFast, "waiting for 2 connection reports or more; got %d", len(reports)) {
}, testutil.IntervalFast, "waiting for 2 connection reports or more; got %d", len(reports)) {
return
}
+13 -12
View File
@@ -3937,12 +3937,12 @@ func TestAPI(t *testing.T) {
Op: fsnotify.Write,
})
require.Eventuallyf(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
err = api.RefreshContainers(ctx)
require.NoError(t, err)
return len(fakeSAC.agents) == 1
}, testutil.WaitShort, testutil.IntervalFast, "subagent should be created after config change")
}, testutil.IntervalFast, "subagent should be created after config change")
t.Log("Phase 2: Cont, waiting for sub agent to exit")
exitSubAgentOnce.Do(func() {
@@ -3977,12 +3977,12 @@ func TestAPI(t *testing.T) {
Op: fsnotify.Write,
})
require.Eventuallyf(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
err = api.RefreshContainers(ctx)
require.NoError(t, err)
return len(fakeSAC.agents) == 0
}, testutil.WaitShort, testutil.IntervalFast, "subagent should be deleted after config change")
}, testutil.IntervalFast, "subagent should be deleted after config change")
req = httptest.NewRequest(http.MethodGet, "/", nil).WithContext(ctx)
rec = httptest.NewRecorder()
@@ -4544,7 +4544,8 @@ func TestDevcontainerDiscovery(t *testing.T) {
tickerTrap.Close()
// Wait until all projects have been discovered
require.Eventuallyf(t, func() bool {
ctx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
req := httptest.NewRequest(http.MethodGet, "/", nil).WithContext(ctx)
rec := httptest.NewRecorder()
r.ServeHTTP(rec, req)
@@ -4554,7 +4555,7 @@ func TestDevcontainerDiscovery(t *testing.T) {
require.NoError(t, err)
return len(got.Devcontainers) >= len(tt.expected)
}, testutil.WaitShort, testutil.IntervalFast, "dev containers never found")
}, testutil.IntervalFast, "dev containers never found")
// Now projects have been discovered, we'll allow the updater loop
// to set the appropriate status for these containers.
@@ -4736,7 +4737,6 @@ func TestDevcontainerDiscovery(t *testing.T) {
t.Parallel()
var (
ctx = testutil.Context(t, testutil.WaitShort)
logger = testutil.Logger(t)
mClock = quartz.NewMock(t)
@@ -4772,7 +4772,8 @@ func TestDevcontainerDiscovery(t *testing.T) {
// Given: We allow the discover routing to progress
var got codersdk.WorkspaceAgentListContainersResponse
require.Eventuallyf(t, func() bool {
ctx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
req := httptest.NewRequest(http.MethodGet, "/", nil).WithContext(ctx)
rec := httptest.NewRecorder()
r.ServeHTTP(rec, req)
@@ -4786,7 +4787,7 @@ func TestDevcontainerDiscovery(t *testing.T) {
upCalledMu.Unlock()
return len(got.Devcontainers) >= tt.expectDevcontainerCount && upCalledCount >= tt.expectUpCalledCount
}, testutil.WaitShort, testutil.IntervalFast, "dev containers never found")
}, testutil.IntervalFast, "dev containers never found")
// Close the API. We expect this not to fail because we should have finished
// at this point.
@@ -4812,7 +4813,6 @@ func TestDevcontainerDiscovery(t *testing.T) {
t.Run("Disabled", func(t *testing.T) {
t.Parallel()
var (
ctx = testutil.Context(t, testutil.WaitShort)
logger = testutil.Logger(t)
mClock = quartz.NewMock(t)
mDCCLI = acmock.NewMockDevcontainerCLI(gomock.NewController(t))
@@ -4863,7 +4863,8 @@ func TestDevcontainerDiscovery(t *testing.T) {
r.Mount("/", api.Routes())
// When: All expected dev containers have been found.
require.Eventuallyf(t, func() bool {
ctx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
req := httptest.NewRequest(http.MethodGet, "/", nil).WithContext(ctx)
rec := httptest.NewRecorder()
r.ServeHTTP(rec, req)
@@ -4873,7 +4874,7 @@ func TestDevcontainerDiscovery(t *testing.T) {
require.NoError(t, err)
return len(got.Devcontainers) >= 1
}, testutil.WaitShort, testutil.IntervalFast, "dev containers never found")
}, testutil.IntervalFast, "dev containers never found")
// Then: We expect the mock infra to not fail.
})
@@ -1,6 +1,7 @@
package agentcontainers_test
import (
"context"
"os"
"path/filepath"
"runtime"
@@ -49,10 +50,11 @@ func TestIntegrationDockerCLI(t *testing.T) {
})
// Wait for container to start.
require.Eventually(t, func() bool {
ctx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
ct, ok := pool.ContainerByName(ct.Container.Name)
return ok && ct.Container.State.Running
}, testutil.WaitShort, testutil.IntervalSlow, "Container did not start in time")
}, testutil.IntervalSlow, "Container did not start in time")
dcli := agentcontainers.NewDockerCLI(agentexec.DefaultExecer)
containerName := strings.TrimPrefix(ct.Container.Name, "/")
@@ -159,10 +161,10 @@ func TestIntegrationDockerCLIStop(t *testing.T) {
})
// Given: The container is running
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
ct, ok := pool.ContainerByName(ct.Container.Name)
return ok && ct.Container.State.Running
}, testutil.WaitShort, testutil.IntervalSlow, "Container did not start in time")
}, testutil.IntervalSlow, "Container did not start in time")
dcli := agentcontainers.NewDockerCLI(agentexec.DefaultExecer)
containerName := strings.TrimPrefix(ct.Container.Name, "/")
@@ -207,10 +209,10 @@ func TestIntegrationDockerCLIRemove(t *testing.T) {
containerName := strings.TrimPrefix(ct.Container.Name, "/")
// Wait for the container to exit.
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
ct, ok := pool.ContainerByName(ct.Container.Name)
return ok && !ct.Container.State.Running
}, testutil.WaitShort, testutil.IntervalSlow, "Container did not stop in time")
}, testutil.IntervalSlow, "Container did not stop in time")
dcli := agentcontainers.NewDockerCLI(agentexec.DefaultExecer)
+4 -3
View File
@@ -73,13 +73,14 @@ func TestIntegrationDocker(t *testing.T) {
t.Logf("Purged container %q", ct.Container.Name)
})
// Wait for container to start
require.Eventually(t, func() bool {
ctx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
ct, ok := pool.ContainerByName(ct.Container.Name)
return ok && ct.Container.State.Running
}, testutil.WaitShort, testutil.IntervalSlow, "Container did not start in time")
}, testutil.IntervalSlow, "Container did not start in time")
dcl := agentcontainers.NewDockerCLI(agentexec.DefaultExecer)
ctx := testutil.Context(t, testutil.WaitShort)
ctx = testutil.Context(t, testutil.WaitShort)
actual, err := dcl.List(ctx)
require.NoError(t, err, "Could not list containers")
require.Empty(t, actual.Warnings, "Expected no warnings")
+6 -4
View File
@@ -96,9 +96,10 @@ func TestBoundaryLogs_EndToEnd(t *testing.T) {
}
sendBoundaryLogsRequest(t, conn, req)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return len(sink.Entries()) >= 1
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
entries := sink.Entries()
require.Len(t, entries, 1)
@@ -130,9 +131,10 @@ func TestBoundaryLogs_EndToEnd(t *testing.T) {
}
sendBoundaryLogsRequest(t, conn, req2)
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return len(sink.Entries()) >= 2
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
entries = sink.Entries()
entry = entries[1]
+39 -26
View File
@@ -161,10 +161,11 @@ func TestServer_ReceiveAndForwardLogs(t *testing.T) {
sendLogs(t, conn, req)
// Wait for the reporter to receive the log.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
logs := reporter.getLogs()
return len(logs) == 1
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
logs := reporter.getLogs()
require.Len(t, logs, 1)
@@ -220,10 +221,11 @@ func TestServer_MultipleMessages(t *testing.T) {
sendLogs(t, conn, req)
}
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
logs := reporter.getLogs()
return len(logs) == 5
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
cancel()
<-forwarderDone
@@ -281,10 +283,11 @@ func TestServer_MultipleConnections(t *testing.T) {
}
wg.Wait()
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
logs := reporter.getLogs()
return len(logs) == numConns
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
cancel()
<-forwarderDone
@@ -390,10 +393,11 @@ func TestServer_ForwarderContinuesAfterError(t *testing.T) {
sendLogs(t, conn, req2)
// Only the second message should be recorded.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
logs := reporter.getLogs()
return len(logs) == 1
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
logs := reporter.getLogs()
require.Len(t, logs, 1)
@@ -482,10 +486,11 @@ func TestServer_InvalidProtobuf(t *testing.T) {
}
sendLogs(t, conn, req)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
logs := reporter.getLogs()
return len(logs) == 1
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
cancel()
<-forwarderDone
@@ -524,10 +529,11 @@ func TestServer_InvalidHeader(t *testing.T) {
// The server closes the connection on invalid header, so the next
// write should fail with a broken pipe error.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
_, err := conn.Write([]byte{0x00})
return err != nil
}, testutil.WaitShort, testutil.IntervalFast, name)
}, testutil.IntervalFast, name)
}
// TagV1 with length exceeding MaxMessageSizeV1.
@@ -583,10 +589,11 @@ func TestServer_AllowRequest(t *testing.T) {
}
sendLogs(t, conn, req)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
logs := reporter.getLogs()
return len(logs) == 1
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
logs := reporter.getLogs()
require.Len(t, logs, 1)
@@ -642,9 +649,10 @@ func TestServer_TagV1BackwardsCompatibility(t *testing.T) {
}
sendLogsV1(t, conn, v1Req)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return len(reporter.getLogs()) == 1
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
// Now send a TagV2 message on the same connection to verify both
// tag versions work interleaved.
@@ -664,9 +672,10 @@ func TestServer_TagV1BackwardsCompatibility(t *testing.T) {
}
sendLogs(t, conn, v2Req)
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return len(reporter.getLogs()) == 2
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
logs := reporter.getLogs()
require.Equal(t, "https://example.com/v1", logs[0].Logs[0].GetHttpRequest().Url)
@@ -719,9 +728,10 @@ func TestServer_Metrics(t *testing.T) {
sendLogs(t, conn, makeReq(1))
}
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return getCounterVecValue(t, reg, "agent_boundary_log_proxy_batches_dropped_total", "buffer_full") >= 1
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
require.GreaterOrEqual(t,
getCounterVecValue(t, reg, "agent_boundary_log_proxy_logs_dropped_total", "buffer_full"),
float64(1))
@@ -774,18 +784,20 @@ func TestServer_Metrics(t *testing.T) {
// The metric is incremented after ReportBoundaryLogs returns, so we
// need to poll briefly.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return getCounterVecValue(t, reg, "agent_boundary_log_proxy_batches_dropped_total", "forward_failed") >= 1
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
require.Equal(t, float64(2),
getCounterVecValue(t, reg, "agent_boundary_log_proxy_logs_dropped_total", "forward_failed"))
// Phase 2: forward succeeds.
sendLogs(t, conn, makeReq(1))
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return len(reporter.getLogs()) >= 1
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
require.Equal(t, float64(1),
getCounterValue(t, reg, "agent_boundary_log_proxy_batches_forwarded_total"))
@@ -798,9 +810,10 @@ func TestServer_Metrics(t *testing.T) {
// Status is handled immediately by the reader goroutine, not by the
// forwarder, so poll metrics directly.
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return getCounterVecValue(t, reg, "agent_boundary_log_proxy_logs_dropped_total", "boundary_channel_full") >= 5
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
require.Equal(t, float64(5),
getCounterVecValue(t, reg, "agent_boundary_log_proxy_logs_dropped_total", "boundary_channel_full"))
require.Equal(t, float64(3),
+6 -4
View File
@@ -76,7 +76,8 @@ func TestEngine_IndexPicksUpNewFile(t *testing.T) {
require.NoError(t, eng.AddRoot(ctx, dir))
createFile(t, dir, "newfile_unique.txt", "world")
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
results, sErr := eng.Search(ctx, "newfile_unique", filefinder.DefaultSearchOptions())
if sErr != nil {
return false
@@ -87,7 +88,7 @@ func TestEngine_IndexPicksUpNewFile(t *testing.T) {
}
}
return false
}, testutil.WaitShort, testutil.IntervalFast, "expected newfile_unique.txt to appear via watcher")
}, testutil.IntervalFast, "expected newfile_unique.txt to appear via watcher")
}
func TestEngine_IndexRemovesDeletedFile(t *testing.T) {
@@ -105,7 +106,8 @@ func TestEngine_IndexRemovesDeletedFile(t *testing.T) {
require.NoError(t, os.Remove(filepath.Join(dir, "deleteme_unique.txt")))
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
results, sErr := eng.Search(ctx, "deleteme_unique", filefinder.DefaultSearchOptions())
if sErr != nil {
return false
@@ -116,7 +118,7 @@ func TestEngine_IndexRemovesDeletedFile(t *testing.T) {
}
}
return true
}, testutil.WaitShort, testutil.IntervalFast, "expected deleteme_unique.txt to disappear after removal")
}, testutil.IntervalFast, "expected deleteme_unique.txt to disappear after removal")
}
func TestEngine_MultipleRoots(t *testing.T) {
@@ -420,7 +420,7 @@ func TestBackedPipe_ReadBlocksWhenDisconnected(t *testing.T) {
testutil.TryReceive(testCtx, t, readStarted)
// Ensure the read is actually blocked by verifying it hasn't completed
require.Eventually(t, func() bool {
testutil.Eventually(testCtx, t, func(ctx context.Context) bool {
select {
case <-readDone:
t.Fatal("Read should be blocked when disconnected")
@@ -429,7 +429,7 @@ func TestBackedPipe_ReadBlocksWhenDisconnected(t *testing.T) {
// Good, still blocked
return true
}
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
// Close should unblock the read
bp.Close()
@@ -468,9 +468,9 @@ func TestBackedPipe_Reconnection(t *testing.T) {
testutil.RequireReceive(testCtx, t, signalChan)
// Wait for reconnection to complete
require.Eventually(t, func() bool {
testutil.Eventually(testCtx, t, func(ctx context.Context) bool {
return bp.Connected()
}, testutil.WaitShort, testutil.IntervalFast, "pipe should reconnect")
}, testutil.IntervalFast, "pipe should reconnect")
replayedData := conn2.ReadString()
require.Equal(t, "***trigger failure ", replayedData, "Should replay exactly the data written after sequence 17")
@@ -646,9 +646,9 @@ func TestBackedPipe_StateTransitionsAndGenerationTracking(t *testing.T) {
testutil.RequireReceive(testutil.Context(t, testutil.WaitShort), t, signalChan)
// Wait for reconnection to complete
require.Eventually(t, func() bool {
testutil.Eventually(testutil.Context(t, testutil.WaitShort), t, func(ctx context.Context) bool {
return bp.Connected()
}, testutil.WaitShort, testutil.IntervalFast, "should reconnect")
}, testutil.IntervalFast, "should reconnect")
require.Equal(t, 2, reconnector.GetCallCount())
// Force another reconnection
@@ -707,9 +707,9 @@ func TestBackedPipe_GenerationFiltering(t *testing.T) {
wg.Wait()
// Wait for reconnection to complete
require.Eventually(t, func() bool {
testutil.Eventually(testutil.Context(t, testutil.WaitShort), t, func(ctx context.Context) bool {
return bp.Connected()
}, testutil.WaitShort, testutil.IntervalFast, "should reconnect once")
}, testutil.IntervalFast, "should reconnect once")
// Should have only reconnected once despite multiple errors
require.Equal(t, 2, reconnector.GetCallCount()) // Initial connect + 1 reconnect
@@ -840,9 +840,9 @@ func TestBackedPipe_SingleReconnectionOnMultipleErrors(t *testing.T) {
testutil.RequireReceive(testCtx, t, signalChan)
// Wait for reconnection to complete
require.Eventually(t, func() bool {
testutil.Eventually(testCtx, t, func(ctx context.Context) bool {
return bp.Connected()
}, testutil.WaitShort, testutil.IntervalFast, "should reconnect after write error")
}, testutil.IntervalFast, "should reconnect after write error")
// Verify that only one reconnection occurred
require.Equal(t, 2, reconnector.GetCallCount(), "should have exactly 2 calls: initial connect + 1 reconnection")
@@ -493,7 +493,7 @@ func TestBackedReader_CloseWhileBlockedOnUnderlyingReader(t *testing.T) {
// Verify read is blocked by checking that it hasn't completed
// and ensuring we have adequate time for it to reach the blocking state
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
select {
case <-readDone:
t.Fatal("Read should be blocked on underlying reader")
@@ -502,7 +502,7 @@ func TestBackedReader_CloseWhileBlockedOnUnderlyingReader(t *testing.T) {
// Good, still blocked
return true
}
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
// Start Close() in a goroutine since it will block until the underlying read completes
closeDone := make(chan error, 1)
@@ -2,6 +2,7 @@ package backedpipe_test
import (
"bytes"
"context"
"os"
"sync"
"testing"
@@ -646,8 +647,8 @@ func TestBackedWriter_ConcurrentWriteAndClose(t *testing.T) {
// Ensure the write is actually blocked by repeatedly checking that:
// 1. The write hasn't completed yet
// 2. The writer is still not connected
// We use require.Eventually to give it a fair chance to reach the blocking state
require.Eventually(t, func() bool {
// We use testutil.Eventually to give it a fair chance to reach the blocking state
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
select {
case <-writeComplete:
t.Fatal("Write should be blocked when no writer is connected")
@@ -656,7 +657,7 @@ func TestBackedWriter_ConcurrentWriteAndClose(t *testing.T) {
// Write is still blocked, which is what we want
return !bw.Connected()
}
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
// Close the writer while the write is blocked waiting for connection
closeErr := bw.Close()
+6 -4
View File
@@ -1,6 +1,7 @@
package cli_test
import (
"context"
"fmt"
"net/http"
"os"
@@ -51,13 +52,14 @@ func TestWorkspaceAgent(t *testing.T) {
coderdtest.AwaitWorkspaceAgents(t, client, r.Workspace.ID)
require.Eventually(t, func() bool {
ctx := testutil.Context(t, testutil.WaitLong)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
info, err := os.Stat(filepath.Join(logDir, "coder-agent.log"))
if err != nil {
return false
}
return info.Size() > 0
}, testutil.WaitLong, testutil.IntervalMedium)
}, testutil.IntervalMedium)
})
t.Run("PostStartup", func(t *testing.T) {
@@ -216,7 +218,7 @@ func TestWorkspaceAgent(t *testing.T) {
// Verify the servers are not listening by checking the log for disabled
// messages.
require.Eventually(t, func() bool {
testutil.Eventually(testutil.Context(t, testutil.WaitShort), t, func(ctx context.Context) bool {
logContent, err := os.ReadFile(filepath.Join(logDir, "coder-agent.log"))
if err != nil {
return false
@@ -225,7 +227,7 @@ func TestWorkspaceAgent(t *testing.T) {
return strings.Contains(logStr, "pprof address is empty, disabling pprof server") &&
strings.Contains(logStr, "prometheus address is empty, disabling prometheus server") &&
strings.Contains(logStr, "debug address is empty, disabling debug server")
}, testutil.WaitLong, testutil.IntervalMedium)
}, testutil.IntervalMedium)
})
}
+3 -2
View File
@@ -1,6 +1,7 @@
package cli_test
import (
"context"
"runtime"
"testing"
@@ -104,10 +105,10 @@ func TestExpRpty(t *testing.T) {
})
require.NoError(t, err, "Could not start container")
// Wait for container to start
require.Eventually(t, func() bool {
testutil.Eventually(testutil.Context(t, testutil.WaitShort), t, func(ctx context.Context) bool {
ct, ok := pool.ContainerByName(ct.Container.Name)
return ok && ct.Container.State.Running
}, testutil.WaitShort, testutil.IntervalSlow, "Container did not start in time")
}, testutil.IntervalSlow, "Container did not start in time")
t.Cleanup(func() {
err := pool.Purge(ct)
require.NoError(t, err, "Could not stop container")
+3 -2
View File
@@ -49,10 +49,11 @@ func TestResetPassword(t *testing.T) {
assert.NoError(t, err)
}()
var rawURL string
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitLong)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
rawURL, err = cfg.URL().Read()
return err == nil && rawURL != ""
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
accessURL, err := url.Parse(rawURL)
require.NoError(t, err)
client := codersdk.New(accessURL)
+11 -13
View File
@@ -212,10 +212,10 @@ func TestServer(t *testing.T) {
clitest.Start(t, inv.WithContext(ctx))
//nolint:gocritic // Embedded postgres take a while to fire up.
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
rawURL, err := cfg.URL().Read()
return err == nil && rawURL != ""
}, superDuperLong, testutil.IntervalFast, "failed to get access URL")
}, testutil.IntervalFast, "failed to get access URL")
})
t.Run("EphemeralDeployment", func(t *testing.T) {
t.Parallel()
@@ -1229,7 +1229,8 @@ func TestServer(t *testing.T) {
require.NoError(t, err)
require.NoError(t, body.Body.Close())
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitLong)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
snap := <-snapshot
htmlFirstServedFound := false
for _, item := range snap.TelemetryItems {
@@ -1238,7 +1239,7 @@ func TestServer(t *testing.T) {
}
}
return htmlFirstServedFound
}, testutil.WaitLong, testutil.IntervalSlow, "no html_first_served telemetry item")
}, testutil.IntervalSlow, "no html_first_served telemetry item")
})
t.Run("Prometheus", func(t *testing.T) {
t.Parallel()
@@ -2074,7 +2075,8 @@ func TestServer_Logging_NoParallel(t *testing.T) {
func loggingWaitFile(t *testing.T, fiName string, dur time.Duration) {
var lastStat os.FileInfo
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, dur)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
var err error
lastStat, err = os.Stat(fiName)
if err != nil {
@@ -2084,12 +2086,7 @@ func loggingWaitFile(t *testing.T, fiName string, dur time.Duration) {
return false
}
return lastStat.Size() > 0
},
dur, //nolint:gocritic
testutil.IntervalFast,
"file at %s should exist, last stat: %+v",
fiName, lastStat,
)
}, testutil.IntervalFast, "file at %s should exist, last stat: %+v", fiName, lastStat)
}
func TestServer_Production(t *testing.T) {
@@ -2286,10 +2283,11 @@ func waitAccessURL(t *testing.T, cfg config.Root) *url.URL {
var err error
var rawURL string
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitLong)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
rawURL, err = cfg.URL().Read()
return err == nil && rawURL != ""
}, testutil.WaitLong, testutil.IntervalFast, "failed to get access URL")
}, testutil.IntervalFast, "failed to get access URL")
accessURL, err := url.Parse(rawURL)
require.NoError(t, err, "failed to parse access URL")
+4 -4
View File
@@ -31,7 +31,7 @@ func TestSpeedtest(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
ws, err := client.Workspace(ctx, workspace.ID)
if !assert.NoError(t, err) {
return false
@@ -39,7 +39,7 @@ func TestSpeedtest(t *testing.T) {
a := ws.LatestBuild.Resources[0].Agents[0]
return a.Status == codersdk.WorkspaceAgentConnected &&
a.LifecycleState == codersdk.WorkspaceAgentLifecycleReady
}, testutil.WaitLong, testutil.IntervalFast, "agent is not ready")
}, testutil.IntervalFast, "agent is not ready")
inv, root := clitest.New(t, "speedtest", workspace.Name)
clitest.SetupConfig(t, client, root)
@@ -71,7 +71,7 @@ func TestSpeedtestJson(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
ws, err := client.Workspace(ctx, workspace.ID)
if !assert.NoError(t, err) {
return false
@@ -79,7 +79,7 @@ func TestSpeedtestJson(t *testing.T) {
a := ws.LatestBuild.Resources[0].Agents[0]
return a.Status == codersdk.WorkspaceAgentConnected &&
a.LifecycleState == codersdk.WorkspaceAgentLifecycleReady
}, testutil.WaitLong, testutil.IntervalFast, "agent is not ready")
}, testutil.IntervalFast, "agent is not ready")
inv, root := clitest.New(t, "speedtest", "--output=json", workspace.Name)
clitest.SetupConfig(t, client, root)
+5 -2
View File
@@ -121,11 +121,14 @@ func TestCloserStack_Context(t *testing.T) {
err = uut.push("fc1", fc1)
require.NoError(t, err)
cancel()
require.Eventually(t, func() bool {
// Use a fresh context for Eventually since we just canceled
// ctx above to trigger the closer stack's context handler.
waitCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(waitCtx, t, func(_ context.Context) bool {
uut.Lock()
defer uut.Unlock()
return uut.closed
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
}
func TestCloserStack_PushAfterClose(t *testing.T) {
+19 -14
View File
@@ -180,11 +180,12 @@ func TestSSH(t *testing.T) {
// Delay until workspace is starting, otherwise the agent may be
// booted due to outdated build.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
var err error
workspace, err = client.Workspace(ctx, workspace.ID)
return err == nil && workspace.LatestBuild.Transition == codersdk.WorkspaceTransitionStart
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
// When the agent connects, the workspace was started, and we should
// have access to the shell.
@@ -630,13 +631,13 @@ func TestSSH(t *testing.T) {
require.NoError(t, err)
_ = clientOutput.Close()
assert.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
entries, err := afero.ReadDir(fs, "/net")
if err != nil {
return false
}
return len(entries) > 0
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
<-cmdDone
})
@@ -759,11 +760,12 @@ func TestSSH(t *testing.T) {
// Delay until workspace is starting, otherwise the agent may be
// booted due to outdated build.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
var err error
workspace, err = client.Workspace(ctx, workspace.ID)
return err == nil && workspace.LatestBuild.Transition == codersdk.WorkspaceTransitionStart
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
// When the agent connects, the workspace was started, and we should
// have access to the shell.
@@ -852,10 +854,11 @@ func TestSSH(t *testing.T) {
fsn.Notify()
<-cmdDone
fsn.AssertStopped()
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
_, err = os.Stat(remoteSock)
return xerrors.Is(err, os.ErrNotExist)
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
t.Run("Stdio_BrokenConn", func(t *testing.T) {
@@ -1025,10 +1028,11 @@ func TestSSH(t *testing.T) {
// wait for the remote socket to get cleaned up before retrying,
// because cleaning up the socket happens asynchronously, and we
// might connect to an old listener on the agent side.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
_, err = os.Stat(remoteSock)
return xerrors.Is(err, os.ErrNotExist)
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
}()
}
})
@@ -1228,7 +1232,7 @@ func TestSSH(t *testing.T) {
assert.Error(t, err, "ssh command should fail")
})
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://localhost:8222/", nil)
if !assert.NoError(t, err) {
// true exits the loop.
@@ -1245,7 +1249,7 @@ func TestSSH(t *testing.T) {
assert.NoError(t, err)
assert.EqualValues(t, "hello world", body)
return true
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
// And we're done.
cancel()
@@ -2056,10 +2060,11 @@ func TestSSH_Container(t *testing.T) {
})
require.NoError(t, err, "Could not start container")
// Wait for container to start
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
ct, ok := pool.ContainerByName(ct.Container.Name)
return ok && ct.Container.State.Running
}, testutil.WaitShort, testutil.IntervalSlow, "Container did not start in time")
}, testutil.IntervalSlow, "Container did not start in time")
t.Cleanup(func() {
err := pool.Purge(ct)
require.NoError(t, err, "Could not stop container")
+3 -2
View File
@@ -549,7 +549,8 @@ func TestTemplatePush(t *testing.T) {
inv = inv.WithContext(ctx)
clitest.Start(t, inv) // Only used for output, disregard exit status.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
jobs, err := store.GetProvisionerJobsCreatedAfter(ctx, time.Time{})
if !assert.NoError(t, err) {
return false
@@ -558,7 +559,7 @@ func TestTemplatePush(t *testing.T) {
return false
}
return assert.EqualValues(t, wantTags, jobs[0].Tags)
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
if tt.expectOutput != "" {
pty.ExpectMatchContext(ctx, tt.expectOutput)
+2 -3
View File
@@ -6,7 +6,6 @@ import (
"testing"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/coder/coder/v2/agent/agenttest"
@@ -74,13 +73,13 @@ func TestVSCodeSSH(t *testing.T) {
waiter := clitest.StartWithWaiter(t, inv.WithContext(ctx))
for _, dir := range []string{"/net", "/log"} {
assert.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
entries, err := afero.ReadDir(fs, dir)
if err != nil {
return false
}
return len(entries) > 0
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
}
waiter.Cancel()
+18 -22
View File
@@ -142,31 +142,27 @@ func TestWorkspaceActivityBump(t *testing.T) {
checks := 0
// The Deadline bump occurs asynchronously.
require.Eventuallyf(t,
func() bool {
checks++
workspace, err = client.Workspace(ctx, workspace.ID)
require.NoError(t, err)
tCtx := testutil.Context(t, maxTimeDrift)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
checks++
workspace, err = client.Workspace(ctx, workspace.ID)
require.NoError(t, err)
hasBumped := !workspace.LatestBuild.Deadline.Time.Equal(firstDeadline)
hasBumped := !workspace.LatestBuild.Deadline.Time.Equal(firstDeadline)
// Always make sure to log this information, even on the last check.
// The last check is the most important, as if this loop is acting
// slow, the last check could be the cause of the failure.
if time.Since(lastChecked) > time.Second || hasBumped {
avgCheckTime := time.Since(waitedFor) / time.Duration(checks)
t.Logf("deadline detect: bumped=%t since_last_check=%s avg_check_dur=%s checks=%d deadline=%v",
hasBumped, time.Since(updatedAfter), avgCheckTime, checks, workspace.LatestBuild.Deadline.Time)
lastChecked = time.Now()
}
// Always make sure to log this information, even on the last check.
// The last check is the most important, as if this loop is acting
// slow, the last check could be the cause of the failure.
if time.Since(lastChecked) > time.Second || hasBumped {
avgCheckTime := time.Since(waitedFor) / time.Duration(checks)
t.Logf("deadline detect: bumped=%t since_last_check=%s avg_check_dur=%s checks=%d deadline=%v",
hasBumped, time.Since(updatedAfter), avgCheckTime, checks, workspace.LatestBuild.Deadline.Time)
lastChecked = time.Now()
}
updatedAfter = dbtime.Now()
return hasBumped
},
//nolint: gocritic // maxTimeDrift is a testutil time
maxTimeDrift, testutil.IntervalFast,
"deadline %v never updated", firstDeadline,
)
updatedAfter = dbtime.Now()
return hasBumped
}, testutil.IntervalFast, "deadline %v never updated", firstDeadline)
// This log line helps establish how long it took for the deadline
// to be detected as bumped.
+2 -2
View File
@@ -1819,8 +1819,8 @@ func TestExecutorAutostartSkipsWhenNoProvisionersAvailable(t *testing.T) {
p, err = coderdtest.GetProvisionerForTags(db, time.Now(), workspace.OrganizationID, provisionerDaemonTags)
require.NoError(t, err, "Error getting provisioner for workspace")
// This assertion *may* no longer need to be `Eventually`.
require.Eventually(t, func() bool { return p.LastSeenAt.Time.UnixNano() == staleTime.UnixNano() },
testutil.WaitMedium, testutil.IntervalFast, "expected provisioner LastSeenAt to be:%+v, saw :%+v", staleTime.UTC(), p.LastSeenAt.Time.UTC())
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool { return p.LastSeenAt.Time.UnixNano() == staleTime.UnixNano() }, testutil.IntervalFast, "expected provisioner LastSeenAt to be:%+v, saw :%+v", staleTime.UTC(), p.LastSeenAt.Time.UTC())
// Ensure the provisioner is gone or stale, relative to the artificial next autostart time, before triggering the autobuild.
coderdtest.MustWaitForProvisionersUnavailable(t, db, workspace, provisionerDaemonTags, next)
+5 -6
View File
@@ -3,8 +3,7 @@ package coderd_test
import (
"context"
"net/http"
"github.com/stretchr/testify/require"
"testing"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/testutil"
@@ -12,12 +11,12 @@ import (
// Issue: https://github.com/coder/coder/issues/5249
// While running tests in parallel, the web server seems to be overloaded and responds with HTTP 502.
// require.Eventually expects correct HTTP responses.
// testutil.Eventually expects correct HTTP responses.
func requestWithRetries(ctx context.Context, t require.TestingT, client *codersdk.Client, method, path string, body interface{}, opts ...codersdk.RequestOption) (*http.Response, error) {
func requestWithRetries(ctx context.Context, t testing.TB, client *codersdk.Client, method, path string, body interface{}, opts ...codersdk.RequestOption) (*http.Response, error) {
var resp *http.Response
var err error
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
// nolint // only requests which are not passed upstream have a body closed
resp, err = client.Request(ctx, method, path, body, opts...)
if resp != nil && resp.StatusCode == http.StatusBadGateway {
@@ -27,6 +26,6 @@ func requestWithRetries(ctx context.Context, t require.TestingT, client *codersd
return false
}
return true
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
return resp, err
}
+7 -9
View File
@@ -1126,7 +1126,7 @@ func AwaitTemplateVersionJobRunning(t testing.TB, client *codersdk.Client, versi
t.Logf("waiting for template version %s build job to start", version)
var templateVersion codersdk.TemplateVersion
require.Eventually(t, func() bool {
require.True(t, testutil.Eventually(ctx, t, func(ctx context.Context) bool {
var err error
templateVersion, err = client.TemplateVersion(ctx, version)
if err != nil {
@@ -1142,7 +1142,7 @@ func AwaitTemplateVersionJobRunning(t testing.TB, client *codersdk.Client, versi
t.FailNow()
return false
}
}, testutil.WaitShort, testutil.IntervalFast, "make sure you set `IncludeProvisionerDaemon`!")
}, testutil.IntervalFast, "make sure you set `IncludeProvisionerDaemon`!"))
t.Logf("template version %s job has started", version)
return templateVersion
}
@@ -1157,12 +1157,12 @@ func AwaitTemplateVersionJobCompleted(t testing.TB, client *codersdk.Client, ver
t.Logf("waiting for template version %s build job to complete", version)
var templateVersion codersdk.TemplateVersion
require.Eventually(t, func() bool {
require.True(t, testutil.Eventually(ctx, t, func(ctx context.Context) bool {
var err error
templateVersion, err = client.TemplateVersion(ctx, version)
t.Logf("template version job status: %s", templateVersion.Job.Status)
return assert.NoError(t, err) && templateVersion.Job.CompletedAt != nil
}, testutil.WaitLong, testutil.IntervalFast, "make sure you set `IncludeProvisionerDaemon`!")
}, testutil.IntervalFast, "make sure you set `IncludeProvisionerDaemon`!"))
t.Logf("template version %s job has completed", version)
return templateVersion
}
@@ -1171,12 +1171,10 @@ func AwaitTemplateVersionJobCompleted(t testing.TB, client *codersdk.Client, ver
func AwaitWorkspaceBuildJobCompleted(t testing.TB, client *codersdk.Client, build uuid.UUID) codersdk.WorkspaceBuild {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
defer cancel()
t.Logf("waiting for workspace build job %s", build)
var workspaceBuild codersdk.WorkspaceBuild
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
require.True(t, testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
var err error
workspaceBuild, err = client.WorkspaceBuild(ctx, build)
if err != nil {
@@ -1188,7 +1186,7 @@ func AwaitWorkspaceBuildJobCompleted(t testing.TB, client *codersdk.Client, buil
return false
}
return true
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast))
t.Logf("got workspace build job %s (status: %s)", build, workspaceBuild.Job.Status)
return workspaceBuild
}
+8 -8
View File
@@ -241,7 +241,7 @@ func TestDeleteOldWorkspaceAgentStats(t *testing.T) {
// then
var stats []database.GetWorkspaceAgentStatsRow
var err error
require.Eventuallyf(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
// Query all stats created not earlier than ~7 months ago
stats, err = db.GetWorkspaceAgentStats(ctx, now.AddDate(0, 0, -210))
if err != nil {
@@ -249,7 +249,7 @@ func TestDeleteOldWorkspaceAgentStats(t *testing.T) {
}
return !containsWorkspaceAgentStat(stats, first) &&
containsWorkspaceAgentStat(stats, second)
}, testutil.WaitShort, testutil.IntervalFast, "it should delete old stats: %v", stats)
}, testutil.IntervalFast, "it should delete old stats: %v", stats)
// when
events := make(chan dbrollup.Event)
@@ -264,7 +264,7 @@ func TestDeleteOldWorkspaceAgentStats(t *testing.T) {
defer closer.Close()
// then
require.Eventuallyf(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
// Query all stats created not earlier than ~7 months ago
stats, err = db.GetWorkspaceAgentStats(ctx, now.AddDate(0, 0, -210))
if err != nil {
@@ -273,7 +273,7 @@ func TestDeleteOldWorkspaceAgentStats(t *testing.T) {
return !containsWorkspaceAgentStat(stats, first) &&
!containsWorkspaceAgentStat(stats, second) &&
containsWorkspaceAgentStat(stats, third)
}, testutil.WaitShort, testutil.IntervalFast, "it should delete old stats after rollup: %v", stats)
}, testutil.IntervalFast, "it should delete old stats after rollup: %v", stats)
}
func containsWorkspaceAgentStat(stats []database.GetWorkspaceAgentStatsRow, needle database.WorkspaceAgentStat) bool {
@@ -665,7 +665,7 @@ func TestDeleteOldProvisionerDaemons(t *testing.T) {
defer closer.Close()
// then
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
daemons, err := db.GetProvisionerDaemons(ctx)
if err != nil {
return false
@@ -681,7 +681,7 @@ func TestDeleteOldProvisionerDaemons(t *testing.T) {
!containsProvisionerDaemon(daemons, "external-1") &&
!containsProvisionerDaemon(daemons, "alice-provisioner") &&
containsProvisionerDaemon(daemons, "bob-provisioner")
}, testutil.WaitShort, testutil.IntervalSlow)
}, testutil.IntervalSlow)
}
func containsProvisionerDaemon(daemons []database.ProvisionerDaemon, name string) bool {
@@ -932,7 +932,7 @@ func TestDeleteOldTelemetryHeartbeats(t *testing.T) {
defer closer.Close()
<-done // doTick() has now run.
require.Eventuallyf(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
// We use an SQL queries directly here because we don't expose queries
// for deleting heartbeats in the application code.
var totalCount int
@@ -950,7 +950,7 @@ func TestDeleteOldTelemetryHeartbeats(t *testing.T) {
// Expect 2 heartbeats remaining and none older than 24 hours.
t.Logf("eventually: total count: %d, old count: %d", totalCount, oldCount)
return totalCount == 2 && oldCount == 0
}, testutil.WaitShort, testutil.IntervalFast, "it should delete old telemetry heartbeats")
}, testutil.IntervalFast, "it should delete old telemetry heartbeats")
}
func TestDeleteOldConnectionLogs(t *testing.T) {
+6 -4
View File
@@ -59,7 +59,8 @@ func TestPGPubsub_Metrics(t *testing.T) {
}()
_ = testutil.TryReceive(ctx, t, messageChannel)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
latencyBytes := gatherCount * pubsub.LatencyMessageLength
metrics, err = registry.Gather()
gatherCount++
@@ -76,7 +77,7 @@ func TestPGPubsub_Metrics(t *testing.T) {
testutil.PromGaugeAssertion(t, metrics, func(in float64) bool { return in > 0 }, "coder_pubsub_receive_latency_seconds") &&
testutil.PromCounterHasValue(t, metrics, gatherCount, "coder_pubsub_latency_measures_total") &&
!testutil.PromCounterGathered(t, metrics, "coder_pubsub_latency_measure_errs_total")
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
colossalSize := 7600
colossalData := make([]byte, colossalSize)
@@ -96,7 +97,8 @@ func TestPGPubsub_Metrics(t *testing.T) {
_ = testutil.TryReceive(ctx, t, messageChannel)
_ = testutil.TryReceive(ctx, t, messageChannel)
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
latencyBytes := gatherCount * pubsub.LatencyMessageLength
metrics, err = registry.Gather()
gatherCount++
@@ -114,7 +116,7 @@ func TestPGPubsub_Metrics(t *testing.T) {
testutil.PromGaugeAssertion(t, metrics, func(in float64) bool { return in > 0 }, "coder_pubsub_receive_latency_seconds") &&
testutil.PromCounterHasValue(t, metrics, gatherCount, "coder_pubsub_latency_measures_total") &&
!testutil.PromCounterGathered(t, metrics, "coder_pubsub_latency_measure_errs_total")
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
}
func TestPGPubsubDriver(t *testing.T) {
+3 -2
View File
@@ -117,7 +117,8 @@ func TestTunnel(t *testing.T) {
defer func() { _ = server.Close() }()
defer func() { tun.Listener.Close() }()
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, tun.URL.String(), nil)
if !assert.NoError(t, err) {
return false
@@ -130,7 +131,7 @@ func TestTunnel(t *testing.T) {
_, _ = io.Copy(io.Discard, res.Body)
return res.StatusCode == http.StatusAccepted
}, testutil.WaitShort, testutil.IntervalSlow)
}, testutil.IntervalSlow)
assert.NoError(t, server.Close())
cancelTun()
+14 -14
View File
@@ -1942,7 +1942,7 @@ func TestPostChatMessages(t *testing.T) {
require.NotZero(t, created.QueuedMessage.ID)
require.True(t, hasTextPart(created.QueuedMessage.Content, messageText))
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
messagesResult, getErr := client.GetChatMessages(ctx, chat.ID, nil)
if getErr != nil {
return false
@@ -1961,7 +1961,7 @@ func TestPostChatMessages(t *testing.T) {
}
}
return false
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
} else {
require.Nil(t, created.QueuedMessage)
require.NotNil(t, created.Message)
@@ -1970,7 +1970,7 @@ func TestPostChatMessages(t *testing.T) {
require.NotZero(t, created.Message.ID)
require.True(t, hasTextPart(created.Message.Content, messageText))
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
messagesResult, getErr := client.GetChatMessages(ctx, chat.ID, nil)
if getErr != nil {
return false
@@ -1983,7 +1983,7 @@ func TestPostChatMessages(t *testing.T) {
}
}
return false
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
}
})
@@ -2113,7 +2113,7 @@ func TestChatMessageWithFileReferences(t *testing.T) {
}
var found bool
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
messagesResult, getErr := client.GetChatMessages(ctx, chat.ID, nil)
if getErr != nil {
return false
@@ -2141,7 +2141,7 @@ func TestChatMessageWithFileReferences(t *testing.T) {
}
}
return false
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
require.True(t, found, "expected to find file-reference part in stored message")
})
@@ -2173,7 +2173,7 @@ func TestChatMessageWithFileReferences(t *testing.T) {
part.Content == "const x = 1;"
}
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
messagesResult, getErr := client.GetChatMessages(ctx, chat.ID, nil)
if getErr != nil {
return false
@@ -2195,7 +2195,7 @@ func TestChatMessageWithFileReferences(t *testing.T) {
}
}
return false
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
})
t.Run("FileReferenceWithoutContent", func(t *testing.T) {
@@ -2226,7 +2226,7 @@ func TestChatMessageWithFileReferences(t *testing.T) {
part.Content == ""
}
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
messagesResult, getErr := client.GetChatMessages(ctx, chat.ID, nil)
if getErr != nil {
return false
@@ -2248,7 +2248,7 @@ func TestChatMessageWithFileReferences(t *testing.T) {
}
}
return false
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
})
t.Run("FileReferenceWithCode", func(t *testing.T) {
@@ -2279,7 +2279,7 @@ func TestChatMessageWithFileReferences(t *testing.T) {
part.Content == "func main() {\n\tfmt.Println()\n}"
}
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
messagesResult, getErr := client.GetChatMessages(ctx, chat.ID, nil)
if getErr != nil {
return false
@@ -2301,7 +2301,7 @@ func TestChatMessageWithFileReferences(t *testing.T) {
}
}
return false
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
})
t.Run("InterleavedTextAndFileReferences", func(t *testing.T) {
@@ -2369,7 +2369,7 @@ func TestChatMessageWithFileReferences(t *testing.T) {
{typ: codersdk.ChatMessagePartTypeText, text: "second issue"},
}
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
messagesResult, getErr := client.GetChatMessages(ctx, chat.ID, nil)
if getErr != nil {
return false
@@ -2414,7 +2414,7 @@ func TestChatMessageWithFileReferences(t *testing.T) {
}
}
return false
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
})
t.Run("EmptyFileName", func(t *testing.T) {
+5 -4
View File
@@ -214,7 +214,7 @@ func TestUserActivityInsights_SanityCheck(t *testing.T) {
require.NoError(t, err)
var userActivities codersdk.UserActivityInsightsResponse
require.Eventuallyf(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
// Keep connection active.
_, err := w.Write([]byte("hello world\n"))
if !assert.NoError(t, err) {
@@ -229,7 +229,7 @@ func TestUserActivityInsights_SanityCheck(t *testing.T) {
return false
}
return len(userActivities.Report.Users) > 0 && userActivities.Report.Users[0].Seconds > 0
}, testutil.WaitSuperLong, testutil.IntervalMedium, "user activity is missing")
}, testutil.IntervalMedium, "user activity is missing")
// We got our latency data, close the connection.
_ = sess.Close()
@@ -312,7 +312,8 @@ func TestUserLatencyInsights(t *testing.T) {
require.NoError(t, err)
var userLatencies codersdk.UserLatencyInsightsResponse
require.Eventuallyf(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
// Keep connection active.
_, err := w.Write([]byte("hello world\n"))
if !assert.NoError(t, err) {
@@ -327,7 +328,7 @@ func TestUserLatencyInsights(t *testing.T) {
return false
}
return len(userLatencies.Report.Users) > 0 && userLatencies.Report.Users[0].LatencyMS.P50 > 0
}, testutil.WaitMedium, testutil.IntervalFast, "user latency is missing")
}, testutil.IntervalFast, "user latency is missing")
// We got our latency data, close the connection.
_ = sess.Close()
+5 -4
View File
@@ -2,6 +2,7 @@ package dispatch_test
import (
"bytes"
"context"
"fmt"
"log"
"sync"
@@ -452,7 +453,7 @@ func TestSMTP(t *testing.T) {
}()
// Wait for the server to become pingable.
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
cl, err := smtptest.PingClient(listen, tc.useTLS, tc.cfg.TLS.StartTLS.Value())
if err != nil {
t.Logf("smtp not yet dialable: %s", err)
@@ -470,7 +471,7 @@ func TestSMTP(t *testing.T) {
}
return true
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
// Build a fake payload.
payload := types.MessagePayload{
@@ -596,14 +597,14 @@ func TestSMTPEnvelopeAndHeaders(t *testing.T) {
assert.NoError(t, srv.Serve(listen))
}()
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
cl, err := smtptest.PingClient(listen, false, false)
if err != nil {
return false
}
_ = cl.Close()
return true
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
payload := types.MessagePayload{
Version: "1.0",
+9 -7
View File
@@ -73,16 +73,18 @@ func TestBufferedUpdates(t *testing.T) {
)
// Wait for messages to be dispatched.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return santa.naughty.Load() == expectedFailure &&
santa.nice.Load() == expectedSuccess
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
// Wait for the expected number of buffered updates to be accumulated.
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
success, failure := mgr.BufferedUpdatesCount()
return success == expectedSuccess*len(handlers) && failure == expectedFailure*len(handlers)
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
// Stop the manager which forces an update of buffered updates.
require.NoError(t, mgr.Stop(ctx))
@@ -170,11 +172,11 @@ func TestStopBeforeRun(t *testing.T) {
require.NoError(t, err)
// THEN: validate that the manager can be stopped safely without Run() having been called yet
ctx := testutil.Context(t, testutil.WaitSuperLong)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
assert.NoError(t, mgr.Stop(ctx))
return true
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
}
func TestRunStopRace(t *testing.T) {
+21 -14
View File
@@ -299,14 +299,16 @@ func TestPendingUpdatesMetric(t *testing.T) {
require.EqualValues(t, 2, failure)
// Validate that the store synced the expected number of updates.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return syncer.sent.Load() == 2 && syncer.failed.Load() == 2
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
// Wait for the updates to be synced and the metric to reflect that.
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return promtest.ToFloat64(metrics.PendingUpdates) == 0
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
}
func TestInflightDispatchesMetric(t *testing.T) {
@@ -361,26 +363,29 @@ func TestInflightDispatchesMetric(t *testing.T) {
// THEN:
// Ensure we see the dispatches of the messages inflight.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return promtest.ToFloat64(metrics.InflightDispatches.WithLabelValues(string(method), tmpl.String())) == msgCount
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
for i := 0; i < msgCount; i++ {
barrier.wg.Done()
}
// Wait until the handler has dispatched the given notifications.
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
handler.mu.RLock()
defer handler.mu.RUnlock()
return len(handler.succeeded) == msgCount
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
// Wait for the updates to be synced and the metric to reflect that.
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return promtest.ToFloat64(metrics.InflightDispatches.WithLabelValues(string(method), tmpl.String())) == 0
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
}
func TestCustomMethodMetricCollection(t *testing.T) {
@@ -439,7 +444,8 @@ func TestCustomMethodMetricCollection(t *testing.T) {
mgr.Run(ctx)
// THEN: the fake handlers to "dispatch" the notifications.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
smtpHandler.mu.RLock()
webhookHandler.mu.RLock()
defer smtpHandler.mu.RUnlock()
@@ -447,13 +453,14 @@ func TestCustomMethodMetricCollection(t *testing.T) {
return len(smtpHandler.succeeded) == 1 && len(smtpHandler.failed) == 0 &&
len(webhookHandler.succeeded) == 1 && len(webhookHandler.failed) == 0
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
// THEN: we should have metric series for both the default and custom notification methods.
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return promtest.ToFloat64(metrics.DispatchAttempts.WithLabelValues(string(defaultMethod), anotherTemplate.String(), notifications.ResultSuccess)) > 0 &&
promtest.ToFloat64(metrics.DispatchAttempts.WithLabelValues(string(customMethod), tmpl.String(), notifications.ResultSuccess)) > 0
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
}
// hasMatchingFingerprint checks if the given metric's series fingerprint matches the reference fingerprint.
+31 -23
View File
@@ -97,18 +97,19 @@ func TestBasicNotificationRoundtrip(t *testing.T) {
mgr.Run(ctx)
// THEN: we expect that the handler will have received the notifications for dispatch
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitLong)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
handler.mu.RLock()
defer handler.mu.RUnlock()
return slices.Contains(handler.succeeded, sid[0].String()) &&
slices.Contains(handler.failed, fid[0].String())
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
// THEN: we expect the store to be called with the updates of the earlier dispatches
require.Eventually(t, func() bool {
return interceptor.sent.Load() == 2 &&
interceptor.failed.Load() == 2
}, testutil.WaitLong, testutil.IntervalFast)
tCtx = testutil.Context(t, testutil.WaitLong)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return interceptor.sent.Load() == 2 && interceptor.failed.Load() == 2
}, testutil.IntervalFast)
// THEN: we verify that the store contains notifications in their expected state
success, err := store.GetNotificationMessagesByStatus(ctx, database.GetNotificationMessagesByStatusParams{
@@ -176,11 +177,12 @@ func TestSMTPDispatch(t *testing.T) {
mgr.Run(ctx)
// THEN: wait until the dispatch interceptor validates that the messages were dispatched
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitLong)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
assert.Nil(t, handler.lastErr.Load())
assert.True(t, handler.retryable.Load() == 0)
return handler.sent.Load() == 1
}, testutil.WaitLong, testutil.IntervalMedium)
}, testutil.IntervalMedium)
// THEN: we verify that the expected message was received by the mock SMTP server
msgs := mockSMTPSrv.MessagesAndPurge()
@@ -375,11 +377,12 @@ func TestBackpressure(t *testing.T) {
if elapsed%cfg.StoreSyncInterval.Value() == 0 {
numSent := cfg.StoreSyncBufferSize.Value() * int64(elapsed/cfg.StoreSyncInterval.Value())
t.Logf("waiting for %d messages", numSent)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
// need greater or equal because the last set of messages can come immediately due
// to graceful shut down
return int64(storeInterceptor.sent.Load()) >= numSent
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
}
}
t.Log("done advancing")
@@ -474,11 +477,12 @@ func TestRetries(t *testing.T) {
nbTries := msgCount * maxAttempts * 2
// THEN: we expect to see all but the final attempts failing on webhook, and all messages to fail on inbox
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitLong)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
// nolint:gosec
return storeInterceptor.failed.Load() == int32(nbTries-msgCount) &&
storeInterceptor.sent.Load() == msgCount
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
}
// TestExpiredLeaseIsRequeued validates that notification messages which are left in "leased" status will be requeued once their lease expires.
@@ -573,10 +577,11 @@ func TestExpiredLeaseIsRequeued(t *testing.T) {
mgr.Run(ctx)
// Wait until all messages are sent & updates flushed to the database.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitLong)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return handler.sent.Load() == msgCount &&
storeInterceptor.sent.Load() == msgCount*2
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
// Validate that no more messages are in "leased" status.
leased, err = store.GetNotificationMessagesByStatus(ctx, database.GetNotificationMessagesByStatusParams{
@@ -667,12 +672,13 @@ func TestNotifierPaused(t *testing.T) {
// Wait a few fetch intervals to be sure that no new notifications are being sent.
// TODO: use quartz instead.
// nolint:gocritic // These magic numbers are fine.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, fetchInterval*5)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
handler.mu.RLock()
defer handler.mu.RUnlock()
return len(handler.succeeded)+len(handler.failed) == 0
}, fetchInterval*5, testutil.IntervalFast)
}, testutil.IntervalFast)
// Unpause the notifier.
settingsJSON, err = json.Marshal(&codersdk.NotificationsSettings{NotifierPaused: false})
@@ -682,11 +688,12 @@ func TestNotifierPaused(t *testing.T) {
// Notifier is running again, message should be dequeued.
// nolint:gocritic // These magic numbers are fine.
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, fetchInterval*5)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
handler.mu.RLock()
defer handler.mu.RUnlock()
return slices.Contains(handler.succeeded, sid[0].String())
}, fetchInterval*5, testutil.IntervalFast)
}, testutil.IntervalFast)
}
//go:embed events.go
@@ -1434,7 +1441,8 @@ func TestNotificationTemplates_Golden(t *testing.T) {
}()
// Wait for the server to become pingable.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
cl, err := smtptest.PingClient(listen, false, smtpConfig.TLS.StartTLS.Value())
if err != nil {
t.Logf("smtp not yet dialable: %s", err)
@@ -1452,7 +1460,7 @@ func TestNotificationTemplates_Golden(t *testing.T) {
}
return true
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
smtpCfg := defaultNotificationsConfig(database.NotificationMethodSmtp)
smtpCfg.SMTP = smtpConfig
@@ -1506,11 +1514,11 @@ func TestNotificationTemplates_Golden(t *testing.T) {
// Wait for the message to be fetched
var msg *smtptest.Message
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitShort)
require.True(t, testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
msg = backend.LastMessage()
return msg != nil && len(msg.Contents) > 0
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast))
body := normalizeGoldenEmail([]byte(msg.Contents))
err = smtpManager.Stop(ctx)
+3 -2
View File
@@ -440,9 +440,10 @@ func setupDynamicParamsTest(t *testing.T, args setupDynamicParamsTestParams) dyn
_ = stream.Close(websocket.StatusGoingAway)
}
// Cache should always have 0 files when the only stream is closed
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort/5)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return api.FileCache.Count() == 0
}, testutil.WaitShort/5, testutil.IntervalMedium)
}, testutil.IntervalMedium)
})
return dynamicParamsTest{
@@ -1,6 +1,7 @@
package prometheusmetrics
import (
"context"
"testing"
"time"
@@ -52,10 +53,11 @@ func TestDescCache_DescExpire(t *testing.T) {
})
require.NoError(t, err)
require.Eventually(t, func() bool {
ctx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
ma.cleanupDescCache()
return len(ma.descCache) == 0
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
}
// TestDescCacheTimestampUpdate ensures that the timestamp update in getOrCreateDesc
+9 -6
View File
@@ -151,7 +151,8 @@ func TestUpdateMetrics_MetricsDoNotExpire(t *testing.T) {
metricsAggregator.Update(ctx, testLabels, given3)
// then
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
var actual []prometheus.Metric
metricsCh := make(chan prometheus.Metric)
@@ -167,7 +168,7 @@ func TestUpdateMetrics_MetricsDoNotExpire(t *testing.T) {
close(metricsCh)
<-done
return verifyCollectedMetrics(t, expected, actual)
}, testutil.WaitMedium, testutil.IntervalSlow)
}, testutil.IntervalSlow)
}
func verifyCollectedMetrics(t *testing.T, expected []*agentproto.Stats_Metric, actual []prometheus.Metric) bool {
@@ -288,7 +289,8 @@ func TestUpdateMetrics_MetricsExpire(t *testing.T) {
time.Sleep(time.Millisecond * 10) // Ensure that metric is expired
// then
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
var actual []prometheus.Metric
metricsCh := make(chan prometheus.Metric)
@@ -304,7 +306,7 @@ func TestUpdateMetrics_MetricsExpire(t *testing.T) {
close(metricsCh)
<-done
return len(actual) == 0
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
}
func TestLabelsAggregation(t *testing.T) {
@@ -610,7 +612,8 @@ func TestLabelsAggregation(t *testing.T) {
}
// then
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
var actual []prometheus.Metric
metricsCh := make(chan prometheus.Metric)
@@ -626,7 +629,7 @@ func TestLabelsAggregation(t *testing.T) {
close(metricsCh)
<-done
return verifyCollectedMetrics(t, tc.expected, actual)
}, testutil.WaitMedium, testutil.IntervalSlow)
}, testutil.IntervalSlow)
})
}
}
@@ -180,7 +180,8 @@ func TestCollectInsights(t *testing.T) {
require.NoError(t, err)
collected := map[string]int{}
ok := assert.Eventuallyf(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
ok := testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
// When
metrics, err := registry.Gather()
if !assert.NoError(t, err) {
@@ -205,7 +206,7 @@ func TestCollectInsights(t *testing.T) {
}
return assert.ObjectsAreEqualValues(golden, collected)
}, testutil.WaitMedium, testutil.IntervalFast, "template insights are inconsistent with golden files")
}, testutil.IntervalFast, "template insights are inconsistent with golden files")
if !ok {
diff := cmp.Diff(golden, collected)
assert.Empty(t, diff, "template insights are inconsistent with golden files (-golden +collected)")
@@ -105,12 +105,13 @@ func TestActiveUsers(t *testing.T) {
require.NoError(t, err)
t.Cleanup(closeFunc)
require.Eventually(t, func() bool {
ctx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
metrics, err := registry.Gather()
assert.NoError(t, err)
result := int(*metrics[0].Metric[0].Gauge.Value)
return result == tc.Count
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
}
}
@@ -195,7 +196,10 @@ func TestUsers(t *testing.T) {
return true
}
require.Eventually(t, checkFn, testutil.WaitShort, testutil.IntervalFast)
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(_ context.Context) bool {
return checkFn()
}, testutil.IntervalFast)
// Add another dormant user and ensure it updates
dbgen.User(t, db, database.User{Status: database.UserStatusDormant})
@@ -204,7 +208,10 @@ func TestUsers(t *testing.T) {
_, w = mClock.AdvanceNext()
w.MustWait(ctx)
require.Eventually(t, checkFn, testutil.WaitShort, testutil.IntervalFast)
tCtx2 := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx2, t, func(_ context.Context) bool {
return checkFn()
}, testutil.IntervalFast)
})
}
}
@@ -280,7 +287,7 @@ func TestWorkspaceLatestBuildTotals(t *testing.T) {
require.NoError(t, err)
t.Cleanup(closeFunc)
require.Eventually(t, func() bool {
testutil.Eventually(testutil.Context(t, testutil.WaitShort), t, func(ctx context.Context) bool {
metrics, err := registry.Gather()
assert.NoError(t, err)
sum := 0
@@ -305,7 +312,7 @@ func TestWorkspaceLatestBuildTotals(t *testing.T) {
}
t.Logf("sum %d == total %d", sum, tc.Total)
return sum == tc.Total
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
}
}
@@ -383,7 +390,7 @@ func TestWorkspaceLatestBuildStatuses(t *testing.T) {
require.NoError(t, err)
t.Cleanup(closeFunc)
require.Eventually(t, func() bool {
testutil.Eventually(testutil.Context(t, testutil.WaitShort), t, func(ctx context.Context) bool {
metrics, err := registry.Gather()
assert.NoError(t, err)
@@ -418,7 +425,7 @@ func TestWorkspaceLatestBuildStatuses(t *testing.T) {
t.Logf("status series = %d, expected == %d", stSum, tc.ExpectedWorkspaces)
return stSum == tc.ExpectedWorkspaces
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
}
}
@@ -503,7 +510,7 @@ func TestWorkspaceCreationTotal(t *testing.T) {
require.NoError(t, err)
t.Cleanup(closeFunc)
require.Eventually(t, func() bool {
testutil.Eventually(testutil.Context(t, testutil.WaitShort), t, func(ctx context.Context) bool {
metrics, err := registry.Gather()
assert.NoError(t, err)
@@ -519,7 +526,7 @@ func TestWorkspaceCreationTotal(t *testing.T) {
t.Logf("count = %d, expected == %d", sum, tc.ExpectedWorkspaces)
return sum == tc.ExpectedWorkspaces
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
}
}
@@ -594,7 +601,8 @@ func TestAgents(t *testing.T) {
var agentsConnections bool
var agentsApps bool
var agentsExecutionInSeconds bool
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
metrics, err := registry.Gather()
assert.NoError(t, err)
@@ -635,7 +643,7 @@ func TestAgents(t *testing.T) {
}
}
return agentsUp && agentsConnections && agentsApps && agentsExecutionInSeconds
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
}
func TestAgentStats(t *testing.T) {
@@ -738,7 +746,8 @@ func TestAgentStats(t *testing.T) {
collected := map[string]int{}
var executionSeconds bool
assert.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
metrics, err := registry.Gather()
assert.NoError(t, err)
@@ -767,7 +776,7 @@ func TestAgentStats(t *testing.T) {
}
}
return executionSeconds && reflect.DeepEqual(golden, collected)
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
// Keep this assertion, so that "go test" can print differences instead of "Condition never satisfied"
assert.EqualValues(t, golden, collected)
+4 -4
View File
@@ -135,11 +135,11 @@ func TestAcquirer_WaitsOnNoJobs(t *testing.T) {
err = fs.sendCtx(ctx, database.ProvisionerJob{ID: jobID}, nil)
require.NoError(t, err)
acquiree.startAcquire(ctx, uut)
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
fs.mu.Lock()
defer fs.mu.Unlock()
return len(fs.params) == 1
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
acquiree.requireBlocked()
// First send in some with incompatible tags & types
@@ -184,11 +184,11 @@ func TestAcquirer_RetriesPending(t *testing.T) {
jobID := uuid.New()
acquiree.startAcquire(ctx, uut)
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
fs.mu.Lock()
defer fs.mu.Unlock()
return len(fs.params) == 1
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
// First call to DB is in progress. Send in posting
postJob(t, ps, database.ProvisionerTypeEcho, provisionerdserver.Tags{})
+5 -4
View File
@@ -159,12 +159,13 @@ func TestServerTailnet_ReverseProxy(t *testing.T) {
defer res.Body.Close()
assert.Equal(t, http.StatusOK, res.StatusCode)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
metrics, err := registry.Gather()
assert.NoError(t, err)
return testutil.PromCounterHasValue(t, metrics, 1, "coder_servertailnet_connections_total", "tcp") &&
testutil.PromGaugeHasValue(t, metrics, 1, "coder_servertailnet_open_connections", "tcp")
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
t.Run("HostRewrite", func(t *testing.T) {
@@ -463,9 +464,9 @@ func setupServerTailnetAgent(t *testing.T, agentNum int, opts ...tailnettest.DER
})
// Wait for the agent to connect.
require.Eventually(t, func() bool {
testutil.Eventually(testutil.Context(t, testutil.WaitShort), t, func(ctx context.Context) bool {
return coord.Node(manifest.AgentID) != nil
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
agents = append(agents, agentWithID{id: manifest.AgentID, Agent: ag})
}
+4 -10
View File
@@ -1971,14 +1971,11 @@ func TestTemplateMetrics(t *testing.T) {
},
},
}
require.Eventuallyf(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
daus, err = client.TemplateDAUs(ctx, template.ID, codersdk.TimezoneOffsetHour(time.UTC))
require.NoError(t, err)
return len(daus.Entries) > 0
},
testutil.WaitShort, testutil.IntervalFast,
"template daus never loaded",
)
}, testutil.IntervalFast, "template daus never loaded")
gotDAUs, err := client.TemplateDAUs(ctx, template.ID, codersdk.TimezoneOffsetHour(time.UTC))
require.NoError(t, err)
require.Equal(t, gotDAUs, wantDAUs)
@@ -1987,15 +1984,12 @@ func TestTemplateMetrics(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 1, template.ActiveUserCount)
require.Eventuallyf(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
template, err = client.Template(ctx, template.ID)
require.NoError(t, err)
startMs := template.BuildTimeStats[codersdk.WorkspaceTransitionStart].P50
return startMs != nil && *startMs > 1
},
testutil.WaitShort, testutil.IntervalFast,
"BuildTimeStats never loaded",
)
}, testutil.IntervalFast, "BuildTimeStats never loaded")
res, err = client.Workspaces(ctx, codersdk.WorkspaceFilter{})
require.NoError(t, err)
+12 -10
View File
@@ -934,11 +934,11 @@ func TestPatchCancelTemplateVersion(t *testing.T) {
var apiErr *codersdk.Error
require.ErrorAs(t, err, &apiErr)
require.Equal(t, http.StatusBadRequest, apiErr.StatusCode())
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
var err error
version, err = client.TemplateVersion(ctx, version.ID)
return assert.NoError(t, err) && version.Job.Status == codersdk.ProvisionerJobFailed
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
// TODO(Cian): until we are able to test cancellation properly, validating
// Running -> Canceling is the best we can do for now.
@@ -960,7 +960,7 @@ func TestPatchCancelTemplateVersion(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
var err error
version, err = client.TemplateVersion(ctx, version.ID)
if !assert.NoError(t, err) {
@@ -968,10 +968,10 @@ func TestPatchCancelTemplateVersion(t *testing.T) {
}
t.Logf("Status: %s", version.Job.Status)
return version.Job.Status == codersdk.ProvisionerJobRunning
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
err := client.CancelTemplateVersion(ctx, version.ID)
require.NoError(t, err)
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
var err error
version, err = client.TemplateVersion(ctx, version.ID)
// job gets marked Failed when there is an Error; in practice we never get to Status = Canceled
@@ -981,7 +981,7 @@ func TestPatchCancelTemplateVersion(t *testing.T) {
return assert.NoError(t, err) &&
strings.HasSuffix(version.Job.Error, "canceled") &&
version.Job.Status == codersdk.ProvisionerJobFailed
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
}
@@ -1520,10 +1520,11 @@ func TestTemplateVersionDryRun(t *testing.T) {
}()
// Wait for the job to complete
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
job, err := client.TemplateVersionDryRun(ctx, version.ID, job.ID)
return assert.NoError(t, err) && job.Status == codersdk.ProvisionerJobSucceeded
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
<-logsDone
@@ -1616,7 +1617,8 @@ func TestTemplateVersionDryRun(t *testing.T) {
job, err := client.CreateTemplateVersionDryRun(ctx, version.ID, codersdk.CreateTemplateVersionDryRunRequest{})
require.NoError(t, err)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
job, err := client.TemplateVersionDryRun(ctx, version.ID, job.ID)
if !assert.NoError(t, err) {
return false
@@ -1624,7 +1626,7 @@ func TestTemplateVersionDryRun(t *testing.T) {
t.Logf("Status: %s", job.Status)
return job.Status == codersdk.ProvisionerJobSucceeded
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
err = client.CancelTemplateVersionDryRun(ctx, version.ID, job.ID)
var apiErr *codersdk.Error
+4 -4
View File
@@ -2734,20 +2734,20 @@ func TestWorkspaceAgent_UpdatedDERP(t *testing.T) {
currentDerpMap.Store(newDerpMap)
// Wait for the agent's DERP map to be updated.
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
conn := agentCloser.TailnetConn()
if conn == nil {
return false
}
regionIDs := conn.DERPMap().RegionIDs()
return len(regionIDs) == 1 && regionIDs[0] == 2 && conn.Node().PreferredDERP == 2
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
// Wait for the DERP map to be updated on the existing client.
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
regionIDs := conn1.TailnetConn().DERPMap().RegionIDs()
return len(regionIDs) == 1 && regionIDs[0] == 2
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
// The first client should still be able to reach the agent.
ok = conn1.AwaitReachable(ctx)
+6 -6
View File
@@ -372,11 +372,11 @@ func (f *fakePingerCloser) requireNotClosed(t *testing.T) {
}
func (f *fakePingerCloser) requireEventuallyClosed(t *testing.T, code websocket.StatusCode, reason string) {
require.Eventually(t, func() bool {
testutil.Eventually(testutil.Context(t, testutil.WaitShort), t, func(ctx context.Context) bool {
f.Lock()
defer f.Unlock()
return f.closed
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
f.Lock()
defer f.Unlock()
require.Equal(t, code, f.code)
@@ -384,11 +384,11 @@ func (f *fakePingerCloser) requireEventuallyClosed(t *testing.T, code websocket.
}
func (f *fakePingerCloser) requireEventuallyHasPing(t *testing.T) {
require.Eventually(t, func() bool {
testutil.Eventually(testutil.Context(t, testutil.WaitShort), t, func(ctx context.Context) bool {
f.Lock()
defer f.Unlock()
return len(f.pings) > 0
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
}
type fakeUpdater struct {
@@ -403,11 +403,11 @@ func (f *fakeUpdater) publishWorkspaceUpdate(_ context.Context, _ uuid.UUID, eve
}
func (f *fakeUpdater) requireEventuallySomeUpdates(t *testing.T, workspaceID uuid.UUID) {
require.Eventually(t, func() bool {
testutil.Eventually(testutil.Context(t, testutil.WaitShort), t, func(ctx context.Context) bool {
f.Lock()
defer f.Unlock()
return len(f.updates) >= 1
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
f.Lock()
defer f.Unlock()
+3 -2
View File
@@ -195,13 +195,14 @@ func TestWorkspaceAgentRPCRole(t *testing.T) {
// The connection monitor updates the database asynchronously,
// so we need to wait for first_connected_at to be set.
var agent database.WorkspaceAgent
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
agent, err = db.GetWorkspaceAgentByID(dbauthz.AsSystemRestricted(ctx), r.Agents[0].ID)
if err != nil {
return false
}
return agent.FirstConnectedAt.Valid
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
assert.True(t, agent.LastConnectedAt.Valid,
"last_connected_at should be set for agent role")
})
+7 -7
View File
@@ -917,7 +917,7 @@ func Run(t *testing.T, appHostIsPrimary bool, factory DeploymentFactory) {
require.NoError(t, err)
var resp *http.Response
resp, err = doWithRetries(t, appClient, req)
resp, err = doWithRetries(ctx, t, appClient, req)
require.NoError(t, err)
if !assert.Equal(t, http.StatusSeeOther, resp.StatusCode) {
@@ -958,7 +958,7 @@ func Run(t *testing.T, appHostIsPrimary bool, factory DeploymentFactory) {
t.Log("navigating to: ", gotLocation.String())
req, err = http.NewRequestWithContext(ctx, "GET", gotLocation.String(), nil)
require.NoError(t, err)
resp, err = doWithRetries(t, appClient, req)
resp, err = doWithRetries(ctx, t, appClient, req)
require.NoError(t, err)
resp.Body.Close()
require.Equal(t, http.StatusSeeOther, resp.StatusCode)
@@ -1015,7 +1015,7 @@ func Run(t *testing.T, appHostIsPrimary bool, factory DeploymentFactory) {
req, err = http.NewRequestWithContext(ctx, "GET", gotLocation.String(), nil)
require.NoError(t, err)
req.Header.Set(codersdk.SessionTokenHeader, apiKey)
resp, err = doWithRetries(t, appClient, req)
resp, err = doWithRetries(ctx, t, appClient, req)
require.NoError(t, err)
resp.Body.Close()
require.Equal(t, http.StatusOK, resp.StatusCode)
@@ -1044,7 +1044,7 @@ func Run(t *testing.T, appHostIsPrimary bool, factory DeploymentFactory) {
require.NoError(t, err)
var resp *http.Response
resp, err = doWithRetries(t, appClient, req)
resp, err = doWithRetries(ctx, t, appClient, req)
require.NoError(t, err)
defer resp.Body.Close()
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
@@ -2219,7 +2219,7 @@ func Run(t *testing.T, appHostIsPrimary bool, factory DeploymentFactory) {
req.Header["Sec-WebSocket-Key"] = []string{secWebSocketKey}
req.Header.Set(codersdk.SessionTokenHeader, appDetails.SDKClient.SessionToken())
resp, err := doWithRetries(t, appDetails.AppClient(t), req)
resp, err := doWithRetries(ctx, t, appDetails.AppClient(t), req)
require.NoError(t, err)
defer resp.Body.Close()
@@ -2315,12 +2315,12 @@ func Run(t *testing.T, appHostIsPrimary bool, factory DeploymentFactory) {
require.Equal(t, http.StatusOK, resp.StatusCode)
var stats []workspaceapps.StatsReport
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
// Keep flushing until we get a non-empty stats report.
appDetails.FlushStats()
stats = reporter.stats()
return len(stats) > 0
}, testutil.WaitLong, testutil.IntervalFast, "stats not reported")
}, testutil.IntervalFast, "stats not reported")
assert.Equal(t, workspaceapps.AccessMethodPath, stats[0].AccessMethod)
assert.Equal(t, "test-app-owner", stats[0].SlugOrPort)
+5 -5
View File
@@ -561,10 +561,10 @@ func findProtoApp(t *testing.T, protoApps []*proto.App, slug string) *proto.App
return nil
}
func doWithRetries(t require.TestingT, client *codersdk.Client, req *http.Request) (*http.Response, error) {
func doWithRetries(ctx context.Context, t testing.TB, client *codersdk.Client, req *http.Request) (*http.Response, error) {
var resp *http.Response
var err error
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(_ context.Context) bool {
// nolint // only requests which are not passed upstream have a body closed
resp, err = client.HTTPClient.Do(req)
if resp != nil && resp.StatusCode == http.StatusBadGateway {
@@ -574,7 +574,7 @@ func doWithRetries(t require.TestingT, client *codersdk.Client, req *http.Reques
return false
}
return true
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
return resp, err
}
@@ -582,7 +582,7 @@ func requestWithRetries(ctx context.Context, t testing.TB, client *codersdk.Clie
t.Helper()
var resp *http.Response
var err error
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
// nolint // only requests which are not passed upstream have a body closed
resp, err = client.Request(ctx, method, urlOrPath, body, opts...)
if resp != nil && resp.StatusCode == http.StatusBadGateway {
@@ -592,7 +592,7 @@ func requestWithRetries(ctx context.Context, t testing.TB, client *codersdk.Clie
return false
}
return true
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
return resp, err
}
+3 -5
View File
@@ -1086,10 +1086,8 @@ func Test_ResolveRequest(t *testing.T) {
t.Run("UnhealthyAppPermitted", func(t *testing.T) {
t.Parallel()
require.Eventually(t, func() bool {
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
defer cancel()
tCtx := testutil.Context(t, testutil.WaitLong)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
agent, err := client.WorkspaceAgent(ctx, agentID)
if err != nil {
t.Log("could not get agent", err)
@@ -1105,7 +1103,7 @@ func Test_ResolveRequest(t *testing.T) {
t.Log("could not find app")
return false
}, testutil.WaitLong, testutil.IntervalFast, "wait for app to become unhealthy")
}, testutil.IntervalFast, "wait for app to become unhealthy")
req := (workspaceapps.Request{
AccessMethod: workspaceapps.AccessMethodPath,
+3 -3
View File
@@ -10,7 +10,6 @@ import (
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/coderd/database/dbtime"
@@ -313,10 +312,11 @@ func TestStatsCollector(t *testing.T) {
}
var gotStats []workspaceapps.StatsReport
require.Eventually(t, func() bool {
ctx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
gotStats = reporter.stats()
return len(gotStats) == len(tt.want)
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
// Order is not guaranteed.
sortBySessionID := func(a, b workspaceapps.StatsReport) int {
+21 -17
View File
@@ -74,12 +74,13 @@ func TestWorkspaceBuild(t *testing.T) {
_ = coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
// Create workspace will also start a build, so we need to wait for
// it to ensure all events are recorded.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
logs := auditor.AuditLogs()
return len(logs) == 2 &&
assert.Equal(t, logs[0].Ip.IPNet.IP.String(), "127.0.0.1") &&
assert.Equal(t, logs[1].Ip.IPNet.IP.String(), "127.0.0.1")
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
wb, err := client.WorkspaceBuild(testutil.Context(t, testutil.WaitShort), workspace.LatestBuild.ID)
require.NoError(t, err)
require.Equal(t, up.Username, wb.WorkspaceOwnerName)
@@ -574,21 +575,21 @@ func TestPatchCancelWorkspaceBuild(t *testing.T) {
workspace := coderdtest.CreateWorkspace(t, client, template.ID)
var build codersdk.WorkspaceBuild
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
var err error
build, err = client.WorkspaceBuild(ctx, workspace.LatestBuild.ID)
return assert.NoError(t, err) && build.Job.Status == codersdk.ProvisionerJobRunning
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
err := client.CancelWorkspaceBuild(ctx, build.ID, codersdk.CancelWorkspaceBuildParams{})
return err == nil
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
var err error
build, err = client.WorkspaceBuild(ctx, build.ID)
// job gets marked Failed when there is an Error; in practice we never get to Status = Canceled
@@ -597,7 +598,7 @@ func TestPatchCancelWorkspaceBuild(t *testing.T) {
return assert.NoError(t, err) &&
build.Job.Error == "canceled" &&
build.Job.Status == codersdk.ProvisionerJobFailed
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
t.Run("User is not allowed to cancel", func(t *testing.T) {
t.Parallel()
@@ -629,11 +630,12 @@ func TestPatchCancelWorkspaceBuild(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
var err error
build, err = userClient.WorkspaceBuild(ctx, workspace.LatestBuild.ID)
return assert.NoError(t, err) && build.Job.Status == codersdk.ProvisionerJobRunning
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
err := userClient.CancelWorkspaceBuild(ctx, build.ID, codersdk.CancelWorkspaceBuildParams{})
var apiErr *codersdk.Error
require.ErrorAs(t, err, &apiErr)
@@ -721,11 +723,12 @@ func TestPatchCancelWorkspaceBuild(t *testing.T) {
defer cancel()
var build codersdk.WorkspaceBuild
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
var err error
build, err = client.WorkspaceBuild(ctx, workspace.LatestBuild.ID)
return assert.NoError(t, err) && build.Job.Status == codersdk.ProvisionerJobRunning
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
// When: a cancel request is made with expect_state=pending
err := client.CancelWorkspaceBuild(ctx, build.ID, codersdk.CancelWorkspaceBuildParams{
@@ -1678,11 +1681,12 @@ func TestPostWorkspaceBuild(t *testing.T) {
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, build.ID)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
logs := auditor.AuditLogs()
return len(logs) > 0 &&
assert.Equal(t, logs[0].Ip.IPNet.IP.String(), "127.0.0.1")
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
t.Run("IncrementBuildNumber", func(t *testing.T) {
+12 -8
View File
@@ -403,7 +403,8 @@ func TestWorkspace(t *testing.T) {
// Wait for the sub-agent to become unhealthy due to timeout.
var subAgentUnhealthy bool
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
workspace, err = client.Workspace(ctx, workspace.ID)
if err != nil {
return false
@@ -417,7 +418,7 @@ func TestWorkspace(t *testing.T) {
}
}
return false
}, testutil.WaitShort, testutil.IntervalFast, "sub-agent should become unhealthy")
}, testutil.IntervalFast, "sub-agent should become unhealthy")
require.True(t, subAgentUnhealthy, "sub-agent should be unhealthy")
@@ -3004,13 +3005,14 @@ func TestWorkspaceUpdateAutostart(t *testing.T) {
interval := next.Sub(testCase.at)
require.Equal(t, testCase.expectedInterval, interval, "unexpected interval")
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
if len(auditor.AuditLogs()) < 7 {
return false
}
return auditor.AuditLogs()[6].Action == database.AuditActionWrite ||
auditor.AuditLogs()[5].Action == database.AuditActionWrite
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
}
@@ -3168,13 +3170,14 @@ func TestWorkspaceUpdateTTL(t *testing.T) {
require.Equal(t, testCase.ttlMillis, updated.TTLMillis, "expected autostop ttl to equal requested")
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
if len(auditor.AuditLogs()) != 7 {
return false
}
return auditor.AuditLogs()[6].Action == database.AuditActionWrite ||
auditor.AuditLogs()[5].Action == database.AuditActionWrite
}, testutil.WaitMedium, testutil.IntervalFast, "expected audit log to be written")
}, testutil.IntervalFast, "expected audit log to be written")
})
}
@@ -3489,7 +3492,8 @@ func TestWorkspaceUpdateAutomaticUpdates_OK(t *testing.T) {
require.NoError(t, err)
require.Equal(t, codersdk.AutomaticUpdatesAlways, updated.AutomaticUpdates)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
var found bool
for _, l := range auditor.AuditLogs() {
if l.Action == database.AuditActionWrite &&
@@ -3500,7 +3504,7 @@ func TestWorkspaceUpdateAutomaticUpdates_OK(t *testing.T) {
}
}
return found
}, testutil.WaitShort, testutil.IntervalFast, "did not find expected audit log")
}, testutil.IntervalFast, "did not find expected audit log")
}
func TestUpdateWorkspaceAutomaticUpdates_NotFound(t *testing.T) {
+41 -32
View File
@@ -86,7 +86,8 @@ func TestInterruptChatBroadcastsStatusAcrossInstances(t *testing.T) {
require.Equal(t, database.ChatStatusWaiting, updated.Status)
require.False(t, updated.WorkerID.Valid)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
select {
case event := <-events:
if event.Type == codersdk.ChatStreamEventTypeStatus && event.Status != nil {
@@ -97,7 +98,7 @@ func TestInterruptChatBroadcastsStatusAcrossInstances(t *testing.T) {
default:
return false
}
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
}
func TestSubagentChatExcludesWorkspaceProvisioningTools(t *testing.T) {
@@ -193,7 +194,7 @@ func TestSubagentChatExcludesWorkspaceProvisioningTools(t *testing.T) {
// Wait for the root chat AND the subagent to finish.
// The root chat finishes first, then the chatd server
// picks up and runs the child (subagent) chat.
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
got, getErr := expClient.GetChat(ctx, chat.ID)
if getErr != nil {
return false
@@ -207,7 +208,7 @@ func TestSubagentChatExcludesWorkspaceProvisioningTools(t *testing.T) {
toolsMu.Unlock()
// Expect at least 3 calls: root-1 (spawn_agent), child-1, root-2.
return n >= 3
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
// There should be at least two streamed calls: one for the root
// chat and one for the subagent child chat.
@@ -1288,13 +1289,14 @@ func TestRecoverStaleChatsPeriodically(t *testing.T) {
// The startup recovery should have already reset our stale
// chat.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
fromDB, err := db.GetChatByID(ctx, chat.ID)
if err != nil {
return false
}
return fromDB.Status == database.ChatStatusPending
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
// Now simulate a second stale chat appearing AFTER startup.
// This tests the periodic recovery, not just the startup one.
@@ -1317,13 +1319,14 @@ func TestRecoverStaleChatsPeriodically(t *testing.T) {
// The periodic stale recovery loop (running at staleAfter/5 =
// 100ms intervals) should pick this up without a restart.
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
fromDB, err := db.GetChatByID(ctx, chat2.ID)
if err != nil {
return false
}
return fromDB.Status == database.ChatStatusPending
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
}
func TestNewReplicaRecoversStaleChatFromDeadReplica(t *testing.T) {
@@ -1359,14 +1362,15 @@ func TestNewReplicaRecoversStaleChatFromDeadReplica(t *testing.T) {
newReplica := newTestServer(t, db, ps, uuid.New())
_ = newReplica
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
fromDB, err := db.GetChatByID(ctx, chat.ID)
if err != nil {
return false
}
return fromDB.Status == database.ChatStatusPending &&
!fromDB.WorkerID.Valid
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
}
func TestWaitingChatsAreNotRecoveredAsStale(t *testing.T) {
@@ -1594,14 +1598,14 @@ func TestPersistToolResultWithBinaryData(t *testing.T) {
require.NoError(t, err)
var chatResult database.Chat
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
got, getErr := db.GetChatByID(ctx, chat.ID)
if getErr != nil {
return false
}
chatResult = got
return got.Status == database.ChatStatusWaiting || got.Status == database.ChatStatusError
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
if chatResult.Status == database.ChatStatusError {
require.FailNowf(t, "chat run failed", "last_error=%q", chatResult.LastError.String)
@@ -1902,14 +1906,14 @@ func TestCreateWorkspaceTool_EndToEnd(t *testing.T) {
require.NoError(t, err)
var chatResult codersdk.Chat
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
got, getErr := expClient.GetChat(ctx, chat.ID)
if getErr != nil {
return false
}
chatResult = got
return got.Status == codersdk.ChatStatusWaiting || got.Status == codersdk.ChatStatusError
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
if chatResult.Status == codersdk.ChatStatusError {
lastError := ""
@@ -2074,14 +2078,14 @@ func TestStartWorkspaceTool_EndToEnd(t *testing.T) {
require.NoError(t, err)
var chatResult codersdk.Chat
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
got, getErr := expClient.GetChat(ctx, chat.ID)
if getErr != nil {
return false
}
chatResult = got
return got.Status == codersdk.ChatStatusWaiting || got.Status == codersdk.ChatStatusError
}, testutil.WaitSuperLong, testutil.IntervalFast)
}, testutil.IntervalFast)
if chatResult.Status == codersdk.ChatStatusError {
lastError := ""
@@ -2813,26 +2817,29 @@ func TestCloseDuringShutdownContextCanceledShouldRetryOnNewReplica(t *testing.T)
})
require.NoError(t, err)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
fromDB, dbErr := db.GetChatByID(ctx, chat.ID)
if dbErr != nil {
return false
}
return fromDB.Status == database.ChatStatusRunning && fromDB.WorkerID.Valid
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
select {
case <-streamStarted:
return true
default:
return false
}
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
require.NoError(t, serverA.Close())
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
fromDB, dbErr := db.GetChatByID(ctx, chat.ID)
if dbErr != nil {
return false
@@ -2840,7 +2847,7 @@ func TestCloseDuringShutdownContextCanceledShouldRetryOnNewReplica(t *testing.T)
return fromDB.Status == database.ChatStatusPending &&
!fromDB.WorkerID.Valid &&
!fromDB.LastError.Valid
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
loggerB := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true})
serverB := chatd.New(chatd.Config{
@@ -2855,11 +2862,13 @@ func TestCloseDuringShutdownContextCanceledShouldRetryOnNewReplica(t *testing.T)
require.NoError(t, serverB.Close())
})
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return requestCount.Load() >= 2
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
fromDB, dbErr := db.GetChatByID(ctx, chat.ID)
if dbErr != nil {
return false
@@ -2867,7 +2876,7 @@ func TestCloseDuringShutdownContextCanceledShouldRetryOnNewReplica(t *testing.T)
return fromDB.Status == database.ChatStatusWaiting &&
!fromDB.WorkerID.Valid &&
!fromDB.LastError.Valid
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
}
func TestSuccessfulChatSendsWebPushWithSummary(t *testing.T) {
@@ -3189,7 +3198,7 @@ func TestComputerUseSubagentToolsAndModel(t *testing.T) {
// Wait for the root chat AND the computer use child to finish.
// The root chat spawns the child, then the chatd server picks
// up and runs the child (which hits the Anthropic mock).
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
got, getErr := db.GetChatByID(ctx, chat.ID)
if getErr != nil {
return false
@@ -3203,7 +3212,7 @@ func TestComputerUseSubagentToolsAndModel(t *testing.T) {
n := len(anthropicCalls)
anthropicMu.Unlock()
return n >= 1
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
anthropicMu.Lock()
calls := append([]anthropicCall(nil), anthropicCalls...)
@@ -3459,14 +3468,14 @@ func TestProcessChatPanicRecovery(t *testing.T) {
// Wait for the panic to be recovered and the chat to
// transition to error status.
var chatResult database.Chat
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
got, getErr := db.GetChatByID(ctx, chat.ID)
if getErr != nil {
return false
}
chatResult = got
return got.Status == database.ChatStatusError
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
require.True(t, chatResult.LastError.Valid, "LastError should be set")
require.Contains(t, chatResult.LastError.String, "chat processing panicked")
@@ -3626,14 +3635,14 @@ func TestMCPServerToolInvocation(t *testing.T) {
// Wait for the chat to finish processing.
var chatResult database.Chat
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
got, getErr := db.GetChatByID(ctx, chat.ID)
if getErr != nil {
return false
}
chatResult = got
return got.Status == database.ChatStatusWaiting || got.Status == database.ChatStatusError
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
if chatResult.Status == database.ChatStatusError {
require.FailNowf(t, "chat failed", "last_error=%q", chatResult.LastError.String)
+3 -2
View File
@@ -426,13 +426,14 @@ func TestWorkspaceBashBackgroundIntegration(t *testing.T) {
require.Contains(t, result.Output, "Command continues running in background")
// Wait for the background command to complete (even though SSH session timed out)
require.Eventually(t, func() bool {
ctx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
checkArgs := toolsdk.WorkspaceBashArgs{
Workspace: workspace.Name,
Command: `cat /tmp/bg-test-done 2>/dev/null || echo "not found"`,
}
checkResult, err := toolsdk.WorkspaceBash.Handler(t.Context(), deps, checkArgs)
return err == nil && checkResult.Output == "done"
}, testutil.WaitMedium, testutil.IntervalMedium, "Background command should continue running and complete after timeout")
}, testutil.IntervalMedium, "Background command should continue running and complete after timeout")
})
}
@@ -418,10 +418,11 @@ func TestIntegrationWithMetrics(t *testing.T) {
// Then: the interceptions metric should increase to 1.
// This is not exhaustively checking the available metrics; just an indicative one to prove
// the plumbing is working.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
count := promtest.ToFloat64(metrics.InterceptionCount)
return count == 1
}, testutil.WaitShort, testutil.IntervalFast, "interceptions_total metric should be 1")
}, testutil.IntervalFast, "interceptions_total metric should be 1")
}
// TestIntegrationCircuitBreaker validates that the circuit breaker opens after
@@ -3,6 +3,7 @@ package aibridgeproxyd_test
import (
"bufio"
"bytes"
"context"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
@@ -270,14 +271,15 @@ func newTestProxy(t *testing.T, opts ...testProxyOption) *aibridgeproxyd.Server
// Wait for the proxy server to be ready.
proxyAddr := srv.Addr()
require.NotEmpty(t, proxyAddr)
require.Eventually(t, func() bool {
ctx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
conn, err := net.Dial("tcp", proxyAddr)
if err != nil {
return false
}
_ = conn.Close()
return true
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
return srv
}
+29 -20
View File
@@ -50,13 +50,14 @@ func TestProvisionerDaemon_PSK(t *testing.T) {
pty.ExpectMatchContext(ctx, "matt-daemon")
var daemons []codersdk.ProvisionerDaemon
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
daemons, err = client.ProvisionerDaemons(ctx)
if err != nil {
return false
}
return len(daemons) == 1
}, testutil.WaitShort, testutil.IntervalSlow)
}, testutil.IntervalSlow)
require.Equal(t, "matt-daemon", daemons[0].Name)
require.Equal(t, provisionersdk.ScopeOrganization, daemons[0].Tags[provisionersdk.TagScope])
require.Equal(t, buildinfo.Version(), daemons[0].Version)
@@ -128,13 +129,14 @@ func TestProvisionerDaemon_SessionToken(t *testing.T) {
var daemons []codersdk.ProvisionerDaemon
var err error
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
daemons, err = client.ProvisionerDaemons(ctx)
if err != nil {
return false
}
return len(daemons) == 1
}, testutil.WaitShort, testutil.IntervalSlow)
}, testutil.IntervalSlow)
assert.Equal(t, "my-daemon", daemons[0].Name)
assert.Equal(t, provisionersdk.ScopeUser, daemons[0].Tags[provisionersdk.TagScope])
assert.Equal(t, anotherUser.ID.String(), daemons[0].Tags[provisionersdk.TagOwner])
@@ -163,13 +165,14 @@ func TestProvisionerDaemon_SessionToken(t *testing.T) {
var daemons []codersdk.ProvisionerDaemon
var err error
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
daemons, err = client.ProvisionerDaemons(ctx)
if err != nil {
return false
}
return len(daemons) == 1
}, testutil.WaitShort, testutil.IntervalSlow)
}, testutil.IntervalSlow)
assert.Equal(t, "my-daemon", daemons[0].Name)
assert.Equal(t, provisionersdk.ScopeUser, daemons[0].Tags[provisionersdk.TagScope])
// This should get clobbered to the user who started the daemon.
@@ -199,13 +202,14 @@ func TestProvisionerDaemon_SessionToken(t *testing.T) {
var daemons []codersdk.ProvisionerDaemon
var err error
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
daemons, err = client.ProvisionerDaemons(ctx)
if err != nil {
return false
}
return len(daemons) == 1
}, testutil.WaitShort, testutil.IntervalSlow)
}, testutil.IntervalSlow)
assert.Equal(t, "org-daemon", daemons[0].Name)
assert.Equal(t, provisionersdk.ScopeOrganization, daemons[0].Tags[provisionersdk.TagScope])
assert.Equal(t, buildinfo.Version(), daemons[0].Version)
@@ -235,13 +239,14 @@ func TestProvisionerDaemon_SessionToken(t *testing.T) {
var daemons []codersdk.ProvisionerDaemon
var err error
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
daemons, err = client.OrganizationProvisionerDaemons(ctx, anotherOrg.ID, nil)
if err != nil {
return false
}
return len(daemons) == 1
}, testutil.WaitShort, testutil.IntervalSlow)
}, testutil.IntervalSlow)
assert.Equal(t, "org-daemon", daemons[0].Name)
assert.Equal(t, provisionersdk.ScopeUser, daemons[0].Tags[provisionersdk.TagScope])
assert.Equal(t, anotherUser.ID.String(), daemons[0].Tags[provisionersdk.TagOwner])
@@ -281,13 +286,14 @@ func TestProvisionerDaemon_ProvisionerKey(t *testing.T) {
pty.ExpectMatchContext(ctx, "matt-daemon")
var daemons []codersdk.ProvisionerDaemon
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
daemons, err = client.OrganizationProvisionerDaemons(ctx, user.OrganizationID, nil)
if err != nil {
return false
}
return len(daemons) == 1
}, testutil.WaitShort, testutil.IntervalSlow)
}, testutil.IntervalSlow)
require.Equal(t, "matt-daemon", daemons[0].Name)
require.Equal(t, provisionersdk.ScopeOrganization, daemons[0].Tags[provisionersdk.TagScope])
require.Equal(t, buildinfo.Version(), daemons[0].Version)
@@ -326,13 +332,14 @@ func TestProvisionerDaemon_ProvisionerKey(t *testing.T) {
pty.ExpectMatchContext(ctx, `tags={"tag1":"value1","tag2":"value2"}`)
var daemons []codersdk.ProvisionerDaemon
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
daemons, err = client.OrganizationProvisionerDaemons(ctx, user.OrganizationID, nil)
if err != nil {
return false
}
return len(daemons) == 1
}, testutil.WaitShort, testutil.IntervalSlow)
}, testutil.IntervalSlow)
require.Equal(t, "matt-daemon", daemons[0].Name)
require.Equal(t, provisionersdk.ScopeOrganization, daemons[0].Tags[provisionersdk.TagScope])
require.Equal(t, buildinfo.Version(), daemons[0].Version)
@@ -441,13 +448,14 @@ func TestProvisionerDaemon_ProvisionerKey(t *testing.T) {
pty.ExpectNoMatchBefore(ctx, "check entitlement", "starting provisioner daemon")
pty.ExpectMatchContext(ctx, "matt-daemon")
var daemons []codersdk.ProvisionerDaemon
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
daemons, err = client.OrganizationProvisionerDaemons(ctx, anotherOrg.ID, nil)
if err != nil {
return false
}
return len(daemons) == 1
}, testutil.WaitShort, testutil.IntervalSlow)
}, testutil.IntervalSlow)
require.Equal(t, "matt-daemon", daemons[0].Name)
require.Equal(t, provisionersdk.ScopeOrganization, daemons[0].Tags[provisionersdk.TagScope])
require.Equal(t, buildinfo.Version(), daemons[0].Version)
@@ -483,20 +491,21 @@ func TestProvisionerDaemon_PrometheusEnabled(t *testing.T) {
var daemons []codersdk.ProvisionerDaemon
var err error
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
daemons, err = client.ProvisionerDaemons(ctx)
if err != nil {
return false
}
return len(daemons) == 1
}, testutil.WaitLong, testutil.IntervalSlow)
}, testutil.IntervalSlow)
require.Equal(t, "daemon-with-prometheus", daemons[0].Name)
// Fetch metrics from Prometheus endpoint
var req *http.Request
var res *http.Response
httpClient := &http.Client{}
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
req, err = http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("http://127.0.0.1:%d", prometheusPort), nil)
if err != nil {
t.Logf("unable to create new HTTP request: %s", err.Error())
@@ -510,7 +519,7 @@ func TestProvisionerDaemon_PrometheusEnabled(t *testing.T) {
return false
}
return true
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
defer res.Body.Close()
// Scan for metric patterns
+3 -2
View File
@@ -116,13 +116,14 @@ func TestWorkspaceProxy_Server_PrometheusEnabled(t *testing.T) {
// Fetch metrics from Prometheus endpoint
var res *http.Response
client := &http.Client{}
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("http://127.0.0.1:%d", prometheusPort), nil)
assert.NoError(t, err)
// nolint:bodyclose
res, err = client.Do(req)
return err == nil
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
defer res.Body.Close()
// Scan for metric patterns
+5 -4
View File
@@ -44,7 +44,8 @@ func TestServer_Single(t *testing.T) {
clitest.Start(t, inv.WithContext(ctx))
accessURL := waitAccessURL(t, cfg)
client := &http.Client{}
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
reqCtx := testutil.Context(t, testutil.IntervalMedium)
req, err := http.NewRequestWithContext(reqCtx, http.MethodGet, accessURL.String()+"/healthz", nil)
if err != nil {
@@ -61,7 +62,7 @@ func TestServer_Single(t *testing.T) {
panic(err)
}
return assert.Equal(t, "OK", string(bs))
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
}
func waitAccessURL(t *testing.T, cfg config.Root) *url.URL {
@@ -69,10 +70,10 @@ func waitAccessURL(t *testing.T, cfg config.Root) *url.URL {
var err error
var rawURL string
require.Eventually(t, func() bool {
testutil.Eventually(testutil.Context(t, testutil.WaitShort), t, func(ctx context.Context) bool {
rawURL, err = cfg.URL().Read()
return err == nil && rawURL != ""
}, testutil.WaitLong, testutil.IntervalFast, "failed to get access URL")
}, testutil.IntervalFast, "failed to get access URL")
accessURL, err := url.Parse(rawURL)
require.NoError(t, err, "failed to parse access URL")
+18 -14
View File
@@ -210,11 +210,12 @@ func TestEntitlements(t *testing.T) {
require.NoError(t, err)
err = api.Pubsub.Publish(coderd.PubsubEventLicenses, []byte{})
require.NoError(t, err)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
entitlements, err := anotherClient.Entitlements(context.Background())
assert.NoError(t, err)
return entitlements.HasLicense
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
t.Run("Resync", func(t *testing.T) {
t.Parallel()
@@ -254,11 +255,12 @@ func TestEntitlements(t *testing.T) {
JWT: "invalid",
})
require.NoError(t, err)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
entitlements, err := anotherClient.Entitlements(context.Background())
assert.NoError(t, err)
return entitlements.HasLicense
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
}
@@ -339,9 +341,10 @@ func TestEntitlements_Prebuilds(t *testing.T) {
})
// The entitlements will need to refresh before the reconciler is set.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitSuperLong)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return api.AGPL.PrebuildsReconciler.Load() != nil
}, testutil.WaitSuperLong, testutil.IntervalFast)
}, testutil.IntervalFast)
reconciler := api.AGPL.PrebuildsReconciler.Load()
claimer := api.AGPL.PrebuildsClaimer.Load()
@@ -440,7 +443,7 @@ func TestExternalTokenEncryption(t *testing.T) {
require.Len(t, keys, 1)
require.Equal(t, ciphers[0].HexDigest(), keys[0].ActiveKeyDigest.String)
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
entitlements, err := client.Entitlements(context.Background())
assert.NoError(t, err)
feature := entitlements.Features[codersdk.FeatureExternalTokenEncryption]
@@ -454,7 +457,7 @@ func TestExternalTokenEncryption(t *testing.T) {
}
t.Logf("feature: %+v, warnings: %+v, errors: %+v", feature, entitlements.Warnings, entitlements.Errors)
return feature.Enabled && entitled && !warningExists
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
t.Run("Disabled", func(t *testing.T) {
@@ -477,7 +480,7 @@ func TestExternalTokenEncryption(t *testing.T) {
require.NoError(t, err)
require.Empty(t, keys)
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
entitlements, err := client.Entitlements(context.Background())
assert.NoError(t, err)
feature := entitlements.Features[codersdk.FeatureExternalTokenEncryption]
@@ -491,7 +494,7 @@ func TestExternalTokenEncryption(t *testing.T) {
}
t.Logf("feature: %+v, warnings: %+v, errors: %+v", feature, entitlements.Warnings, entitlements.Errors)
return !feature.Enabled && !entitled && !warningExists
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
t.Run("PreviouslyEnabledButMissingFromLicense", func(t *testing.T) {
@@ -522,7 +525,7 @@ func TestExternalTokenEncryption(t *testing.T) {
},
})
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
entitlements, err := client.Entitlements(context.Background())
assert.NoError(t, err)
feature := entitlements.Features[codersdk.FeatureExternalTokenEncryption]
@@ -536,7 +539,7 @@ func TestExternalTokenEncryption(t *testing.T) {
}
t.Logf("feature: %+v, warnings: %+v, errors: %+v", feature, entitlements.Warnings, entitlements.Errors)
return feature.Enabled && !entitled && warningExists
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
}
@@ -1165,11 +1168,12 @@ func TestConn_CoordinatorRollingRestart(t *testing.T) {
require.NoError(t, err)
defer conn.Close()
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
_, p2p, _, err := conn.Ping(ctx)
assert.NoError(t, err)
return p2p == direct
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
// Open a TCP server and connection to it through the tunnel that
// should be maintained throughout the restart.
+2 -2
View File
@@ -557,9 +557,9 @@ func TestDynamicParameterTemplate(t *testing.T) {
// Wait until the cache ends up empty. This verifies the cache does not
// leak any files.
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
return api.AGPL.FileCache.Count() == 0
}, testutil.WaitShort, testutil.IntervalFast, "file cache should be empty after the test")
}, testutil.IntervalFast, "file cache should be empty after the test")
}()
// Initial response
+10 -10
View File
@@ -103,7 +103,7 @@ func TestChatStreamRelay(t *testing.T) {
require.Equal(t, codersdk.ChatStatusPending, chat.Status)
var runningChat database.Chat
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
current, getErr := db.GetChatByID(ctx, chat.ID)
if getErr != nil {
return false
@@ -113,7 +113,7 @@ func TestChatStreamRelay(t *testing.T) {
}
runningChat = current
return true
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
var localClient *codersdk.ExperimentalClient
var relayClient *codersdk.ExperimentalClient
@@ -292,7 +292,7 @@ func TestChatStreamRelay(t *testing.T) {
require.Equal(t, codersdk.ChatStatusPending, chat.Status)
var runningChat database.Chat
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
current, getErr := db.GetChatByID(ctx, chat.ID)
if getErr != nil {
return false
@@ -302,7 +302,7 @@ func TestChatStreamRelay(t *testing.T) {
}
runningChat = current
return true
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
var localClient *codersdk.ExperimentalClient
var relayClient *codersdk.ExperimentalClient
@@ -462,7 +462,7 @@ func TestChatStreamRelay(t *testing.T) {
require.Equal(t, codersdk.ChatStatusPending, chat.Status)
var runningChat database.Chat
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
current, getErr := db.GetChatByID(ctx, chat.ID)
if getErr != nil {
return false
@@ -472,7 +472,7 @@ func TestChatStreamRelay(t *testing.T) {
}
runningChat = current
return true
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
var localClient *codersdk.ExperimentalClient
var relayClient *codersdk.ExperimentalClient
@@ -634,7 +634,7 @@ func TestChatStreamRelay(t *testing.T) {
require.Equal(t, codersdk.ChatStatusPending, chat.Status)
var runningChat database.Chat
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
current, getErr := db.GetChatByID(ctx, chat.ID)
if getErr != nil {
return false
@@ -644,7 +644,7 @@ func TestChatStreamRelay(t *testing.T) {
}
runningChat = current
return true
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
var localClient *codersdk.ExperimentalClient
var relayClient *codersdk.ExperimentalClient
@@ -782,7 +782,7 @@ func TestChatStreamRelay(t *testing.T) {
require.Equal(t, codersdk.ChatStatusPending, chat.Status)
var runningChat database.Chat
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
current, getErr := db.GetChatByID(ctx, chat.ID)
if getErr != nil {
return false
@@ -792,7 +792,7 @@ func TestChatStreamRelay(t *testing.T) {
}
runningChat = current
return true
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
var localClient *codersdk.ExperimentalClient
var relayClient *codersdk.ExperimentalClient
+4 -4
View File
@@ -207,7 +207,7 @@ func TestClaimPrebuild(t *testing.T) {
// Given: a set of running, eligible prebuilds eventually starts up.
runningPrebuilds := make(map[uuid.UUID]database.GetRunningPrebuiltWorkspacesRow, desiredInstances*presetCount)
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
rows, err := spy.GetRunningPrebuiltWorkspaces(ctx)
if err != nil {
return false
@@ -242,7 +242,7 @@ func TestClaimPrebuild(t *testing.T) {
t.Logf("found %d running prebuilds so far, want %d", len(runningPrebuilds), expectedPrebuildsCount)
return len(runningPrebuilds) == expectedPrebuildsCount
}, testutil.WaitSuperLong, testutil.IntervalSlow)
}, testutil.IntervalSlow)
// When: a user creates a new workspace with a preset for which prebuilds are configured.
workspaceName := strings.ReplaceAll(testutil.GetRandomName(t), "_", "-")
@@ -345,7 +345,7 @@ func TestClaimPrebuild(t *testing.T) {
}
}
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
rows, err := spy.GetRunningPrebuiltWorkspaces(ctx)
if err != nil {
return false
@@ -354,7 +354,7 @@ func TestClaimPrebuild(t *testing.T) {
t.Logf("found %d running prebuilds so far, want %d", len(rows), expectedPrebuildsCount)
return len(runningPrebuilds) == expectedPrebuildsCount
}, testutil.WaitSuperLong, testutil.IntervalSlow)
}, testutil.IntervalSlow)
// Then: when restarting the created workspace (which claimed a prebuild), it should not try and claim a new prebuild.
// Prebuilds should ONLY be used for net-new workspaces.
@@ -1756,12 +1756,12 @@ func TestRunLoop(t *testing.T) {
// wait until ReconcileAll is completed
// TODO: is it possible to avoid Eventually and replace it with quartz?
// Ideally to have all control on test-level, and be able to advance loop iterations from the test.
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
newPrebuildCount := getNewPrebuildCount()
// NOTE: preset1 doesn't block creation of instances in preset2
return preset2.DesiredInstances.Int32 == newPrebuildCount
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
// setup one more preset with 5 prebuilds
preset3 := setupTestDBPreset(
@@ -1780,12 +1780,12 @@ func TestRunLoop(t *testing.T) {
clock.Advance(cfg.ReconciliationInterval.Value()).MustWait(ctx)
// wait until ReconcileAll is completed
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
newPrebuildCount := getNewPrebuildCount()
// both prebuilds for preset2 and preset3 were created
return preset2.DesiredInstances.Int32+preset3.DesiredInstances.Int32 == newPrebuildCount
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
// gracefully stop the reconciliation loop
reconciler.Stop(ctx, nil)
+3 -2
View File
@@ -272,14 +272,15 @@ func TestProvisionerDaemonServe(t *testing.T) {
file, err := client.Upload(context.Background(), codersdk.ContentTypeTar, bytes.NewReader(data))
require.NoError(t, err)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
daemons, err := client.ProvisionerDaemons(context.Background())
assert.NoError(t, err, "failed to get provisioner daemons")
return len(daemons) > 0 &&
assert.NotEmpty(t, daemons[0].Name) &&
assert.Equal(t, provisionersdk.ScopeUser, daemons[0].Tags[provisionersdk.TagScope]) &&
assert.Equal(t, user.UserID.String(), daemons[0].Tags[provisionersdk.TagOwner])
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
version, err := client.CreateTemplateVersion(context.Background(), user.OrganizationID, codersdk.CreateTemplateVersionRequest{
Name: "example",
+4 -8
View File
@@ -125,12 +125,10 @@ func TestReplicas(t *testing.T) {
Logger: testutil.Logger(t),
})
require.NoError(t, err)
require.Eventually(t, func() bool {
ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitShort)
defer cancelFunc()
testutil.Eventually(testutil.Context(t, testutil.WaitShort), t, func(ctx context.Context) bool {
_, _, _, err = conn.Ping(ctx)
return err == nil
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
_ = conn.Close()
})
t.Run("ConnectAcrossMultipleTLS", func(t *testing.T) {
@@ -172,12 +170,10 @@ func TestReplicas(t *testing.T) {
Logger: testutil.Logger(t).Named("client"),
})
require.NoError(t, err)
require.Eventually(t, func() bool {
ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.IntervalSlow)
defer cancelFunc()
testutil.Eventually(testutil.Context(t, testutil.WaitShort), t, func(ctx context.Context) bool {
_, _, _, err = conn.Ping(ctx)
return err == nil
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
_ = conn.Close()
replicas, err = secondClient.Replicas(context.Background())
require.NoError(t, err)
+6 -5
View File
@@ -200,15 +200,15 @@ func TestReinitializeAgent(t *testing.T) {
// Wait for prebuilds to create a prebuilt workspace
ctx := testutil.Context(t, testutil.WaitSuperLong)
var prebuildID uuid.UUID
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitLong)
require.True(t, testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
agentAndBuild, err := db.GetAuthenticatedWorkspaceAgentAndBuildByAuthToken(ctx, agentToken)
if err != nil {
return false
}
prebuildID = agentAndBuild.WorkspaceBuild.ID
return true
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast))
prebuild := coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, prebuildID)
preset, err := db.GetPresetByWorkspaceBuildID(ctx, prebuildID)
@@ -245,7 +245,8 @@ func TestReinitializeAgent(t *testing.T) {
waiter.WaitFor(coderdtest.AgentsReady)
var matches [][]byte
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitLong)
require.True(t, testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
// THEN the agent script ran again and reused the same agent token
contents, err := os.ReadFile(tempAgentLog.Name())
if err != nil {
@@ -259,7 +260,7 @@ func TestReinitializeAgent(t *testing.T) {
// As such, we expect to have written the agent environment to the temp file twice.
// Once on initial startup and then once on reinitialization.
return len(matches) == 2
}, testutil.WaitLong, testutil.IntervalMedium)
}, testutil.IntervalMedium))
require.Equal(t, matches[0], matches[1])
})
}
+8 -5
View File
@@ -1,6 +1,7 @@
package coderd_test
import (
"context"
"database/sql"
"encoding/json"
"fmt"
@@ -120,8 +121,9 @@ func TestRegions(t *testing.T) {
require.NoError(t, err)
// Wait for the proxy to become healthy.
require.Eventually(t, func() bool {
healthCtx := testutil.Context(t, testutil.WaitLong)
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
healthCtx, healthCancel := context.WithTimeout(ctx, testutil.WaitLong)
defer healthCancel()
err := api.ProxyHealth.ForceUpdate(healthCtx)
if !assert.NoError(t, err) {
return false
@@ -147,7 +149,7 @@ func TestRegions(t *testing.T) {
}
}
return true
}, testutil.WaitLong, testutil.IntervalMedium)
}, testutil.IntervalMedium)
regions, err := client.Regions(ctx)
require.NoError(t, err)
@@ -590,7 +592,8 @@ func TestProxyRegisterDeregister(t *testing.T) {
// In production, proxies re-register every 30s and
// Kubernetes rolls out gradually, so this is benign.
var registerRes wsproxysdk.RegisterWorkspaceProxyResponse
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
var err error
registerRes, err = proxyClient.RegisterWorkspaceProxy(ctx, wsproxysdk.RegisterWorkspaceProxyRequest{
AccessURL: "https://proxy.coder.test",
@@ -606,7 +609,7 @@ func TestProxyRegisterDeregister(t *testing.T) {
return false
}
return len(registerRes.SiblingReplicas) == i
}, testutil.WaitShort, testutil.IntervalMedium, "expected to register replica %d with %d siblings", i, i)
}, testutil.IntervalMedium, "expected to register replica %d with %d siblings", i, i)
}
})
+11 -8
View File
@@ -2557,7 +2557,7 @@ func TestPrebuildsAutobuild(t *testing.T) {
// Given: reconciliation loop runs and starts prebuilt workspace in failed state
runReconciliationLoop(t, ctx, db, reconciler, presets)
var failedWorkspaceBuilds []database.GetFailedWorkspaceBuildsByTemplateIDRow
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
rows, err := db.GetFailedWorkspaceBuildsByTemplateID(ctx, database.GetFailedWorkspaceBuildsByTemplateIDParams{
TemplateID: template.ID,
})
@@ -2569,7 +2569,7 @@ func TestPrebuildsAutobuild(t *testing.T) {
t.Logf("found %d failed prebuilds so far, want %d", len(failedWorkspaceBuilds), prebuildInstances)
return len(failedWorkspaceBuilds) == int(prebuildInstances)
}, testutil.WaitSuperLong, testutil.IntervalSlow)
}, testutil.IntervalSlow)
require.Len(t, failedWorkspaceBuilds, int(prebuildInstances))
// Given: a failed prebuilt workspace
@@ -3031,9 +3031,10 @@ func TestWorkspaceProvisionerdServerMetrics(t *testing.T) {
"preset_name": presetsPrebuild[0].Name,
"type": "prebuild",
}
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return promhelp.MetricValue(t, reg, "coderd_workspace_creation_duration_seconds", prebuildCreationLabels) != nil
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
prebuildCreationHistogram := promhelp.HistogramValue(t, reg, "coderd_workspace_creation_duration_seconds", prebuildCreationLabels)
require.Equal(t, uint64(1), prebuildCreationHistogram.GetSampleCount())
@@ -3063,9 +3064,10 @@ func TestWorkspaceProvisionerdServerMetrics(t *testing.T) {
"template_name": templatePrebuild.Name,
"preset_name": presetsPrebuild[0].Name,
}
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return promhelp.MetricValue(t, reg, "coderd_prebuilt_workspace_claim_duration_seconds", prebuildClaimLabels) != nil
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
prebuildClaimHistogram := promhelp.HistogramValue(t, reg, "coderd_prebuilt_workspace_claim_duration_seconds", prebuildClaimLabels)
require.Equal(t, uint64(1), prebuildClaimHistogram.GetSampleCount())
@@ -3096,9 +3098,10 @@ func TestWorkspaceProvisionerdServerMetrics(t *testing.T) {
"preset_name": presetsNoPrebuild[0].Name,
"type": "regular",
}
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return promhelp.MetricValue(t, reg, "coderd_workspace_creation_duration_seconds", regularWorkspaceLabels) != nil
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
regularWorkspaceHistogram := promhelp.HistogramValue(t, reg, "coderd_workspace_creation_duration_seconds", regularWorkspaceLabels)
require.Equal(t, uint64(1), regularWorkspaceHistogram.GetSampleCount())
}
+42 -29
View File
@@ -215,7 +215,8 @@ func TestSubscribeRelayReconnectsOnDrop(t *testing.T) {
t.Cleanup(cancel)
// Should get the first relay part.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
select {
case event := <-events:
if event.Type == codersdk.ChatStreamEventTypeMessagePart &&
@@ -227,7 +228,7 @@ func TestSubscribeRelayReconnectsOnDrop(t *testing.T) {
default:
return false
}
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
// Wait for the reconnect timer to be created after the relay
// drop, then advance the mock clock to fire it immediately.
@@ -236,7 +237,8 @@ func TestSubscribeRelayReconnectsOnDrop(t *testing.T) {
// After the first relay closes, the reconnection should deliver
// the second relay part.
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
select {
case event := <-events:
if event.Type == codersdk.ChatStreamEventTypeMessagePart &&
@@ -248,7 +250,7 @@ func TestSubscribeRelayReconnectsOnDrop(t *testing.T) {
default:
return false
}
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
require.GreaterOrEqual(t, int(callCount.Load()), 2)
}
@@ -334,7 +336,8 @@ func TestSubscribeRelayAsyncDoesNotBlock(t *testing.T) {
// The waiting status event should arrive promptly despite the
// relay still dialing.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
select {
case event := <-events:
return event.Type == codersdk.ChatStreamEventTypeStatus &&
@@ -343,7 +346,7 @@ func TestSubscribeRelayAsyncDoesNotBlock(t *testing.T) {
default:
return false
}
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
// Unblock the relay dial so the test can clean up.
close(dialContinue)
@@ -419,7 +422,8 @@ func TestSubscribeRelaySnapshotDelivered(t *testing.T) {
// channel by the enterprise SubscribeFn. Collect them along
// with the live part.
var receivedTexts []string
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
select {
case event := <-events:
if event.Type == codersdk.ChatStreamEventTypeMessagePart &&
@@ -431,7 +435,7 @@ func TestSubscribeRelaySnapshotDelivered(t *testing.T) {
default:
return false
}
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
require.Equal(t, []string{"snap-one", "snap-two", "live-part"}, receivedTexts)
@@ -506,14 +510,15 @@ func TestSubscribeRetryEventAcrossInstances(t *testing.T) {
})
require.NoError(t, err)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
fromDB, dbErr := db.GetChatByID(ctx, chat.ID)
if dbErr != nil {
return false
}
return fromDB.Status == database.ChatStatusRunning &&
fromDB.WorkerID.Valid && fromDB.WorkerID.UUID == workerID
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
select {
case <-firstStreamStarted:
@@ -531,7 +536,7 @@ func TestSubscribeRetryEventAcrossInstances(t *testing.T) {
var waitingSeen bool
var waitingBeforeRetry bool
var assistantMessageBeforeRetry bool
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
select {
case event, ok := <-events:
if !ok {
@@ -558,7 +563,7 @@ func TestSubscribeRetryEventAcrossInstances(t *testing.T) {
default:
return false
}
}, testutil.WaitLong, testutil.IntervalFast)
}, testutil.IntervalFast)
require.NotNil(t, retryEvent)
require.Equal(t, 1, retryEvent.Attempt)
@@ -697,7 +702,8 @@ func TestSubscribeRelayStaleDialDiscardedAfterInterrupt(t *testing.T) {
// and emit the status event before publishing the new running
// notification. This avoids time.Sleep (banned by project
// policy) and provides a deterministic sync point.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
select {
case event := <-events:
return event.Type == codersdk.ChatStreamEventTypeStatus &&
@@ -706,7 +712,7 @@ func TestSubscribeRelayStaleDialDiscardedAfterInterrupt(t *testing.T) {
default:
return false
}
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
// Now the chat transitions to running on the NEW worker.
_, err = db.UpdateChatStatus(ctx, database.UpdateChatStatusParams{
@@ -730,7 +736,8 @@ func TestSubscribeRelayStaleDialDiscardedAfterInterrupt(t *testing.T) {
close(releaseFirstDial)
// The subscriber should receive parts from the NEW worker, not the stale one.
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
select {
case event := <-events:
if event.Type == codersdk.ChatStreamEventTypeMessagePart &&
@@ -748,10 +755,10 @@ func TestSubscribeRelayStaleDialDiscardedAfterInterrupt(t *testing.T) {
default:
return false
}
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
// Drain the events channel for a while to ensure no late-arriving
// stale part sneaks in after the require.Eventually above returned.
// stale part sneaks in after the testutil.Eventually above returned.
// This closes the timing gap where "stale-part" could arrive after
// "new-worker-part" was already consumed.
require.Never(t, func() bool {
@@ -837,14 +844,15 @@ func TestSubscribeCancelDuringInFlightDial(t *testing.T) {
// The provider context must be canceled, causing the goroutine
// to return cleanly.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
select {
case <-dialExited:
return true
default:
return false
}
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
}
// TestSubscribeRelayRunningToRunningSwitch verifies that when a chat
@@ -943,17 +951,19 @@ func TestSubscribeRelayRunningToRunningSwitch(t *testing.T) {
require.NoError(t, err)
// Verify that the relay canceled workerA's stale dial.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
select {
case <-dialAExited:
return true
default:
return false
}
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
// We should receive the part from workerB.
require.Eventually(t, func() bool {
tCtx = testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
select {
case event := <-events:
if event.Type == codersdk.ChatStreamEventTypeMessagePart &&
@@ -965,7 +975,7 @@ func TestSubscribeRelayRunningToRunningSwitch(t *testing.T) {
default:
return false
}
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
require.Equal(t, 2, int(callCount.Load()))
}
@@ -1069,7 +1079,8 @@ func TestSubscribeRelayFailedDialRetries(t *testing.T) {
// The merge loop re-checks the DB, sees the chat is still
// running on the remote worker, and dials again. The second
// dial succeeds.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
select {
case event := <-events:
if event.Type == codersdk.ChatStreamEventTypeMessagePart &&
@@ -1081,7 +1092,7 @@ func TestSubscribeRelayFailedDialRetries(t *testing.T) {
default:
return false
}
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
require.GreaterOrEqual(t, int(callCount.Load()), 2)
}
@@ -1148,7 +1159,8 @@ func TestSubscribeRunningLocalWorkerClosesRelay(t *testing.T) {
t.Cleanup(cancel)
// Consume the remote-part from the initial relay.
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
select {
case event := <-events:
if event.Type == codersdk.ChatStreamEventTypeMessagePart &&
@@ -1160,7 +1172,7 @@ func TestSubscribeRunningLocalWorkerClosesRelay(t *testing.T) {
default:
return false
}
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
// Notify that the LOCAL worker now owns the chat. This should
// close the relay without opening a new one.
@@ -1258,7 +1270,8 @@ func TestSubscribeRelayMultipleReconnects(t *testing.T) {
// Helper to consume a specific relay part.
consumePart := func(text string) {
t.Helper()
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
select {
case event := <-events:
if event.Type == codersdk.ChatStreamEventTypeMessagePart &&
@@ -1270,7 +1283,7 @@ func TestSubscribeRelayMultipleReconnects(t *testing.T) {
default:
return false
}
}, testutil.WaitMedium, testutil.IntervalFast)
}, testutil.IntervalFast)
}
// First relay: consumed immediately (synchronous dial).
+9 -6
View File
@@ -181,9 +181,10 @@ func TestReplica(t *testing.T) {
require.NoError(t, err)
err = pubsub.Publish(replicasync.PubsubEvent, []byte(peer.ID.String()))
require.NoError(t, err)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return len(server.Regional()) == 1
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
_ = server.Close()
})
t.Run("DeletesOld", func(t *testing.T) {
@@ -203,9 +204,10 @@ func TestReplica(t *testing.T) {
})
require.NoError(t, err)
defer server.Close()
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return len(server.Regional()) == 0
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
t.Run("TwentyConcurrent", func(t *testing.T) {
// Ensures that twenty concurrent replicas can spawn and all
@@ -263,9 +265,10 @@ func TestReplica(t *testing.T) {
err = db.DeleteReplicasUpdatedBefore(ctx, dbtime.Now())
require.NoError(t, err)
deleteTime := dbtime.Now()
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return server.Self().UpdatedAt.After(deleteTime)
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
}
+6 -3
View File
@@ -415,9 +415,9 @@ func TestPGCoordinatorUnhealthy(t *testing.T) {
// The querier is informed async about being unhealthy, so we need to wait
// until it is.
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
return !coordinator.querier.isHealthy()
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
pID := uuid.UUID{5}
_, resps := coordinator.Coordinate(ctx, pID, "test", agpl.AgentCoordinateeAuth{ID: pID})
@@ -431,5 +431,8 @@ func TestPGCoordinatorUnhealthy(t *testing.T) {
// shut down the test.
time.Sleep(testutil.IntervalMedium)
_ = coordinator.Close()
require.Eventually(t, ctrl.Satisfied, testutil.WaitShort, testutil.IntervalFast)
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(_ context.Context) bool {
return ctrl.Satisfied()
}, testutil.IntervalFast)
}
+18 -12
View File
@@ -49,7 +49,8 @@ func TestPGCoordinatorSingle_ClientWithoutAgent(t *testing.T) {
client := agpltest.NewClient(ctx, t, coordinator, "client", agentID)
defer client.Close(ctx)
client.UpdateDERP(10)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
clients, err := store.GetTailnetTunnelPeerBindings(ctx, agentID)
if err != nil && !xerrors.Is(err, sql.ErrNoRows) {
t.Fatalf("database error: %v", err)
@@ -62,7 +63,7 @@ func TestPGCoordinatorSingle_ClientWithoutAgent(t *testing.T) {
assert.NoError(t, err)
assert.EqualValues(t, 10, node.PreferredDerp)
return true
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
client.UngracefulDisconnect(ctx)
assertEventuallyLost(ctx, t, store, client.ID)
}
@@ -81,7 +82,8 @@ func TestPGCoordinatorSingle_AgentWithoutClients(t *testing.T) {
agent := agpltest.NewAgent(ctx, t, coordinator, "agent")
defer agent.Close(ctx)
agent.UpdateDERP(10)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
agents, err := store.GetTailnetPeers(ctx, agent.ID)
if err != nil && !xerrors.Is(err, sql.ErrNoRows) {
t.Fatalf("database error: %v", err)
@@ -94,7 +96,7 @@ func TestPGCoordinatorSingle_AgentWithoutClients(t *testing.T) {
assert.NoError(t, err)
assert.EqualValues(t, 10, node.PreferredDerp)
return true
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
agent.UngracefulDisconnect(ctx)
assertEventuallyLost(ctx, t, store, agent.ID)
}
@@ -169,7 +171,8 @@ func TestPGCoordinatorSingle_AgentValidIP(t *testing.T) {
},
PreferredDerp: 10,
})
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
agents, err := store.GetTailnetPeers(ctx, agent.ID)
if err != nil && !xerrors.Is(err, sql.ErrNoRows) {
t.Fatalf("database error: %v", err)
@@ -182,7 +185,7 @@ func TestPGCoordinatorSingle_AgentValidIP(t *testing.T) {
assert.NoError(t, err)
assert.EqualValues(t, 10, node.PreferredDerp)
return true
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
agent.UngracefulDisconnect(ctx)
assertEventuallyLost(ctx, t, store, agent.ID)
}
@@ -385,7 +388,8 @@ func TestPGCoordinatorSingle_SendsHeartbeats(t *testing.T) {
require.NoError(t, err)
defer coordinator.Close()
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitMedium)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
mu.Lock()
defer mu.Unlock()
if len(heartbeats) < 2 {
@@ -394,7 +398,7 @@ func TestPGCoordinatorSingle_SendsHeartbeats(t *testing.T) {
assert.Greater(t, heartbeats[0].Sub(start), time.Duration(0))
assert.Greater(t, heartbeats[1].Sub(start), time.Duration(0))
return assert.Greater(t, heartbeats[1].Sub(heartbeats[0]), tailnet.HeartbeatPeriod*3/4)
}, testutil.WaitMedium, testutil.IntervalMedium)
}, testutil.IntervalMedium)
}
// TestPGCoordinatorDual_Mainline tests with 2 coordinators, one agent connected to each, and 2 clients per agent.
@@ -909,7 +913,8 @@ func TestPGCoordinatorPropogatedPeerContext(t *testing.T) {
func assertEventuallyStatus(ctx context.Context, t *testing.T, store database.Store, agentID uuid.UUID, status database.TailnetStatus) {
t.Helper()
assert.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
peers, err := store.GetTailnetPeers(ctx, agentID)
if xerrors.Is(err, sql.ErrNoRows) {
return false
@@ -923,7 +928,7 @@ func assertEventuallyStatus(ctx context.Context, t *testing.T, store database.St
}
}
return true
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
}
func assertEventuallyLost(ctx context.Context, t *testing.T, store database.Store, agentID uuid.UUID) {
@@ -933,7 +938,8 @@ func assertEventuallyLost(ctx context.Context, t *testing.T, store database.Stor
func assertEventuallyNoClientsForAgent(ctx context.Context, t *testing.T, store database.Store, agentID uuid.UUID) {
t.Helper()
assert.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
clients, err := store.GetTailnetTunnelPeerIDs(ctx, agentID)
if xerrors.Is(err, sql.ErrNoRows) {
return true
@@ -942,7 +948,7 @@ func assertEventuallyNoClientsForAgent(ctx context.Context, t *testing.T, store
t.Fatal(err)
}
return len(clients) == 0
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
}
type fakeCoordinator struct {
+7 -7
View File
@@ -143,7 +143,7 @@ func TestDERP(t *testing.T) {
require.NoError(t, err)
// Wait for all three running proxies to become healthy.
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
err := api.ProxyHealth.ForceUpdate(ctx)
if !assert.NoError(t, err) {
return false
@@ -167,7 +167,7 @@ func TestDERP(t *testing.T) {
// The last region should never be healthy.
assert.False(t, regions[4].Healthy)
return true
}, testutil.WaitLong, testutil.IntervalMedium)
}, testutil.IntervalMedium)
// Create a workspace + apps
authToken := uuid.NewString()
@@ -360,7 +360,7 @@ func TestDERPEndToEnd(t *testing.T) {
// Wait for the proxy to become healthy.
ctx := testutil.Context(t, testutil.WaitLong)
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
err := api.ProxyHealth.ForceUpdate(ctx)
if !assert.NoError(t, err) {
return false
@@ -379,13 +379,13 @@ func TestDERPEndToEnd(t *testing.T) {
}
}
return true
}, testutil.WaitLong, testutil.IntervalMedium)
}, testutil.IntervalMedium)
// Wait until the proxy appears in the DERP map, and then swap out the DERP
// map for one that only contains the proxy region. This allows us to force
// the agent to pick the proxy as its preferred region.
var proxyOnlyDERPMap *tailcfg.DERPMap
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
derpMap := api.AGPL.DERPMap()
if derpMap == nil {
return false
@@ -401,7 +401,7 @@ func TestDERPEndToEnd(t *testing.T) {
}
proxyOnlyDERPMap.OmitDefaultRegions = true
return true
}, testutil.WaitLong, testutil.IntervalMedium)
}, testutil.IntervalMedium)
newDERPMapper := func(_ *tailcfg.DERPMap) *tailcfg.DERPMap {
return proxyOnlyDERPMap
}
@@ -1177,7 +1177,7 @@ func createProxyReplicas(ctx context.Context, t *testing.T, opts *createProxyRep
// Ensure that all proxies have pinged successfully. If replicas haven't
// successfully pinged yet, force them to re-register again. We don't
// use require.Eventually here because it runs the condition function in
// use testutil.Eventually here because each retry must also force the
// replicas to re-register, which a plain condition poll cannot do.
ticker := time.NewTicker(testutil.IntervalSlow)
defer ticker.Stop()
+3 -2
View File
@@ -540,7 +540,8 @@ func goEventuallyStartFakeAgent(ctx context.Context, t *testing.T, client *coder
go func() {
defer close(ch)
var workspace codersdk.Workspace
if !assert.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
if !testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
res, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{})
if err != nil {
return false
@@ -550,7 +551,7 @@ func goEventuallyStartFakeAgent(ctx context.Context, t *testing.T, client *coder
return true
}
return false
}, testutil.WaitShort, testutil.IntervalMedium) {
}, testutil.IntervalMedium) {
return
}
+3 -2
View File
@@ -285,9 +285,10 @@ func setupRunnerTest(t *testing.T) (client *codersdk.Client, agentID uuid.UUID)
_ = agenttest.New(t, client.URL, authToken)
resources := coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitLong)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
t.Log("agent id", resources[0].Agents[0].ID)
return (*api.TailnetCoordinator.Load()).Node(resources[0].Agents[0].ID) != nil
}, testutil.WaitLong, testutil.IntervalMedium, "agent never connected")
}, testutil.IntervalMedium, "agent never connected")
return client, resources[0].Agents[0].ID
}
+12 -8
View File
@@ -54,9 +54,10 @@ func TestServer_SendAndReceiveEmail(t *testing.T) {
err = sendTestEmail(srv.SMTPAddress(), "test@example.com", "Test Subject", "Test Body")
require.NoError(t, err)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return srv.MessageCount() == 1
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
url := fmt.Sprintf("%s/messages", srv.APIAddress())
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
@@ -95,9 +96,10 @@ func TestServer_FilterByEmail(t *testing.T) {
err = sendTestEmail(srv.SMTPAddress(), "test-user@coder.com", "Email for test-user", "Body 2")
require.NoError(t, err)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return srv.MessageCount() == 2
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
url := fmt.Sprintf("%s/messages?email=admin@coder.com", srv.APIAddress())
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
@@ -134,9 +136,10 @@ func TestServer_NotificationTemplateID(t *testing.T) {
err = sendTestEmail(srv.SMTPAddress(), "test-user@coder.com", "Notification", body)
require.NoError(t, err)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return srv.MessageCount() == 1
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
url := fmt.Sprintf("%s/messages", srv.APIAddress())
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
@@ -170,9 +173,10 @@ func TestServer_Purge(t *testing.T) {
err = sendTestEmail(srv.SMTPAddress(), "test-user@coder.com", "Test", "Body")
require.NoError(t, err)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
return srv.MessageCount() == 1
}, testutil.WaitShort, testutil.IntervalMedium)
}, testutil.IntervalMedium)
url := fmt.Sprintf("%s/purge", srv.APIAddress())
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
+3 -3
View File
@@ -8,7 +8,6 @@ import (
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"cdr.dev/slog/v3"
@@ -117,7 +116,8 @@ func Test_Runner(t *testing.T) {
// finish, then start the agents.
go func() {
var workspace codersdk.Workspace
if !assert.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
if !testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
res, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{
Owner: codersdk.Me,
})
@@ -129,7 +129,7 @@ func Test_Runner(t *testing.T) {
return true
}
return false
}, testutil.WaitShort, testutil.IntervalMedium) {
}, testutil.IntervalMedium) {
return
}
+9 -6
View File
@@ -123,14 +123,15 @@ func TestRun(t *testing.T) {
go func() {
defer close(gotMetrics)
// Wait until we get some non-zero metrics before canceling.
assert.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitLong)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
readLatencies := readMetrics.Latencies()
writeLatencies := writeMetrics.Latencies()
return len(readLatencies) > 0 &&
len(writeLatencies) > 0 &&
slices.ContainsFunc(readLatencies, func(f float64) bool { return f > 0.0 }) &&
slices.ContainsFunc(writeLatencies, func(f float64) bool { return f > 0.0 })
}, testutil.WaitLong, testutil.IntervalMedium, "expected non-zero metrics")
}, testutil.IntervalMedium, "expected non-zero metrics")
}()
// Stop the test after we get some non-zero metrics.
@@ -243,14 +244,15 @@ func TestRun(t *testing.T) {
go func() {
defer close(gotMetrics)
// Wait until we get some non-zero metrics before canceling.
assert.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitLong)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
readLatencies := readMetrics.Latencies()
writeLatencies := writeMetrics.Latencies()
return len(readLatencies) > 0 &&
len(writeLatencies) > 0 &&
slices.ContainsFunc(readLatencies, func(f float64) bool { return f > 0.0 }) &&
slices.ContainsFunc(writeLatencies, func(f float64) bool { return f > 0.0 })
}, testutil.WaitLong, testutil.IntervalMedium, "expected non-zero metrics")
}, testutil.IntervalMedium, "expected non-zero metrics")
}()
// Stop the test after we get some non-zero metrics.
@@ -343,14 +345,15 @@ func TestRun(t *testing.T) {
go func() {
defer close(gotMetrics)
// Wait until we get some non-zero metrics before canceling.
assert.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitLong)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
readLatencies := readMetrics.Latencies()
writeLatencies := writeMetrics.Latencies()
return len(readLatencies) > 0 &&
len(writeLatencies) > 0 &&
slices.ContainsFunc(readLatencies, func(f float64) bool { return f > 0.0 }) &&
slices.ContainsFunc(writeLatencies, func(f float64) bool { return f > 0.0 })
}, testutil.WaitLong, testutil.IntervalMedium, "expected non-zero metrics")
}, testutil.IntervalMedium, "expected non-zero metrics")
}()
// Stop the test after we get some non-zero metrics.
+20 -24
View File
@@ -210,42 +210,38 @@ func doNotCallTFailNowInsideGoroutine(m dsl.Matcher) {
Report("Do not call functions that may call t.FailNow in a goroutine, as this can cause data races (see testing.go:834)")
}
// useTestutilEventually ensures that tests use the context-aware
// testutil.Eventually helper instead of require.Eventually or
// assert.Eventually. testutil.Eventually runs the condition inline
// (not in a goroutine), accepts a context for cancellation, and
// passes that context to the condition function.
//
// Unlike useStandardTimeoutsAndDelaysInTests, this rule flags every
// call to the testify Eventually family regardless of its arguments.
//
//nolint:unused,deadcode,varnamelen
func useTestutilEventually(m dsl.Matcher) {
	// Register the testify packages so the require./assert. selectors
	// in the patterns below resolve to the intended imports.
	m.Import("github.com/stretchr/testify/require")
	m.Import("github.com/stretchr/testify/assert")
	// $*_ matches any argument list, so both the plain and the
	// formatted (…f) variants are caught with any arity.
	m.Match(
		`require.Eventually($*_)`,
		`require.Eventuallyf($*_)`,
		`assert.Eventually($*_)`,
		`assert.Eventuallyf($*_)`,
	).
		Report("Use testutil.Eventually instead of require/assert.Eventually. testutil.Eventually is context-aware and does not run the condition in a goroutine.")
}
// useStandardTimeoutsAndDelaysInTests ensures all tests use common
// constants for timeouts and delays in usual scenarios, this allows us
// to tweak them based on platform (important to avoid CI flakes).
//
// Magic numbers passed to require/assert.Eventually are intentionally
// not checked here: useTestutilEventually already flags every call to
// that family unconditionally, so repeating timeout/interval checks on
// those same calls would only produce duplicate diagnostics.
//
//nolint:unused,deadcode,varnamelen
func useStandardTimeoutsAndDelaysInTests(m dsl.Matcher) {
	m.Import("github.com/coder/coder/v2/testutil")

	// Flag literal durations used as context timeouts in test files.
	// The testutil package itself is exempt (it defines the constants),
	// as are timeouts already expressed via testutil.* constants.
	m.Match(`context.WithTimeout($ctx, $duration)`).
		Where(m.File().Imports("testing") && !m.File().PkgPath.Matches("testutil$") && !m["duration"].Text.Matches("^testutil\\.")).
		At(m["duration"]).
		Report("Do not use magic numbers in test timeouts and delays. Use the standard testutil.Wait* or testutil.Interval* constants instead.")
}
// InTx checks to ensure the database used inside the transaction closure is the transaction
+6 -6
View File
@@ -167,7 +167,6 @@ func TestTailnet(t *testing.T) {
t.Run("PingDirect", func(t *testing.T) {
t.Parallel()
logger := testutil.Logger(t)
ctx := testutil.Context(t, testutil.WaitLong)
w1IP := tailnet.TailscaleServicePrefix.RandomAddr()
w1, err := tailnet.NewConn(&tailnet.Options{
Addresses: []netip.Prefix{netip.PrefixFrom(w1IP, 128)},
@@ -190,7 +189,8 @@ func TestTailnet(t *testing.T) {
stitch(t, w1, w2)
require.True(t, w2.AwaitReachable(context.Background(), w1IP))
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
_, direct, pong, err := w2.Ping(ctx, w1IP)
if err != nil {
t.Logf("ping error: %s", err.Error())
@@ -201,13 +201,12 @@ func TestTailnet(t *testing.T) {
return false
}
return true
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
t.Run("PingDERPOnly", func(t *testing.T) {
t.Parallel()
logger := testutil.Logger(t)
ctx := testutil.Context(t, testutil.WaitLong)
w1IP := tailnet.TailscaleServicePrefix.RandomAddr()
w1, err := tailnet.NewConn(&tailnet.Options{
Addresses: []netip.Prefix{netip.PrefixFrom(w1IP, 128)},
@@ -232,7 +231,8 @@ func TestTailnet(t *testing.T) {
stitch(t, w1, w2)
require.True(t, w2.AwaitReachable(context.Background(), w1IP))
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitShort)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
_, direct, pong, err := w2.Ping(ctx, w1IP)
if err != nil {
t.Logf("ping error: %s", err.Error())
@@ -243,7 +243,7 @@ func TestTailnet(t *testing.T) {
return false
}
return true
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
}
+2 -2
View File
@@ -520,11 +520,11 @@ func coordinationTest(
},
}
testutil.RequireSend(ctx, t, resps, &proto.CoordinateResponse{PeerUpdates: updates})
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
fConn.Lock()
defer fConn.Unlock()
return len(fConn.updates) > 0
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
require.Len(t, fConn.updates[0], 1)
require.Equal(t, agentID[:], fConn.updates[0][0].Id)
+8 -8
View File
@@ -34,9 +34,9 @@ func TestCoordinator(t *testing.T) {
Addresses: []string{tailnet.TailscaleServicePrefix.RandomPrefix().String()},
PreferredDerp: 10,
})
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
return coordinator.Node(client.ID) != nil
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
t.Run("ClientWithoutAgent_InvalidIPBits", func(t *testing.T) {
@@ -80,9 +80,9 @@ func TestCoordinator(t *testing.T) {
},
PreferredDerp: 10,
})
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
return coordinator.Node(agent.ID) != nil
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
})
t.Run("AgentWithoutClients_InvalidIP", func(t *testing.T) {
@@ -141,9 +141,9 @@ func TestCoordinator(t *testing.T) {
agent := test.NewAgent(ctx, t, coordinator, "agent")
defer agent.Close(ctx)
agent.UpdateDERP(1)
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
return coordinator.Node(agent.ID) != nil
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
client := test.NewClient(ctx, t, coordinator, "client", agent.ID)
defer client.Close(ctx)
@@ -176,9 +176,9 @@ func TestCoordinator(t *testing.T) {
agent1 := test.NewPeer(ctx, t, coordinator, "agent1", test.WithID(agentID))
defer agent1.Close(ctx)
agent1.UpdateDERP(1)
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
return coordinator.Node(agentID) != nil
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
client := test.NewPeer(ctx, t, coordinator, "client")
defer client.Close(ctx)
+3 -2
View File
@@ -1,6 +1,7 @@
package tailnet
import (
"context"
"net/netip"
"slices"
"testing"
@@ -595,7 +596,7 @@ func TestNodeUpdater_fillPeerDiagnostics(t *testing.T) {
// after node callback, we should get the derp and SentNode is true.
// Use eventually since, there is a race between the callback completing
// and the test checking
require.Eventually(t, func() bool {
testutil.Eventually(ctx, t, func(ctx context.Context) bool {
d := PeerDiagnostics{}
uut.fillPeerDiagnostics(&d)
// preferred DERP should be set right away, even if the callback is not
@@ -604,7 +605,7 @@ func TestNodeUpdater_fillPeerDiagnostics(t *testing.T) {
return false
}
return d.SentNode
}, testutil.WaitShort, testutil.IntervalFast)
}, testutil.IntervalFast)
done := make(chan struct{})
go func() {
+6 -4
View File
@@ -427,10 +427,12 @@ func (b BasicClientStarter) StartClient(t *testing.T, logger slog.Logger, server
if b.WaitForConnection || b.WaitForDirect {
// Wait for connection to be established.
peerIP := tailnet.TailscaleServicePrefix.AddrFromUUID(peer.ID)
require.Eventually(t, func() bool {
tCtx := testutil.Context(t, testutil.WaitLong)
testutil.Eventually(tCtx, t, func(ctx context.Context) bool {
t.Log("attempting ping to peer to judge direct connection")
ctx := testutil.Context(t, testutil.WaitShort)
_, p2p, pong, err := conn.Ping(ctx, peerIP)
pingCtx, pingCancel := context.WithTimeout(ctx, testutil.WaitShort)
defer pingCancel()
_, p2p, pong, err := conn.Ping(pingCtx, peerIP)
if err != nil {
t.Logf("ping failed: %v", err)
return false
@@ -441,7 +443,7 @@ func (b BasicClientStarter) StartClient(t *testing.T, logger slog.Logger, server
}
t.Logf("ping succeeded, p2p=%t, endpoint=%s", p2p, pong.Endpoint)
return true
}, testutil.WaitLong, testutil.IntervalMedium)
}, testutil.IntervalMedium)
}
return conn
+2 -2
View File
@@ -5,7 +5,7 @@ import (
)
// Constants for timing out operations, usable for creating contexts
// that timeout or in require.Eventually.
// that timeout or in testutil.Eventually.
const (
WaitShort = 10 * time.Second
WaitMedium = 15 * time.Second
@@ -14,7 +14,7 @@ const (
)
// Constants for delaying repeated operations, e.g. in
// require.Eventually.
// testutil.Eventually.
const (
IntervalFast = 25 * time.Millisecond
IntervalMedium = 250 * time.Millisecond