Compare commits
27 Commits
v2.30.2
...
release/2.24
| Author | SHA1 | Date | |
|---|---|---|---|
| ecc6d536ba | |||
| d7c483344f | |||
| 20d67d7d71 | |||
| dd62ec494a | |||
| 7541787112 | |||
| b1e8f0a7fc | |||
| 1be409cf2e | |||
| 1e284fe835 | |||
| c219a9a748 | |||
| 5ff749617b | |||
| 7f6cefdb56 | |||
| 9df4992076 | |||
| bc502b52f5 | |||
| d6b2ca1c3b | |||
| 3e0645cdeb | |||
| 0ead64f264 | |||
| 51e60b74d1 | |||
| 33885afbff | |||
| 3c602b0e29 | |||
| 5096582dda | |||
| d027a3f51b | |||
| f97bd76bb5 | |||
| 5059c23b43 | |||
| e5a74a775d | |||
| de494d0a49 | |||
| 774792476c | |||
| 4a61bbeae4 |
@@ -4,7 +4,7 @@ description: |
|
||||
inputs:
|
||||
version:
|
||||
description: "The Go version to use."
|
||||
default: "1.24.4"
|
||||
default: "1.24.6"
|
||||
use-preinstalled-go:
|
||||
description: "Whether to use preinstalled Go."
|
||||
default: "false"
|
||||
|
||||
@@ -256,8 +256,8 @@ jobs:
|
||||
pushd /tmp/proto
|
||||
curl -L -o protoc.zip https://github.com/protocolbuffers/protobuf/releases/download/v23.4/protoc-23.4-linux-x86_64.zip
|
||||
unzip protoc.zip
|
||||
cp -r ./bin/* /usr/local/bin
|
||||
cp -r ./include /usr/local/bin/include
|
||||
sudo cp -r ./bin/* /usr/local/bin
|
||||
sudo cp -r ./include /usr/local/bin/include
|
||||
popd
|
||||
|
||||
- name: make gen
|
||||
@@ -428,6 +428,11 @@ jobs:
|
||||
- name: Disable Spotlight Indexing
|
||||
if: runner.os == 'macOS'
|
||||
run: |
|
||||
enabled=$(sudo mdutil -a -s | grep "Indexing enabled" | wc -l)
|
||||
if [ $enabled -eq 0 ]; then
|
||||
echo "Spotlight indexing is already disabled"
|
||||
exit 0
|
||||
fi
|
||||
sudo mdutil -a -i off
|
||||
sudo mdutil -X /
|
||||
sudo launchctl bootout system /System/Library/LaunchDaemons/com.apple.metadata.mds.plist
|
||||
@@ -983,8 +988,8 @@ jobs:
|
||||
pushd /tmp/proto
|
||||
curl -L -o protoc.zip https://github.com/protocolbuffers/protobuf/releases/download/v23.4/protoc-23.4-linux-x86_64.zip
|
||||
unzip protoc.zip
|
||||
cp -r ./bin/* /usr/local/bin
|
||||
cp -r ./include /usr/local/bin/include
|
||||
sudo cp -r ./bin/* /usr/local/bin
|
||||
sudo cp -r ./include /usr/local/bin/include
|
||||
popd
|
||||
|
||||
- name: Setup Go
|
||||
@@ -1082,7 +1087,7 @@ jobs:
|
||||
- name: Switch XCode Version
|
||||
uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0
|
||||
with:
|
||||
xcode-version: "16.0.0"
|
||||
xcode-version: "16.1.0"
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
@@ -1220,8 +1225,8 @@ jobs:
|
||||
id: gcloud_auth
|
||||
uses: google-github-actions/auth@ba79af03959ebeac9769e648f473a284504d9193 # v2.1.10
|
||||
with:
|
||||
workload_identity_provider: ${{ secrets.GCP_CODE_SIGNING_WORKLOAD_ID_PROVIDER }}
|
||||
service_account: ${{ secrets.GCP_CODE_SIGNING_SERVICE_ACCOUNT }}
|
||||
workload_identity_provider: ${{ vars.GCP_CODE_SIGNING_WORKLOAD_ID_PROVIDER }}
|
||||
service_account: ${{ vars.GCP_CODE_SIGNING_SERVICE_ACCOUNT }}
|
||||
token_format: "access_token"
|
||||
|
||||
- name: Setup GCloud SDK
|
||||
@@ -1259,6 +1264,8 @@ jobs:
|
||||
# do (see above).
|
||||
CODER_SIGN_WINDOWS: "1"
|
||||
CODER_WINDOWS_RESOURCES: "1"
|
||||
CODER_SIGN_GPG: "1"
|
||||
CODER_GPG_RELEASE_KEY_BASE64: ${{ secrets.GPG_RELEASE_KEY_BASE64 }}
|
||||
EV_KEY: ${{ secrets.EV_KEY }}
|
||||
EV_KEYSTORE: ${{ secrets.EV_KEYSTORE }}
|
||||
EV_TSA_URL: ${{ secrets.EV_TSA_URL }}
|
||||
@@ -1519,8 +1526,8 @@ jobs:
|
||||
- name: Authenticate to Google Cloud
|
||||
uses: google-github-actions/auth@ba79af03959ebeac9769e648f473a284504d9193 # v2.1.10
|
||||
with:
|
||||
workload_identity_provider: projects/573722524737/locations/global/workloadIdentityPools/github/providers/github
|
||||
service_account: coder-ci@coder-dogfood.iam.gserviceaccount.com
|
||||
workload_identity_provider: ${{ vars.GCP_WORKLOAD_ID_PROVIDER }}
|
||||
service_account: ${{ vars.GCP_SERVICE_ACCOUNT }}
|
||||
|
||||
- name: Set up Google Cloud SDK
|
||||
uses: google-github-actions/setup-gcloud@77e7a554d41e2ee56fc945c52dfd3f33d12def9a # v2.1.4
|
||||
|
||||
@@ -35,7 +35,11 @@ jobs:
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Setup Nix
|
||||
uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
|
||||
uses: nixbuild/nix-quick-install-action@63ca48f939ee3b8d835f4126562537df0fee5b91 # v32
|
||||
with:
|
||||
# Pinning to 2.28 here, as Nix gets a "error: [json.exception.type_error.302] type must be array, but is string"
|
||||
# on version 2.29 and above.
|
||||
nix_version: "2.28.4"
|
||||
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
with:
|
||||
@@ -127,8 +131,8 @@ jobs:
|
||||
- name: Authenticate to Google Cloud
|
||||
uses: google-github-actions/auth@ba79af03959ebeac9769e648f473a284504d9193 # v2.1.10
|
||||
with:
|
||||
workload_identity_provider: projects/573722524737/locations/global/workloadIdentityPools/github/providers/github
|
||||
service_account: coder-ci@coder-dogfood.iam.gserviceaccount.com
|
||||
workload_identity_provider: ${{ vars.GCP_WORKLOAD_ID_PROVIDER }}
|
||||
service_account: ${{ vars.GCP_SERVICE_ACCOUNT }}
|
||||
|
||||
- name: Terraform init and validate
|
||||
run: |
|
||||
|
||||
@@ -420,7 +420,7 @@ jobs:
|
||||
curl -fsSL "$URL" -o "${DEST}"
|
||||
chmod +x "${DEST}"
|
||||
"${DEST}" version
|
||||
mv "${DEST}" /usr/local/bin/coder
|
||||
sudo mv "${DEST}" /usr/local/bin/coder
|
||||
|
||||
- name: Create first user
|
||||
if: needs.get_info.outputs.NEW == 'true' || github.event.inputs.deploy == 'true'
|
||||
|
||||
@@ -60,7 +60,7 @@ jobs:
|
||||
- name: Switch XCode Version
|
||||
uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0
|
||||
with:
|
||||
xcode-version: "16.0.0"
|
||||
xcode-version: "16.1.0"
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
@@ -288,8 +288,8 @@ jobs:
|
||||
id: gcloud_auth
|
||||
uses: google-github-actions/auth@ba79af03959ebeac9769e648f473a284504d9193 # v2.1.10
|
||||
with:
|
||||
workload_identity_provider: ${{ secrets.GCP_CODE_SIGNING_WORKLOAD_ID_PROVIDER }}
|
||||
service_account: ${{ secrets.GCP_CODE_SIGNING_SERVICE_ACCOUNT }}
|
||||
workload_identity_provider: ${{ vars.GCP_CODE_SIGNING_WORKLOAD_ID_PROVIDER }}
|
||||
service_account: ${{ vars.GCP_CODE_SIGNING_SERVICE_ACCOUNT }}
|
||||
token_format: "access_token"
|
||||
|
||||
- name: Setup GCloud SDK
|
||||
@@ -323,6 +323,8 @@ jobs:
|
||||
env:
|
||||
CODER_SIGN_WINDOWS: "1"
|
||||
CODER_SIGN_DARWIN: "1"
|
||||
CODER_SIGN_GPG: "1"
|
||||
CODER_GPG_RELEASE_KEY_BASE64: ${{ secrets.GPG_RELEASE_KEY_BASE64 }}
|
||||
CODER_WINDOWS_RESOURCES: "1"
|
||||
AC_CERTIFICATE_FILE: /tmp/apple_cert.p12
|
||||
AC_CERTIFICATE_PASSWORD_FILE: /tmp/apple_cert_password.txt
|
||||
@@ -632,6 +634,30 @@ jobs:
|
||||
- name: ls build
|
||||
run: ls -lh build
|
||||
|
||||
- name: Publish Coder CLI binaries and detached signatures to GCS
|
||||
if: ${{ !inputs.dry_run && github.ref == 'refs/heads/main' && github.repository_owner == 'coder'}}
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
|
||||
version="$(./scripts/version.sh)"
|
||||
|
||||
# Source array of slim binaries
|
||||
declare -A binaries
|
||||
binaries["coder-darwin-amd64"]="coder-slim_${version}_darwin_amd64"
|
||||
binaries["coder-darwin-arm64"]="coder-slim_${version}_darwin_arm64"
|
||||
binaries["coder-linux-amd64"]="coder-slim_${version}_linux_amd64"
|
||||
binaries["coder-linux-arm64"]="coder-slim_${version}_linux_arm64"
|
||||
binaries["coder-linux-armv7"]="coder-slim_${version}_linux_armv7"
|
||||
binaries["coder-windows-amd64.exe"]="coder-slim_${version}_windows_amd64.exe"
|
||||
binaries["coder-windows-arm64.exe"]="coder-slim_${version}_windows_arm64.exe"
|
||||
|
||||
for cli_name in "${!binaries[@]}"; do
|
||||
slim_binary="${binaries[$cli_name]}"
|
||||
detached_signature="${slim_binary}.asc"
|
||||
gcloud storage cp "./build/${slim_binary}" "gs://releases.coder.com/coder-cli/${version}/${cli_name}"
|
||||
gcloud storage cp "./build/${detached_signature}" "gs://releases.coder.com/coder-cli/${version}/${cli_name}.asc"
|
||||
done
|
||||
|
||||
- name: Publish release
|
||||
run: |
|
||||
set -euo pipefail
|
||||
@@ -673,8 +699,8 @@ jobs:
|
||||
- name: Authenticate to Google Cloud
|
||||
uses: google-github-actions/auth@ba79af03959ebeac9769e648f473a284504d9193 # v2.1.10
|
||||
with:
|
||||
workload_identity_provider: ${{ secrets.GCP_WORKLOAD_ID_PROVIDER }}
|
||||
service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
|
||||
workload_identity_provider: ${{ vars.GCP_WORKLOAD_ID_PROVIDER }}
|
||||
service_account: ${{ vars.GCP_SERVICE_ACCOUNT }}
|
||||
|
||||
- name: Setup GCloud SDK
|
||||
uses: google-github-actions/setup-gcloud@77e7a554d41e2ee56fc945c52dfd3f33d12def9a # 2.1.4
|
||||
|
||||
@@ -252,6 +252,10 @@ $(CODER_ALL_BINARIES): go.mod go.sum \
|
||||
fi
|
||||
|
||||
cp "$@" "./site/out/bin/coder-$$os-$$arch$$dot_ext"
|
||||
|
||||
if [[ "$${CODER_SIGN_GPG:-0}" == "1" ]]; then
|
||||
cp "$@.asc" "./site/out/bin/coder-$$os-$$arch$$dot_ext.asc"
|
||||
fi
|
||||
fi
|
||||
|
||||
# This task builds Coder Desktop dylibs
|
||||
|
||||
+92
-28
@@ -91,6 +91,7 @@ type Options struct {
|
||||
Execer agentexec.Execer
|
||||
Devcontainers bool
|
||||
DevcontainerAPIOptions []agentcontainers.Option // Enable Devcontainers for these to be effective.
|
||||
Clock quartz.Clock
|
||||
}
|
||||
|
||||
type Client interface {
|
||||
@@ -144,6 +145,9 @@ func New(options Options) Agent {
|
||||
if options.PortCacheDuration == 0 {
|
||||
options.PortCacheDuration = 1 * time.Second
|
||||
}
|
||||
if options.Clock == nil {
|
||||
options.Clock = quartz.NewReal()
|
||||
}
|
||||
|
||||
prometheusRegistry := options.PrometheusRegistry
|
||||
if prometheusRegistry == nil {
|
||||
@@ -157,6 +161,7 @@ func New(options Options) Agent {
|
||||
hardCtx, hardCancel := context.WithCancel(context.Background())
|
||||
gracefulCtx, gracefulCancel := context.WithCancel(hardCtx)
|
||||
a := &agent{
|
||||
clock: options.Clock,
|
||||
tailnetListenPort: options.TailnetListenPort,
|
||||
reconnectingPTYTimeout: options.ReconnectingPTYTimeout,
|
||||
logger: options.Logger,
|
||||
@@ -204,6 +209,7 @@ func New(options Options) Agent {
|
||||
}
|
||||
|
||||
type agent struct {
|
||||
clock quartz.Clock
|
||||
logger slog.Logger
|
||||
client Client
|
||||
exchangeToken func(ctx context.Context) (string, error)
|
||||
@@ -273,7 +279,7 @@ type agent struct {
|
||||
|
||||
devcontainers bool
|
||||
containerAPIOptions []agentcontainers.Option
|
||||
containerAPI atomic.Pointer[agentcontainers.API] // Set by apiHandler.
|
||||
containerAPI *agentcontainers.API
|
||||
}
|
||||
|
||||
func (a *agent) TailnetConn() *tailnet.Conn {
|
||||
@@ -330,6 +336,19 @@ func (a *agent) init() {
|
||||
// will not report anywhere.
|
||||
a.scriptRunner.RegisterMetrics(a.prometheusRegistry)
|
||||
|
||||
if a.devcontainers {
|
||||
containerAPIOpts := []agentcontainers.Option{
|
||||
agentcontainers.WithExecer(a.execer),
|
||||
agentcontainers.WithCommandEnv(a.sshServer.CommandEnv),
|
||||
agentcontainers.WithScriptLogger(func(logSourceID uuid.UUID) agentcontainers.ScriptLogger {
|
||||
return a.logSender.GetScriptLogger(logSourceID)
|
||||
}),
|
||||
}
|
||||
containerAPIOpts = append(containerAPIOpts, a.containerAPIOptions...)
|
||||
|
||||
a.containerAPI = agentcontainers.NewAPI(a.logger.Named("containers"), containerAPIOpts...)
|
||||
}
|
||||
|
||||
a.reconnectingPTYServer = reconnectingpty.NewServer(
|
||||
a.logger.Named("reconnecting-pty"),
|
||||
a.sshServer,
|
||||
@@ -1141,17 +1160,27 @@ func (a *agent) handleManifest(manifestOK *checkpoint) func(ctx context.Context,
|
||||
}
|
||||
|
||||
var (
|
||||
scripts = manifest.Scripts
|
||||
scriptRunnerOpts []agentscripts.InitOption
|
||||
scripts = manifest.Scripts
|
||||
devcontainerScripts map[uuid.UUID]codersdk.WorkspaceAgentScript
|
||||
)
|
||||
if a.devcontainers {
|
||||
var dcScripts []codersdk.WorkspaceAgentScript
|
||||
scripts, dcScripts = agentcontainers.ExtractAndInitializeDevcontainerScripts(manifest.Devcontainers, scripts)
|
||||
// See ExtractAndInitializeDevcontainerScripts for motivation
|
||||
// behind running dcScripts as post start scripts.
|
||||
scriptRunnerOpts = append(scriptRunnerOpts, agentscripts.WithPostStartScripts(dcScripts...))
|
||||
if a.containerAPI != nil {
|
||||
// Init the container API with the manifest and client so that
|
||||
// we can start accepting requests. The final start of the API
|
||||
// happens after the startup scripts have been executed to
|
||||
// ensure the presence of required tools. This means we can
|
||||
// return existing devcontainers but actual container detection
|
||||
// and creation will be deferred.
|
||||
a.containerAPI.Init(
|
||||
agentcontainers.WithManifestInfo(manifest.OwnerName, manifest.WorkspaceName, manifest.AgentName),
|
||||
agentcontainers.WithDevcontainers(manifest.Devcontainers, manifest.Scripts),
|
||||
agentcontainers.WithSubAgentClient(agentcontainers.NewSubAgentClientFromAPI(a.logger, aAPI)),
|
||||
)
|
||||
|
||||
// Since devcontainer are enabled, remove devcontainer scripts
|
||||
// from the main scripts list to avoid showing an error.
|
||||
scripts, devcontainerScripts = agentcontainers.ExtractDevcontainerScripts(manifest.Devcontainers, scripts)
|
||||
}
|
||||
err = a.scriptRunner.Init(scripts, aAPI.ScriptCompleted, scriptRunnerOpts...)
|
||||
err = a.scriptRunner.Init(scripts, aAPI.ScriptCompleted)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("init script runner: %w", err)
|
||||
}
|
||||
@@ -1168,7 +1197,18 @@ func (a *agent) handleManifest(manifestOK *checkpoint) func(ctx context.Context,
|
||||
// finished (both start and post start). For instance, an
|
||||
// autostarted devcontainer will be included in this time.
|
||||
err := a.scriptRunner.Execute(a.gracefulCtx, agentscripts.ExecuteStartScripts)
|
||||
err = errors.Join(err, a.scriptRunner.Execute(a.gracefulCtx, agentscripts.ExecutePostStartScripts))
|
||||
|
||||
if a.containerAPI != nil {
|
||||
// Start the container API after the startup scripts have
|
||||
// been executed to ensure that the required tools can be
|
||||
// installed.
|
||||
a.containerAPI.Start()
|
||||
for _, dc := range manifest.Devcontainers {
|
||||
cErr := a.createDevcontainer(ctx, aAPI, dc, devcontainerScripts[dc.ID])
|
||||
err = errors.Join(err, cErr)
|
||||
}
|
||||
}
|
||||
|
||||
dur := time.Since(start).Seconds()
|
||||
if err != nil {
|
||||
a.logger.Warn(ctx, "startup script(s) failed", slog.Error(err))
|
||||
@@ -1187,14 +1227,6 @@ func (a *agent) handleManifest(manifestOK *checkpoint) func(ctx context.Context,
|
||||
}
|
||||
a.metrics.startupScriptSeconds.WithLabelValues(label).Set(dur)
|
||||
a.scriptRunner.StartCron()
|
||||
|
||||
// If the container API is enabled, trigger an immediate refresh
|
||||
// for quick sub agent injection.
|
||||
if cAPI := a.containerAPI.Load(); cAPI != nil {
|
||||
if err := cAPI.RefreshContainers(ctx); err != nil {
|
||||
a.logger.Error(ctx, "failed to refresh containers", slog.Error(err))
|
||||
}
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("track conn goroutine: %w", err)
|
||||
@@ -1204,6 +1236,38 @@ func (a *agent) handleManifest(manifestOK *checkpoint) func(ctx context.Context,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *agent) createDevcontainer(
|
||||
ctx context.Context,
|
||||
aAPI proto.DRPCAgentClient26,
|
||||
dc codersdk.WorkspaceAgentDevcontainer,
|
||||
script codersdk.WorkspaceAgentScript,
|
||||
) (err error) {
|
||||
var (
|
||||
exitCode = int32(0)
|
||||
startTime = a.clock.Now()
|
||||
status = proto.Timing_OK
|
||||
)
|
||||
if err = a.containerAPI.CreateDevcontainer(dc.WorkspaceFolder, dc.ConfigPath); err != nil {
|
||||
exitCode = 1
|
||||
status = proto.Timing_EXIT_FAILURE
|
||||
}
|
||||
endTime := a.clock.Now()
|
||||
|
||||
if _, scriptErr := aAPI.ScriptCompleted(ctx, &proto.WorkspaceAgentScriptCompletedRequest{
|
||||
Timing: &proto.Timing{
|
||||
ScriptId: script.ID[:],
|
||||
Start: timestamppb.New(startTime),
|
||||
End: timestamppb.New(endTime),
|
||||
ExitCode: exitCode,
|
||||
Stage: proto.Timing_START,
|
||||
Status: status,
|
||||
},
|
||||
}); scriptErr != nil {
|
||||
a.logger.Warn(ctx, "reporting script completed failed", slog.Error(scriptErr))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// createOrUpdateNetwork waits for the manifest to be set using manifestOK, then creates or updates
|
||||
// the tailnet using the information in the manifest
|
||||
func (a *agent) createOrUpdateNetwork(manifestOK, networkOK *checkpoint) func(context.Context, proto.DRPCAgentClient26) error {
|
||||
@@ -1227,7 +1291,6 @@ func (a *agent) createOrUpdateNetwork(manifestOK, networkOK *checkpoint) func(co
|
||||
// agent API.
|
||||
network, err = a.createTailnet(
|
||||
a.gracefulCtx,
|
||||
aAPI,
|
||||
manifest.AgentID,
|
||||
manifest.DERPMap,
|
||||
manifest.DERPForceWebSockets,
|
||||
@@ -1262,9 +1325,9 @@ func (a *agent) createOrUpdateNetwork(manifestOK, networkOK *checkpoint) func(co
|
||||
network.SetBlockEndpoints(manifest.DisableDirectConnections)
|
||||
|
||||
// Update the subagent client if the container API is available.
|
||||
if cAPI := a.containerAPI.Load(); cAPI != nil {
|
||||
if a.containerAPI != nil {
|
||||
client := agentcontainers.NewSubAgentClientFromAPI(a.logger, aAPI)
|
||||
cAPI.UpdateSubAgentClient(client)
|
||||
a.containerAPI.UpdateSubAgentClient(client)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -1382,7 +1445,6 @@ func (a *agent) trackGoroutine(fn func()) error {
|
||||
|
||||
func (a *agent) createTailnet(
|
||||
ctx context.Context,
|
||||
aAPI proto.DRPCAgentClient26,
|
||||
agentID uuid.UUID,
|
||||
derpMap *tailcfg.DERPMap,
|
||||
derpForceWebSockets, disableDirectConnections bool,
|
||||
@@ -1515,10 +1577,7 @@ func (a *agent) createTailnet(
|
||||
}()
|
||||
if err = a.trackGoroutine(func() {
|
||||
defer apiListener.Close()
|
||||
apiHandler, closeAPIHAndler := a.apiHandler(aAPI)
|
||||
defer func() {
|
||||
_ = closeAPIHAndler()
|
||||
}()
|
||||
apiHandler := a.apiHandler()
|
||||
server := &http.Server{
|
||||
BaseContext: func(net.Listener) context.Context { return ctx },
|
||||
Handler: apiHandler,
|
||||
@@ -1532,7 +1591,6 @@ func (a *agent) createTailnet(
|
||||
case <-ctx.Done():
|
||||
case <-a.hardCtx.Done():
|
||||
}
|
||||
_ = closeAPIHAndler()
|
||||
_ = server.Close()
|
||||
}()
|
||||
|
||||
@@ -1871,6 +1929,12 @@ func (a *agent) Close() error {
|
||||
a.logger.Error(a.hardCtx, "script runner close", slog.Error(err))
|
||||
}
|
||||
|
||||
if a.containerAPI != nil {
|
||||
if err := a.containerAPI.Close(); err != nil {
|
||||
a.logger.Error(a.hardCtx, "container API close", slog.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for the graceful shutdown to complete, but don't wait forever so
|
||||
// that we don't break user expectations.
|
||||
go func() {
|
||||
|
||||
+179
-104
@@ -53,7 +53,6 @@ type API struct {
|
||||
cancel context.CancelFunc
|
||||
watcherDone chan struct{}
|
||||
updaterDone chan struct{}
|
||||
initialUpdateDone chan struct{} // Closed after first update in updaterLoop.
|
||||
updateTrigger chan chan error // Channel to trigger manual refresh.
|
||||
updateInterval time.Duration // Interval for periodic container updates.
|
||||
logger slog.Logger
|
||||
@@ -71,13 +70,16 @@ type API struct {
|
||||
|
||||
ownerName string
|
||||
workspaceName string
|
||||
parentAgent string
|
||||
|
||||
mu sync.RWMutex
|
||||
mu sync.RWMutex // Protects the following fields.
|
||||
initDone chan struct{} // Closed by Init.
|
||||
closed bool
|
||||
containers codersdk.WorkspaceAgentListContainersResponse // Output from the last list operation.
|
||||
containersErr error // Error from the last list operation.
|
||||
devcontainerNames map[string]bool // By devcontainer name.
|
||||
knownDevcontainers map[string]codersdk.WorkspaceAgentDevcontainer // By workspace folder.
|
||||
devcontainerLogSourceIDs map[string]uuid.UUID // By workspace folder.
|
||||
configFileModifiedTimes map[string]time.Time // By config file path.
|
||||
recreateSuccessTimes map[string]time.Time // By workspace folder.
|
||||
recreateErrorTimes map[string]time.Time // By workspace folder.
|
||||
@@ -85,8 +87,6 @@ type API struct {
|
||||
usingWorkspaceFolderName map[string]bool // By workspace folder.
|
||||
ignoredDevcontainers map[string]bool // By workspace folder. Tracks three states (true, false and not checked).
|
||||
asyncWg sync.WaitGroup
|
||||
|
||||
devcontainerLogSourceIDs map[string]uuid.UUID // By workspace folder.
|
||||
}
|
||||
|
||||
type subAgentProcess struct {
|
||||
@@ -188,10 +188,11 @@ func WithSubAgentEnv(env ...string) Option {
|
||||
|
||||
// WithManifestInfo sets the owner name, and workspace name
|
||||
// for the sub-agent.
|
||||
func WithManifestInfo(owner, workspace string) Option {
|
||||
func WithManifestInfo(owner, workspace, parentAgent string) Option {
|
||||
return func(api *API) {
|
||||
api.ownerName = owner
|
||||
api.workspaceName = workspace
|
||||
api.parentAgent = parentAgent
|
||||
}
|
||||
}
|
||||
|
||||
@@ -207,6 +208,29 @@ func WithDevcontainers(devcontainers []codersdk.WorkspaceAgentDevcontainer, scri
|
||||
api.devcontainerNames = make(map[string]bool, len(devcontainers))
|
||||
api.devcontainerLogSourceIDs = make(map[string]uuid.UUID)
|
||||
for _, dc := range devcontainers {
|
||||
if dc.Status == "" {
|
||||
dc.Status = codersdk.WorkspaceAgentDevcontainerStatusStarting
|
||||
}
|
||||
logger := api.logger.With(
|
||||
slog.F("devcontainer_id", dc.ID),
|
||||
slog.F("devcontainer_name", dc.Name),
|
||||
slog.F("workspace_folder", dc.WorkspaceFolder),
|
||||
slog.F("config_path", dc.ConfigPath),
|
||||
)
|
||||
|
||||
// Devcontainers have a name originating from Terraform, but
|
||||
// we need to ensure that the name is unique. We will use
|
||||
// the workspace folder name to generate a unique agent name,
|
||||
// and if that fails, we will fall back to the devcontainers
|
||||
// original name.
|
||||
name, usingWorkspaceFolder := api.makeAgentName(dc.WorkspaceFolder, dc.Name)
|
||||
if name != dc.Name {
|
||||
logger = logger.With(slog.F("devcontainer_name", name))
|
||||
logger.Debug(api.ctx, "updating devcontainer name", slog.F("devcontainer_old_name", dc.Name))
|
||||
dc.Name = name
|
||||
api.usingWorkspaceFolderName[dc.WorkspaceFolder] = usingWorkspaceFolder
|
||||
}
|
||||
|
||||
api.knownDevcontainers[dc.WorkspaceFolder] = dc
|
||||
api.devcontainerNames[dc.Name] = true
|
||||
for _, script := range scripts {
|
||||
@@ -218,12 +242,7 @@ func WithDevcontainers(devcontainers []codersdk.WorkspaceAgentDevcontainer, scri
|
||||
}
|
||||
}
|
||||
if api.devcontainerLogSourceIDs[dc.WorkspaceFolder] == uuid.Nil {
|
||||
api.logger.Error(api.ctx, "devcontainer log source ID not found for devcontainer",
|
||||
slog.F("devcontainer_id", dc.ID),
|
||||
slog.F("devcontainer_name", dc.Name),
|
||||
slog.F("workspace_folder", dc.WorkspaceFolder),
|
||||
slog.F("config_path", dc.ConfigPath),
|
||||
)
|
||||
logger.Error(api.ctx, "devcontainer log source ID not found for devcontainer")
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -265,9 +284,7 @@ func NewAPI(logger slog.Logger, options ...Option) *API {
|
||||
api := &API{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
watcherDone: make(chan struct{}),
|
||||
updaterDone: make(chan struct{}),
|
||||
initialUpdateDone: make(chan struct{}),
|
||||
initDone: make(chan struct{}),
|
||||
updateTrigger: make(chan chan error),
|
||||
updateInterval: defaultUpdateInterval,
|
||||
logger: logger,
|
||||
@@ -315,10 +332,47 @@ func NewAPI(logger slog.Logger, options ...Option) *API {
|
||||
api.subAgentClient.Store(&c)
|
||||
}
|
||||
|
||||
return api
|
||||
}
|
||||
|
||||
// Init applies a final set of options to the API and then
|
||||
// closes initDone. This method can only be called once.
|
||||
func (api *API) Init(opts ...Option) {
|
||||
api.mu.Lock()
|
||||
defer api.mu.Unlock()
|
||||
if api.closed {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-api.initDone:
|
||||
return
|
||||
default:
|
||||
}
|
||||
defer close(api.initDone)
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(api)
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the API by initializing the watcher and updater loops.
|
||||
// This method calls Init, if it is desired to apply options after
|
||||
// the API has been created, it should be done by calling Init before
|
||||
// Start. This method must only be called once.
|
||||
func (api *API) Start() {
|
||||
api.Init()
|
||||
|
||||
api.mu.Lock()
|
||||
defer api.mu.Unlock()
|
||||
if api.closed {
|
||||
return
|
||||
}
|
||||
|
||||
api.watcherDone = make(chan struct{})
|
||||
api.updaterDone = make(chan struct{})
|
||||
|
||||
go api.watcherLoop()
|
||||
go api.updaterLoop()
|
||||
|
||||
return api
|
||||
}
|
||||
|
||||
func (api *API) watcherLoop() {
|
||||
@@ -391,21 +445,23 @@ func (api *API) updaterLoop() {
|
||||
} else {
|
||||
api.logger.Debug(api.ctx, "initial containers update complete")
|
||||
}
|
||||
// Signal that the initial update attempt (successful or not) is done.
|
||||
// Other services can wait on this if they need the first data to be available.
|
||||
close(api.initialUpdateDone)
|
||||
|
||||
// We utilize a TickerFunc here instead of a regular Ticker so that
|
||||
// we can guarantee execution of the updateContainers method after
|
||||
// advancing the clock.
|
||||
ticker := api.clock.TickerFunc(api.ctx, api.updateInterval, func() error {
|
||||
done := make(chan error, 1)
|
||||
defer close(done)
|
||||
|
||||
var sent bool
|
||||
defer func() {
|
||||
if !sent {
|
||||
close(done)
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-api.ctx.Done():
|
||||
return api.ctx.Err()
|
||||
case api.updateTrigger <- done:
|
||||
sent = true
|
||||
err := <-done
|
||||
if err != nil {
|
||||
if errors.Is(err, context.Canceled) {
|
||||
@@ -434,6 +490,7 @@ func (api *API) updaterLoop() {
|
||||
// Note that although we pass api.ctx here, updateContainers
|
||||
// has an internal timeout to prevent long blocking calls.
|
||||
done <- api.updateContainers(api.ctx)
|
||||
close(done)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -447,7 +504,7 @@ func (api *API) UpdateSubAgentClient(client SubAgentClient) {
|
||||
func (api *API) Routes() http.Handler {
|
||||
r := chi.NewRouter()
|
||||
|
||||
ensureInitialUpdateDoneMW := func(next http.Handler) http.Handler {
|
||||
ensureInitDoneMW := func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
select {
|
||||
case <-api.ctx.Done():
|
||||
@@ -458,9 +515,8 @@ func (api *API) Routes() http.Handler {
|
||||
return
|
||||
case <-r.Context().Done():
|
||||
return
|
||||
case <-api.initialUpdateDone:
|
||||
// Initial update is done, we can start processing
|
||||
// requests.
|
||||
case <-api.initDone:
|
||||
// API init is done, we can start processing requests.
|
||||
}
|
||||
next.ServeHTTP(rw, r)
|
||||
})
|
||||
@@ -469,13 +525,13 @@ func (api *API) Routes() http.Handler {
|
||||
// For now, all endpoints require the initial update to be done.
|
||||
// If we want to allow some endpoints to be available before
|
||||
// the initial update, we can enable this per-route.
|
||||
r.Use(ensureInitialUpdateDoneMW)
|
||||
r.Use(ensureInitDoneMW)
|
||||
|
||||
r.Get("/", api.handleList)
|
||||
// TODO(mafredri): Simplify this route as the previous /devcontainers
|
||||
// /-route was dropped. We can drop the /devcontainers prefix here too.
|
||||
r.Route("/devcontainers", func(r chi.Router) {
|
||||
r.Post("/container/{container}/recreate", api.handleDevcontainerRecreate)
|
||||
r.Route("/devcontainers/{devcontainer}", func(r chi.Router) {
|
||||
r.Post("/recreate", api.handleDevcontainerRecreate)
|
||||
})
|
||||
|
||||
return r
|
||||
@@ -508,7 +564,6 @@ func (api *API) updateContainers(ctx context.Context) error {
|
||||
// will clear up on the next update.
|
||||
if !errors.Is(err, context.Canceled) {
|
||||
api.mu.Lock()
|
||||
api.containers = codersdk.WorkspaceAgentListContainersResponse{}
|
||||
api.containersErr = err
|
||||
api.mu.Unlock()
|
||||
}
|
||||
@@ -571,7 +626,8 @@ func (api *API) processUpdatedContainersLocked(ctx context.Context, updated code
|
||||
slog.F("config_file", configFile),
|
||||
)
|
||||
|
||||
if len(api.containerLabelIncludeFilter) > 0 {
|
||||
// Filter out devcontainer tests, unless explicitly set in include filters.
|
||||
if len(api.containerLabelIncludeFilter) > 0 || container.Labels[DevcontainerIsTestRunLabel] == "true" {
|
||||
var ok bool
|
||||
for label, value := range api.containerLabelIncludeFilter {
|
||||
if v, found := container.Labels[label]; found && v == value {
|
||||
@@ -777,12 +833,19 @@ func (api *API) RefreshContainers(ctx context.Context) (err error) {
|
||||
}()
|
||||
|
||||
done := make(chan error, 1)
|
||||
var sent bool
|
||||
defer func() {
|
||||
if !sent {
|
||||
close(done)
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-api.ctx.Done():
|
||||
return xerrors.Errorf("API closed: %w", api.ctx.Err())
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case api.updateTrigger <- done:
|
||||
sent = true
|
||||
select {
|
||||
case <-api.ctx.Done():
|
||||
return xerrors.Errorf("API closed: %w", api.ctx.Err())
|
||||
@@ -823,7 +886,7 @@ func (api *API) getContainers() (codersdk.WorkspaceAgentListContainersResponse,
|
||||
devcontainers = append(devcontainers, dc)
|
||||
}
|
||||
slices.SortFunc(devcontainers, func(a, b codersdk.WorkspaceAgentDevcontainer) int {
|
||||
return strings.Compare(a.Name, b.Name)
|
||||
return strings.Compare(a.WorkspaceFolder, b.WorkspaceFolder)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -838,68 +901,40 @@ func (api *API) getContainers() (codersdk.WorkspaceAgentListContainersResponse,
|
||||
// devcontainer by referencing the container.
|
||||
func (api *API) handleDevcontainerRecreate(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
containerID := chi.URLParam(r, "container")
|
||||
devcontainerID := chi.URLParam(r, "devcontainer")
|
||||
|
||||
if containerID == "" {
|
||||
if devcontainerID == "" {
|
||||
httpapi.Write(ctx, w, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Missing container ID or name",
|
||||
Detail: "Container ID or name is required to recreate a devcontainer.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
containers, err := api.getContainers()
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Could not list containers",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
containerIdx := slices.IndexFunc(containers.Containers, func(c codersdk.WorkspaceAgentContainer) bool { return c.Match(containerID) })
|
||||
if containerIdx == -1 {
|
||||
httpapi.Write(ctx, w, http.StatusNotFound, codersdk.Response{
|
||||
Message: "Container not found",
|
||||
Detail: "Container ID or name not found in the list of containers.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
container := containers.Containers[containerIdx]
|
||||
workspaceFolder := container.Labels[DevcontainerLocalFolderLabel]
|
||||
configPath := container.Labels[DevcontainerConfigFileLabel]
|
||||
|
||||
// Workspace folder is required to recreate a container, we don't verify
|
||||
// the config path here because it's optional.
|
||||
if workspaceFolder == "" {
|
||||
httpapi.Write(ctx, w, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Missing workspace folder label",
|
||||
Detail: "The container is not a devcontainer, the container must have the workspace folder label to support recreation.",
|
||||
Message: "Missing devcontainer ID",
|
||||
Detail: "Devcontainer ID is required to recreate a devcontainer.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
api.mu.Lock()
|
||||
|
||||
dc, ok := api.knownDevcontainers[workspaceFolder]
|
||||
switch {
|
||||
case !ok:
|
||||
var dc codersdk.WorkspaceAgentDevcontainer
|
||||
for _, knownDC := range api.knownDevcontainers {
|
||||
if knownDC.ID.String() == devcontainerID {
|
||||
dc = knownDC
|
||||
break
|
||||
}
|
||||
}
|
||||
if dc.ID == uuid.Nil {
|
||||
api.mu.Unlock()
|
||||
|
||||
// This case should not happen if the container is a valid devcontainer.
|
||||
api.logger.Error(ctx, "devcontainer not found for workspace folder", slog.F("workspace_folder", workspaceFolder))
|
||||
httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
|
||||
httpapi.Write(ctx, w, http.StatusNotFound, codersdk.Response{
|
||||
Message: "Devcontainer not found.",
|
||||
Detail: fmt.Sprintf("Could not find devcontainer for workspace folder: %q", workspaceFolder),
|
||||
Detail: fmt.Sprintf("Could not find devcontainer with ID: %q", devcontainerID),
|
||||
})
|
||||
return
|
||||
case dc.Status == codersdk.WorkspaceAgentDevcontainerStatusStarting:
|
||||
}
|
||||
if dc.Status == codersdk.WorkspaceAgentDevcontainerStatusStarting {
|
||||
api.mu.Unlock()
|
||||
|
||||
httpapi.Write(ctx, w, http.StatusConflict, codersdk.Response{
|
||||
Message: "Devcontainer recreation already in progress",
|
||||
Detail: fmt.Sprintf("Recreation for workspace folder %q is already underway.", dc.WorkspaceFolder),
|
||||
Detail: fmt.Sprintf("Recreation for devcontainer %q is already underway.", dc.Name),
|
||||
})
|
||||
return
|
||||
}
|
||||
@@ -909,51 +944,65 @@ func (api *API) handleDevcontainerRecreate(w http.ResponseWriter, r *http.Reques
|
||||
dc.Status = codersdk.WorkspaceAgentDevcontainerStatusStarting
|
||||
dc.Container = nil
|
||||
api.knownDevcontainers[dc.WorkspaceFolder] = dc
|
||||
api.asyncWg.Add(1)
|
||||
go api.recreateDevcontainer(dc, configPath)
|
||||
go func() {
|
||||
_ = api.CreateDevcontainer(dc.WorkspaceFolder, dc.ConfigPath, WithRemoveExistingContainer())
|
||||
}()
|
||||
|
||||
api.mu.Unlock()
|
||||
|
||||
httpapi.Write(ctx, w, http.StatusAccepted, codersdk.Response{
|
||||
Message: "Devcontainer recreation initiated",
|
||||
Detail: fmt.Sprintf("Recreation process for workspace folder %q has started.", dc.WorkspaceFolder),
|
||||
Detail: fmt.Sprintf("Recreation process for devcontainer %q has started.", dc.Name),
|
||||
})
|
||||
}
|
||||
|
||||
// recreateDevcontainer should run in its own goroutine and is responsible for
|
||||
// createDevcontainer should run in its own goroutine and is responsible for
|
||||
// recreating a devcontainer based on the provided devcontainer configuration.
|
||||
// It updates the devcontainer status and logs the process. The configPath is
|
||||
// passed as a parameter for the odd chance that the container being recreated
|
||||
// has a different config file than the one stored in the devcontainer state.
|
||||
// The devcontainer state must be set to starting and the asyncWg must be
|
||||
// incremented before calling this function.
|
||||
func (api *API) recreateDevcontainer(dc codersdk.WorkspaceAgentDevcontainer, configPath string) {
|
||||
defer api.asyncWg.Done()
|
||||
func (api *API) CreateDevcontainer(workspaceFolder, configPath string, opts ...DevcontainerCLIUpOptions) error {
|
||||
api.mu.Lock()
|
||||
if api.closed {
|
||||
api.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
dc, found := api.knownDevcontainers[workspaceFolder]
|
||||
if !found {
|
||||
api.mu.Unlock()
|
||||
return xerrors.Errorf("devcontainer not found")
|
||||
}
|
||||
|
||||
var (
|
||||
err error
|
||||
ctx = api.ctx
|
||||
logger = api.logger.With(
|
||||
slog.F("devcontainer_id", dc.ID),
|
||||
slog.F("devcontainer_name", dc.Name),
|
||||
slog.F("workspace_folder", dc.WorkspaceFolder),
|
||||
slog.F("config_path", configPath),
|
||||
slog.F("config_path", dc.ConfigPath),
|
||||
)
|
||||
)
|
||||
|
||||
// Send logs via agent logging facilities.
|
||||
logSourceID := api.devcontainerLogSourceIDs[dc.WorkspaceFolder]
|
||||
if logSourceID == uuid.Nil {
|
||||
api.logger.Debug(api.ctx, "devcontainer log source ID not found, falling back to external log source ID")
|
||||
logSourceID = agentsdk.ExternalLogSourceID
|
||||
}
|
||||
|
||||
api.asyncWg.Add(1)
|
||||
defer api.asyncWg.Done()
|
||||
api.mu.Unlock()
|
||||
|
||||
if dc.ConfigPath != configPath {
|
||||
logger.Warn(ctx, "devcontainer config path mismatch",
|
||||
slog.F("config_path_param", configPath),
|
||||
)
|
||||
}
|
||||
|
||||
// Send logs via agent logging facilities.
|
||||
logSourceID := api.devcontainerLogSourceIDs[dc.WorkspaceFolder]
|
||||
if logSourceID == uuid.Nil {
|
||||
// Fallback to the external log source ID if not found.
|
||||
logSourceID = agentsdk.ExternalLogSourceID
|
||||
}
|
||||
|
||||
scriptLogger := api.scriptLogger(logSourceID)
|
||||
defer func() {
|
||||
flushCtx, cancel := context.WithTimeout(api.ctx, 5*time.Second)
|
||||
@@ -969,12 +1018,15 @@ func (api *API) recreateDevcontainer(dc codersdk.WorkspaceAgentDevcontainer, con
|
||||
|
||||
logger.Debug(ctx, "starting devcontainer recreation")
|
||||
|
||||
_, err = api.dccli.Up(ctx, dc.WorkspaceFolder, configPath, WithUpOutput(infoW, errW), WithRemoveExistingContainer())
|
||||
upOptions := []DevcontainerCLIUpOptions{WithUpOutput(infoW, errW)}
|
||||
upOptions = append(upOptions, opts...)
|
||||
|
||||
_, err := api.dccli.Up(ctx, dc.WorkspaceFolder, configPath, upOptions...)
|
||||
if err != nil {
|
||||
// No need to log if the API is closing (context canceled), as this
|
||||
// is expected behavior when the API is shutting down.
|
||||
if !errors.Is(err, context.Canceled) {
|
||||
logger.Error(ctx, "devcontainer recreation failed", slog.Error(err))
|
||||
logger.Error(ctx, "devcontainer creation failed", slog.Error(err))
|
||||
}
|
||||
|
||||
api.mu.Lock()
|
||||
@@ -983,10 +1035,11 @@ func (api *API) recreateDevcontainer(dc codersdk.WorkspaceAgentDevcontainer, con
|
||||
api.knownDevcontainers[dc.WorkspaceFolder] = dc
|
||||
api.recreateErrorTimes[dc.WorkspaceFolder] = api.clock.Now("agentcontainers", "recreate", "errorTimes")
|
||||
api.mu.Unlock()
|
||||
return
|
||||
|
||||
return xerrors.Errorf("start devcontainer: %w", err)
|
||||
}
|
||||
|
||||
logger.Info(ctx, "devcontainer recreated successfully")
|
||||
logger.Info(ctx, "devcontainer created successfully")
|
||||
|
||||
api.mu.Lock()
|
||||
dc = api.knownDevcontainers[dc.WorkspaceFolder]
|
||||
@@ -1009,8 +1062,11 @@ func (api *API) recreateDevcontainer(dc codersdk.WorkspaceAgentDevcontainer, con
|
||||
// Ensure an immediate refresh to accurately reflect the
|
||||
// devcontainer state after recreation.
|
||||
if err := api.RefreshContainers(ctx); err != nil {
|
||||
logger.Error(ctx, "failed to trigger immediate refresh after devcontainer recreation", slog.Error(err))
|
||||
logger.Error(ctx, "failed to trigger immediate refresh after devcontainer creation", slog.Error(err))
|
||||
return xerrors.Errorf("refresh containers: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// markDevcontainerDirty finds the devcontainer with the given config file path
|
||||
@@ -1259,6 +1315,7 @@ func (api *API) maybeInjectSubAgentIntoContainerLocked(ctx context.Context, dc c
|
||||
}
|
||||
|
||||
var (
|
||||
featureOptionsAsEnvs []string
|
||||
appsWithPossibleDuplicates []SubAgentApp
|
||||
workspaceFolder = DevcontainerDefaultContainerWorkspaceFolder
|
||||
)
|
||||
@@ -1270,12 +1327,16 @@ func (api *API) maybeInjectSubAgentIntoContainerLocked(ctx context.Context, dc c
|
||||
)
|
||||
|
||||
readConfig := func() (DevcontainerConfig, error) {
|
||||
return api.dccli.ReadConfig(ctx, dc.WorkspaceFolder, dc.ConfigPath, []string{
|
||||
fmt.Sprintf("CODER_WORKSPACE_AGENT_NAME=%s", subAgentConfig.Name),
|
||||
fmt.Sprintf("CODER_WORKSPACE_OWNER_NAME=%s", api.ownerName),
|
||||
fmt.Sprintf("CODER_WORKSPACE_NAME=%s", api.workspaceName),
|
||||
fmt.Sprintf("CODER_URL=%s", api.subAgentURL),
|
||||
})
|
||||
return api.dccli.ReadConfig(ctx, dc.WorkspaceFolder, dc.ConfigPath,
|
||||
append(featureOptionsAsEnvs, []string{
|
||||
fmt.Sprintf("CODER_WORKSPACE_AGENT_NAME=%s", subAgentConfig.Name),
|
||||
fmt.Sprintf("CODER_WORKSPACE_OWNER_NAME=%s", api.ownerName),
|
||||
fmt.Sprintf("CODER_WORKSPACE_NAME=%s", api.workspaceName),
|
||||
fmt.Sprintf("CODER_WORKSPACE_PARENT_AGENT_NAME=%s", api.parentAgent),
|
||||
fmt.Sprintf("CODER_URL=%s", api.subAgentURL),
|
||||
fmt.Sprintf("CONTAINER_ID=%s", container.ID),
|
||||
}...),
|
||||
)
|
||||
}
|
||||
|
||||
if config, err = readConfig(); err != nil {
|
||||
@@ -1291,6 +1352,11 @@ func (api *API) maybeInjectSubAgentIntoContainerLocked(ctx context.Context, dc c
|
||||
|
||||
workspaceFolder = config.Workspace.WorkspaceFolder
|
||||
|
||||
featureOptionsAsEnvs = config.MergedConfiguration.Features.OptionsAsEnvs()
|
||||
if len(featureOptionsAsEnvs) > 0 {
|
||||
configOutdated = true
|
||||
}
|
||||
|
||||
// NOTE(DanielleMaywood):
|
||||
// We only want to take an agent name specified in the root customization layer.
|
||||
// This restricts the ability for a feature to specify the agent name. We may revisit
|
||||
@@ -1415,6 +1481,11 @@ func (api *API) maybeInjectSubAgentIntoContainerLocked(ctx context.Context, dc c
|
||||
return xerrors.Errorf("set agent binary executable: %w", err)
|
||||
}
|
||||
|
||||
// Make sure the agent binary is owned by a valid user so we can run it.
|
||||
if _, err := api.ccli.ExecAs(ctx, container.ID, "root", "/bin/sh", "-c", fmt.Sprintf("chown $(id -u):$(id -g) %s", coderPathInsideContainer)); err != nil {
|
||||
return xerrors.Errorf("set agent binary ownership: %w", err)
|
||||
}
|
||||
|
||||
// Attempt to add CAP_NET_ADMIN to the binary to improve network
|
||||
// performance (optional, allow to fail). See `bootstrap_linux.sh`.
|
||||
// TODO(mafredri): Disable for now until we can figure out why this
|
||||
@@ -1609,8 +1680,12 @@ func (api *API) Close() error {
|
||||
err := api.watcher.Close()
|
||||
|
||||
// Wait for loops to finish.
|
||||
<-api.watcherDone
|
||||
<-api.updaterDone
|
||||
if api.watcherDone != nil {
|
||||
<-api.watcherDone
|
||||
}
|
||||
if api.updaterDone != nil {
|
||||
<-api.updaterDone
|
||||
}
|
||||
|
||||
// Wait for all async tasks to complete.
|
||||
api.asyncWg.Wait()
|
||||
|
||||
@@ -437,6 +437,7 @@ func TestAPI(t *testing.T) {
|
||||
agentcontainers.WithContainerCLI(mLister),
|
||||
agentcontainers.WithContainerLabelIncludeFilter("this.label.does.not.exist.ignore.devcontainers", "true"),
|
||||
)
|
||||
api.Start()
|
||||
defer api.Close()
|
||||
r.Mount("/", api.Routes())
|
||||
|
||||
@@ -492,78 +493,77 @@ func TestAPI(t *testing.T) {
|
||||
t.Run("Recreate", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
validContainer := codersdk.WorkspaceAgentContainer{
|
||||
ID: "container-id",
|
||||
FriendlyName: "container-name",
|
||||
devcontainerID1 := uuid.New()
|
||||
devcontainerID2 := uuid.New()
|
||||
workspaceFolder1 := "/workspace/test1"
|
||||
workspaceFolder2 := "/workspace/test2"
|
||||
configPath1 := "/workspace/test1/.devcontainer/devcontainer.json"
|
||||
configPath2 := "/workspace/test2/.devcontainer/devcontainer.json"
|
||||
|
||||
// Create a container that represents an existing devcontainer
|
||||
devContainer1 := codersdk.WorkspaceAgentContainer{
|
||||
ID: "container-1",
|
||||
FriendlyName: "test-container-1",
|
||||
Running: true,
|
||||
Labels: map[string]string{
|
||||
agentcontainers.DevcontainerLocalFolderLabel: "/workspaces",
|
||||
agentcontainers.DevcontainerConfigFileLabel: "/workspace/.devcontainer/devcontainer.json",
|
||||
agentcontainers.DevcontainerLocalFolderLabel: workspaceFolder1,
|
||||
agentcontainers.DevcontainerConfigFileLabel: configPath1,
|
||||
},
|
||||
}
|
||||
|
||||
missingFolderContainer := codersdk.WorkspaceAgentContainer{
|
||||
ID: "missing-folder-container",
|
||||
FriendlyName: "missing-folder-container",
|
||||
Labels: map[string]string{},
|
||||
devContainer2 := codersdk.WorkspaceAgentContainer{
|
||||
ID: "container-2",
|
||||
FriendlyName: "test-container-2",
|
||||
Running: true,
|
||||
Labels: map[string]string{
|
||||
agentcontainers.DevcontainerLocalFolderLabel: workspaceFolder2,
|
||||
agentcontainers.DevcontainerConfigFileLabel: configPath2,
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
containerID string
|
||||
lister *fakeContainerCLI
|
||||
devcontainerCLI *fakeDevcontainerCLI
|
||||
wantStatus []int
|
||||
wantBody []string
|
||||
name string
|
||||
devcontainerID string
|
||||
setupDevcontainers []codersdk.WorkspaceAgentDevcontainer
|
||||
lister *fakeContainerCLI
|
||||
devcontainerCLI *fakeDevcontainerCLI
|
||||
wantStatus []int
|
||||
wantBody []string
|
||||
}{
|
||||
{
|
||||
name: "Missing container ID",
|
||||
containerID: "",
|
||||
name: "Missing devcontainer ID",
|
||||
devcontainerID: "",
|
||||
lister: &fakeContainerCLI{},
|
||||
devcontainerCLI: &fakeDevcontainerCLI{},
|
||||
wantStatus: []int{http.StatusBadRequest},
|
||||
wantBody: []string{"Missing container ID or name"},
|
||||
wantBody: []string{"Missing devcontainer ID"},
|
||||
},
|
||||
{
|
||||
name: "List error",
|
||||
containerID: "container-id",
|
||||
name: "Devcontainer not found",
|
||||
devcontainerID: uuid.NewString(),
|
||||
lister: &fakeContainerCLI{
|
||||
listErr: xerrors.New("list error"),
|
||||
},
|
||||
devcontainerCLI: &fakeDevcontainerCLI{},
|
||||
wantStatus: []int{http.StatusInternalServerError},
|
||||
wantBody: []string{"Could not list containers"},
|
||||
},
|
||||
{
|
||||
name: "Container not found",
|
||||
containerID: "nonexistent-container",
|
||||
lister: &fakeContainerCLI{
|
||||
containers: codersdk.WorkspaceAgentListContainersResponse{
|
||||
Containers: []codersdk.WorkspaceAgentContainer{validContainer},
|
||||
},
|
||||
arch: "<none>", // Unsupported architecture, don't inject subagent.
|
||||
},
|
||||
devcontainerCLI: &fakeDevcontainerCLI{},
|
||||
wantStatus: []int{http.StatusNotFound},
|
||||
wantBody: []string{"Container not found"},
|
||||
wantBody: []string{"Devcontainer not found"},
|
||||
},
|
||||
{
|
||||
name: "Missing workspace folder label",
|
||||
containerID: "missing-folder-container",
|
||||
lister: &fakeContainerCLI{
|
||||
containers: codersdk.WorkspaceAgentListContainersResponse{
|
||||
Containers: []codersdk.WorkspaceAgentContainer{missingFolderContainer},
|
||||
name: "Devcontainer CLI error",
|
||||
devcontainerID: devcontainerID1.String(),
|
||||
setupDevcontainers: []codersdk.WorkspaceAgentDevcontainer{
|
||||
{
|
||||
ID: devcontainerID1,
|
||||
Name: "test-devcontainer-1",
|
||||
WorkspaceFolder: workspaceFolder1,
|
||||
ConfigPath: configPath1,
|
||||
Status: codersdk.WorkspaceAgentDevcontainerStatusRunning,
|
||||
Container: &devContainer1,
|
||||
},
|
||||
},
|
||||
devcontainerCLI: &fakeDevcontainerCLI{},
|
||||
wantStatus: []int{http.StatusBadRequest},
|
||||
wantBody: []string{"Missing workspace folder label"},
|
||||
},
|
||||
{
|
||||
name: "Devcontainer CLI error",
|
||||
containerID: "container-id",
|
||||
lister: &fakeContainerCLI{
|
||||
containers: codersdk.WorkspaceAgentListContainersResponse{
|
||||
Containers: []codersdk.WorkspaceAgentContainer{validContainer},
|
||||
Containers: []codersdk.WorkspaceAgentContainer{devContainer1},
|
||||
},
|
||||
arch: "<none>", // Unsupported architecture, don't inject subagent.
|
||||
},
|
||||
@@ -574,11 +574,21 @@ func TestAPI(t *testing.T) {
|
||||
wantBody: []string{"Devcontainer recreation initiated", "Devcontainer recreation already in progress"},
|
||||
},
|
||||
{
|
||||
name: "OK",
|
||||
containerID: "container-id",
|
||||
name: "OK",
|
||||
devcontainerID: devcontainerID2.String(),
|
||||
setupDevcontainers: []codersdk.WorkspaceAgentDevcontainer{
|
||||
{
|
||||
ID: devcontainerID2,
|
||||
Name: "test-devcontainer-2",
|
||||
WorkspaceFolder: workspaceFolder2,
|
||||
ConfigPath: configPath2,
|
||||
Status: codersdk.WorkspaceAgentDevcontainerStatusRunning,
|
||||
Container: &devContainer2,
|
||||
},
|
||||
},
|
||||
lister: &fakeContainerCLI{
|
||||
containers: codersdk.WorkspaceAgentListContainersResponse{
|
||||
Containers: []codersdk.WorkspaceAgentContainer{validContainer},
|
||||
Containers: []codersdk.WorkspaceAgentContainer{devContainer2},
|
||||
},
|
||||
arch: "<none>", // Unsupported architecture, don't inject subagent.
|
||||
},
|
||||
@@ -607,13 +617,17 @@ func TestAPI(t *testing.T) {
|
||||
|
||||
// Setup router with the handler under test.
|
||||
r := chi.NewRouter()
|
||||
|
||||
api := agentcontainers.NewAPI(
|
||||
logger,
|
||||
agentcontainers.WithClock(mClock),
|
||||
agentcontainers.WithContainerCLI(tt.lister),
|
||||
agentcontainers.WithDevcontainerCLI(tt.devcontainerCLI),
|
||||
agentcontainers.WithWatcher(watcher.NewNoop()),
|
||||
agentcontainers.WithDevcontainers(tt.setupDevcontainers, nil),
|
||||
)
|
||||
|
||||
api.Start()
|
||||
defer api.Close()
|
||||
r.Mount("/", api.Routes())
|
||||
|
||||
@@ -624,7 +638,7 @@ func TestAPI(t *testing.T) {
|
||||
|
||||
for i := range tt.wantStatus {
|
||||
// Simulate HTTP request to the recreate endpoint.
|
||||
req := httptest.NewRequest(http.MethodPost, "/devcontainers/container/"+tt.containerID+"/recreate", nil).
|
||||
req := httptest.NewRequest(http.MethodPost, "/devcontainers/"+tt.devcontainerID+"/recreate", nil).
|
||||
WithContext(ctx)
|
||||
rec := httptest.NewRecorder()
|
||||
r.ServeHTTP(rec, req)
|
||||
@@ -747,6 +761,7 @@ func TestAPI(t *testing.T) {
|
||||
knownDevcontainers []codersdk.WorkspaceAgentDevcontainer
|
||||
wantStatus int
|
||||
wantCount int
|
||||
wantTestContainer bool
|
||||
verify func(t *testing.T, devcontainers []codersdk.WorkspaceAgentDevcontainer)
|
||||
}{
|
||||
{
|
||||
@@ -993,6 +1008,13 @@ func TestAPI(t *testing.T) {
|
||||
assert.Len(t, names, 4, "should have four unique devcontainer names")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Include test containers",
|
||||
lister: &fakeContainerCLI{},
|
||||
wantStatus: http.StatusOK,
|
||||
wantTestContainer: true,
|
||||
wantCount: 1, // Will be appended.
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
@@ -1005,14 +1027,33 @@ func TestAPI(t *testing.T) {
|
||||
mClock.Set(time.Now()).MustWait(testutil.Context(t, testutil.WaitShort))
|
||||
tickerTrap := mClock.Trap().TickerFunc("updaterLoop")
|
||||
|
||||
// This container should be ignored unless explicitly included.
|
||||
tt.lister.containers.Containers = append(tt.lister.containers.Containers, codersdk.WorkspaceAgentContainer{
|
||||
ID: "test-container-1",
|
||||
FriendlyName: "test-container-1",
|
||||
Running: true,
|
||||
Labels: map[string]string{
|
||||
agentcontainers.DevcontainerLocalFolderLabel: "/workspace/test1",
|
||||
agentcontainers.DevcontainerConfigFileLabel: "/workspace/test1/.devcontainer/devcontainer.json",
|
||||
agentcontainers.DevcontainerIsTestRunLabel: "true",
|
||||
},
|
||||
})
|
||||
|
||||
// Setup router with the handler under test.
|
||||
r := chi.NewRouter()
|
||||
apiOptions := []agentcontainers.Option{
|
||||
agentcontainers.WithClock(mClock),
|
||||
agentcontainers.WithContainerCLI(tt.lister),
|
||||
agentcontainers.WithDevcontainerCLI(&fakeDevcontainerCLI{}),
|
||||
agentcontainers.WithWatcher(watcher.NewNoop()),
|
||||
}
|
||||
|
||||
if tt.wantTestContainer {
|
||||
apiOptions = append(apiOptions, agentcontainers.WithContainerLabelIncludeFilter(
|
||||
agentcontainers.DevcontainerIsTestRunLabel, "true",
|
||||
))
|
||||
}
|
||||
|
||||
// Generate matching scripts for the known devcontainers
|
||||
// (required to extract log source ID).
|
||||
var scripts []codersdk.WorkspaceAgentScript
|
||||
@@ -1027,6 +1068,7 @@ func TestAPI(t *testing.T) {
|
||||
}
|
||||
|
||||
api := agentcontainers.NewAPI(logger, apiOptions...)
|
||||
api.Start()
|
||||
defer api.Close()
|
||||
|
||||
r.Mount("/", api.Routes())
|
||||
@@ -1038,6 +1080,11 @@ func TestAPI(t *testing.T) {
|
||||
tickerTrap.MustWait(ctx).MustRelease(ctx)
|
||||
tickerTrap.Close()
|
||||
|
||||
for _, dc := range tt.knownDevcontainers {
|
||||
err := api.CreateDevcontainer(dc.WorkspaceFolder, dc.ConfigPath)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Advance the clock to run the updater loop.
|
||||
_, aw := mClock.AdvanceNext()
|
||||
aw.MustWait(ctx)
|
||||
@@ -1111,6 +1158,7 @@ func TestAPI(t *testing.T) {
|
||||
[]codersdk.WorkspaceAgentScript{{LogSourceID: uuid.New(), ID: dc.ID}},
|
||||
),
|
||||
)
|
||||
api.Start()
|
||||
defer api.Close()
|
||||
|
||||
// Make sure the ticker function has been registered
|
||||
@@ -1206,6 +1254,7 @@ func TestAPI(t *testing.T) {
|
||||
agentcontainers.WithWatcher(fWatcher),
|
||||
agentcontainers.WithClock(mClock),
|
||||
)
|
||||
api.Start()
|
||||
defer api.Close()
|
||||
|
||||
r := chi.NewRouter()
|
||||
@@ -1343,6 +1392,7 @@ func TestAPI(t *testing.T) {
|
||||
mCCLI.EXPECT().ExecAs(gomock.Any(), "test-container-id", "root", "mkdir", "-p", "/.coder-agent").Return(nil, nil),
|
||||
mCCLI.EXPECT().Copy(gomock.Any(), "test-container-id", coderBin, "/.coder-agent/coder").Return(nil),
|
||||
mCCLI.EXPECT().ExecAs(gomock.Any(), "test-container-id", "root", "chmod", "0755", "/.coder-agent", "/.coder-agent/coder").Return(nil, nil),
|
||||
mCCLI.EXPECT().ExecAs(gomock.Any(), "test-container-id", "root", "/bin/sh", "-c", "chown $(id -u):$(id -g) /.coder-agent/coder").Return(nil, nil),
|
||||
)
|
||||
|
||||
mClock.Set(time.Now()).MustWait(ctx)
|
||||
@@ -1356,8 +1406,9 @@ func TestAPI(t *testing.T) {
|
||||
agentcontainers.WithSubAgentClient(fakeSAC),
|
||||
agentcontainers.WithSubAgentURL("test-subagent-url"),
|
||||
agentcontainers.WithDevcontainerCLI(fakeDCCLI),
|
||||
agentcontainers.WithManifestInfo("test-user", "test-workspace"),
|
||||
agentcontainers.WithManifestInfo("test-user", "test-workspace", "test-parent-agent"),
|
||||
)
|
||||
api.Start()
|
||||
apiClose := func() {
|
||||
closeOnce.Do(func() {
|
||||
// Close before api.Close() defer to avoid deadlock after test.
|
||||
@@ -1377,7 +1428,9 @@ func TestAPI(t *testing.T) {
|
||||
assert.Contains(t, envs, "CODER_WORKSPACE_AGENT_NAME=coder")
|
||||
assert.Contains(t, envs, "CODER_WORKSPACE_NAME=test-workspace")
|
||||
assert.Contains(t, envs, "CODER_WORKSPACE_OWNER_NAME=test-user")
|
||||
assert.Contains(t, envs, "CODER_WORKSPACE_PARENT_AGENT_NAME=test-parent-agent")
|
||||
assert.Contains(t, envs, "CODER_URL=test-subagent-url")
|
||||
assert.Contains(t, envs, "CONTAINER_ID=test-container-id")
|
||||
return nil
|
||||
})
|
||||
|
||||
@@ -1428,6 +1481,7 @@ func TestAPI(t *testing.T) {
|
||||
mCCLI.EXPECT().ExecAs(gomock.Any(), "test-container-id", "root", "mkdir", "-p", "/.coder-agent").Return(nil, nil),
|
||||
mCCLI.EXPECT().Copy(gomock.Any(), "test-container-id", coderBin, "/.coder-agent/coder").Return(nil),
|
||||
mCCLI.EXPECT().ExecAs(gomock.Any(), "test-container-id", "root", "chmod", "0755", "/.coder-agent", "/.coder-agent/coder").Return(nil, nil),
|
||||
mCCLI.EXPECT().ExecAs(gomock.Any(), "test-container-id", "root", "/bin/sh", "-c", "chown $(id -u):$(id -g) /.coder-agent/coder").Return(nil, nil),
|
||||
)
|
||||
|
||||
// Verify that the agent has started.
|
||||
@@ -1488,6 +1542,7 @@ func TestAPI(t *testing.T) {
|
||||
mCCLI.EXPECT().ExecAs(gomock.Any(), "new-test-container-id", "root", "mkdir", "-p", "/.coder-agent").Return(nil, nil),
|
||||
mCCLI.EXPECT().Copy(gomock.Any(), "new-test-container-id", coderBin, "/.coder-agent/coder").Return(nil),
|
||||
mCCLI.EXPECT().ExecAs(gomock.Any(), "new-test-container-id", "root", "chmod", "0755", "/.coder-agent", "/.coder-agent/coder").Return(nil, nil),
|
||||
mCCLI.EXPECT().ExecAs(gomock.Any(), "new-test-container-id", "root", "/bin/sh", "-c", "chown $(id -u):$(id -g) /.coder-agent/coder").Return(nil, nil),
|
||||
)
|
||||
|
||||
fakeDCCLI.readConfig.MergedConfiguration.Customizations.Coder = []agentcontainers.CoderCustomization{
|
||||
@@ -1519,7 +1574,9 @@ func TestAPI(t *testing.T) {
|
||||
assert.Contains(t, envs, "CODER_WORKSPACE_AGENT_NAME=coder")
|
||||
assert.Contains(t, envs, "CODER_WORKSPACE_NAME=test-workspace")
|
||||
assert.Contains(t, envs, "CODER_WORKSPACE_OWNER_NAME=test-user")
|
||||
assert.Contains(t, envs, "CODER_WORKSPACE_PARENT_AGENT_NAME=test-parent-agent")
|
||||
assert.Contains(t, envs, "CODER_URL=test-subagent-url")
|
||||
assert.NotContains(t, envs, "CONTAINER_ID=test-container-id")
|
||||
return nil
|
||||
})
|
||||
|
||||
@@ -1578,6 +1635,7 @@ func TestAPI(t *testing.T) {
|
||||
agentcontainers.WithSubAgentClient(fakeSAC),
|
||||
agentcontainers.WithDevcontainerCLI(&fakeDevcontainerCLI{}),
|
||||
)
|
||||
api.Start()
|
||||
defer api.Close()
|
||||
|
||||
tickerTrap.MustWait(ctx).MustRelease(ctx)
|
||||
@@ -1886,6 +1944,7 @@ func TestAPI(t *testing.T) {
|
||||
mCCLI.EXPECT().ExecAs(gomock.Any(), testContainer.ID, "root", "mkdir", "-p", "/.coder-agent").Return(nil, nil),
|
||||
mCCLI.EXPECT().Copy(gomock.Any(), testContainer.ID, coderBin, "/.coder-agent/coder").Return(nil),
|
||||
mCCLI.EXPECT().ExecAs(gomock.Any(), testContainer.ID, "root", "chmod", "0755", "/.coder-agent", "/.coder-agent/coder").Return(nil, nil),
|
||||
mCCLI.EXPECT().ExecAs(gomock.Any(), testContainer.ID, "root", "/bin/sh", "-c", "chown $(id -u):$(id -g) /.coder-agent/coder").Return(nil, nil),
|
||||
)
|
||||
|
||||
mClock.Set(time.Now()).MustWait(ctx)
|
||||
@@ -1899,6 +1958,7 @@ func TestAPI(t *testing.T) {
|
||||
agentcontainers.WithSubAgentURL("test-subagent-url"),
|
||||
agentcontainers.WithWatcher(watcher.NewNoop()),
|
||||
)
|
||||
api.Start()
|
||||
defer api.Close()
|
||||
|
||||
// Close before api.Close() defer to avoid deadlock after test.
|
||||
@@ -1978,6 +2038,7 @@ func TestAPI(t *testing.T) {
|
||||
mCCLI.EXPECT().ExecAs(gomock.Any(), testContainer.ID, "root", "mkdir", "-p", "/.coder-agent").Return(nil, nil),
|
||||
mCCLI.EXPECT().Copy(gomock.Any(), testContainer.ID, coderBin, "/.coder-agent/coder").Return(nil),
|
||||
mCCLI.EXPECT().ExecAs(gomock.Any(), testContainer.ID, "root", "chmod", "0755", "/.coder-agent", "/.coder-agent/coder").Return(nil, nil),
|
||||
mCCLI.EXPECT().ExecAs(gomock.Any(), testContainer.ID, "root", "/bin/sh", "-c", "chown $(id -u):$(id -g) /.coder-agent/coder").Return(nil, nil),
|
||||
)
|
||||
|
||||
mClock.Set(time.Now()).MustWait(ctx)
|
||||
@@ -1991,6 +2052,7 @@ func TestAPI(t *testing.T) {
|
||||
agentcontainers.WithSubAgentURL("test-subagent-url"),
|
||||
agentcontainers.WithWatcher(watcher.NewNoop()),
|
||||
)
|
||||
api.Start()
|
||||
defer api.Close()
|
||||
|
||||
// Close before api.Close() defer to avoid deadlock after test.
|
||||
@@ -2019,6 +2081,127 @@ func TestAPI(t *testing.T) {
|
||||
require.Len(t, fSAC.created, 1)
|
||||
})
|
||||
|
||||
t.Run("ReadConfigWithFeatureOptions", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Dev Container tests are not supported on Windows (this test uses mocks but fails due to Windows paths)")
|
||||
}
|
||||
|
||||
var (
|
||||
ctx = testutil.Context(t, testutil.WaitMedium)
|
||||
logger = testutil.Logger(t)
|
||||
mClock = quartz.NewMock(t)
|
||||
mCCLI = acmock.NewMockContainerCLI(gomock.NewController(t))
|
||||
fSAC = &fakeSubAgentClient{
|
||||
logger: logger.Named("fakeSubAgentClient"),
|
||||
createErrC: make(chan error, 1),
|
||||
}
|
||||
fDCCLI = &fakeDevcontainerCLI{
|
||||
readConfig: agentcontainers.DevcontainerConfig{
|
||||
MergedConfiguration: agentcontainers.DevcontainerMergedConfiguration{
|
||||
Features: agentcontainers.DevcontainerFeatures{
|
||||
"./code-server": map[string]any{
|
||||
"port": 9090,
|
||||
},
|
||||
"ghcr.io/devcontainers/features/docker-in-docker:2": map[string]any{
|
||||
"moby": "false",
|
||||
},
|
||||
},
|
||||
},
|
||||
Workspace: agentcontainers.DevcontainerWorkspace{
|
||||
WorkspaceFolder: "/workspaces/coder",
|
||||
},
|
||||
},
|
||||
readConfigErrC: make(chan func(envs []string) error, 2),
|
||||
}
|
||||
|
||||
testContainer = codersdk.WorkspaceAgentContainer{
|
||||
ID: "test-container-id",
|
||||
FriendlyName: "test-container",
|
||||
Image: "test-image",
|
||||
Running: true,
|
||||
CreatedAt: time.Now(),
|
||||
Labels: map[string]string{
|
||||
agentcontainers.DevcontainerLocalFolderLabel: "/workspaces/coder",
|
||||
agentcontainers.DevcontainerConfigFileLabel: "/workspaces/coder/.devcontainer/devcontainer.json",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
coderBin, err := os.Executable()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Mock the `List` function to always return our test container.
|
||||
mCCLI.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{
|
||||
Containers: []codersdk.WorkspaceAgentContainer{testContainer},
|
||||
}, nil).AnyTimes()
|
||||
|
||||
// Mock the steps used for injecting the coder agent.
|
||||
gomock.InOrder(
|
||||
mCCLI.EXPECT().DetectArchitecture(gomock.Any(), testContainer.ID).Return(runtime.GOARCH, nil),
|
||||
mCCLI.EXPECT().ExecAs(gomock.Any(), testContainer.ID, "root", "mkdir", "-p", "/.coder-agent").Return(nil, nil),
|
||||
mCCLI.EXPECT().Copy(gomock.Any(), testContainer.ID, coderBin, "/.coder-agent/coder").Return(nil),
|
||||
mCCLI.EXPECT().ExecAs(gomock.Any(), testContainer.ID, "root", "chmod", "0755", "/.coder-agent", "/.coder-agent/coder").Return(nil, nil),
|
||||
mCCLI.EXPECT().ExecAs(gomock.Any(), testContainer.ID, "root", "/bin/sh", "-c", "chown $(id -u):$(id -g) /.coder-agent/coder").Return(nil, nil),
|
||||
)
|
||||
|
||||
mClock.Set(time.Now()).MustWait(ctx)
|
||||
tickerTrap := mClock.Trap().TickerFunc("updaterLoop")
|
||||
|
||||
api := agentcontainers.NewAPI(logger,
|
||||
agentcontainers.WithClock(mClock),
|
||||
agentcontainers.WithContainerCLI(mCCLI),
|
||||
agentcontainers.WithDevcontainerCLI(fDCCLI),
|
||||
agentcontainers.WithSubAgentClient(fSAC),
|
||||
agentcontainers.WithSubAgentURL("test-subagent-url"),
|
||||
agentcontainers.WithWatcher(watcher.NewNoop()),
|
||||
agentcontainers.WithManifestInfo("test-user", "test-workspace", "test-parent-agent"),
|
||||
)
|
||||
api.Start()
|
||||
defer api.Close()
|
||||
|
||||
// Close before api.Close() defer to avoid deadlock after test.
|
||||
defer close(fSAC.createErrC)
|
||||
defer close(fDCCLI.readConfigErrC)
|
||||
|
||||
// Allow agent creation and injection to succeed.
|
||||
testutil.RequireSend(ctx, t, fSAC.createErrC, nil)
|
||||
|
||||
testutil.RequireSend(ctx, t, fDCCLI.readConfigErrC, func(envs []string) error {
|
||||
assert.Contains(t, envs, "CODER_WORKSPACE_AGENT_NAME=coder")
|
||||
assert.Contains(t, envs, "CODER_WORKSPACE_NAME=test-workspace")
|
||||
assert.Contains(t, envs, "CODER_WORKSPACE_OWNER_NAME=test-user")
|
||||
assert.Contains(t, envs, "CODER_WORKSPACE_PARENT_AGENT_NAME=test-parent-agent")
|
||||
assert.Contains(t, envs, "CODER_URL=test-subagent-url")
|
||||
assert.Contains(t, envs, "CONTAINER_ID=test-container-id")
|
||||
// First call should not have feature envs.
|
||||
assert.NotContains(t, envs, "FEATURE_CODE_SERVER_OPTION_PORT=9090")
|
||||
assert.NotContains(t, envs, "FEATURE_DOCKER_IN_DOCKER_OPTION_MOBY=false")
|
||||
return nil
|
||||
})
|
||||
|
||||
testutil.RequireSend(ctx, t, fDCCLI.readConfigErrC, func(envs []string) error {
|
||||
assert.Contains(t, envs, "CODER_WORKSPACE_AGENT_NAME=coder")
|
||||
assert.Contains(t, envs, "CODER_WORKSPACE_NAME=test-workspace")
|
||||
assert.Contains(t, envs, "CODER_WORKSPACE_OWNER_NAME=test-user")
|
||||
assert.Contains(t, envs, "CODER_WORKSPACE_PARENT_AGENT_NAME=test-parent-agent")
|
||||
assert.Contains(t, envs, "CODER_URL=test-subagent-url")
|
||||
assert.Contains(t, envs, "CONTAINER_ID=test-container-id")
|
||||
// Second call should have feature envs from the first config read.
|
||||
assert.Contains(t, envs, "FEATURE_CODE_SERVER_OPTION_PORT=9090")
|
||||
assert.Contains(t, envs, "FEATURE_DOCKER_IN_DOCKER_OPTION_MOBY=false")
|
||||
return nil
|
||||
})
|
||||
|
||||
// Wait until the ticker has been registered.
|
||||
tickerTrap.MustWait(ctx).MustRelease(ctx)
|
||||
tickerTrap.Close()
|
||||
|
||||
// Verify agent was created successfully
|
||||
require.Len(t, fSAC.created, 1)
|
||||
})
|
||||
|
||||
t.Run("CommandEnv", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -2045,6 +2228,7 @@ func TestAPI(t *testing.T) {
|
||||
agentcontainers.WithExecer(fakeExec),
|
||||
agentcontainers.WithCommandEnv(commandEnv),
|
||||
)
|
||||
api.Start()
|
||||
defer api.Close()
|
||||
|
||||
// Call RefreshContainers directly to trigger CommandEnv usage.
|
||||
@@ -2134,12 +2318,16 @@ func TestAPI(t *testing.T) {
|
||||
agentcontainers.WithWatcher(fWatcher),
|
||||
agentcontainers.WithClock(mClock),
|
||||
)
|
||||
api.Start()
|
||||
defer func() {
|
||||
close(fakeSAC.createErrC)
|
||||
close(fakeSAC.deleteErrC)
|
||||
api.Close()
|
||||
}()
|
||||
|
||||
err := api.RefreshContainers(ctx)
|
||||
require.NoError(t, err, "RefreshContainers should not error")
|
||||
|
||||
r := chi.NewRouter()
|
||||
r.Mount("/", api.Routes())
|
||||
|
||||
@@ -2150,7 +2338,7 @@ func TestAPI(t *testing.T) {
|
||||
require.Equal(t, http.StatusOK, rec.Code)
|
||||
|
||||
var response codersdk.WorkspaceAgentListContainersResponse
|
||||
err := json.NewDecoder(rec.Body).Decode(&response)
|
||||
err = json.NewDecoder(rec.Body).Decode(&response)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Empty(t, response.Devcontainers, "ignored devcontainer should not be in response when ignore=true")
|
||||
@@ -2334,6 +2522,7 @@ func TestSubAgentCreationWithNameRetry(t *testing.T) {
|
||||
agentcontainers.WithSubAgentClient(fSAC),
|
||||
agentcontainers.WithWatcher(watcher.NewNoop()),
|
||||
)
|
||||
api.Start()
|
||||
defer api.Close()
|
||||
|
||||
tickerTrap.MustWait(ctx).MustRelease(ctx)
|
||||
@@ -2407,3 +2596,82 @@ func fakeContainer(t *testing.T, mut ...func(*codersdk.WorkspaceAgentContainer))
|
||||
}
|
||||
return ct
|
||||
}
|
||||
|
||||
func TestWithDevcontainersNameGeneration(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Dev Container tests are not supported on Windows")
|
||||
}
|
||||
|
||||
devcontainers := []codersdk.WorkspaceAgentDevcontainer{
|
||||
{
|
||||
ID: uuid.New(),
|
||||
Name: "original-name",
|
||||
WorkspaceFolder: "/home/coder/foo/project",
|
||||
ConfigPath: "/home/coder/foo/project/.devcontainer/devcontainer.json",
|
||||
},
|
||||
{
|
||||
ID: uuid.New(),
|
||||
Name: "another-name",
|
||||
WorkspaceFolder: "/home/coder/bar/project",
|
||||
ConfigPath: "/home/coder/bar/project/.devcontainer/devcontainer.json",
|
||||
},
|
||||
}
|
||||
|
||||
scripts := []codersdk.WorkspaceAgentScript{
|
||||
{ID: devcontainers[0].ID, LogSourceID: uuid.New()},
|
||||
{ID: devcontainers[1].ID, LogSourceID: uuid.New()},
|
||||
}
|
||||
|
||||
logger := testutil.Logger(t)
|
||||
|
||||
// This should trigger the WithDevcontainers code path where names are generated
|
||||
api := agentcontainers.NewAPI(logger,
|
||||
agentcontainers.WithDevcontainers(devcontainers, scripts),
|
||||
agentcontainers.WithContainerCLI(&fakeContainerCLI{
|
||||
containers: codersdk.WorkspaceAgentListContainersResponse{
|
||||
Containers: []codersdk.WorkspaceAgentContainer{
|
||||
fakeContainer(t, func(c *codersdk.WorkspaceAgentContainer) {
|
||||
c.ID = "some-container-id-1"
|
||||
c.FriendlyName = "container-name-1"
|
||||
c.Labels[agentcontainers.DevcontainerLocalFolderLabel] = "/home/coder/baz/project"
|
||||
c.Labels[agentcontainers.DevcontainerConfigFileLabel] = "/home/coder/baz/project/.devcontainer/devcontainer.json"
|
||||
}),
|
||||
},
|
||||
},
|
||||
}),
|
||||
agentcontainers.WithDevcontainerCLI(&fakeDevcontainerCLI{}),
|
||||
agentcontainers.WithSubAgentClient(&fakeSubAgentClient{}),
|
||||
agentcontainers.WithWatcher(watcher.NewNoop()),
|
||||
)
|
||||
defer api.Close()
|
||||
api.Start()
|
||||
|
||||
r := chi.NewRouter()
|
||||
r.Mount("/", api.Routes())
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
err := api.RefreshContainers(ctx)
|
||||
require.NoError(t, err, "RefreshContainers should not error")
|
||||
|
||||
// Initial request returns the initial data.
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil).
|
||||
WithContext(ctx)
|
||||
rec := httptest.NewRecorder()
|
||||
r.ServeHTTP(rec, req)
|
||||
|
||||
require.Equal(t, http.StatusOK, rec.Code)
|
||||
var response codersdk.WorkspaceAgentListContainersResponse
|
||||
err = json.NewDecoder(rec.Body).Decode(&response)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the devcontainers have the expected names.
|
||||
require.Len(t, response.Devcontainers, 3, "should have two devcontainers")
|
||||
assert.NotEqual(t, "original-name", response.Devcontainers[2].Name, "first devcontainer should not keep original name")
|
||||
assert.Equal(t, "project", response.Devcontainers[2].Name, "first devcontainer should use the project folder name")
|
||||
assert.NotEqual(t, "another-name", response.Devcontainers[0].Name, "second devcontainer should not keep original name")
|
||||
assert.Equal(t, "bar-project", response.Devcontainers[0].Name, "second devcontainer should has a collision and uses the folder name with a prefix")
|
||||
assert.Equal(t, "baz-project", response.Devcontainers[1].Name, "third devcontainer should use the folder name with a prefix since it collides with the first two")
|
||||
}
|
||||
|
||||
@@ -2,10 +2,10 @@ package agentcontainers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
@@ -18,37 +18,25 @@ const (
|
||||
// DevcontainerConfigFileLabel is the label that contains the path to
|
||||
// the devcontainer.json configuration file.
|
||||
DevcontainerConfigFileLabel = "devcontainer.config_file"
|
||||
// DevcontainerIsTestRunLabel is set if the devcontainer is part of a test
|
||||
// and should be excluded.
|
||||
DevcontainerIsTestRunLabel = "devcontainer.is_test_run"
|
||||
// The default workspace folder inside the devcontainer.
|
||||
DevcontainerDefaultContainerWorkspaceFolder = "/workspaces"
|
||||
)
|
||||
|
||||
const devcontainerUpScriptTemplate = `
|
||||
if ! which devcontainer > /dev/null 2>&1; then
|
||||
echo "ERROR: Unable to start devcontainer, @devcontainers/cli is not installed or not found in \$PATH." 1>&2
|
||||
echo "Please install @devcontainers/cli by running \"npm install -g @devcontainers/cli\" or by using the \"devcontainers-cli\" Coder module." 1>&2
|
||||
exit 1
|
||||
fi
|
||||
devcontainer up %s
|
||||
`
|
||||
|
||||
// ExtractAndInitializeDevcontainerScripts extracts devcontainer scripts from
|
||||
// the given scripts and devcontainers. The devcontainer scripts are removed
|
||||
// from the returned scripts so that they can be run separately.
|
||||
//
|
||||
// Dev Containers have an inherent dependency on start scripts, since they
|
||||
// initialize the workspace (e.g. git clone, npm install, etc). This is
|
||||
// important if e.g. a Coder module to install @devcontainer/cli is used.
|
||||
func ExtractAndInitializeDevcontainerScripts(
|
||||
func ExtractDevcontainerScripts(
|
||||
devcontainers []codersdk.WorkspaceAgentDevcontainer,
|
||||
scripts []codersdk.WorkspaceAgentScript,
|
||||
) (filteredScripts []codersdk.WorkspaceAgentScript, devcontainerScripts []codersdk.WorkspaceAgentScript) {
|
||||
) (filteredScripts []codersdk.WorkspaceAgentScript, devcontainerScripts map[uuid.UUID]codersdk.WorkspaceAgentScript) {
|
||||
devcontainerScripts = make(map[uuid.UUID]codersdk.WorkspaceAgentScript)
|
||||
ScriptLoop:
|
||||
for _, script := range scripts {
|
||||
for _, dc := range devcontainers {
|
||||
// The devcontainer scripts match the devcontainer ID for
|
||||
// identification.
|
||||
if script.ID == dc.ID {
|
||||
devcontainerScripts = append(devcontainerScripts, devcontainerStartupScript(dc, script))
|
||||
devcontainerScripts[dc.ID] = script
|
||||
continue ScriptLoop
|
||||
}
|
||||
}
|
||||
@@ -59,24 +47,6 @@ ScriptLoop:
|
||||
return filteredScripts, devcontainerScripts
|
||||
}
|
||||
|
||||
func devcontainerStartupScript(dc codersdk.WorkspaceAgentDevcontainer, script codersdk.WorkspaceAgentScript) codersdk.WorkspaceAgentScript {
|
||||
args := []string{
|
||||
"--log-format json",
|
||||
fmt.Sprintf("--workspace-folder %q", dc.WorkspaceFolder),
|
||||
}
|
||||
if dc.ConfigPath != "" {
|
||||
args = append(args, fmt.Sprintf("--config %q", dc.ConfigPath))
|
||||
}
|
||||
cmd := fmt.Sprintf(devcontainerUpScriptTemplate, strings.Join(args, " "))
|
||||
// Force the script to run in /bin/sh, since some shells (e.g. fish)
|
||||
// don't support the script.
|
||||
script.Script = fmt.Sprintf("/bin/sh -c '%s'", cmd)
|
||||
// Disable RunOnStart, scripts have this set so that when devcontainers
|
||||
// have not been enabled, a warning will be surfaced in the agent logs.
|
||||
script.RunOnStart = false
|
||||
return script
|
||||
}
|
||||
|
||||
// ExpandAllDevcontainerPaths expands all devcontainer paths in the given
|
||||
// devcontainers. This is required by the devcontainer CLI, which requires
|
||||
// absolute paths for the workspace folder and config path.
|
||||
|
||||
@@ -1,274 +0,0 @@
|
||||
package agentcontainers_test
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
"github.com/coder/coder/v2/agent/agentcontainers"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
func TestExtractAndInitializeDevcontainerScripts(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
scriptIDs := []uuid.UUID{uuid.New(), uuid.New()}
|
||||
devcontainerIDs := []uuid.UUID{uuid.New(), uuid.New()}
|
||||
|
||||
type args struct {
|
||||
expandPath func(string) (string, error)
|
||||
devcontainers []codersdk.WorkspaceAgentDevcontainer
|
||||
scripts []codersdk.WorkspaceAgentScript
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantFilteredScripts []codersdk.WorkspaceAgentScript
|
||||
wantDevcontainerScripts []codersdk.WorkspaceAgentScript
|
||||
|
||||
skipOnWindowsDueToPathSeparator bool
|
||||
}{
|
||||
{
|
||||
name: "no scripts",
|
||||
args: args{
|
||||
expandPath: nil,
|
||||
devcontainers: nil,
|
||||
scripts: nil,
|
||||
},
|
||||
wantFilteredScripts: nil,
|
||||
wantDevcontainerScripts: nil,
|
||||
},
|
||||
{
|
||||
name: "no devcontainers",
|
||||
args: args{
|
||||
expandPath: nil,
|
||||
devcontainers: nil,
|
||||
scripts: []codersdk.WorkspaceAgentScript{
|
||||
{ID: scriptIDs[0]},
|
||||
{ID: scriptIDs[1]},
|
||||
},
|
||||
},
|
||||
wantFilteredScripts: []codersdk.WorkspaceAgentScript{
|
||||
{ID: scriptIDs[0]},
|
||||
{ID: scriptIDs[1]},
|
||||
},
|
||||
wantDevcontainerScripts: nil,
|
||||
},
|
||||
{
|
||||
name: "no scripts match devcontainers",
|
||||
args: args{
|
||||
expandPath: nil,
|
||||
devcontainers: []codersdk.WorkspaceAgentDevcontainer{
|
||||
{ID: devcontainerIDs[0]},
|
||||
{ID: devcontainerIDs[1]},
|
||||
},
|
||||
scripts: []codersdk.WorkspaceAgentScript{
|
||||
{ID: scriptIDs[0]},
|
||||
{ID: scriptIDs[1]},
|
||||
},
|
||||
},
|
||||
wantFilteredScripts: []codersdk.WorkspaceAgentScript{
|
||||
{ID: scriptIDs[0]},
|
||||
{ID: scriptIDs[1]},
|
||||
},
|
||||
wantDevcontainerScripts: nil,
|
||||
},
|
||||
{
|
||||
name: "scripts match devcontainers and sets RunOnStart=false",
|
||||
args: args{
|
||||
expandPath: nil,
|
||||
devcontainers: []codersdk.WorkspaceAgentDevcontainer{
|
||||
{ID: devcontainerIDs[0], WorkspaceFolder: "workspace1"},
|
||||
{ID: devcontainerIDs[1], WorkspaceFolder: "workspace2"},
|
||||
},
|
||||
scripts: []codersdk.WorkspaceAgentScript{
|
||||
{ID: scriptIDs[0], RunOnStart: true},
|
||||
{ID: scriptIDs[1], RunOnStart: true},
|
||||
{ID: devcontainerIDs[0], RunOnStart: true},
|
||||
{ID: devcontainerIDs[1], RunOnStart: true},
|
||||
},
|
||||
},
|
||||
wantFilteredScripts: []codersdk.WorkspaceAgentScript{
|
||||
{ID: scriptIDs[0], RunOnStart: true},
|
||||
{ID: scriptIDs[1], RunOnStart: true},
|
||||
},
|
||||
wantDevcontainerScripts: []codersdk.WorkspaceAgentScript{
|
||||
{
|
||||
ID: devcontainerIDs[0],
|
||||
Script: "devcontainer up --log-format json --workspace-folder \"workspace1\"",
|
||||
RunOnStart: false,
|
||||
},
|
||||
{
|
||||
ID: devcontainerIDs[1],
|
||||
Script: "devcontainer up --log-format json --workspace-folder \"workspace2\"",
|
||||
RunOnStart: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "scripts match devcontainers with config path",
|
||||
args: args{
|
||||
expandPath: nil,
|
||||
devcontainers: []codersdk.WorkspaceAgentDevcontainer{
|
||||
{
|
||||
ID: devcontainerIDs[0],
|
||||
WorkspaceFolder: "workspace1",
|
||||
ConfigPath: "config1",
|
||||
},
|
||||
{
|
||||
ID: devcontainerIDs[1],
|
||||
WorkspaceFolder: "workspace2",
|
||||
ConfigPath: "config2",
|
||||
},
|
||||
},
|
||||
scripts: []codersdk.WorkspaceAgentScript{
|
||||
{ID: devcontainerIDs[0]},
|
||||
{ID: devcontainerIDs[1]},
|
||||
},
|
||||
},
|
||||
wantFilteredScripts: []codersdk.WorkspaceAgentScript{},
|
||||
wantDevcontainerScripts: []codersdk.WorkspaceAgentScript{
|
||||
{
|
||||
ID: devcontainerIDs[0],
|
||||
Script: "devcontainer up --log-format json --workspace-folder \"workspace1\" --config \"workspace1/config1\"",
|
||||
RunOnStart: false,
|
||||
},
|
||||
{
|
||||
ID: devcontainerIDs[1],
|
||||
Script: "devcontainer up --log-format json --workspace-folder \"workspace2\" --config \"workspace2/config2\"",
|
||||
RunOnStart: false,
|
||||
},
|
||||
},
|
||||
skipOnWindowsDueToPathSeparator: true,
|
||||
},
|
||||
{
|
||||
name: "scripts match devcontainers with expand path",
|
||||
args: args{
|
||||
expandPath: func(s string) (string, error) {
|
||||
return "/home/" + s, nil
|
||||
},
|
||||
devcontainers: []codersdk.WorkspaceAgentDevcontainer{
|
||||
{
|
||||
ID: devcontainerIDs[0],
|
||||
WorkspaceFolder: "workspace1",
|
||||
ConfigPath: "config1",
|
||||
},
|
||||
{
|
||||
ID: devcontainerIDs[1],
|
||||
WorkspaceFolder: "workspace2",
|
||||
ConfigPath: "config2",
|
||||
},
|
||||
},
|
||||
scripts: []codersdk.WorkspaceAgentScript{
|
||||
{ID: devcontainerIDs[0], RunOnStart: true},
|
||||
{ID: devcontainerIDs[1], RunOnStart: true},
|
||||
},
|
||||
},
|
||||
wantFilteredScripts: []codersdk.WorkspaceAgentScript{},
|
||||
wantDevcontainerScripts: []codersdk.WorkspaceAgentScript{
|
||||
{
|
||||
ID: devcontainerIDs[0],
|
||||
Script: "devcontainer up --log-format json --workspace-folder \"/home/workspace1\" --config \"/home/workspace1/config1\"",
|
||||
RunOnStart: false,
|
||||
},
|
||||
{
|
||||
ID: devcontainerIDs[1],
|
||||
Script: "devcontainer up --log-format json --workspace-folder \"/home/workspace2\" --config \"/home/workspace2/config2\"",
|
||||
RunOnStart: false,
|
||||
},
|
||||
},
|
||||
skipOnWindowsDueToPathSeparator: true,
|
||||
},
|
||||
{
|
||||
name: "expand config path when ~",
|
||||
args: args{
|
||||
expandPath: func(s string) (string, error) {
|
||||
s = strings.Replace(s, "~/", "", 1)
|
||||
if filepath.IsAbs(s) {
|
||||
return s, nil
|
||||
}
|
||||
return "/home/" + s, nil
|
||||
},
|
||||
devcontainers: []codersdk.WorkspaceAgentDevcontainer{
|
||||
{
|
||||
ID: devcontainerIDs[0],
|
||||
WorkspaceFolder: "workspace1",
|
||||
ConfigPath: "~/config1",
|
||||
},
|
||||
{
|
||||
ID: devcontainerIDs[1],
|
||||
WorkspaceFolder: "workspace2",
|
||||
ConfigPath: "/config2",
|
||||
},
|
||||
},
|
||||
scripts: []codersdk.WorkspaceAgentScript{
|
||||
{ID: devcontainerIDs[0], RunOnStart: true},
|
||||
{ID: devcontainerIDs[1], RunOnStart: true},
|
||||
},
|
||||
},
|
||||
wantFilteredScripts: []codersdk.WorkspaceAgentScript{},
|
||||
wantDevcontainerScripts: []codersdk.WorkspaceAgentScript{
|
||||
{
|
||||
ID: devcontainerIDs[0],
|
||||
Script: "devcontainer up --log-format json --workspace-folder \"/home/workspace1\" --config \"/home/config1\"",
|
||||
RunOnStart: false,
|
||||
},
|
||||
{
|
||||
ID: devcontainerIDs[1],
|
||||
Script: "devcontainer up --log-format json --workspace-folder \"/home/workspace2\" --config \"/config2\"",
|
||||
RunOnStart: false,
|
||||
},
|
||||
},
|
||||
skipOnWindowsDueToPathSeparator: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
if tt.skipOnWindowsDueToPathSeparator && filepath.Separator == '\\' {
|
||||
t.Skip("Skipping test on Windows due to path separator difference.")
|
||||
}
|
||||
|
||||
logger := slogtest.Make(t, nil)
|
||||
if tt.args.expandPath == nil {
|
||||
tt.args.expandPath = func(s string) (string, error) {
|
||||
return s, nil
|
||||
}
|
||||
}
|
||||
gotFilteredScripts, gotDevcontainerScripts := agentcontainers.ExtractAndInitializeDevcontainerScripts(
|
||||
agentcontainers.ExpandAllDevcontainerPaths(logger, tt.args.expandPath, tt.args.devcontainers),
|
||||
tt.args.scripts,
|
||||
)
|
||||
|
||||
if diff := cmp.Diff(tt.wantFilteredScripts, gotFilteredScripts, cmpopts.EquateEmpty()); diff != "" {
|
||||
t.Errorf("ExtractAndInitializeDevcontainerScripts() gotFilteredScripts mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
|
||||
// Preprocess the devcontainer scripts to remove scripting part.
|
||||
for i := range gotDevcontainerScripts {
|
||||
gotDevcontainerScripts[i].Script = textGrep("devcontainer up", gotDevcontainerScripts[i].Script)
|
||||
require.NotEmpty(t, gotDevcontainerScripts[i].Script, "devcontainer up script not found")
|
||||
}
|
||||
if diff := cmp.Diff(tt.wantDevcontainerScripts, gotDevcontainerScripts); diff != "" {
|
||||
t.Errorf("ExtractAndInitializeDevcontainerScripts() gotDevcontainerScripts mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// textGrep returns matching lines from multiline string.
|
||||
func textGrep(want, got string) (filtered string) {
|
||||
var lines []string
|
||||
for _, line := range strings.Split(got, "\n") {
|
||||
if strings.Contains(line, want) {
|
||||
lines = append(lines, line)
|
||||
}
|
||||
}
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
@@ -6,7 +6,10 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
@@ -26,12 +29,55 @@ type DevcontainerConfig struct {
|
||||
|
||||
type DevcontainerMergedConfiguration struct {
|
||||
Customizations DevcontainerMergedCustomizations `json:"customizations,omitempty"`
|
||||
Features DevcontainerFeatures `json:"features,omitempty"`
|
||||
}
|
||||
|
||||
type DevcontainerMergedCustomizations struct {
|
||||
Coder []CoderCustomization `json:"coder,omitempty"`
|
||||
}
|
||||
|
||||
type DevcontainerFeatures map[string]any
|
||||
|
||||
// OptionsAsEnvs converts the DevcontainerFeatures into a list of
|
||||
// environment variables that can be used to set feature options.
|
||||
// The format is FEATURE_<FEATURE_NAME>_OPTION_<OPTION_NAME>=<value>.
|
||||
// For example, if the feature is:
|
||||
//
|
||||
// "ghcr.io/coder/devcontainer-features/code-server:1": {
|
||||
// "port": 9090,
|
||||
// }
|
||||
//
|
||||
// It will produce:
|
||||
//
|
||||
// FEATURE_CODE_SERVER_OPTION_PORT=9090
|
||||
//
|
||||
// Note that the feature name is derived from the last part of the key,
|
||||
// so "ghcr.io/coder/devcontainer-features/code-server:1" becomes
|
||||
// "CODE_SERVER". The version part (e.g. ":1") is removed, and dashes in
|
||||
// the feature and option names are replaced with underscores.
|
||||
func (f DevcontainerFeatures) OptionsAsEnvs() []string {
|
||||
var env []string
|
||||
for k, v := range f {
|
||||
vv, ok := v.(map[string]any)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
// Take the last part of the key as the feature name/path.
|
||||
k = k[strings.LastIndex(k, "/")+1:]
|
||||
// Remove ":" and anything following it.
|
||||
if idx := strings.Index(k, ":"); idx != -1 {
|
||||
k = k[:idx]
|
||||
}
|
||||
k = strings.ReplaceAll(k, "-", "_")
|
||||
for k2, v2 := range vv {
|
||||
k2 = strings.ReplaceAll(k2, "-", "_")
|
||||
env = append(env, fmt.Sprintf("FEATURE_%s_OPTION_%s=%s", strings.ToUpper(k), strings.ToUpper(k2), fmt.Sprintf("%v", v2)))
|
||||
}
|
||||
}
|
||||
slices.Sort(env)
|
||||
return env
|
||||
}
|
||||
|
||||
type DevcontainerConfiguration struct {
|
||||
Customizations DevcontainerCustomizations `json:"customizations,omitempty"`
|
||||
}
|
||||
@@ -140,7 +186,7 @@ func WithReadConfigOutput(stdout, stderr io.Writer) DevcontainerCLIReadConfigOpt
|
||||
}
|
||||
|
||||
func applyDevcontainerCLIUpOptions(opts []DevcontainerCLIUpOptions) devcontainerCLIUpConfig {
|
||||
conf := devcontainerCLIUpConfig{}
|
||||
conf := devcontainerCLIUpConfig{stdout: io.Discard, stderr: io.Discard}
|
||||
for _, opt := range opts {
|
||||
if opt != nil {
|
||||
opt(&conf)
|
||||
@@ -150,7 +196,7 @@ func applyDevcontainerCLIUpOptions(opts []DevcontainerCLIUpOptions) devcontainer
|
||||
}
|
||||
|
||||
func applyDevcontainerCLIExecOptions(opts []DevcontainerCLIExecOptions) devcontainerCLIExecConfig {
|
||||
conf := devcontainerCLIExecConfig{}
|
||||
conf := devcontainerCLIExecConfig{stdout: io.Discard, stderr: io.Discard}
|
||||
for _, opt := range opts {
|
||||
if opt != nil {
|
||||
opt(&conf)
|
||||
@@ -160,7 +206,7 @@ func applyDevcontainerCLIExecOptions(opts []DevcontainerCLIExecOptions) devconta
|
||||
}
|
||||
|
||||
func applyDevcontainerCLIReadConfigOptions(opts []DevcontainerCLIReadConfigOptions) devcontainerCLIReadConfigConfig {
|
||||
conf := devcontainerCLIReadConfigConfig{}
|
||||
conf := devcontainerCLIReadConfigConfig{stdout: io.Discard, stderr: io.Discard}
|
||||
for _, opt := range opts {
|
||||
if opt != nil {
|
||||
opt(&conf)
|
||||
@@ -200,17 +246,20 @@ func (d *devcontainerCLI) Up(ctx context.Context, workspaceFolder, configPath st
|
||||
|
||||
// Capture stdout for parsing and stream logs for both default and provided writers.
|
||||
var stdoutBuf bytes.Buffer
|
||||
stdoutWriters := []io.Writer{&stdoutBuf, &devcontainerCLILogWriter{ctx: ctx, logger: logger.With(slog.F("stdout", true))}}
|
||||
if conf.stdout != nil {
|
||||
stdoutWriters = append(stdoutWriters, conf.stdout)
|
||||
}
|
||||
cmd.Stdout = io.MultiWriter(stdoutWriters...)
|
||||
cmd.Stdout = io.MultiWriter(
|
||||
&stdoutBuf,
|
||||
&devcontainerCLILogWriter{
|
||||
ctx: ctx,
|
||||
logger: logger.With(slog.F("stdout", true)),
|
||||
writer: conf.stdout,
|
||||
},
|
||||
)
|
||||
// Stream stderr logs and provided writer if any.
|
||||
stderrWriters := []io.Writer{&devcontainerCLILogWriter{ctx: ctx, logger: logger.With(slog.F("stderr", true))}}
|
||||
if conf.stderr != nil {
|
||||
stderrWriters = append(stderrWriters, conf.stderr)
|
||||
cmd.Stderr = &devcontainerCLILogWriter{
|
||||
ctx: ctx,
|
||||
logger: logger.With(slog.F("stderr", true)),
|
||||
writer: conf.stderr,
|
||||
}
|
||||
cmd.Stderr = io.MultiWriter(stderrWriters...)
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
_, err2 := parseDevcontainerCLILastLine[devcontainerCLIResult](ctx, logger, stdoutBuf.Bytes())
|
||||
@@ -249,16 +298,16 @@ func (d *devcontainerCLI) Exec(ctx context.Context, workspaceFolder, configPath
|
||||
args = append(args, cmdArgs...)
|
||||
c := d.execer.CommandContext(ctx, "devcontainer", args...)
|
||||
|
||||
stdoutWriters := []io.Writer{&devcontainerCLILogWriter{ctx: ctx, logger: logger.With(slog.F("stdout", true))}}
|
||||
if conf.stdout != nil {
|
||||
stdoutWriters = append(stdoutWriters, conf.stdout)
|
||||
}
|
||||
c.Stdout = io.MultiWriter(stdoutWriters...)
|
||||
stderrWriters := []io.Writer{&devcontainerCLILogWriter{ctx: ctx, logger: logger.With(slog.F("stderr", true))}}
|
||||
if conf.stderr != nil {
|
||||
stderrWriters = append(stderrWriters, conf.stderr)
|
||||
}
|
||||
c.Stderr = io.MultiWriter(stderrWriters...)
|
||||
c.Stdout = io.MultiWriter(conf.stdout, &devcontainerCLILogWriter{
|
||||
ctx: ctx,
|
||||
logger: logger.With(slog.F("stdout", true)),
|
||||
writer: io.Discard,
|
||||
})
|
||||
c.Stderr = io.MultiWriter(conf.stderr, &devcontainerCLILogWriter{
|
||||
ctx: ctx,
|
||||
logger: logger.With(slog.F("stderr", true)),
|
||||
writer: io.Discard,
|
||||
})
|
||||
|
||||
if err := c.Run(); err != nil {
|
||||
return xerrors.Errorf("devcontainer exec failed: %w", err)
|
||||
@@ -283,16 +332,19 @@ func (d *devcontainerCLI) ReadConfig(ctx context.Context, workspaceFolder, confi
|
||||
c.Env = append(c.Env, env...)
|
||||
|
||||
var stdoutBuf bytes.Buffer
|
||||
stdoutWriters := []io.Writer{&stdoutBuf, &devcontainerCLILogWriter{ctx: ctx, logger: logger.With(slog.F("stdout", true))}}
|
||||
if conf.stdout != nil {
|
||||
stdoutWriters = append(stdoutWriters, conf.stdout)
|
||||
c.Stdout = io.MultiWriter(
|
||||
&stdoutBuf,
|
||||
&devcontainerCLILogWriter{
|
||||
ctx: ctx,
|
||||
logger: logger.With(slog.F("stdout", true)),
|
||||
writer: conf.stdout,
|
||||
},
|
||||
)
|
||||
c.Stderr = &devcontainerCLILogWriter{
|
||||
ctx: ctx,
|
||||
logger: logger.With(slog.F("stderr", true)),
|
||||
writer: conf.stderr,
|
||||
}
|
||||
c.Stdout = io.MultiWriter(stdoutWriters...)
|
||||
stderrWriters := []io.Writer{&devcontainerCLILogWriter{ctx: ctx, logger: logger.With(slog.F("stderr", true))}}
|
||||
if conf.stderr != nil {
|
||||
stderrWriters = append(stderrWriters, conf.stderr)
|
||||
}
|
||||
c.Stderr = io.MultiWriter(stderrWriters...)
|
||||
|
||||
if err := c.Run(); err != nil {
|
||||
return DevcontainerConfig{}, xerrors.Errorf("devcontainer read-configuration failed: %w", err)
|
||||
@@ -385,6 +437,7 @@ type devcontainerCLIJSONLogLine struct {
|
||||
type devcontainerCLILogWriter struct {
|
||||
ctx context.Context
|
||||
logger slog.Logger
|
||||
writer io.Writer
|
||||
}
|
||||
|
||||
func (l *devcontainerCLILogWriter) Write(p []byte) (n int, err error) {
|
||||
@@ -405,8 +458,20 @@ func (l *devcontainerCLILogWriter) Write(p []byte) (n int, err error) {
|
||||
}
|
||||
if logLine.Level >= 3 {
|
||||
l.logger.Info(l.ctx, "@devcontainer/cli", slog.F("line", string(line)))
|
||||
_, _ = l.writer.Write([]byte(strings.TrimSpace(logLine.Text) + "\n"))
|
||||
continue
|
||||
}
|
||||
// If we've successfully parsed the final log line, it will successfully parse
|
||||
// but will not fill out any of the fields for `logLine`. In this scenario we
|
||||
// assume it is the final log line, unmarshal it as that, and check if the
|
||||
// outcome is a non-empty string.
|
||||
if logLine.Level == 0 {
|
||||
var lastLine devcontainerCLIResult
|
||||
if err := json.Unmarshal(line, &lastLine); err == nil && lastLine.Outcome != "" {
|
||||
_, _ = l.writer.Write(line)
|
||||
_, _ = l.writer.Write([]byte{'\n'})
|
||||
}
|
||||
}
|
||||
l.logger.Debug(l.ctx, "@devcontainer/cli", slog.F("line", string(line)))
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
|
||||
@@ -3,6 +3,7 @@ package agentcontainers_test
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
@@ -10,9 +11,11 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -341,6 +344,10 @@ func TestDevcontainerCLI_WithOutput(t *testing.T) {
|
||||
t.Run("Up", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Windows uses CRLF line endings, golden file is LF")
|
||||
}
|
||||
|
||||
// Buffers to capture stdout and stderr.
|
||||
outBuf := &bytes.Buffer{}
|
||||
errBuf := &bytes.Buffer{}
|
||||
@@ -363,7 +370,7 @@ func TestDevcontainerCLI_WithOutput(t *testing.T) {
|
||||
require.NotEmpty(t, containerID, "expected non-empty container ID")
|
||||
|
||||
// Read expected log content.
|
||||
expLog, err := os.ReadFile(filepath.Join("testdata", "devcontainercli", "parse", "up.log"))
|
||||
expLog, err := os.ReadFile(filepath.Join("testdata", "devcontainercli", "parse", "up.golden"))
|
||||
require.NoError(t, err, "reading expected log file")
|
||||
|
||||
// Verify stdout buffer contains the CLI logs and stderr is empty.
|
||||
@@ -637,3 +644,107 @@ func removeDevcontainerByID(t *testing.T, pool *dockertest.Pool, id string) {
|
||||
assert.NoError(t, err, "remove container failed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDevcontainerFeatures_OptionsAsEnvs(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
realConfigJSON := `{
|
||||
"mergedConfiguration": {
|
||||
"features": {
|
||||
"./code-server": {
|
||||
"port": 9090
|
||||
},
|
||||
"ghcr.io/devcontainers/features/docker-in-docker:2": {
|
||||
"moby": "false"
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
var realConfig agentcontainers.DevcontainerConfig
|
||||
err := json.Unmarshal([]byte(realConfigJSON), &realConfig)
|
||||
require.NoError(t, err, "unmarshal JSON payload")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
features agentcontainers.DevcontainerFeatures
|
||||
want []string
|
||||
}{
|
||||
{
|
||||
name: "code-server feature",
|
||||
features: agentcontainers.DevcontainerFeatures{
|
||||
"./code-server": map[string]any{
|
||||
"port": 9090,
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
"FEATURE_CODE_SERVER_OPTION_PORT=9090",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "docker-in-docker feature",
|
||||
features: agentcontainers.DevcontainerFeatures{
|
||||
"ghcr.io/devcontainers/features/docker-in-docker:2": map[string]any{
|
||||
"moby": "false",
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
"FEATURE_DOCKER_IN_DOCKER_OPTION_MOBY=false",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple features with multiple options",
|
||||
features: agentcontainers.DevcontainerFeatures{
|
||||
"./code-server": map[string]any{
|
||||
"port": 9090,
|
||||
"password": "secret",
|
||||
},
|
||||
"ghcr.io/devcontainers/features/docker-in-docker:2": map[string]any{
|
||||
"moby": "false",
|
||||
"docker-dash-compose-version": "v2",
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
"FEATURE_CODE_SERVER_OPTION_PASSWORD=secret",
|
||||
"FEATURE_CODE_SERVER_OPTION_PORT=9090",
|
||||
"FEATURE_DOCKER_IN_DOCKER_OPTION_DOCKER_DASH_COMPOSE_VERSION=v2",
|
||||
"FEATURE_DOCKER_IN_DOCKER_OPTION_MOBY=false",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "feature with non-map value (should be ignored)",
|
||||
features: agentcontainers.DevcontainerFeatures{
|
||||
"./code-server": map[string]any{
|
||||
"port": 9090,
|
||||
},
|
||||
"./invalid-feature": "not-a-map",
|
||||
},
|
||||
want: []string{
|
||||
"FEATURE_CODE_SERVER_OPTION_PORT=9090",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "real config example",
|
||||
features: realConfig.MergedConfiguration.Features,
|
||||
want: []string{
|
||||
"FEATURE_CODE_SERVER_OPTION_PORT=9090",
|
||||
"FEATURE_DOCKER_IN_DOCKER_OPTION_MOBY=false",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty features",
|
||||
features: agentcontainers.DevcontainerFeatures{},
|
||||
want: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
got := tt.features.OptionsAsEnvs()
|
||||
if diff := cmp.Diff(tt.want, got); diff != "" {
|
||||
require.Failf(t, "OptionsAsEnvs() mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,64 @@
|
||||
@devcontainers/cli 0.75.0. Node.js v23.9.0. darwin 24.4.0 arm64.
|
||||
Resolving Feature dependencies for 'ghcr.io/devcontainers/features/docker-in-docker:2'...
|
||||
Soft-dependency 'ghcr.io/devcontainers/features/common-utils' is not required. Removing from installation order...
|
||||
Files to omit: ''
|
||||
Run: docker buildx build --load --build-context dev_containers_feature_content_source=/var/folders/1y/cm8mblxd7_x9cljwl_jvfprh0000gn/T/devcontainercli/container-features/0.75.0-1744102171193 --build-arg _DEV_CONTAINERS_BASE_IMAGE=mcr.microsoft.com/devcontainers/javascript-node:1-18-bullseye --build-arg _DEV_CONTAINERS_IMAGE_USER=root --build-arg _DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp --target dev_containers_target_stage -f /var/folders/1y/cm8mblxd7_x9cljwl_jvfprh0000gn/T/devcontainercli/container-features/0.75.0-1744102171193/Dockerfile.extended -t vsc-devcontainers-template-starter-81d8f17e32abef6d434cbb5a37fe05e5c8a6f8ccede47a61197f002dcbf60566-features /var/folders/1y/cm8mblxd7_x9cljwl_jvfprh0000gn/T/devcontainercli/empty-folder
|
||||
#0 building with "orbstack" instance using docker driver
|
||||
|
||||
#1 [internal] load build definition from Dockerfile.extended
|
||||
#1 transferring dockerfile: 3.09kB done
|
||||
#1 DONE 0.0s
|
||||
|
||||
#2 resolve image config for docker-image://docker.io/docker/dockerfile:1.4
|
||||
#2 DONE 1.3s
|
||||
#3 docker-image://docker.io/docker/dockerfile:1.4@sha256:9ba7531bd80fb0a858632727cf7a112fbfd19b17e94c4e84ced81e24ef1a0dbc
|
||||
#3 CACHED
|
||||
|
||||
#4 [internal] load .dockerignore
|
||||
#4 transferring context: 2B done
|
||||
#4 DONE 0.0s
|
||||
|
||||
#5 [internal] load metadata for mcr.microsoft.com/devcontainers/javascript-node:1-18-bullseye
|
||||
#5 DONE 0.0s
|
||||
|
||||
#6 [context dev_containers_feature_content_source] load .dockerignore
|
||||
#6 transferring dev_containers_feature_content_source: 2B done
|
||||
#6 DONE 0.0s
|
||||
|
||||
#7 [dev_containers_feature_content_normalize 1/3] FROM mcr.microsoft.com/devcontainers/javascript-node:1-18-bullseye
|
||||
#7 DONE 0.0s
|
||||
|
||||
#8 [context dev_containers_feature_content_source] load from client
|
||||
#8 transferring dev_containers_feature_content_source: 82.11kB 0.0s done
|
||||
#8 DONE 0.0s
|
||||
|
||||
#9 [dev_containers_feature_content_normalize 2/3] COPY --from=dev_containers_feature_content_source devcontainer-features.builtin.env /tmp/build-features/
|
||||
#9 CACHED
|
||||
|
||||
#10 [dev_containers_target_stage 2/5] RUN mkdir -p /tmp/dev-container-features
|
||||
#10 CACHED
|
||||
|
||||
#11 [dev_containers_target_stage 3/5] COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
|
||||
#11 CACHED
|
||||
|
||||
#12 [dev_containers_target_stage 4/5] RUN echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
|
||||
#12 CACHED
|
||||
|
||||
#13 [dev_containers_feature_content_normalize 3/3] RUN chmod -R 0755 /tmp/build-features/
|
||||
#13 CACHED
|
||||
|
||||
#14 [dev_containers_target_stage 5/5] RUN --mount=type=bind,from=dev_containers_feature_content_source,source=docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features && chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 && cd /tmp/dev-container-features/docker-in-docker_0 && chmod +x ./devcontainer-features-install.sh && ./devcontainer-features-install.sh && rm -rf /tmp/dev-container-features/docker-in-docker_0
|
||||
#14 CACHED
|
||||
|
||||
#15 exporting to image
|
||||
#15 exporting layers done
|
||||
#15 writing image sha256:275dc193c905d448ef3945e3fc86220cc315fe0cb41013988d6ff9f8d6ef2357 done
|
||||
#15 naming to docker.io/library/vsc-devcontainers-template-starter-81d8f17e32abef6d434cbb5a37fe05e5c8a6f8ccede47a61197f002dcbf60566-features done
|
||||
#15 DONE 0.0s
|
||||
Run: docker buildx build --load --build-context dev_containers_feature_content_source=/var/folders/1y/cm8mblxd7_x9cljwl_jvfprh0000gn/T/devcontainercli/container-features/0.75.0-1744102171193 --build-arg _DEV_CONTAINERS_BASE_IMAGE=mcr.microsoft.com/devcontainers/javascript-node:1-18-bullseye --build-arg _DEV_CONTAINERS_IMAGE_USER=root --build-arg _DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp --target dev_containers_target_stage -f /var/folders/1y/cm8mblxd7_x9cljwl_jvfprh0000gn/T/devcontainercli/container-features/0.75.0-1744102171193/Dockerfile.extended -t vsc-devcontainers-template-starter-81d8f17e32abef6d434cbb5a37fe05e5c8a6f8ccede47a61197f002dcbf60566-features /var/folders/1y/cm8mblxd7_x9cljwl_jvfprh0000gn/T/devcontainercli/empty-folder
|
||||
Run: docker run --sig-proxy=false -a STDOUT -a STDERR --mount type=bind,source=/code/devcontainers-template-starter,target=/workspaces/devcontainers-template-starter,consistency=cached --mount type=volume,src=dind-var-lib-docker-0pctifo8bbg3pd06g3j5s9ae8j7lp5qfcd67m25kuahurel7v7jm,dst=/var/lib/docker -l devcontainer.local_folder=/code/devcontainers-template-starter -l devcontainer.config_file=/code/devcontainers-template-starter/.devcontainer/devcontainer.json --privileged --entrypoint /bin/sh vsc-devcontainers-template-starter-81d8f17e32abef6d434cbb5a37fe05e5c8a6f8ccede47a61197f002dcbf60566-features -c echo Container started
|
||||
Container started
|
||||
Not setting dockerd DNS manually.
|
||||
[1mRunning the postCreateCommand from devcontainer.json...[0m
|
||||
added 1 package in 784ms
|
||||
{"outcome":"success","containerId":"bc72db8d0c4c4e941bd9ffc341aee64a18d3397fd45b87cd93d4746150967ba8","remoteUser":"node","remoteWorkspaceFolder":"/workspaces/devcontainers-template-starter"}
|
||||
@@ -79,21 +79,6 @@ func New(opts Options) *Runner {
|
||||
|
||||
type ScriptCompletedFunc func(context.Context, *proto.WorkspaceAgentScriptCompletedRequest) (*proto.WorkspaceAgentScriptCompletedResponse, error)
|
||||
|
||||
type runnerScript struct {
|
||||
runOnPostStart bool
|
||||
codersdk.WorkspaceAgentScript
|
||||
}
|
||||
|
||||
func toRunnerScript(scripts ...codersdk.WorkspaceAgentScript) []runnerScript {
|
||||
var rs []runnerScript
|
||||
for _, s := range scripts {
|
||||
rs = append(rs, runnerScript{
|
||||
WorkspaceAgentScript: s,
|
||||
})
|
||||
}
|
||||
return rs
|
||||
}
|
||||
|
||||
type Runner struct {
|
||||
Options
|
||||
|
||||
@@ -103,7 +88,7 @@ type Runner struct {
|
||||
closed chan struct{}
|
||||
closeMutex sync.Mutex
|
||||
cron *cron.Cron
|
||||
scripts []runnerScript
|
||||
scripts []codersdk.WorkspaceAgentScript
|
||||
dataDir string
|
||||
scriptCompleted ScriptCompletedFunc
|
||||
|
||||
@@ -138,19 +123,6 @@ func (r *Runner) RegisterMetrics(reg prometheus.Registerer) {
|
||||
// InitOption describes an option for the runner initialization.
|
||||
type InitOption func(*Runner)
|
||||
|
||||
// WithPostStartScripts adds scripts that should be run after the workspace
|
||||
// start scripts but before the workspace is marked as started.
|
||||
func WithPostStartScripts(scripts ...codersdk.WorkspaceAgentScript) InitOption {
|
||||
return func(r *Runner) {
|
||||
for _, s := range scripts {
|
||||
r.scripts = append(r.scripts, runnerScript{
|
||||
runOnPostStart: true,
|
||||
WorkspaceAgentScript: s,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Init initializes the runner with the provided scripts.
|
||||
// It also schedules any scripts that have a schedule.
|
||||
// This function must be called before Execute.
|
||||
@@ -161,7 +133,7 @@ func (r *Runner) Init(scripts []codersdk.WorkspaceAgentScript, scriptCompleted S
|
||||
return xerrors.New("init: already initialized")
|
||||
}
|
||||
r.initialized = true
|
||||
r.scripts = toRunnerScript(scripts...)
|
||||
r.scripts = scripts
|
||||
r.scriptCompleted = scriptCompleted
|
||||
for _, opt := range opts {
|
||||
opt(r)
|
||||
@@ -179,7 +151,7 @@ func (r *Runner) Init(scripts []codersdk.WorkspaceAgentScript, scriptCompleted S
|
||||
}
|
||||
script := script
|
||||
_, err := r.cron.AddFunc(script.Cron, func() {
|
||||
err := r.trackRun(r.cronCtx, script.WorkspaceAgentScript, ExecuteCronScripts)
|
||||
err := r.trackRun(r.cronCtx, script, ExecuteCronScripts)
|
||||
if err != nil {
|
||||
r.Logger.Warn(context.Background(), "run agent script on schedule", slog.Error(err))
|
||||
}
|
||||
@@ -223,7 +195,6 @@ type ExecuteOption int
|
||||
const (
|
||||
ExecuteAllScripts ExecuteOption = iota
|
||||
ExecuteStartScripts
|
||||
ExecutePostStartScripts
|
||||
ExecuteStopScripts
|
||||
ExecuteCronScripts
|
||||
)
|
||||
@@ -246,7 +217,6 @@ func (r *Runner) Execute(ctx context.Context, option ExecuteOption) error {
|
||||
for _, script := range r.scripts {
|
||||
runScript := (option == ExecuteStartScripts && script.RunOnStart) ||
|
||||
(option == ExecuteStopScripts && script.RunOnStop) ||
|
||||
(option == ExecutePostStartScripts && script.runOnPostStart) ||
|
||||
(option == ExecuteCronScripts && script.Cron != "") ||
|
||||
option == ExecuteAllScripts
|
||||
|
||||
@@ -256,7 +226,7 @@ func (r *Runner) Execute(ctx context.Context, option ExecuteOption) error {
|
||||
|
||||
script := script
|
||||
eg.Go(func() error {
|
||||
err := r.trackRun(ctx, script.WorkspaceAgentScript, option)
|
||||
err := r.trackRun(ctx, script, option)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("run agent script %q: %w", script.LogSourceID, err)
|
||||
}
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -177,11 +176,6 @@ func TestExecuteOptions(t *testing.T) {
|
||||
Script: "echo stop",
|
||||
RunOnStop: true,
|
||||
}
|
||||
postStartScript := codersdk.WorkspaceAgentScript{
|
||||
ID: uuid.New(),
|
||||
LogSourceID: uuid.New(),
|
||||
Script: "echo poststart",
|
||||
}
|
||||
regularScript := codersdk.WorkspaceAgentScript{
|
||||
ID: uuid.New(),
|
||||
LogSourceID: uuid.New(),
|
||||
@@ -193,10 +187,9 @@ func TestExecuteOptions(t *testing.T) {
|
||||
stopScript,
|
||||
regularScript,
|
||||
}
|
||||
allScripts := append(slices.Clone(scripts), postStartScript)
|
||||
|
||||
scriptByID := func(t *testing.T, id uuid.UUID) codersdk.WorkspaceAgentScript {
|
||||
for _, script := range allScripts {
|
||||
for _, script := range scripts {
|
||||
if script.ID == id {
|
||||
return script
|
||||
}
|
||||
@@ -206,10 +199,9 @@ func TestExecuteOptions(t *testing.T) {
|
||||
}
|
||||
|
||||
wantOutput := map[uuid.UUID]string{
|
||||
startScript.ID: "start",
|
||||
stopScript.ID: "stop",
|
||||
postStartScript.ID: "poststart",
|
||||
regularScript.ID: "regular",
|
||||
startScript.ID: "start",
|
||||
stopScript.ID: "stop",
|
||||
regularScript.ID: "regular",
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
@@ -220,18 +212,13 @@ func TestExecuteOptions(t *testing.T) {
|
||||
{
|
||||
name: "ExecuteAllScripts",
|
||||
option: agentscripts.ExecuteAllScripts,
|
||||
wantRun: []uuid.UUID{startScript.ID, stopScript.ID, regularScript.ID, postStartScript.ID},
|
||||
wantRun: []uuid.UUID{startScript.ID, stopScript.ID, regularScript.ID},
|
||||
},
|
||||
{
|
||||
name: "ExecuteStartScripts",
|
||||
option: agentscripts.ExecuteStartScripts,
|
||||
wantRun: []uuid.UUID{startScript.ID},
|
||||
},
|
||||
{
|
||||
name: "ExecutePostStartScripts",
|
||||
option: agentscripts.ExecutePostStartScripts,
|
||||
wantRun: []uuid.UUID{postStartScript.ID},
|
||||
},
|
||||
{
|
||||
name: "ExecuteStopScripts",
|
||||
option: agentscripts.ExecuteStopScripts,
|
||||
@@ -260,7 +247,6 @@ func TestExecuteOptions(t *testing.T) {
|
||||
err := runner.Init(
|
||||
scripts,
|
||||
aAPI.ScriptCompleted,
|
||||
agentscripts.WithPostStartScripts(postStartScript),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -274,7 +260,7 @@ func TestExecuteOptions(t *testing.T) {
|
||||
"script %s should have run when using filter %s", scriptByID(t, id).Script, tc.name)
|
||||
}
|
||||
|
||||
for _, script := range allScripts {
|
||||
for _, script := range scripts {
|
||||
if _, ok := gotRun[script.ID]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -125,6 +125,7 @@ type Server struct {
|
||||
listeners map[net.Listener]struct{}
|
||||
conns map[net.Conn]struct{}
|
||||
sessions map[ssh.Session]struct{}
|
||||
processes map[*os.Process]struct{}
|
||||
closing chan struct{}
|
||||
// Wait for goroutines to exit, waited without
|
||||
// a lock on mu but protected by closing.
|
||||
@@ -183,6 +184,7 @@ func NewServer(ctx context.Context, logger slog.Logger, prometheusRegistry *prom
|
||||
fs: fs,
|
||||
conns: make(map[net.Conn]struct{}),
|
||||
sessions: make(map[ssh.Session]struct{}),
|
||||
processes: make(map[*os.Process]struct{}),
|
||||
logger: logger,
|
||||
|
||||
config: config,
|
||||
@@ -587,7 +589,10 @@ func (s *Server) startNonPTYSession(logger slog.Logger, session ssh.Session, mag
|
||||
// otherwise context cancellation will not propagate properly
|
||||
// and SSH server close may be delayed.
|
||||
cmd.SysProcAttr = cmdSysProcAttr()
|
||||
cmd.Cancel = cmdCancel(session.Context(), logger, cmd)
|
||||
|
||||
// to match OpenSSH, we don't actually tear a non-TTY command down, even if the session ends.
|
||||
// c.f. https://github.com/coder/coder/issues/18519#issuecomment-3019118271
|
||||
cmd.Cancel = nil
|
||||
|
||||
cmd.Stdout = session
|
||||
cmd.Stderr = session.Stderr()
|
||||
@@ -610,6 +615,16 @@ func (s *Server) startNonPTYSession(logger slog.Logger, session ssh.Session, mag
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "no", "start_command").Add(1)
|
||||
return xerrors.Errorf("start: %w", err)
|
||||
}
|
||||
|
||||
// Since we don't cancel the process when the session stops, we still need to tear it down if we are closing. So
|
||||
// track it here.
|
||||
if !s.trackProcess(cmd.Process, true) {
|
||||
// must be closing
|
||||
err = cmdCancel(logger, cmd.Process)
|
||||
return xerrors.Errorf("failed to track process: %w", err)
|
||||
}
|
||||
defer s.trackProcess(cmd.Process, false)
|
||||
|
||||
sigs := make(chan ssh.Signal, 1)
|
||||
session.Signals(sigs)
|
||||
defer func() {
|
||||
@@ -1070,6 +1085,27 @@ func (s *Server) trackSession(ss ssh.Session, add bool) (ok bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// trackCommand registers the process with the server. If the server is
|
||||
// closing, the process is not registered and should be closed.
|
||||
//
|
||||
//nolint:revive
|
||||
func (s *Server) trackProcess(p *os.Process, add bool) (ok bool) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if add {
|
||||
if s.closing != nil {
|
||||
// Server closed.
|
||||
return false
|
||||
}
|
||||
s.wg.Add(1)
|
||||
s.processes[p] = struct{}{}
|
||||
return true
|
||||
}
|
||||
s.wg.Done()
|
||||
delete(s.processes, p)
|
||||
return true
|
||||
}
|
||||
|
||||
// Close the server and all active connections. Server can be re-used
|
||||
// after Close is done.
|
||||
func (s *Server) Close() error {
|
||||
@@ -1109,6 +1145,10 @@ func (s *Server) Close() error {
|
||||
_ = c.Close()
|
||||
}
|
||||
|
||||
for p := range s.processes {
|
||||
_ = cmdCancel(s.logger, p)
|
||||
}
|
||||
|
||||
s.logger.Debug(ctx, "closing SSH server")
|
||||
err := s.srv.Close()
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ package agentssh
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os/exec"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"cdr.dev/slog"
|
||||
@@ -16,9 +16,7 @@ func cmdSysProcAttr() *syscall.SysProcAttr {
|
||||
}
|
||||
}
|
||||
|
||||
func cmdCancel(ctx context.Context, logger slog.Logger, cmd *exec.Cmd) func() error {
|
||||
return func() error {
|
||||
logger.Debug(ctx, "cmdCancel: sending SIGHUP to process and children", slog.F("pid", cmd.Process.Pid))
|
||||
return syscall.Kill(-cmd.Process.Pid, syscall.SIGHUP)
|
||||
}
|
||||
func cmdCancel(logger slog.Logger, p *os.Process) error {
|
||||
logger.Debug(context.Background(), "cmdCancel: sending SIGHUP to process and children", slog.F("pid", p.Pid))
|
||||
return syscall.Kill(-p.Pid, syscall.SIGHUP)
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@ package agentssh
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os/exec"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"cdr.dev/slog"
|
||||
@@ -12,14 +12,12 @@ func cmdSysProcAttr() *syscall.SysProcAttr {
|
||||
return &syscall.SysProcAttr{}
|
||||
}
|
||||
|
||||
func cmdCancel(ctx context.Context, logger slog.Logger, cmd *exec.Cmd) func() error {
|
||||
return func() error {
|
||||
logger.Debug(ctx, "cmdCancel: killing process", slog.F("pid", cmd.Process.Pid))
|
||||
// Windows doesn't support sending signals to process groups, so we
|
||||
// have to kill the process directly. In the future, we may want to
|
||||
// implement a more sophisticated solution for process groups on
|
||||
// Windows, but for now, this is a simple way to ensure that the
|
||||
// process is terminated when the context is cancelled.
|
||||
return cmd.Process.Kill()
|
||||
}
|
||||
func cmdCancel(logger slog.Logger, p *os.Process) error {
|
||||
logger.Debug(context.Background(), "cmdCancel: killing process", slog.F("pid", p.Pid))
|
||||
// Windows doesn't support sending signals to process groups, so we
|
||||
// have to kill the process directly. In the future, we may want to
|
||||
// implement a more sophisticated solution for process groups on
|
||||
// Windows, but for now, this is a simple way to ensure that the
|
||||
// process is terminated when the context is cancelled.
|
||||
return p.Kill()
|
||||
}
|
||||
|
||||
+4
-40
@@ -7,15 +7,11 @@ import (
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
"github.com/coder/coder/v2/agent/agentcontainers"
|
||||
"github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
func (a *agent) apiHandler(aAPI proto.DRPCAgentClient26) (http.Handler, func() error) {
|
||||
func (a *agent) apiHandler() http.Handler {
|
||||
r := chi.NewRouter()
|
||||
r.Get("/", func(rw http.ResponseWriter, r *http.Request) {
|
||||
httpapi.Write(r.Context(), rw, http.StatusOK, codersdk.Response{
|
||||
@@ -40,35 +36,8 @@ func (a *agent) apiHandler(aAPI proto.DRPCAgentClient26) (http.Handler, func() e
|
||||
cacheDuration: cacheDuration,
|
||||
}
|
||||
|
||||
if a.devcontainers {
|
||||
containerAPIOpts := []agentcontainers.Option{
|
||||
agentcontainers.WithExecer(a.execer),
|
||||
agentcontainers.WithCommandEnv(a.sshServer.CommandEnv),
|
||||
agentcontainers.WithScriptLogger(func(logSourceID uuid.UUID) agentcontainers.ScriptLogger {
|
||||
return a.logSender.GetScriptLogger(logSourceID)
|
||||
}),
|
||||
agentcontainers.WithSubAgentClient(agentcontainers.NewSubAgentClientFromAPI(a.logger, aAPI)),
|
||||
}
|
||||
manifest := a.manifest.Load()
|
||||
if manifest != nil {
|
||||
containerAPIOpts = append(containerAPIOpts,
|
||||
agentcontainers.WithManifestInfo(manifest.OwnerName, manifest.WorkspaceName),
|
||||
)
|
||||
|
||||
if len(manifest.Devcontainers) > 0 {
|
||||
containerAPIOpts = append(
|
||||
containerAPIOpts,
|
||||
agentcontainers.WithDevcontainers(manifest.Devcontainers, manifest.Scripts),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Append after to allow the agent options to override the default options.
|
||||
containerAPIOpts = append(containerAPIOpts, a.containerAPIOptions...)
|
||||
|
||||
containerAPI := agentcontainers.NewAPI(a.logger.Named("containers"), containerAPIOpts...)
|
||||
r.Mount("/api/v0/containers", containerAPI.Routes())
|
||||
a.containerAPI.Store(containerAPI)
|
||||
if a.containerAPI != nil {
|
||||
r.Mount("/api/v0/containers", a.containerAPI.Routes())
|
||||
} else {
|
||||
r.HandleFunc("/api/v0/containers", func(w http.ResponseWriter, r *http.Request) {
|
||||
httpapi.Write(r.Context(), w, http.StatusForbidden, codersdk.Response{
|
||||
@@ -89,12 +58,7 @@ func (a *agent) apiHandler(aAPI proto.DRPCAgentClient26) (http.Handler, func() e
|
||||
r.Get("/debug/manifest", a.HandleHTTPDebugManifest)
|
||||
r.Get("/debug/prometheus", promHandler.ServeHTTP)
|
||||
|
||||
return r, func() error {
|
||||
if containerAPI := a.containerAPI.Load(); containerAPI != nil {
|
||||
return containerAPI.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
type listeningPortsHandler struct {
|
||||
|
||||
+60
-26
@@ -362,11 +362,19 @@ func (*RootCmd) mcpConfigureCursor() *serpent.Command {
|
||||
}
|
||||
|
||||
type taskReport struct {
|
||||
link string
|
||||
messageID int64
|
||||
// link is optional.
|
||||
link string
|
||||
// messageID must be set if this update is from a *user* message. A user
|
||||
// message only happens when interacting via the AI AgentAPI (as opposed to
|
||||
// interacting with the terminal directly).
|
||||
messageID *int64
|
||||
// selfReported must be set if the update is directly from the AI agent
|
||||
// (as opposed to the screen watcher).
|
||||
selfReported bool
|
||||
state codersdk.WorkspaceAppStatusState
|
||||
summary string
|
||||
// state must always be set.
|
||||
state codersdk.WorkspaceAppStatusState
|
||||
// summary is optional.
|
||||
summary string
|
||||
}
|
||||
|
||||
type mcpServer struct {
|
||||
@@ -388,31 +396,48 @@ func (r *RootCmd) mcpServer() *serpent.Command {
|
||||
return &serpent.Command{
|
||||
Use: "server",
|
||||
Handler: func(inv *serpent.Invocation) error {
|
||||
// lastUserMessageID is the ID of the last *user* message that we saw. A
|
||||
// user message only happens when interacting via the AI AgentAPI (as
|
||||
// opposed to interacting with the terminal directly).
|
||||
var lastUserMessageID int64
|
||||
var lastReport taskReport
|
||||
// Create a queue that skips duplicates and preserves summaries.
|
||||
queue := cliutil.NewQueue[taskReport](512).WithPredicate(func(report taskReport) (taskReport, bool) {
|
||||
// Use "working" status if this is a new user message. If this is not a
|
||||
// new user message, and the status is "working" and not self-reported
|
||||
// (meaning it came from the screen watcher), then it means one of two
|
||||
// things:
|
||||
// 1. The AI agent is still working, so there is nothing to update.
|
||||
// 2. The AI agent stopped working, then the user has interacted with
|
||||
// the terminal directly. For now, we are ignoring these updates.
|
||||
// This risks missing cases where the user manually submits a new
|
||||
// prompt and the AI agent becomes active and does not update itself,
|
||||
// but it avoids spamming useless status updates as the user is
|
||||
// typing, so the tradeoff is worth it. In the future, if we can
|
||||
// reliably distinguish between user and AI agent activity, we can
|
||||
// change this.
|
||||
if report.messageID > lastUserMessageID {
|
||||
report.state = codersdk.WorkspaceAppStatusStateWorking
|
||||
} else if report.state == codersdk.WorkspaceAppStatusStateWorking && !report.selfReported {
|
||||
// Avoid queuing empty statuses (this would probably indicate a
|
||||
// developer error)
|
||||
if report.state == "" {
|
||||
return report, false
|
||||
}
|
||||
// If this is a user message, discard if it is not new.
|
||||
if report.messageID != nil && lastReport.messageID != nil &&
|
||||
*lastReport.messageID >= *report.messageID {
|
||||
return report, false
|
||||
}
|
||||
// If this is not a user message, and the status is "working" and not
|
||||
// self-reported (meaning it came from the screen watcher), then it
|
||||
// means one of two things:
|
||||
//
|
||||
// 1. The AI agent is not working; the user is interacting with the
|
||||
// terminal directly.
|
||||
// 2. The AI agent is working.
|
||||
//
|
||||
// At the moment, we have no way to tell the difference between these
|
||||
// two states. In the future, if we can reliably distinguish between
|
||||
// user and AI agent activity, we can change this.
|
||||
//
|
||||
// If this is our first update, we assume it is the AI agent working and
|
||||
// accept the update.
|
||||
//
|
||||
// Otherwise we discard the update. This risks missing cases where the
|
||||
// user manually submits a new prompt and the AI agent becomes active
|
||||
// (and does not update itself), but it avoids spamming useless status
|
||||
// updates as the user is typing, so the tradeoff is worth it.
|
||||
if report.messageID == nil &&
|
||||
report.state == codersdk.WorkspaceAppStatusStateWorking &&
|
||||
!report.selfReported && lastReport.state != "" {
|
||||
return report, false
|
||||
}
|
||||
// Keep track of the last message ID so we can tell when a message is
|
||||
// new or if it has been re-emitted.
|
||||
if report.messageID == nil {
|
||||
report.messageID = lastReport.messageID
|
||||
}
|
||||
// Preserve previous message and URI if there was no message.
|
||||
if report.summary == "" {
|
||||
report.summary = lastReport.summary
|
||||
@@ -600,7 +625,8 @@ func (s *mcpServer) startWatcher(ctx context.Context, inv *serpent.Invocation) {
|
||||
case agentapi.EventMessageUpdate:
|
||||
if ev.Role == agentapi.RoleUser {
|
||||
err := s.queue.Push(taskReport{
|
||||
messageID: ev.Id,
|
||||
messageID: &ev.Id,
|
||||
state: codersdk.WorkspaceAppStatusStateWorking,
|
||||
})
|
||||
if err != nil {
|
||||
cliui.Warnf(inv.Stderr, "Failed to queue update: %s", err)
|
||||
@@ -650,10 +676,18 @@ func (s *mcpServer) startServer(ctx context.Context, inv *serpent.Invocation, in
|
||||
// Add tool dependencies.
|
||||
toolOpts := []func(*toolsdk.Deps){
|
||||
toolsdk.WithTaskReporter(func(args toolsdk.ReportTaskArgs) error {
|
||||
// The agent does not reliably report its status correctly. If AgentAPI
|
||||
// is enabled, we will always set the status to "working" when we get an
|
||||
// MCP message, and rely on the screen watcher to eventually catch the
|
||||
// idle state.
|
||||
state := codersdk.WorkspaceAppStatusStateWorking
|
||||
if s.aiAgentAPIClient == nil {
|
||||
state = codersdk.WorkspaceAppStatusState(args.State)
|
||||
}
|
||||
return s.queue.Push(taskReport{
|
||||
link: args.Link,
|
||||
selfReported: true,
|
||||
state: codersdk.WorkspaceAppStatusState(args.State),
|
||||
state: state,
|
||||
summary: args.Summary,
|
||||
})
|
||||
}),
|
||||
|
||||
+332
-201
@@ -763,220 +763,351 @@ func TestExpMcpReporter(t *testing.T) {
|
||||
<-cmdDone
|
||||
})
|
||||
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
makeStatusEvent := func(status agentapi.AgentStatus) *codersdk.ServerSentEvent {
|
||||
return &codersdk.ServerSentEvent{
|
||||
Type: ServerSentEventTypeStatusChange,
|
||||
Data: agentapi.EventStatusChange{
|
||||
Status: status,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Create a test deployment and workspace.
|
||||
client, db := coderdtest.NewWithDatabase(t, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
client, user2 := coderdtest.CreateAnotherUser(t, client, user.OrganizationID)
|
||||
makeMessageEvent := func(id int64, role agentapi.ConversationRole) *codersdk.ServerSentEvent {
|
||||
return &codersdk.ServerSentEvent{
|
||||
Type: ServerSentEventTypeMessageUpdate,
|
||||
Data: agentapi.EventMessageUpdate{
|
||||
Id: id,
|
||||
Role: role,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
r := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user2.ID,
|
||||
}).WithAgent(func(a []*proto.Agent) []*proto.Agent {
|
||||
a[0].Apps = []*proto.App{
|
||||
type test struct {
|
||||
// event simulates an event from the screen watcher.
|
||||
event *codersdk.ServerSentEvent
|
||||
// state, summary, and uri simulate a tool call from the AI agent.
|
||||
state codersdk.WorkspaceAppStatusState
|
||||
summary string
|
||||
uri string
|
||||
expected *codersdk.WorkspaceAppStatus
|
||||
}
|
||||
|
||||
runs := []struct {
|
||||
name string
|
||||
tests []test
|
||||
disableAgentAPI bool
|
||||
}{
|
||||
// In this run the AI agent starts with a state change but forgets to update
|
||||
// that it finished.
|
||||
{
|
||||
name: "Active",
|
||||
tests: []test{
|
||||
// First the AI agent updates with a state change.
|
||||
{
|
||||
Slug: "vscode",
|
||||
state: codersdk.WorkspaceAppStatusStateWorking,
|
||||
summary: "doing work",
|
||||
uri: "https://dev.coder.com",
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateWorking,
|
||||
Message: "doing work",
|
||||
URI: "https://dev.coder.com",
|
||||
},
|
||||
},
|
||||
}
|
||||
return a
|
||||
}).Do()
|
||||
|
||||
makeStatusEvent := func(status agentapi.AgentStatus) *codersdk.ServerSentEvent {
|
||||
return &codersdk.ServerSentEvent{
|
||||
Type: ServerSentEventTypeStatusChange,
|
||||
Data: agentapi.EventStatusChange{
|
||||
Status: status,
|
||||
// Terminal goes quiet but the AI agent forgot the update, and it is
|
||||
// caught by the screen watcher. Message and URI are preserved.
|
||||
{
|
||||
event: makeStatusEvent(agentapi.StatusStable),
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateIdle,
|
||||
Message: "doing work",
|
||||
URI: "https://dev.coder.com",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
makeMessageEvent := func(id int64, role agentapi.ConversationRole) *codersdk.ServerSentEvent {
|
||||
return &codersdk.ServerSentEvent{
|
||||
Type: ServerSentEventTypeMessageUpdate,
|
||||
Data: agentapi.EventMessageUpdate{
|
||||
Id: id,
|
||||
Role: role,
|
||||
// A stable update now from the watcher should be discarded, as it is a
|
||||
// duplicate.
|
||||
{
|
||||
event: makeStatusEvent(agentapi.StatusStable),
|
||||
},
|
||||
}
|
||||
}
|
||||
// Terminal becomes active again according to the screen watcher, but no
|
||||
// new user message. This could be the AI agent being active again, but
|
||||
// it could also be the user messing around. We will prefer not updating
|
||||
// the status so the "working" update here should be skipped.
|
||||
//
|
||||
// TODO: How do we test the no-op updates? This update is skipped
|
||||
// because of the logic mentioned above, but how do we prove this update
|
||||
// was skipped because of that and not that the next update was skipped
|
||||
// because it is a duplicate state? We could mock the queue?
|
||||
{
|
||||
event: makeStatusEvent(agentapi.StatusRunning),
|
||||
},
|
||||
// Agent messages are ignored.
|
||||
{
|
||||
event: makeMessageEvent(0, agentapi.RoleAgent),
|
||||
},
|
||||
// The watcher reports the screen is active again...
|
||||
{
|
||||
event: makeStatusEvent(agentapi.StatusRunning),
|
||||
},
|
||||
// ... but this time we have a new user message so we know there is AI
|
||||
// agent activity. This time the "working" update will not be skipped.
|
||||
{
|
||||
event: makeMessageEvent(1, agentapi.RoleUser),
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateWorking,
|
||||
Message: "doing work",
|
||||
URI: "https://dev.coder.com",
|
||||
},
|
||||
},
|
||||
// Watcher reports stable again.
|
||||
{
|
||||
event: makeStatusEvent(agentapi.StatusStable),
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateIdle,
|
||||
Message: "doing work",
|
||||
URI: "https://dev.coder.com",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// In this run the AI agent never sends any state changes.
|
||||
{
|
||||
name: "Inactive",
|
||||
tests: []test{
|
||||
// The "working" status from the watcher should be accepted, even though
|
||||
// there is no new user message, because it is the first update.
|
||||
{
|
||||
event: makeStatusEvent(agentapi.StatusRunning),
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateWorking,
|
||||
Message: "",
|
||||
URI: "",
|
||||
},
|
||||
},
|
||||
// Stable update should be accepted.
|
||||
{
|
||||
event: makeStatusEvent(agentapi.StatusStable),
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateIdle,
|
||||
Message: "",
|
||||
URI: "",
|
||||
},
|
||||
},
|
||||
// Zero ID should be accepted.
|
||||
{
|
||||
event: makeMessageEvent(0, agentapi.RoleUser),
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateWorking,
|
||||
Message: "",
|
||||
URI: "",
|
||||
},
|
||||
},
|
||||
// Stable again.
|
||||
{
|
||||
event: makeStatusEvent(agentapi.StatusStable),
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateIdle,
|
||||
Message: "",
|
||||
URI: "",
|
||||
},
|
||||
},
|
||||
// Next ID.
|
||||
{
|
||||
event: makeMessageEvent(1, agentapi.RoleUser),
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateWorking,
|
||||
Message: "",
|
||||
URI: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// We ignore the state from the agent and assume "working".
|
||||
{
|
||||
name: "IgnoreAgentState",
|
||||
// AI agent reports that it is finished but the summary says it is doing
|
||||
// work.
|
||||
tests: []test{
|
||||
{
|
||||
state: codersdk.WorkspaceAppStatusStateIdle,
|
||||
summary: "doing work",
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateWorking,
|
||||
Message: "doing work",
|
||||
},
|
||||
},
|
||||
// AI agent reports finished again, with a matching summary. We still
|
||||
// assume it is working.
|
||||
{
|
||||
state: codersdk.WorkspaceAppStatusStateIdle,
|
||||
summary: "finished",
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateWorking,
|
||||
Message: "finished",
|
||||
},
|
||||
},
|
||||
// Once the watcher reports stable, then we record idle.
|
||||
{
|
||||
event: makeStatusEvent(agentapi.StatusStable),
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateIdle,
|
||||
Message: "finished",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// When AgentAPI is not being used, we accept agent state updates as-is.
|
||||
{
|
||||
name: "KeepAgentState",
|
||||
tests: []test{
|
||||
{
|
||||
state: codersdk.WorkspaceAppStatusStateWorking,
|
||||
summary: "doing work",
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateWorking,
|
||||
Message: "doing work",
|
||||
},
|
||||
},
|
||||
{
|
||||
state: codersdk.WorkspaceAppStatusStateIdle,
|
||||
summary: "finished",
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateIdle,
|
||||
Message: "finished",
|
||||
},
|
||||
},
|
||||
},
|
||||
disableAgentAPI: true,
|
||||
},
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(testutil.Context(t, testutil.WaitShort))
|
||||
for _, run := range runs {
|
||||
run := run
|
||||
t.Run(run.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Mock the AI AgentAPI server.
|
||||
listening := make(chan func(sse codersdk.ServerSentEvent) error)
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
send, closed, err := httpapi.ServerSentEventSender(w, r)
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error setting up server-sent events.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
// Send initial message.
|
||||
send(*makeMessageEvent(0, agentapi.RoleAgent))
|
||||
listening <- send
|
||||
<-closed
|
||||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
aiAgentAPIURL := srv.URL
|
||||
ctx, cancel := context.WithCancel(testutil.Context(t, testutil.WaitShort))
|
||||
|
||||
// Watch the workspace for changes.
|
||||
watcher, err := client.WatchWorkspace(ctx, r.Workspace.ID)
|
||||
require.NoError(t, err)
|
||||
var lastAppStatus codersdk.WorkspaceAppStatus
|
||||
nextUpdate := func() codersdk.WorkspaceAppStatus {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
require.FailNow(t, "timed out waiting for status update")
|
||||
case w, ok := <-watcher:
|
||||
require.True(t, ok, "watch channel closed")
|
||||
if w.LatestAppStatus != nil && w.LatestAppStatus.ID != lastAppStatus.ID {
|
||||
lastAppStatus = *w.LatestAppStatus
|
||||
return lastAppStatus
|
||||
// Create a test deployment and workspace.
|
||||
client, db := coderdtest.NewWithDatabase(t, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
client, user2 := coderdtest.CreateAnotherUser(t, client, user.OrganizationID)
|
||||
|
||||
r := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user2.ID,
|
||||
}).WithAgent(func(a []*proto.Agent) []*proto.Agent {
|
||||
a[0].Apps = []*proto.App{
|
||||
{
|
||||
Slug: "vscode",
|
||||
},
|
||||
}
|
||||
return a
|
||||
}).Do()
|
||||
|
||||
// Watch the workspace for changes.
|
||||
watcher, err := client.WatchWorkspace(ctx, r.Workspace.ID)
|
||||
require.NoError(t, err)
|
||||
var lastAppStatus codersdk.WorkspaceAppStatus
|
||||
nextUpdate := func() codersdk.WorkspaceAppStatus {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
require.FailNow(t, "timed out waiting for status update")
|
||||
case w, ok := <-watcher:
|
||||
require.True(t, ok, "watch channel closed")
|
||||
if w.LatestAppStatus != nil && w.LatestAppStatus.ID != lastAppStatus.ID {
|
||||
t.Logf("Got status update: %s > %s", lastAppStatus.State, w.LatestAppStatus.State)
|
||||
lastAppStatus = *w.LatestAppStatus
|
||||
return lastAppStatus
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inv, _ := clitest.New(t,
|
||||
"exp", "mcp", "server",
|
||||
// We need the agent credentials, AI AgentAPI url, and a slug for reporting.
|
||||
"--agent-url", client.URL.String(),
|
||||
"--agent-token", r.AgentToken,
|
||||
"--app-status-slug", "vscode",
|
||||
"--ai-agentapi-url", aiAgentAPIURL,
|
||||
"--allowed-tools=coder_report_task",
|
||||
)
|
||||
inv = inv.WithContext(ctx)
|
||||
|
||||
pty := ptytest.New(t)
|
||||
inv.Stdin = pty.Input()
|
||||
inv.Stdout = pty.Output()
|
||||
stderr := ptytest.New(t)
|
||||
inv.Stderr = stderr.Output()
|
||||
|
||||
// Run the MCP server.
|
||||
cmdDone := make(chan struct{})
|
||||
go func() {
|
||||
defer close(cmdDone)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
// Initialize.
|
||||
payload := `{"jsonrpc":"2.0","id":1,"method":"initialize"}`
|
||||
pty.WriteLine(payload)
|
||||
_ = pty.ReadLine(ctx) // ignore echo
|
||||
_ = pty.ReadLine(ctx) // ignore init response
|
||||
|
||||
sender := <-listening
|
||||
|
||||
tests := []struct {
|
||||
// event simulates an event from the screen watcher.
|
||||
event *codersdk.ServerSentEvent
|
||||
// state, summary, and uri simulate a tool call from the AI agent.
|
||||
state codersdk.WorkspaceAppStatusState
|
||||
summary string
|
||||
uri string
|
||||
expected *codersdk.WorkspaceAppStatus
|
||||
}{
|
||||
// First the AI agent updates with a state change.
|
||||
{
|
||||
state: codersdk.WorkspaceAppStatusStateWorking,
|
||||
summary: "doing work",
|
||||
uri: "https://dev.coder.com",
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateWorking,
|
||||
Message: "doing work",
|
||||
URI: "https://dev.coder.com",
|
||||
},
|
||||
},
|
||||
// Terminal goes quiet but the AI agent forgot the update, and it is
|
||||
// caught by the screen watcher. Message and URI are preserved.
|
||||
{
|
||||
event: makeStatusEvent(agentapi.StatusStable),
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateIdle,
|
||||
Message: "doing work",
|
||||
URI: "https://dev.coder.com",
|
||||
},
|
||||
},
|
||||
// A completed update at this point from the watcher should be discarded.
|
||||
{
|
||||
event: makeStatusEvent(agentapi.StatusStable),
|
||||
},
|
||||
// Terminal becomes active again according to the screen watcher, but no
|
||||
// new user message. This could be the AI agent being active again, but
|
||||
// it could also be the user messing around. We will prefer not updating
|
||||
// the status so the "working" update here should be skipped.
|
||||
{
|
||||
event: makeStatusEvent(agentapi.StatusRunning),
|
||||
},
|
||||
// Agent messages are ignored.
|
||||
{
|
||||
event: makeMessageEvent(1, agentapi.RoleAgent),
|
||||
},
|
||||
// AI agent reports that it failed and URI is blank.
|
||||
{
|
||||
state: codersdk.WorkspaceAppStatusStateFailure,
|
||||
summary: "oops",
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateFailure,
|
||||
Message: "oops",
|
||||
URI: "",
|
||||
},
|
||||
},
|
||||
// The watcher reports the screen is active again...
|
||||
{
|
||||
event: makeStatusEvent(agentapi.StatusRunning),
|
||||
},
|
||||
// ... but this time we have a new user message so we know there is AI
|
||||
// agent activity. This time the "working" update will not be skipped.
|
||||
{
|
||||
event: makeMessageEvent(2, agentapi.RoleUser),
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateWorking,
|
||||
Message: "oops",
|
||||
URI: "",
|
||||
},
|
||||
},
|
||||
// Watcher reports stable again.
|
||||
{
|
||||
event: makeStatusEvent(agentapi.StatusStable),
|
||||
expected: &codersdk.WorkspaceAppStatus{
|
||||
State: codersdk.WorkspaceAppStatusStateIdle,
|
||||
Message: "oops",
|
||||
URI: "",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
if test.event != nil {
|
||||
err := sender(*test.event)
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
// Call the tool and ensure it works.
|
||||
payload := fmt.Sprintf(`{"jsonrpc":"2.0","id":3,"method":"tools/call", "params": {"name": "coder_report_task", "arguments": {"state": %q, "summary": %q, "link": %q}}}`, test.state, test.summary, test.uri)
|
||||
pty.WriteLine(payload)
|
||||
_ = pty.ReadLine(ctx) // ignore echo
|
||||
output := pty.ReadLine(ctx)
|
||||
require.NotEmpty(t, output, "did not receive a response from coder_report_task")
|
||||
// Ensure it is valid JSON.
|
||||
_, err = json.Marshal(output)
|
||||
require.NoError(t, err, "did not receive valid JSON from coder_report_task")
|
||||
args := []string{
|
||||
"exp", "mcp", "server",
|
||||
// We need the agent credentials, AI AgentAPI url (if not
|
||||
// disabled), and a slug for reporting.
|
||||
"--agent-url", client.URL.String(),
|
||||
"--agent-token", r.AgentToken,
|
||||
"--app-status-slug", "vscode",
|
||||
"--allowed-tools=coder_report_task",
|
||||
}
|
||||
if test.expected != nil {
|
||||
got := nextUpdate()
|
||||
require.Equal(t, got.State, test.expected.State)
|
||||
require.Equal(t, got.Message, test.expected.Message)
|
||||
require.Equal(t, got.URI, test.expected.URI)
|
||||
|
||||
// Mock the AI AgentAPI server.
|
||||
listening := make(chan func(sse codersdk.ServerSentEvent) error)
|
||||
if !run.disableAgentAPI {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
send, closed, err := httpapi.ServerSentEventSender(w, r)
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Internal error setting up server-sent events.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
// Send initial message.
|
||||
send(*makeMessageEvent(0, agentapi.RoleAgent))
|
||||
listening <- send
|
||||
<-closed
|
||||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
aiAgentAPIURL := srv.URL
|
||||
args = append(args, "--ai-agentapi-url", aiAgentAPIURL)
|
||||
}
|
||||
}
|
||||
cancel()
|
||||
<-cmdDone
|
||||
})
|
||||
|
||||
inv, _ := clitest.New(t, args...)
|
||||
inv = inv.WithContext(ctx)
|
||||
|
||||
pty := ptytest.New(t)
|
||||
inv.Stdin = pty.Input()
|
||||
inv.Stdout = pty.Output()
|
||||
stderr := ptytest.New(t)
|
||||
inv.Stderr = stderr.Output()
|
||||
|
||||
// Run the MCP server.
|
||||
cmdDone := make(chan struct{})
|
||||
go func() {
|
||||
defer close(cmdDone)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
// Initialize.
|
||||
payload := `{"jsonrpc":"2.0","id":1,"method":"initialize"}`
|
||||
pty.WriteLine(payload)
|
||||
_ = pty.ReadLine(ctx) // ignore echo
|
||||
_ = pty.ReadLine(ctx) // ignore init response
|
||||
|
||||
var sender func(sse codersdk.ServerSentEvent) error
|
||||
if !run.disableAgentAPI {
|
||||
sender = <-listening
|
||||
}
|
||||
|
||||
for _, test := range run.tests {
|
||||
if test.event != nil {
|
||||
err := sender(*test.event)
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
// Call the tool and ensure it works.
|
||||
payload := fmt.Sprintf(`{"jsonrpc":"2.0","id":3,"method":"tools/call", "params": {"name": "coder_report_task", "arguments": {"state": %q, "summary": %q, "link": %q}}}`, test.state, test.summary, test.uri)
|
||||
pty.WriteLine(payload)
|
||||
_ = pty.ReadLine(ctx) // ignore echo
|
||||
output := pty.ReadLine(ctx)
|
||||
require.NotEmpty(t, output, "did not receive a response from coder_report_task")
|
||||
// Ensure it is valid JSON.
|
||||
_, err = json.Marshal(output)
|
||||
require.NoError(t, err, "did not receive valid JSON from coder_report_task")
|
||||
}
|
||||
if test.expected != nil {
|
||||
got := nextUpdate()
|
||||
require.Equal(t, got.State, test.expected.State)
|
||||
require.Equal(t, got.Message, test.expected.Message)
|
||||
require.Equal(t, got.URI, test.expected.URI)
|
||||
}
|
||||
}
|
||||
cancel()
|
||||
<-cmdDone
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -61,7 +61,6 @@ import (
|
||||
"github.com/coder/serpent"
|
||||
"github.com/coder/wgtunnel/tunnelsdk"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/ai"
|
||||
"github.com/coder/coder/v2/coderd/entitlements"
|
||||
"github.com/coder/coder/v2/coderd/notifications/reports"
|
||||
"github.com/coder/coder/v2/coderd/runtimeconfig"
|
||||
@@ -611,22 +610,6 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
|
||||
)
|
||||
}
|
||||
|
||||
aiProviders, err := ReadAIProvidersFromEnv(os.Environ())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("read ai providers from env: %w", err)
|
||||
}
|
||||
vals.AI.Value.Providers = append(vals.AI.Value.Providers, aiProviders...)
|
||||
for _, provider := range aiProviders {
|
||||
logger.Debug(
|
||||
ctx, "loaded ai provider",
|
||||
slog.F("type", provider.Type),
|
||||
)
|
||||
}
|
||||
languageModels, err := ai.ModelsFromConfig(ctx, vals.AI.Value.Providers)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create language models: %w", err)
|
||||
}
|
||||
|
||||
realIPConfig, err := httpmw.ParseRealIPConfig(vals.ProxyTrustedHeaders, vals.ProxyTrustedOrigins)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parse real ip config: %w", err)
|
||||
@@ -657,7 +640,6 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
|
||||
CacheDir: cacheDir,
|
||||
GoogleTokenValidator: googleTokenValidator,
|
||||
ExternalAuthConfigs: externalAuthConfigs,
|
||||
LanguageModels: languageModels,
|
||||
RealIPConfig: realIPConfig,
|
||||
SSHKeygenAlgorithm: sshKeygenAlgorithm,
|
||||
TracerProvider: tracerProvider,
|
||||
@@ -2642,77 +2624,6 @@ func redirectHTTPToHTTPSDeprecation(ctx context.Context, logger slog.Logger, inv
|
||||
}
|
||||
}
|
||||
|
||||
func ReadAIProvidersFromEnv(environ []string) ([]codersdk.AIProviderConfig, error) {
|
||||
// The index numbers must be in-order.
|
||||
sort.Strings(environ)
|
||||
|
||||
var providers []codersdk.AIProviderConfig
|
||||
for _, v := range serpent.ParseEnviron(environ, "CODER_AI_PROVIDER_") {
|
||||
tokens := strings.SplitN(v.Name, "_", 2)
|
||||
if len(tokens) != 2 {
|
||||
return nil, xerrors.Errorf("invalid env var: %s", v.Name)
|
||||
}
|
||||
|
||||
providerNum, err := strconv.Atoi(tokens[0])
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse number: %s", v.Name)
|
||||
}
|
||||
|
||||
var provider codersdk.AIProviderConfig
|
||||
switch {
|
||||
case len(providers) < providerNum:
|
||||
return nil, xerrors.Errorf(
|
||||
"provider num %v skipped: %s",
|
||||
len(providers),
|
||||
v.Name,
|
||||
)
|
||||
case len(providers) == providerNum:
|
||||
// At the next next provider.
|
||||
providers = append(providers, provider)
|
||||
case len(providers) == providerNum+1:
|
||||
// At the current provider.
|
||||
provider = providers[providerNum]
|
||||
}
|
||||
|
||||
key := tokens[1]
|
||||
switch key {
|
||||
case "TYPE":
|
||||
provider.Type = v.Value
|
||||
case "API_KEY":
|
||||
provider.APIKey = v.Value
|
||||
case "BASE_URL":
|
||||
provider.BaseURL = v.Value
|
||||
case "MODELS":
|
||||
provider.Models = strings.Split(v.Value, ",")
|
||||
}
|
||||
providers[providerNum] = provider
|
||||
}
|
||||
for _, envVar := range environ {
|
||||
tokens := strings.SplitN(envVar, "=", 2)
|
||||
if len(tokens) != 2 {
|
||||
continue
|
||||
}
|
||||
switch tokens[0] {
|
||||
case "OPENAI_API_KEY":
|
||||
providers = append(providers, codersdk.AIProviderConfig{
|
||||
Type: "openai",
|
||||
APIKey: tokens[1],
|
||||
})
|
||||
case "ANTHROPIC_API_KEY":
|
||||
providers = append(providers, codersdk.AIProviderConfig{
|
||||
Type: "anthropic",
|
||||
APIKey: tokens[1],
|
||||
})
|
||||
case "GOOGLE_API_KEY":
|
||||
providers = append(providers, codersdk.AIProviderConfig{
|
||||
Type: "google",
|
||||
APIKey: tokens[1],
|
||||
})
|
||||
}
|
||||
}
|
||||
return providers, nil
|
||||
}
|
||||
|
||||
// ReadExternalAuthProvidersFromEnv is provided for compatibility purposes with
|
||||
// the viper CLI.
|
||||
func ReadExternalAuthProvidersFromEnv(environ []string) ([]codersdk.ExternalAuthConfig, error) {
|
||||
|
||||
+1
-1
@@ -251,7 +251,7 @@ func summarizeBundle(inv *serpent.Invocation, bun *support.Bundle) {
|
||||
|
||||
clientNetcheckSummary := bun.Network.Netcheck.Summarize("Client netcheck:", docsURL)
|
||||
if len(clientNetcheckSummary) > 0 {
|
||||
cliui.Warn(inv.Stdout, "Networking issues detected:", deployHealthSummary...)
|
||||
cliui.Warn(inv.Stdout, "Networking issues detected:", clientNetcheckSummary...)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
+6
@@ -677,6 +677,12 @@ workspaces stopping during the day due to template scheduling.
|
||||
must be *. Only one hour and minute can be specified (ranges or comma
|
||||
separated values are not supported).
|
||||
|
||||
WORKSPACE PREBUILDS OPTIONS:
|
||||
Configure how workspace prebuilds behave.
|
||||
|
||||
--workspace-prebuilds-reconciliation-interval duration, $CODER_WORKSPACE_PREBUILDS_RECONCILIATION_INTERVAL (default: 15s)
|
||||
How often to reconcile workspace prebuilds state.
|
||||
|
||||
⚠️ DANGEROUS OPTIONS:
|
||||
--dangerous-allow-path-app-sharing bool, $CODER_DANGEROUS_ALLOW_PATH_APP_SHARING
|
||||
Allow workspace apps that are not served from subdomains to be shared.
|
||||
|
||||
-3
@@ -526,9 +526,6 @@ client:
|
||||
# Support links to display in the top right drop down menu.
|
||||
# (default: <unset>, type: struct[[]codersdk.LinkConfig])
|
||||
supportLinks: []
|
||||
# Configure AI providers.
|
||||
# (default: <unset>, type: struct[codersdk.AIConfig])
|
||||
ai: {}
|
||||
# External Authentication providers.
|
||||
# (default: <unset>, type: struct[[]codersdk.ExternalAuthConfig])
|
||||
externalAuthProviders: []
|
||||
|
||||
@@ -0,0 +1,73 @@
|
||||
//go:build darwin
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"github.com/coder/coder/v2/vpn"
|
||||
"github.com/coder/serpent"
|
||||
)
|
||||
|
||||
func (r *RootCmd) vpnDaemonRun() *serpent.Command {
|
||||
var (
|
||||
rpcReadFD int64
|
||||
rpcWriteFD int64
|
||||
)
|
||||
|
||||
cmd := &serpent.Command{
|
||||
Use: "run",
|
||||
Short: "Run the VPN daemon on macOS.",
|
||||
Middleware: serpent.Chain(
|
||||
serpent.RequireNArgs(0),
|
||||
),
|
||||
Options: serpent.OptionSet{
|
||||
{
|
||||
Flag: "rpc-read-fd",
|
||||
Env: "CODER_VPN_DAEMON_RPC_READ_FD",
|
||||
Description: "The file descriptor for the pipe to read from the RPC connection.",
|
||||
Value: serpent.Int64Of(&rpcReadFD),
|
||||
Required: true,
|
||||
},
|
||||
{
|
||||
Flag: "rpc-write-fd",
|
||||
Env: "CODER_VPN_DAEMON_RPC_WRITE_FD",
|
||||
Description: "The file descriptor for the pipe to write to the RPC connection.",
|
||||
Value: serpent.Int64Of(&rpcWriteFD),
|
||||
Required: true,
|
||||
},
|
||||
},
|
||||
Handler: func(inv *serpent.Invocation) error {
|
||||
ctx := inv.Context()
|
||||
|
||||
if rpcReadFD < 0 || rpcWriteFD < 0 {
|
||||
return xerrors.Errorf("rpc-read-fd (%v) and rpc-write-fd (%v) must be positive", rpcReadFD, rpcWriteFD)
|
||||
}
|
||||
if rpcReadFD == rpcWriteFD {
|
||||
return xerrors.Errorf("rpc-read-fd (%v) and rpc-write-fd (%v) must be different", rpcReadFD, rpcWriteFD)
|
||||
}
|
||||
|
||||
pipe, err := vpn.NewBidirectionalPipe(uintptr(rpcReadFD), uintptr(rpcWriteFD))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create bidirectional RPC pipe: %w", err)
|
||||
}
|
||||
defer pipe.Close()
|
||||
|
||||
tunnel, err := vpn.NewTunnel(ctx, slog.Make().Leveled(slog.LevelDebug), pipe,
|
||||
vpn.NewClient(),
|
||||
vpn.UseOSNetworkingStack(),
|
||||
vpn.UseAsLogger(),
|
||||
)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create new tunnel for client: %w", err)
|
||||
}
|
||||
defer tunnel.Close()
|
||||
|
||||
<-ctx.Done()
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
//go:build !windows
|
||||
//go:build !windows && !darwin
|
||||
|
||||
package cli
|
||||
|
||||
|
||||
@@ -63,10 +63,7 @@ func (r *RootCmd) vpnDaemonRun() *serpent.Command {
|
||||
defer pipe.Close()
|
||||
|
||||
logger.Info(ctx, "starting tunnel")
|
||||
tunnel, err := vpn.NewTunnel(ctx, logger, pipe, vpn.NewClient(),
|
||||
vpn.UseOSNetworkingStack(),
|
||||
vpn.UseCustomLogSinks(sinks...),
|
||||
)
|
||||
tunnel, err := vpn.NewTunnel(ctx, logger, pipe, vpn.NewClient(), vpn.UseOSNetworkingStack())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create new tunnel for client: %w", err)
|
||||
}
|
||||
|
||||
@@ -2,7 +2,9 @@ package agentapi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"database/sql"
|
||||
"encoding/base32"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
@@ -165,11 +167,20 @@ func (a *SubAgentAPI) CreateSubAgent(ctx context.Context, req *agentproto.Create
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE(DanielleMaywood):
|
||||
// Slugs must be unique PER workspace/template. As of 2025-06-25,
|
||||
// there is no database-layer enforcement of this constraint.
|
||||
// We can get around this by creating a slug that *should* be
|
||||
// unique (at least highly probable).
|
||||
slugHash := sha256.Sum256([]byte(subAgent.Name + "/" + app.Slug))
|
||||
slugHashEnc := base32.HexEncoding.WithPadding(base32.NoPadding).EncodeToString(slugHash[:])
|
||||
computedSlug := strings.ToLower(slugHashEnc[:8]) + "-" + app.Slug
|
||||
|
||||
_, err := a.Database.UpsertWorkspaceApp(ctx, database.UpsertWorkspaceAppParams{
|
||||
ID: uuid.New(), // NOTE: we may need to maintain the app's ID here for stability, but for now we'll leave this as-is.
|
||||
CreatedAt: createdAt,
|
||||
AgentID: subAgent.ID,
|
||||
Slug: app.Slug,
|
||||
Slug: computedSlug,
|
||||
DisplayName: app.GetDisplayName(),
|
||||
Icon: app.GetIcon(),
|
||||
Command: sql.NullString{
|
||||
|
||||
@@ -216,7 +216,7 @@ func TestSubAgentAPI(t *testing.T) {
|
||||
},
|
||||
expectApps: []database.WorkspaceApp{
|
||||
{
|
||||
Slug: "code-server",
|
||||
Slug: "fdqf0lpd-code-server",
|
||||
DisplayName: "VS Code",
|
||||
Icon: "/icon/code.svg",
|
||||
Command: sql.NullString{},
|
||||
@@ -234,7 +234,7 @@ func TestSubAgentAPI(t *testing.T) {
|
||||
DisplayGroup: sql.NullString{},
|
||||
},
|
||||
{
|
||||
Slug: "vim",
|
||||
Slug: "547knu0f-vim",
|
||||
DisplayName: "Vim",
|
||||
Icon: "/icon/vim.svg",
|
||||
Command: sql.NullString{Valid: true, String: "vim"},
|
||||
@@ -377,7 +377,7 @@ func TestSubAgentAPI(t *testing.T) {
|
||||
},
|
||||
expectApps: []database.WorkspaceApp{
|
||||
{
|
||||
Slug: "valid-app",
|
||||
Slug: "511ctirn-valid-app",
|
||||
DisplayName: "Valid App",
|
||||
SharingLevel: database.AppSharingLevelOwner,
|
||||
Health: database.WorkspaceAppHealthDisabled,
|
||||
@@ -410,19 +410,19 @@ func TestSubAgentAPI(t *testing.T) {
|
||||
},
|
||||
expectApps: []database.WorkspaceApp{
|
||||
{
|
||||
Slug: "authenticated-app",
|
||||
Slug: "atpt261l-authenticated-app",
|
||||
SharingLevel: database.AppSharingLevelAuthenticated,
|
||||
Health: database.WorkspaceAppHealthDisabled,
|
||||
OpenIn: database.WorkspaceAppOpenInSlimWindow,
|
||||
},
|
||||
{
|
||||
Slug: "owner-app",
|
||||
Slug: "eh5gp1he-owner-app",
|
||||
SharingLevel: database.AppSharingLevelOwner,
|
||||
Health: database.WorkspaceAppHealthDisabled,
|
||||
OpenIn: database.WorkspaceAppOpenInSlimWindow,
|
||||
},
|
||||
{
|
||||
Slug: "public-app",
|
||||
Slug: "oopjevf1-public-app",
|
||||
SharingLevel: database.AppSharingLevelPublic,
|
||||
Health: database.WorkspaceAppHealthDisabled,
|
||||
OpenIn: database.WorkspaceAppOpenInSlimWindow,
|
||||
@@ -443,13 +443,13 @@ func TestSubAgentAPI(t *testing.T) {
|
||||
},
|
||||
expectApps: []database.WorkspaceApp{
|
||||
{
|
||||
Slug: "tab-app",
|
||||
Slug: "ci9500rm-tab-app",
|
||||
SharingLevel: database.AppSharingLevelOwner,
|
||||
Health: database.WorkspaceAppHealthDisabled,
|
||||
OpenIn: database.WorkspaceAppOpenInTab,
|
||||
},
|
||||
{
|
||||
Slug: "window-app",
|
||||
Slug: "p17s76re-window-app",
|
||||
SharingLevel: database.AppSharingLevelOwner,
|
||||
Health: database.WorkspaceAppHealthDisabled,
|
||||
OpenIn: database.WorkspaceAppOpenInSlimWindow,
|
||||
@@ -479,7 +479,7 @@ func TestSubAgentAPI(t *testing.T) {
|
||||
},
|
||||
expectApps: []database.WorkspaceApp{
|
||||
{
|
||||
Slug: "full-app",
|
||||
Slug: "0ccdbg39-full-app",
|
||||
Command: sql.NullString{Valid: true, String: "echo hello"},
|
||||
DisplayName: "Full Featured App",
|
||||
External: true,
|
||||
@@ -507,7 +507,7 @@ func TestSubAgentAPI(t *testing.T) {
|
||||
},
|
||||
expectApps: []database.WorkspaceApp{
|
||||
{
|
||||
Slug: "no-health-app",
|
||||
Slug: "nphrhbh6-no-health-app",
|
||||
Health: database.WorkspaceAppHealthDisabled,
|
||||
SharingLevel: database.AppSharingLevelOwner,
|
||||
OpenIn: database.WorkspaceAppOpenInSlimWindow,
|
||||
@@ -531,7 +531,7 @@ func TestSubAgentAPI(t *testing.T) {
|
||||
},
|
||||
expectApps: []database.WorkspaceApp{
|
||||
{
|
||||
Slug: "duplicate-app",
|
||||
Slug: "uiklfckv-duplicate-app",
|
||||
DisplayName: "First App",
|
||||
SharingLevel: database.AppSharingLevelOwner,
|
||||
Health: database.WorkspaceAppHealthDisabled,
|
||||
@@ -568,14 +568,14 @@ func TestSubAgentAPI(t *testing.T) {
|
||||
},
|
||||
expectApps: []database.WorkspaceApp{
|
||||
{
|
||||
Slug: "duplicate-app",
|
||||
Slug: "uiklfckv-duplicate-app",
|
||||
DisplayName: "First Duplicate",
|
||||
SharingLevel: database.AppSharingLevelOwner,
|
||||
Health: database.WorkspaceAppHealthDisabled,
|
||||
OpenIn: database.WorkspaceAppOpenInSlimWindow,
|
||||
},
|
||||
{
|
||||
Slug: "valid-app",
|
||||
Slug: "511ctirn-valid-app",
|
||||
DisplayName: "Valid App",
|
||||
SharingLevel: database.AppSharingLevelOwner,
|
||||
Health: database.WorkspaceAppHealthDisabled,
|
||||
@@ -754,7 +754,7 @@ func TestSubAgentAPI(t *testing.T) {
|
||||
apps, err := db.GetWorkspaceAppsByAgentID(dbauthz.AsSystemRestricted(ctx), agentID) //nolint:gocritic // this is a test.
|
||||
require.NoError(t, err)
|
||||
require.Len(t, apps, 1)
|
||||
require.Equal(t, "duplicate-slug", apps[0].Slug)
|
||||
require.Equal(t, "k5jd7a99-duplicate-slug", apps[0].Slug)
|
||||
require.Equal(t, "First Duplicate", apps[0].DisplayName)
|
||||
})
|
||||
})
|
||||
@@ -1128,7 +1128,7 @@ func TestSubAgentAPI(t *testing.T) {
|
||||
apps, err := api.Database.GetWorkspaceAppsByAgentID(dbauthz.AsSystemRestricted(ctx), agentID) //nolint:gocritic // this is a test.
|
||||
require.NoError(t, err)
|
||||
require.Len(t, apps, 1)
|
||||
require.Equal(t, "custom-app", apps[0].Slug)
|
||||
require.Equal(t, "v4qhkq17-custom-app", apps[0].Slug)
|
||||
require.Equal(t, "Custom App", apps[0].DisplayName)
|
||||
})
|
||||
|
||||
|
||||
-167
@@ -1,167 +0,0 @@
|
||||
package ai
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/anthropics/anthropic-sdk-go"
|
||||
anthropicoption "github.com/anthropics/anthropic-sdk-go/option"
|
||||
"github.com/kylecarbs/aisdk-go"
|
||||
"github.com/openai/openai-go"
|
||||
openaioption "github.com/openai/openai-go/option"
|
||||
"golang.org/x/xerrors"
|
||||
"google.golang.org/genai"
|
||||
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
type LanguageModel struct {
|
||||
codersdk.LanguageModel
|
||||
StreamFunc StreamFunc
|
||||
}
|
||||
|
||||
type StreamOptions struct {
|
||||
SystemPrompt string
|
||||
Model string
|
||||
Messages []aisdk.Message
|
||||
Thinking bool
|
||||
Tools []aisdk.Tool
|
||||
}
|
||||
|
||||
type StreamFunc func(ctx context.Context, options StreamOptions) (aisdk.DataStream, error)
|
||||
|
||||
// LanguageModels is a map of language model ID to language model.
|
||||
type LanguageModels map[string]LanguageModel
|
||||
|
||||
func ModelsFromConfig(ctx context.Context, configs []codersdk.AIProviderConfig) (LanguageModels, error) {
|
||||
models := make(LanguageModels)
|
||||
|
||||
for _, config := range configs {
|
||||
var streamFunc StreamFunc
|
||||
|
||||
switch config.Type {
|
||||
case "openai":
|
||||
opts := []openaioption.RequestOption{
|
||||
openaioption.WithAPIKey(config.APIKey),
|
||||
}
|
||||
if config.BaseURL != "" {
|
||||
opts = append(opts, openaioption.WithBaseURL(config.BaseURL))
|
||||
}
|
||||
client := openai.NewClient(opts...)
|
||||
streamFunc = func(ctx context.Context, options StreamOptions) (aisdk.DataStream, error) {
|
||||
openaiMessages, err := aisdk.MessagesToOpenAI(options.Messages)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tools := aisdk.ToolsToOpenAI(options.Tools)
|
||||
if options.SystemPrompt != "" {
|
||||
openaiMessages = append([]openai.ChatCompletionMessageParamUnion{
|
||||
openai.SystemMessage(options.SystemPrompt),
|
||||
}, openaiMessages...)
|
||||
}
|
||||
|
||||
return aisdk.OpenAIToDataStream(client.Chat.Completions.NewStreaming(ctx, openai.ChatCompletionNewParams{
|
||||
Messages: openaiMessages,
|
||||
Model: options.Model,
|
||||
Tools: tools,
|
||||
MaxTokens: openai.Int(8192),
|
||||
})), nil
|
||||
}
|
||||
if config.Models == nil {
|
||||
models, err := client.Models.List(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config.Models = make([]string, len(models.Data))
|
||||
for i, model := range models.Data {
|
||||
config.Models[i] = model.ID
|
||||
}
|
||||
}
|
||||
case "anthropic":
|
||||
client := anthropic.NewClient(anthropicoption.WithAPIKey(config.APIKey))
|
||||
streamFunc = func(ctx context.Context, options StreamOptions) (aisdk.DataStream, error) {
|
||||
anthropicMessages, systemMessage, err := aisdk.MessagesToAnthropic(options.Messages)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if options.SystemPrompt != "" {
|
||||
systemMessage = []anthropic.TextBlockParam{
|
||||
*anthropic.NewTextBlock(options.SystemPrompt).OfRequestTextBlock,
|
||||
}
|
||||
}
|
||||
return aisdk.AnthropicToDataStream(client.Messages.NewStreaming(ctx, anthropic.MessageNewParams{
|
||||
Messages: anthropicMessages,
|
||||
Model: options.Model,
|
||||
System: systemMessage,
|
||||
Tools: aisdk.ToolsToAnthropic(options.Tools),
|
||||
MaxTokens: 8192,
|
||||
})), nil
|
||||
}
|
||||
if config.Models == nil {
|
||||
models, err := client.Models.List(ctx, anthropic.ModelListParams{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config.Models = make([]string, len(models.Data))
|
||||
for i, model := range models.Data {
|
||||
config.Models[i] = model.ID
|
||||
}
|
||||
}
|
||||
case "google":
|
||||
client, err := genai.NewClient(ctx, &genai.ClientConfig{
|
||||
APIKey: config.APIKey,
|
||||
Backend: genai.BackendGeminiAPI,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
streamFunc = func(ctx context.Context, options StreamOptions) (aisdk.DataStream, error) {
|
||||
googleMessages, err := aisdk.MessagesToGoogle(options.Messages)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tools, err := aisdk.ToolsToGoogle(options.Tools)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var systemInstruction *genai.Content
|
||||
if options.SystemPrompt != "" {
|
||||
systemInstruction = &genai.Content{
|
||||
Parts: []*genai.Part{
|
||||
genai.NewPartFromText(options.SystemPrompt),
|
||||
},
|
||||
Role: "model",
|
||||
}
|
||||
}
|
||||
return aisdk.GoogleToDataStream(client.Models.GenerateContentStream(ctx, options.Model, googleMessages, &genai.GenerateContentConfig{
|
||||
SystemInstruction: systemInstruction,
|
||||
Tools: tools,
|
||||
})), nil
|
||||
}
|
||||
if config.Models == nil {
|
||||
models, err := client.Models.List(ctx, &genai.ListModelsConfig{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config.Models = make([]string, len(models.Items))
|
||||
for i, model := range models.Items {
|
||||
config.Models[i] = model.Name
|
||||
}
|
||||
}
|
||||
default:
|
||||
return nil, xerrors.Errorf("unsupported model type: %s", config.Type)
|
||||
}
|
||||
|
||||
for _, model := range config.Models {
|
||||
models[model] = LanguageModel{
|
||||
LanguageModel: codersdk.LanguageModel{
|
||||
ID: model,
|
||||
DisplayName: model,
|
||||
Provider: config.Type,
|
||||
},
|
||||
StreamFunc: streamFunc,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return models, nil
|
||||
}
|
||||
Generated
+10
-597
@@ -343,173 +343,6 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/chats": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Chat"
|
||||
],
|
||||
"summary": "List chats",
|
||||
"operationId": "list-chats",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Chat"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Chat"
|
||||
],
|
||||
"summary": "Create a chat",
|
||||
"operationId": "create-a-chat",
|
||||
"responses": {
|
||||
"201": {
|
||||
"description": "Created",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.Chat"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/chats/{chat}": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Chat"
|
||||
],
|
||||
"summary": "Get a chat",
|
||||
"operationId": "get-a-chat",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Chat ID",
|
||||
"name": "chat",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.Chat"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/chats/{chat}/messages": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Chat"
|
||||
],
|
||||
"summary": "Get chat messages",
|
||||
"operationId": "get-chat-messages",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Chat ID",
|
||||
"name": "chat",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/aisdk.Message"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Chat"
|
||||
],
|
||||
"summary": "Create a chat message",
|
||||
"operationId": "create-a-chat-message",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Chat ID",
|
||||
"name": "chat",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"description": "Request body",
|
||||
"name": "request",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.CreateChatMessageRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/csp/reports": {
|
||||
"post": {
|
||||
"security": [
|
||||
@@ -826,31 +659,6 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/deployment/llms": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"General"
|
||||
],
|
||||
"summary": "Get language models",
|
||||
"operationId": "get-language-models",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.LanguageModelConfig"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/deployment/ssh": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -8645,7 +8453,7 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/workspaceagents/{workspaceagent}/containers/devcontainers/container/{container}/recreate": {
|
||||
"/workspaceagents/{workspaceagent}/containers/devcontainers/{devcontainer}/recreate": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
@@ -8671,8 +8479,8 @@ const docTemplate = `{
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Container ID or name",
|
||||
"name": "container",
|
||||
"description": "Devcontainer ID",
|
||||
"name": "devcontainer",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
@@ -10617,190 +10425,6 @@ const docTemplate = `{
|
||||
"ReinitializeReasonPrebuildClaimed"
|
||||
]
|
||||
},
|
||||
"aisdk.Attachment": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"contentType": {
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"url": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"aisdk.Message": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"annotations": {
|
||||
"type": "array",
|
||||
"items": {}
|
||||
},
|
||||
"content": {
|
||||
"type": "string"
|
||||
},
|
||||
"createdAt": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"experimental_attachments": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/aisdk.Attachment"
|
||||
}
|
||||
},
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
"parts": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/aisdk.Part"
|
||||
}
|
||||
},
|
||||
"role": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"aisdk.Part": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"data": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/aisdk.ReasoningDetail"
|
||||
}
|
||||
},
|
||||
"mimeType": {
|
||||
"description": "Type: \"file\"",
|
||||
"type": "string"
|
||||
},
|
||||
"reasoning": {
|
||||
"description": "Type: \"reasoning\"",
|
||||
"type": "string"
|
||||
},
|
||||
"source": {
|
||||
"description": "Type: \"source\"",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/aisdk.SourceInfo"
|
||||
}
|
||||
]
|
||||
},
|
||||
"text": {
|
||||
"description": "Type: \"text\"",
|
||||
"type": "string"
|
||||
},
|
||||
"toolInvocation": {
|
||||
"description": "Type: \"tool-invocation\"",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/aisdk.ToolInvocation"
|
||||
}
|
||||
]
|
||||
},
|
||||
"type": {
|
||||
"$ref": "#/definitions/aisdk.PartType"
|
||||
}
|
||||
}
|
||||
},
|
||||
"aisdk.PartType": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"text",
|
||||
"reasoning",
|
||||
"tool-invocation",
|
||||
"source",
|
||||
"file",
|
||||
"step-start"
|
||||
],
|
||||
"x-enum-varnames": [
|
||||
"PartTypeText",
|
||||
"PartTypeReasoning",
|
||||
"PartTypeToolInvocation",
|
||||
"PartTypeSource",
|
||||
"PartTypeFile",
|
||||
"PartTypeStepStart"
|
||||
]
|
||||
},
|
||||
"aisdk.ReasoningDetail": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"data": {
|
||||
"type": "string"
|
||||
},
|
||||
"signature": {
|
||||
"type": "string"
|
||||
},
|
||||
"text": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"aisdk.SourceInfo": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"contentType": {
|
||||
"type": "string"
|
||||
},
|
||||
"data": {
|
||||
"type": "string"
|
||||
},
|
||||
"metadata": {
|
||||
"type": "object",
|
||||
"additionalProperties": {}
|
||||
},
|
||||
"uri": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"aisdk.ToolInvocation": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"args": {},
|
||||
"result": {},
|
||||
"state": {
|
||||
"$ref": "#/definitions/aisdk.ToolInvocationState"
|
||||
},
|
||||
"step": {
|
||||
"type": "integer"
|
||||
},
|
||||
"toolCallId": {
|
||||
"type": "string"
|
||||
},
|
||||
"toolName": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"aisdk.ToolInvocationState": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"call",
|
||||
"partial-call",
|
||||
"result"
|
||||
],
|
||||
"x-enum-varnames": [
|
||||
"ToolInvocationStateCall",
|
||||
"ToolInvocationStatePartialCall",
|
||||
"ToolInvocationStateResult"
|
||||
]
|
||||
},
|
||||
"coderd.SCIMUser": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -10892,37 +10516,6 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.AIConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"providers": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.AIProviderConfig"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.AIProviderConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"base_url": {
|
||||
"description": "BaseURL is the base URL to use for the API provider.",
|
||||
"type": "string"
|
||||
},
|
||||
"models": {
|
||||
"description": "Models is the list of models to use for the API provider.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": {
|
||||
"description": "Type is the type of the API provider.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.APIKey": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
@@ -11480,12 +11073,14 @@ const docTemplate = `{
|
||||
"enum": [
|
||||
"initiator",
|
||||
"autostart",
|
||||
"autostop"
|
||||
"autostop",
|
||||
"dormancy"
|
||||
],
|
||||
"x-enum-varnames": [
|
||||
"BuildReasonInitiator",
|
||||
"BuildReasonAutostart",
|
||||
"BuildReasonAutostop"
|
||||
"BuildReasonAutostop",
|
||||
"BuildReasonDormancy"
|
||||
]
|
||||
},
|
||||
"codersdk.ChangePasswordWithOneTimePasscodeRequest": {
|
||||
@@ -11508,62 +11103,6 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.Chat": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"created_at": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"title": {
|
||||
"type": "string"
|
||||
},
|
||||
"updated_at": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.ChatMessage": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"annotations": {
|
||||
"type": "array",
|
||||
"items": {}
|
||||
},
|
||||
"content": {
|
||||
"type": "string"
|
||||
},
|
||||
"createdAt": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"experimental_attachments": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/aisdk.Attachment"
|
||||
}
|
||||
},
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
"parts": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/aisdk.Part"
|
||||
}
|
||||
},
|
||||
"role": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.ConnectionLatency": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -11597,20 +11136,6 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.CreateChatMessageRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"message": {
|
||||
"$ref": "#/definitions/codersdk.ChatMessage"
|
||||
},
|
||||
"model": {
|
||||
"type": "string"
|
||||
},
|
||||
"thinking": {
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.CreateFirstUserRequest": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
@@ -11898,73 +11423,7 @@ const docTemplate = `{
|
||||
}
|
||||
},
|
||||
"codersdk.CreateTestAuditLogRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"action": {
|
||||
"enum": [
|
||||
"create",
|
||||
"write",
|
||||
"delete",
|
||||
"start",
|
||||
"stop"
|
||||
],
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/codersdk.AuditAction"
|
||||
}
|
||||
]
|
||||
},
|
||||
"additional_fields": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"build_reason": {
|
||||
"enum": [
|
||||
"autostart",
|
||||
"autostop",
|
||||
"initiator"
|
||||
],
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/codersdk.BuildReason"
|
||||
}
|
||||
]
|
||||
},
|
||||
"organization_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"request_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"resource_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"resource_type": {
|
||||
"enum": [
|
||||
"template",
|
||||
"template_version",
|
||||
"user",
|
||||
"workspace",
|
||||
"workspace_build",
|
||||
"git_ssh_key",
|
||||
"auditable_group"
|
||||
],
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/codersdk.ResourceType"
|
||||
}
|
||||
]
|
||||
},
|
||||
"time": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
}
|
||||
}
|
||||
"type": "object"
|
||||
},
|
||||
"codersdk.CreateTokenRequest": {
|
||||
"type": "object",
|
||||
@@ -12410,9 +11869,6 @@ const docTemplate = `{
|
||||
"agent_stat_refresh_interval": {
|
||||
"type": "integer"
|
||||
},
|
||||
"ai": {
|
||||
"$ref": "#/definitions/serpent.Struct-codersdk_AIConfig"
|
||||
},
|
||||
"allow_workspace_renames": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -12740,17 +12196,13 @@ const docTemplate = `{
|
||||
"auto-fill-parameters",
|
||||
"notifications",
|
||||
"workspace-usage",
|
||||
"web-push",
|
||||
"workspace-prebuilds",
|
||||
"agentic-chat"
|
||||
"web-push"
|
||||
],
|
||||
"x-enum-comments": {
|
||||
"ExperimentAgenticChat": "Enables the new agentic AI chat feature.",
|
||||
"ExperimentAutoFillParameters": "This should not be taken out of experiments until we have redesigned the feature.",
|
||||
"ExperimentExample": "This isn't used for anything.",
|
||||
"ExperimentNotifications": "Sends notifications via SMTP and webhooks following certain events.",
|
||||
"ExperimentWebPush": "Enables web push notifications through the browser.",
|
||||
"ExperimentWorkspacePrebuilds": "Enables the new workspace prebuilds feature.",
|
||||
"ExperimentWorkspaceUsage": "Enables the new workspace usage tracking."
|
||||
},
|
||||
"x-enum-varnames": [
|
||||
@@ -12758,9 +12210,7 @@ const docTemplate = `{
|
||||
"ExperimentAutoFillParameters",
|
||||
"ExperimentNotifications",
|
||||
"ExperimentWorkspaceUsage",
|
||||
"ExperimentWebPush",
|
||||
"ExperimentWorkspacePrebuilds",
|
||||
"ExperimentAgenticChat"
|
||||
"ExperimentWebPush"
|
||||
]
|
||||
},
|
||||
"codersdk.ExternalAuth": {
|
||||
@@ -13288,33 +12738,6 @@ const docTemplate = `{
|
||||
"RequiredTemplateVariables"
|
||||
]
|
||||
},
|
||||
"codersdk.LanguageModel": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"display_name": {
|
||||
"type": "string"
|
||||
},
|
||||
"id": {
|
||||
"description": "ID is used by the provider to identify the LLM.",
|
||||
"type": "string"
|
||||
},
|
||||
"provider": {
|
||||
"description": "Provider is the provider of the LLM. e.g. openai, anthropic, etc.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.LanguageModelConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"models": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.LanguageModel"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.License": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -15233,7 +14656,6 @@ const docTemplate = `{
|
||||
"assign_org_role",
|
||||
"assign_role",
|
||||
"audit_log",
|
||||
"chat",
|
||||
"crypto_key",
|
||||
"debug_info",
|
||||
"deployment_config",
|
||||
@@ -15273,7 +14695,6 @@ const docTemplate = `{
|
||||
"ResourceAssignOrgRole",
|
||||
"ResourceAssignRole",
|
||||
"ResourceAuditLog",
|
||||
"ResourceChat",
|
||||
"ResourceCryptoKey",
|
||||
"ResourceDebugInfo",
|
||||
"ResourceDeploymentConfig",
|
||||
@@ -19342,14 +18763,6 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"serpent.Struct-codersdk_AIConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"value": {
|
||||
"$ref": "#/definitions/codersdk.AIConfig"
|
||||
}
|
||||
}
|
||||
},
|
||||
"serpent.URL": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
||||
Generated
+9
-557
@@ -291,151 +291,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/chats": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": ["application/json"],
|
||||
"tags": ["Chat"],
|
||||
"summary": "List chats",
|
||||
"operationId": "list-chats",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.Chat"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": ["application/json"],
|
||||
"tags": ["Chat"],
|
||||
"summary": "Create a chat",
|
||||
"operationId": "create-a-chat",
|
||||
"responses": {
|
||||
"201": {
|
||||
"description": "Created",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.Chat"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/chats/{chat}": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": ["application/json"],
|
||||
"tags": ["Chat"],
|
||||
"summary": "Get a chat",
|
||||
"operationId": "get-a-chat",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Chat ID",
|
||||
"name": "chat",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.Chat"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/chats/{chat}/messages": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": ["application/json"],
|
||||
"tags": ["Chat"],
|
||||
"summary": "Get chat messages",
|
||||
"operationId": "get-chat-messages",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Chat ID",
|
||||
"name": "chat",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/aisdk.Message"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"consumes": ["application/json"],
|
||||
"produces": ["application/json"],
|
||||
"tags": ["Chat"],
|
||||
"summary": "Create a chat message",
|
||||
"operationId": "create-a-chat-message",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Chat ID",
|
||||
"name": "chat",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"description": "Request body",
|
||||
"name": "request",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.CreateChatMessageRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/csp/reports": {
|
||||
"post": {
|
||||
"security": [
|
||||
@@ -708,27 +563,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/deployment/llms": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"CoderSessionToken": []
|
||||
}
|
||||
],
|
||||
"produces": ["application/json"],
|
||||
"tags": ["General"],
|
||||
"summary": "Get language models",
|
||||
"operationId": "get-language-models",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/codersdk.LanguageModelConfig"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/deployment/ssh": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -7638,7 +7472,7 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/workspaceagents/{workspaceagent}/containers/devcontainers/container/{container}/recreate": {
|
||||
"/workspaceagents/{workspaceagent}/containers/devcontainers/{devcontainer}/recreate": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
@@ -7660,8 +7494,8 @@
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Container ID or name",
|
||||
"name": "container",
|
||||
"description": "Devcontainer ID",
|
||||
"name": "devcontainer",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
@@ -9410,186 +9244,6 @@
|
||||
"enum": ["prebuild_claimed"],
|
||||
"x-enum-varnames": ["ReinitializeReasonPrebuildClaimed"]
|
||||
},
|
||||
"aisdk.Attachment": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"contentType": {
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"url": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"aisdk.Message": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"annotations": {
|
||||
"type": "array",
|
||||
"items": {}
|
||||
},
|
||||
"content": {
|
||||
"type": "string"
|
||||
},
|
||||
"createdAt": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"experimental_attachments": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/aisdk.Attachment"
|
||||
}
|
||||
},
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
"parts": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/aisdk.Part"
|
||||
}
|
||||
},
|
||||
"role": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"aisdk.Part": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"data": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/aisdk.ReasoningDetail"
|
||||
}
|
||||
},
|
||||
"mimeType": {
|
||||
"description": "Type: \"file\"",
|
||||
"type": "string"
|
||||
},
|
||||
"reasoning": {
|
||||
"description": "Type: \"reasoning\"",
|
||||
"type": "string"
|
||||
},
|
||||
"source": {
|
||||
"description": "Type: \"source\"",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/aisdk.SourceInfo"
|
||||
}
|
||||
]
|
||||
},
|
||||
"text": {
|
||||
"description": "Type: \"text\"",
|
||||
"type": "string"
|
||||
},
|
||||
"toolInvocation": {
|
||||
"description": "Type: \"tool-invocation\"",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/aisdk.ToolInvocation"
|
||||
}
|
||||
]
|
||||
},
|
||||
"type": {
|
||||
"$ref": "#/definitions/aisdk.PartType"
|
||||
}
|
||||
}
|
||||
},
|
||||
"aisdk.PartType": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"text",
|
||||
"reasoning",
|
||||
"tool-invocation",
|
||||
"source",
|
||||
"file",
|
||||
"step-start"
|
||||
],
|
||||
"x-enum-varnames": [
|
||||
"PartTypeText",
|
||||
"PartTypeReasoning",
|
||||
"PartTypeToolInvocation",
|
||||
"PartTypeSource",
|
||||
"PartTypeFile",
|
||||
"PartTypeStepStart"
|
||||
]
|
||||
},
|
||||
"aisdk.ReasoningDetail": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"data": {
|
||||
"type": "string"
|
||||
},
|
||||
"signature": {
|
||||
"type": "string"
|
||||
},
|
||||
"text": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"aisdk.SourceInfo": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"contentType": {
|
||||
"type": "string"
|
||||
},
|
||||
"data": {
|
||||
"type": "string"
|
||||
},
|
||||
"metadata": {
|
||||
"type": "object",
|
||||
"additionalProperties": {}
|
||||
},
|
||||
"uri": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"aisdk.ToolInvocation": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"args": {},
|
||||
"result": {},
|
||||
"state": {
|
||||
"$ref": "#/definitions/aisdk.ToolInvocationState"
|
||||
},
|
||||
"step": {
|
||||
"type": "integer"
|
||||
},
|
||||
"toolCallId": {
|
||||
"type": "string"
|
||||
},
|
||||
"toolName": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"aisdk.ToolInvocationState": {
|
||||
"type": "string",
|
||||
"enum": ["call", "partial-call", "result"],
|
||||
"x-enum-varnames": [
|
||||
"ToolInvocationStateCall",
|
||||
"ToolInvocationStatePartialCall",
|
||||
"ToolInvocationStateResult"
|
||||
]
|
||||
},
|
||||
"coderd.SCIMUser": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -9681,37 +9335,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.AIConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"providers": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.AIProviderConfig"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.AIProviderConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"base_url": {
|
||||
"description": "BaseURL is the base URL to use for the API provider.",
|
||||
"type": "string"
|
||||
},
|
||||
"models": {
|
||||
"description": "Models is the list of models to use for the API provider.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": {
|
||||
"description": "Type is the type of the API provider.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.APIKey": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
@@ -10235,11 +9858,12 @@
|
||||
},
|
||||
"codersdk.BuildReason": {
|
||||
"type": "string",
|
||||
"enum": ["initiator", "autostart", "autostop"],
|
||||
"enum": ["initiator", "autostart", "autostop", "dormancy"],
|
||||
"x-enum-varnames": [
|
||||
"BuildReasonInitiator",
|
||||
"BuildReasonAutostart",
|
||||
"BuildReasonAutostop"
|
||||
"BuildReasonAutostop",
|
||||
"BuildReasonDormancy"
|
||||
]
|
||||
},
|
||||
"codersdk.ChangePasswordWithOneTimePasscodeRequest": {
|
||||
@@ -10258,62 +9882,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.Chat": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"created_at": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"title": {
|
||||
"type": "string"
|
||||
},
|
||||
"updated_at": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.ChatMessage": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"annotations": {
|
||||
"type": "array",
|
||||
"items": {}
|
||||
},
|
||||
"content": {
|
||||
"type": "string"
|
||||
},
|
||||
"createdAt": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"experimental_attachments": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/aisdk.Attachment"
|
||||
}
|
||||
},
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
"parts": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/aisdk.Part"
|
||||
}
|
||||
},
|
||||
"role": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.ConnectionLatency": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -10344,20 +9912,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.CreateChatMessageRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"message": {
|
||||
"$ref": "#/definitions/codersdk.ChatMessage"
|
||||
},
|
||||
"model": {
|
||||
"type": "string"
|
||||
},
|
||||
"thinking": {
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.CreateFirstUserRequest": {
|
||||
"type": "object",
|
||||
"required": ["email", "password", "username"],
|
||||
@@ -10626,63 +10180,7 @@
|
||||
}
|
||||
},
|
||||
"codersdk.CreateTestAuditLogRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"action": {
|
||||
"enum": ["create", "write", "delete", "start", "stop"],
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/codersdk.AuditAction"
|
||||
}
|
||||
]
|
||||
},
|
||||
"additional_fields": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"build_reason": {
|
||||
"enum": ["autostart", "autostop", "initiator"],
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/codersdk.BuildReason"
|
||||
}
|
||||
]
|
||||
},
|
||||
"organization_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"request_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"resource_id": {
|
||||
"type": "string",
|
||||
"format": "uuid"
|
||||
},
|
||||
"resource_type": {
|
||||
"enum": [
|
||||
"template",
|
||||
"template_version",
|
||||
"user",
|
||||
"workspace",
|
||||
"workspace_build",
|
||||
"git_ssh_key",
|
||||
"auditable_group"
|
||||
],
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/codersdk.ResourceType"
|
||||
}
|
||||
]
|
||||
},
|
||||
"time": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
}
|
||||
}
|
||||
"type": "object"
|
||||
},
|
||||
"codersdk.CreateTokenRequest": {
|
||||
"type": "object",
|
||||
@@ -11110,9 +10608,6 @@
|
||||
"agent_stat_refresh_interval": {
|
||||
"type": "integer"
|
||||
},
|
||||
"ai": {
|
||||
"$ref": "#/definitions/serpent.Struct-codersdk_AIConfig"
|
||||
},
|
||||
"allow_workspace_renames": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -11433,17 +10928,13 @@
|
||||
"auto-fill-parameters",
|
||||
"notifications",
|
||||
"workspace-usage",
|
||||
"web-push",
|
||||
"workspace-prebuilds",
|
||||
"agentic-chat"
|
||||
"web-push"
|
||||
],
|
||||
"x-enum-comments": {
|
||||
"ExperimentAgenticChat": "Enables the new agentic AI chat feature.",
|
||||
"ExperimentAutoFillParameters": "This should not be taken out of experiments until we have redesigned the feature.",
|
||||
"ExperimentExample": "This isn't used for anything.",
|
||||
"ExperimentNotifications": "Sends notifications via SMTP and webhooks following certain events.",
|
||||
"ExperimentWebPush": "Enables web push notifications through the browser.",
|
||||
"ExperimentWorkspacePrebuilds": "Enables the new workspace prebuilds feature.",
|
||||
"ExperimentWorkspaceUsage": "Enables the new workspace usage tracking."
|
||||
},
|
||||
"x-enum-varnames": [
|
||||
@@ -11451,9 +10942,7 @@
|
||||
"ExperimentAutoFillParameters",
|
||||
"ExperimentNotifications",
|
||||
"ExperimentWorkspaceUsage",
|
||||
"ExperimentWebPush",
|
||||
"ExperimentWorkspacePrebuilds",
|
||||
"ExperimentAgenticChat"
|
||||
"ExperimentWebPush"
|
||||
]
|
||||
},
|
||||
"codersdk.ExternalAuth": {
|
||||
@@ -11965,33 +11454,6 @@
|
||||
"enum": ["REQUIRED_TEMPLATE_VARIABLES"],
|
||||
"x-enum-varnames": ["RequiredTemplateVariables"]
|
||||
},
|
||||
"codersdk.LanguageModel": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"display_name": {
|
||||
"type": "string"
|
||||
},
|
||||
"id": {
|
||||
"description": "ID is used by the provider to identify the LLM.",
|
||||
"type": "string"
|
||||
},
|
||||
"provider": {
|
||||
"description": "Provider is the provider of the LLM. e.g. openai, anthropic, etc.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.LanguageModelConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"models": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/codersdk.LanguageModel"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"codersdk.License": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -13825,7 +13287,6 @@
|
||||
"assign_org_role",
|
||||
"assign_role",
|
||||
"audit_log",
|
||||
"chat",
|
||||
"crypto_key",
|
||||
"debug_info",
|
||||
"deployment_config",
|
||||
@@ -13865,7 +13326,6 @@
|
||||
"ResourceAssignOrgRole",
|
||||
"ResourceAssignRole",
|
||||
"ResourceAuditLog",
|
||||
"ResourceChat",
|
||||
"ResourceCryptoKey",
|
||||
"ResourceDebugInfo",
|
||||
"ResourceDeploymentConfig",
|
||||
@@ -17720,14 +17180,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"serpent.Struct-codersdk_AIConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"value": {
|
||||
"$ref": "#/definitions/codersdk.AIConfig"
|
||||
}
|
||||
}
|
||||
},
|
||||
"serpent.URL": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
||||
+33
-3
@@ -12,6 +12,8 @@ import (
|
||||
"github.com/moby/moby/pkg/namesgenerator"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/apikey"
|
||||
"github.com/coder/coder/v2/coderd/audit"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
@@ -56,6 +58,14 @@ func (api *API) postToken(rw http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// TODO(Cian): System users technically just have the 'member' role
|
||||
// and we don't want to disallow all members from creating API keys.
|
||||
if user.IsSystem {
|
||||
api.Logger.Warn(ctx, "disallowed creating api key for system user", slog.F("user_id", user.ID))
|
||||
httpapi.Forbidden(rw)
|
||||
return
|
||||
}
|
||||
|
||||
scope := database.APIKeyScopeAll
|
||||
if scope != "" {
|
||||
scope = database.APIKeyScope(createToken.Scope)
|
||||
@@ -121,10 +131,29 @@ func (api *API) postToken(rw http.ResponseWriter, r *http.Request) {
|
||||
// @Success 201 {object} codersdk.GenerateAPIKeyResponse
|
||||
// @Router /users/{user}/keys [post]
|
||||
func (api *API) postAPIKey(rw http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
user := httpmw.UserParam(r)
|
||||
var (
|
||||
ctx = r.Context()
|
||||
user = httpmw.UserParam(r)
|
||||
auditor = api.Auditor.Load()
|
||||
aReq, commitAudit = audit.InitRequest[database.APIKey](rw, &audit.RequestParams{
|
||||
Audit: *auditor,
|
||||
Log: api.Logger,
|
||||
Request: r,
|
||||
Action: database.AuditActionCreate,
|
||||
})
|
||||
)
|
||||
aReq.Old = database.APIKey{}
|
||||
defer commitAudit()
|
||||
|
||||
cookie, _, err := api.createAPIKey(ctx, apikey.CreateParams{
|
||||
// TODO(Cian): System users technically just have the 'member' role
|
||||
// and we don't want to disallow all members from creating API keys.
|
||||
if user.IsSystem {
|
||||
api.Logger.Warn(ctx, "disallowed creating api key for system user", slog.F("user_id", user.ID))
|
||||
httpapi.Forbidden(rw)
|
||||
return
|
||||
}
|
||||
|
||||
cookie, key, err := api.createAPIKey(ctx, apikey.CreateParams{
|
||||
UserID: user.ID,
|
||||
DefaultLifetime: api.DeploymentValues.Sessions.DefaultTokenDuration.Value(),
|
||||
LoginType: database.LoginTypePassword,
|
||||
@@ -138,6 +167,7 @@ func (api *API) postAPIKey(rw http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
aReq.New = *key
|
||||
// We intentionally do not set the cookie on the response here.
|
||||
// Setting the cookie will couple the browser session to the API
|
||||
// key we return here, meaning logging out of the website would
|
||||
|
||||
+54
-2
@@ -2,6 +2,7 @@ package coderd_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -13,8 +14,10 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/audit"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtestutil"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
"github.com/coder/serpent"
|
||||
@@ -301,14 +304,32 @@ func TestSessionExpiry(t *testing.T) {
|
||||
|
||||
func TestAPIKey_OK(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Given: a deployment with auditing enabled
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
_ = coderdtest.CreateFirstUser(t, client)
|
||||
auditor := audit.NewMock()
|
||||
client := coderdtest.New(t, &coderdtest.Options{Auditor: auditor})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
auditor.ResetLogs()
|
||||
|
||||
// When: an API key is created
|
||||
res, err := client.CreateAPIKey(ctx, codersdk.Me)
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, len(res.Key), 2)
|
||||
|
||||
// Then: an audit log is generated
|
||||
als := auditor.AuditLogs()
|
||||
require.Len(t, als, 1)
|
||||
al := als[0]
|
||||
assert.Equal(t, owner.UserID, al.UserID)
|
||||
assert.Equal(t, database.AuditActionCreate, al.Action)
|
||||
assert.Equal(t, database.ResourceTypeApiKey, al.ResourceType)
|
||||
|
||||
// Then: the diff MUST NOT contain the generated key.
|
||||
raw, err := json.Marshal(al)
|
||||
require.NoError(t, err)
|
||||
require.NotContains(t, res.Key, string(raw))
|
||||
}
|
||||
|
||||
func TestAPIKey_Deleted(t *testing.T) {
|
||||
@@ -351,3 +372,34 @@ func TestAPIKey_SetDefault(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, dc.Sessions.DefaultTokenDuration.Value().Seconds(), apiKey1.LifetimeSeconds)
|
||||
}
|
||||
|
||||
func TestAPIKey_PrebuildsNotAllowed(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
db, pubsub := dbtestutil.NewDB(t)
|
||||
dc := coderdtest.DeploymentValues(t)
|
||||
dc.Sessions.DefaultTokenDuration = serpent.Duration(time.Hour * 12)
|
||||
client := coderdtest.New(t, &coderdtest.Options{
|
||||
Database: db,
|
||||
Pubsub: pubsub,
|
||||
DeploymentValues: dc,
|
||||
})
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
// Given: an existing api token for the prebuilds user
|
||||
_, prebuildsToken := dbgen.APIKey(t, db, database.APIKey{
|
||||
UserID: database.PrebuildsSystemUserID,
|
||||
})
|
||||
client.SetSessionToken(prebuildsToken)
|
||||
|
||||
// When: the prebuilds user tries to create an API key
|
||||
_, err := client.CreateAPIKey(ctx, database.PrebuildsSystemUserID.String())
|
||||
// Then: denied.
|
||||
require.ErrorContains(t, err, httpapi.ResourceForbiddenResponse.Message)
|
||||
|
||||
// When: the prebuilds user tries to create a token
|
||||
_, err = client.CreateToken(ctx, database.PrebuildsSystemUserID.String(), codersdk.CreateTokenRequest{})
|
||||
// Then: also denied.
|
||||
require.ErrorContains(t, err, httpapi.ResourceForbiddenResponse.Message)
|
||||
}
|
||||
|
||||
+23
-11
@@ -46,7 +46,7 @@ func (api *API) auditLogs(rw http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
queryStr := r.URL.Query().Get("q")
|
||||
filter, errs := searchquery.AuditLogs(ctx, api.Database, queryStr)
|
||||
filter, countFilter, errs := searchquery.AuditLogs(ctx, api.Database, queryStr)
|
||||
if len(errs) > 0 {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Invalid audit search query.",
|
||||
@@ -62,6 +62,27 @@ func (api *API) auditLogs(rw http.ResponseWriter, r *http.Request) {
|
||||
if filter.Username == "me" {
|
||||
filter.UserID = apiKey.UserID
|
||||
filter.Username = ""
|
||||
countFilter.UserID = apiKey.UserID
|
||||
countFilter.Username = ""
|
||||
}
|
||||
|
||||
// Use the same filters to count the number of audit logs
|
||||
count, err := api.Database.CountAuditLogs(ctx, countFilter)
|
||||
if dbauthz.IsNotAuthorizedError(err) {
|
||||
httpapi.Forbidden(rw)
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
httpapi.InternalServerError(rw, err)
|
||||
return
|
||||
}
|
||||
// If count is 0, then we don't need to query audit logs
|
||||
if count == 0 {
|
||||
httpapi.Write(ctx, rw, http.StatusOK, codersdk.AuditLogResponse{
|
||||
AuditLogs: []codersdk.AuditLog{},
|
||||
Count: 0,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
dblogs, err := api.Database.GetAuditLogsOffset(ctx, filter)
|
||||
@@ -73,19 +94,10 @@ func (api *API) auditLogs(rw http.ResponseWriter, r *http.Request) {
|
||||
httpapi.InternalServerError(rw, err)
|
||||
return
|
||||
}
|
||||
// GetAuditLogsOffset does not return ErrNoRows because it uses a window function to get the count.
|
||||
// So we need to check if the dblogs is empty and return an empty array if so.
|
||||
if len(dblogs) == 0 {
|
||||
httpapi.Write(ctx, rw, http.StatusOK, codersdk.AuditLogResponse{
|
||||
AuditLogs: []codersdk.AuditLog{},
|
||||
Count: 0,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
httpapi.Write(ctx, rw, http.StatusOK, codersdk.AuditLogResponse{
|
||||
AuditLogs: api.convertAuditLogs(ctx, dblogs),
|
||||
Count: dblogs[0].Count,
|
||||
Count: count,
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -520,6 +520,8 @@ func isEligibleForAutostart(user database.User, ws database.Workspace, build dat
|
||||
return false
|
||||
}
|
||||
|
||||
// Get the next allowed autostart time after the build's creation time,
|
||||
// based on the workspace's schedule and the template's allowed days.
|
||||
nextTransition, err := schedule.NextAllowedAutostart(build.CreatedAt, ws.AutostartSchedule.String, templateSchedule)
|
||||
if err != nil {
|
||||
return false
|
||||
|
||||
@@ -2,9 +2,16 @@ package autobuild_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/pubsub"
|
||||
"github.com/coder/coder/v2/coderd/rbac"
|
||||
"github.com/coder/quartz"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -1183,6 +1190,348 @@ func TestNotifications(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
// TestExecutorPrebuilds verifies AGPL behavior for prebuilt workspaces.
|
||||
// It ensures that workspace schedules do not trigger while the workspace
|
||||
// is still in a prebuilt state. Scheduling behavior only applies after the
|
||||
// workspace has been claimed and becomes a regular user workspace.
|
||||
// For enterprise-related functionality, see enterprise/coderd/workspaces_test.go.
|
||||
func TestExecutorPrebuilds(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if !dbtestutil.WillUsePostgres() {
|
||||
t.Skip("this test requires postgres")
|
||||
}
|
||||
|
||||
// Prebuild workspaces should not be autostopped when the deadline is reached.
|
||||
// After being claimed, the workspace should stop at the deadline.
|
||||
t.Run("OnlyStopsAfterClaimed", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Setup
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
clock := quartz.NewMock(t)
|
||||
db, pb := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure())
|
||||
var (
|
||||
tickCh = make(chan time.Time)
|
||||
statsCh = make(chan autobuild.Stats)
|
||||
client = coderdtest.New(t, &coderdtest.Options{
|
||||
Database: db,
|
||||
Pubsub: pb,
|
||||
AutobuildTicker: tickCh,
|
||||
IncludeProvisionerDaemon: true,
|
||||
AutobuildStats: statsCh,
|
||||
})
|
||||
)
|
||||
|
||||
// Setup user, template and template version
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
_, user := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID, rbac.RoleMember())
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
|
||||
// Database setup of a preset with a prebuild instance
|
||||
preset := setupTestDBPreset(t, db, version.ID, int32(1))
|
||||
|
||||
// Given: a running prebuilt workspace with a deadline and ready to be claimed
|
||||
dbPrebuild := setupTestDBPrebuiltWorkspace(
|
||||
ctx, t, clock, db, pb,
|
||||
owner.OrganizationID,
|
||||
template.ID,
|
||||
version.ID,
|
||||
preset.ID,
|
||||
)
|
||||
prebuild := coderdtest.MustWorkspace(t, client, dbPrebuild.ID)
|
||||
require.Equal(t, codersdk.WorkspaceTransitionStart, prebuild.LatestBuild.Transition)
|
||||
require.NotZero(t, prebuild.LatestBuild.Deadline)
|
||||
|
||||
// When: the autobuild executor ticks *after* the deadline:
|
||||
go func() {
|
||||
tickCh <- prebuild.LatestBuild.Deadline.Time.Add(time.Minute)
|
||||
}()
|
||||
|
||||
// Then: the prebuilt workspace should remain in a start transition
|
||||
prebuildStats := <-statsCh
|
||||
require.Len(t, prebuildStats.Errors, 0)
|
||||
require.Len(t, prebuildStats.Transitions, 0)
|
||||
require.Equal(t, codersdk.WorkspaceTransitionStart, prebuild.LatestBuild.Transition)
|
||||
prebuild = coderdtest.MustWorkspace(t, client, prebuild.ID)
|
||||
require.Equal(t, codersdk.BuildReasonInitiator, prebuild.LatestBuild.Reason)
|
||||
|
||||
// Given: a user claims the prebuilt workspace
|
||||
dbWorkspace := dbgen.ClaimPrebuild(t, db, user.ID, "claimedWorkspace-autostop", preset.ID)
|
||||
workspace := coderdtest.MustWorkspace(t, client, dbWorkspace.ID)
|
||||
|
||||
// When: the autobuild executor ticks *after* the deadline:
|
||||
go func() {
|
||||
tickCh <- workspace.LatestBuild.Deadline.Time.Add(time.Minute)
|
||||
close(tickCh)
|
||||
}()
|
||||
|
||||
// Then: the workspace should be stopped
|
||||
workspaceStats := <-statsCh
|
||||
require.Len(t, workspaceStats.Errors, 0)
|
||||
require.Len(t, workspaceStats.Transitions, 1)
|
||||
require.Contains(t, workspaceStats.Transitions, workspace.ID)
|
||||
require.Equal(t, database.WorkspaceTransitionStop, workspaceStats.Transitions[workspace.ID])
|
||||
workspace = coderdtest.MustWorkspace(t, client, workspace.ID)
|
||||
require.Equal(t, codersdk.BuildReasonAutostop, workspace.LatestBuild.Reason)
|
||||
})
|
||||
|
||||
// Prebuild workspaces should not be autostarted when the autostart scheduled is reached.
|
||||
// After being claimed, the workspace should autostart at the schedule.
|
||||
t.Run("OnlyStartsAfterClaimed", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Setup
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
clock := quartz.NewMock(t)
|
||||
db, pb := dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure())
|
||||
var (
|
||||
tickCh = make(chan time.Time)
|
||||
statsCh = make(chan autobuild.Stats)
|
||||
client = coderdtest.New(t, &coderdtest.Options{
|
||||
Database: db,
|
||||
Pubsub: pb,
|
||||
AutobuildTicker: tickCh,
|
||||
IncludeProvisionerDaemon: true,
|
||||
AutobuildStats: statsCh,
|
||||
})
|
||||
)
|
||||
|
||||
// Setup user, template and template version
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
_, user := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID, rbac.RoleMember())
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
|
||||
// Database setup of a preset with a prebuild instance
|
||||
preset := setupTestDBPreset(t, db, version.ID, int32(1))
|
||||
|
||||
// Given: prebuilt workspace is stopped and set to autostart daily at midnight
|
||||
sched := mustSchedule(t, "CRON_TZ=UTC 0 0 * * *")
|
||||
autostartSched := sql.NullString{
|
||||
String: sched.String(),
|
||||
Valid: true,
|
||||
}
|
||||
dbPrebuild := setupTestDBPrebuiltWorkspace(
|
||||
ctx, t, clock, db, pb,
|
||||
owner.OrganizationID,
|
||||
template.ID,
|
||||
version.ID,
|
||||
preset.ID,
|
||||
WithAutostartSchedule(autostartSched),
|
||||
WithIsStopped(true),
|
||||
)
|
||||
prebuild := coderdtest.MustWorkspace(t, client, dbPrebuild.ID)
|
||||
require.Equal(t, codersdk.WorkspaceTransitionStop, prebuild.LatestBuild.Transition)
|
||||
require.NotNil(t, prebuild.AutostartSchedule)
|
||||
|
||||
// Tick at the next scheduled time after the prebuild’s LatestBuild.CreatedAt,
|
||||
// since the next allowed autostart is calculated starting from that point.
|
||||
// When: the autobuild executor ticks after the scheduled time
|
||||
go func() {
|
||||
tickCh <- sched.Next(prebuild.LatestBuild.CreatedAt).Add(time.Minute)
|
||||
}()
|
||||
|
||||
// Then: the prebuilt workspace should remain in a stop transition
|
||||
prebuildStats := <-statsCh
|
||||
require.Len(t, prebuildStats.Errors, 0)
|
||||
require.Len(t, prebuildStats.Transitions, 0)
|
||||
require.Equal(t, codersdk.WorkspaceTransitionStop, prebuild.LatestBuild.Transition)
|
||||
prebuild = coderdtest.MustWorkspace(t, client, prebuild.ID)
|
||||
require.Equal(t, codersdk.BuildReasonInitiator, prebuild.LatestBuild.Reason)
|
||||
|
||||
// Given: prebuilt workspace is in a start status
|
||||
setupTestDBWorkspaceBuild(
|
||||
ctx, t, clock, db, pb,
|
||||
owner.OrganizationID,
|
||||
prebuild.ID,
|
||||
version.ID,
|
||||
preset.ID,
|
||||
database.WorkspaceTransitionStart)
|
||||
|
||||
// Given: a user claims the prebuilt workspace
|
||||
dbWorkspace := dbgen.ClaimPrebuild(t, db, user.ID, "claimedWorkspace-autostart", preset.ID)
|
||||
workspace := coderdtest.MustWorkspace(t, client, dbWorkspace.ID)
|
||||
|
||||
// Given: the prebuilt workspace goes to a stop status
|
||||
setupTestDBWorkspaceBuild(
|
||||
ctx, t, clock, db, pb,
|
||||
owner.OrganizationID,
|
||||
prebuild.ID,
|
||||
version.ID,
|
||||
preset.ID,
|
||||
database.WorkspaceTransitionStop)
|
||||
|
||||
// Tick at the next scheduled time after the prebuild’s LatestBuild.CreatedAt,
|
||||
// since the next allowed autostart is calculated starting from that point.
|
||||
// When: the autobuild executor ticks after the scheduled time
|
||||
go func() {
|
||||
tickCh <- sched.Next(workspace.LatestBuild.CreatedAt).Add(time.Minute)
|
||||
close(tickCh)
|
||||
}()
|
||||
|
||||
// Then: the workspace should eventually be started
|
||||
workspaceStats := <-statsCh
|
||||
require.Len(t, workspaceStats.Errors, 0)
|
||||
require.Len(t, workspaceStats.Transitions, 1)
|
||||
require.Contains(t, workspaceStats.Transitions, workspace.ID)
|
||||
require.Equal(t, database.WorkspaceTransitionStart, workspaceStats.Transitions[workspace.ID])
|
||||
workspace = coderdtest.MustWorkspace(t, client, workspace.ID)
|
||||
require.Equal(t, codersdk.BuildReasonAutostart, workspace.LatestBuild.Reason)
|
||||
})
|
||||
}
|
||||
|
||||
func setupTestDBPreset(
|
||||
t *testing.T,
|
||||
db database.Store,
|
||||
templateVersionID uuid.UUID,
|
||||
desiredInstances int32,
|
||||
) database.TemplateVersionPreset {
|
||||
t.Helper()
|
||||
|
||||
preset := dbgen.Preset(t, db, database.InsertPresetParams{
|
||||
TemplateVersionID: templateVersionID,
|
||||
Name: "preset-test",
|
||||
DesiredInstances: sql.NullInt32{
|
||||
Valid: true,
|
||||
Int32: desiredInstances,
|
||||
},
|
||||
})
|
||||
dbgen.PresetParameter(t, db, database.InsertPresetParametersParams{
|
||||
TemplateVersionPresetID: preset.ID,
|
||||
Names: []string{"test-name"},
|
||||
Values: []string{"test-value"},
|
||||
})
|
||||
|
||||
return preset
|
||||
}
|
||||
|
||||
type SetupPrebuiltOptions struct {
|
||||
AutostartSchedule sql.NullString
|
||||
IsStopped bool
|
||||
}
|
||||
|
||||
func WithAutostartSchedule(sched sql.NullString) func(*SetupPrebuiltOptions) {
|
||||
return func(o *SetupPrebuiltOptions) {
|
||||
o.AutostartSchedule = sched
|
||||
}
|
||||
}
|
||||
|
||||
func WithIsStopped(isStopped bool) func(*SetupPrebuiltOptions) {
|
||||
return func(o *SetupPrebuiltOptions) {
|
||||
o.IsStopped = isStopped
|
||||
}
|
||||
}
|
||||
|
||||
func setupTestDBWorkspaceBuild(
|
||||
ctx context.Context,
|
||||
t *testing.T,
|
||||
clock quartz.Clock,
|
||||
db database.Store,
|
||||
ps pubsub.Pubsub,
|
||||
orgID uuid.UUID,
|
||||
workspaceID uuid.UUID,
|
||||
templateVersionID uuid.UUID,
|
||||
presetID uuid.UUID,
|
||||
transition database.WorkspaceTransition,
|
||||
) (database.ProvisionerJob, database.WorkspaceBuild) {
|
||||
t.Helper()
|
||||
|
||||
var buildNumber int32 = 1
|
||||
latestWorkspaceBuild, err := db.GetLatestWorkspaceBuildByWorkspaceID(ctx, workspaceID)
|
||||
if !errors.Is(err, sql.ErrNoRows) {
|
||||
buildNumber = latestWorkspaceBuild.BuildNumber + 1
|
||||
}
|
||||
|
||||
job := dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{
|
||||
InitiatorID: database.PrebuildsSystemUserID,
|
||||
CreatedAt: clock.Now().Add(-time.Hour * 2),
|
||||
StartedAt: sql.NullTime{Time: clock.Now().Add(-time.Hour * 2), Valid: true},
|
||||
CompletedAt: sql.NullTime{Time: clock.Now().Add(-time.Hour), Valid: true},
|
||||
OrganizationID: orgID,
|
||||
JobStatus: database.ProvisionerJobStatusSucceeded,
|
||||
})
|
||||
workspaceBuild := dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
|
||||
WorkspaceID: workspaceID,
|
||||
InitiatorID: database.PrebuildsSystemUserID,
|
||||
TemplateVersionID: templateVersionID,
|
||||
BuildNumber: buildNumber,
|
||||
JobID: job.ID,
|
||||
TemplateVersionPresetID: uuid.NullUUID{UUID: presetID, Valid: true},
|
||||
Transition: transition,
|
||||
CreatedAt: clock.Now(),
|
||||
})
|
||||
dbgen.WorkspaceBuildParameters(t, db, []database.WorkspaceBuildParameter{
|
||||
{
|
||||
WorkspaceBuildID: workspaceBuild.ID,
|
||||
Name: "test",
|
||||
Value: "test",
|
||||
},
|
||||
})
|
||||
|
||||
workspaceResource := dbgen.WorkspaceResource(t, db, database.WorkspaceResource{
|
||||
JobID: job.ID,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
Type: "compute",
|
||||
Name: "main",
|
||||
})
|
||||
|
||||
// Workspaces are eligible to be claimed once their agent is marked "ready"
|
||||
dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{
|
||||
Name: "test",
|
||||
ResourceID: workspaceResource.ID,
|
||||
Architecture: "i386",
|
||||
OperatingSystem: "linux",
|
||||
LifecycleState: database.WorkspaceAgentLifecycleStateReady,
|
||||
StartedAt: sql.NullTime{Time: time.Now().Add(time.Hour), Valid: true},
|
||||
ReadyAt: sql.NullTime{Time: time.Now().Add(-1 * time.Hour), Valid: true},
|
||||
APIKeyScope: database.AgentKeyScopeEnumAll,
|
||||
})
|
||||
|
||||
return job, workspaceBuild
|
||||
}
|
||||
|
||||
func setupTestDBPrebuiltWorkspace(
|
||||
ctx context.Context,
|
||||
t *testing.T,
|
||||
clock quartz.Clock,
|
||||
db database.Store,
|
||||
ps pubsub.Pubsub,
|
||||
orgID uuid.UUID,
|
||||
templateID uuid.UUID,
|
||||
templateVersionID uuid.UUID,
|
||||
presetID uuid.UUID,
|
||||
opts ...func(*SetupPrebuiltOptions),
|
||||
) database.WorkspaceTable {
|
||||
t.Helper()
|
||||
|
||||
// Optional parameters
|
||||
options := &SetupPrebuiltOptions{}
|
||||
for _, opt := range opts {
|
||||
opt(options)
|
||||
}
|
||||
|
||||
buildTransition := database.WorkspaceTransitionStart
|
||||
if options.IsStopped {
|
||||
buildTransition = database.WorkspaceTransitionStop
|
||||
}
|
||||
|
||||
workspace := dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
TemplateID: templateID,
|
||||
OrganizationID: orgID,
|
||||
OwnerID: database.PrebuildsSystemUserID,
|
||||
Deleted: false,
|
||||
CreatedAt: time.Now().Add(-time.Hour * 2),
|
||||
AutostartSchedule: options.AutostartSchedule,
|
||||
})
|
||||
setupTestDBWorkspaceBuild(ctx, t, clock, db, ps, orgID, workspace.ID, templateVersionID, presetID, buildTransition)
|
||||
|
||||
return workspace
|
||||
}
|
||||
|
||||
func mustProvisionWorkspace(t *testing.T, client *codersdk.Client, mut ...func(*codersdk.CreateWorkspaceRequest)) codersdk.Workspace {
|
||||
t.Helper()
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
-366
@@ -1,366 +0,0 @@
|
||||
package coderd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/kylecarbs/aisdk-go"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/ai"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/db2sdk"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
"github.com/coder/coder/v2/coderd/httpmw"
|
||||
"github.com/coder/coder/v2/coderd/util/strings"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/toolsdk"
|
||||
)
|
||||
|
||||
// postChats creates a new chat.
|
||||
//
|
||||
// @Summary Create a chat
|
||||
// @ID create-a-chat
|
||||
// @Security CoderSessionToken
|
||||
// @Produce json
|
||||
// @Tags Chat
|
||||
// @Success 201 {object} codersdk.Chat
|
||||
// @Router /chats [post]
|
||||
func (api *API) postChats(w http.ResponseWriter, r *http.Request) {
|
||||
apiKey := httpmw.APIKey(r)
|
||||
ctx := r.Context()
|
||||
|
||||
chat, err := api.Database.InsertChat(ctx, database.InsertChatParams{
|
||||
OwnerID: apiKey.UserID,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
Title: "New Chat",
|
||||
})
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Failed to create chat",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
httpapi.Write(ctx, w, http.StatusCreated, db2sdk.Chat(chat))
|
||||
}
|
||||
|
||||
// listChats lists all chats for a user.
|
||||
//
|
||||
// @Summary List chats
|
||||
// @ID list-chats
|
||||
// @Security CoderSessionToken
|
||||
// @Produce json
|
||||
// @Tags Chat
|
||||
// @Success 200 {array} codersdk.Chat
|
||||
// @Router /chats [get]
|
||||
func (api *API) listChats(w http.ResponseWriter, r *http.Request) {
|
||||
apiKey := httpmw.APIKey(r)
|
||||
ctx := r.Context()
|
||||
|
||||
chats, err := api.Database.GetChatsByOwnerID(ctx, apiKey.UserID)
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Failed to list chats",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
httpapi.Write(ctx, w, http.StatusOK, db2sdk.Chats(chats))
|
||||
}
|
||||
|
||||
// chat returns a chat by ID.
|
||||
//
|
||||
// @Summary Get a chat
|
||||
// @ID get-a-chat
|
||||
// @Security CoderSessionToken
|
||||
// @Produce json
|
||||
// @Tags Chat
|
||||
// @Param chat path string true "Chat ID"
|
||||
// @Success 200 {object} codersdk.Chat
|
||||
// @Router /chats/{chat} [get]
|
||||
func (*API) chat(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
chat := httpmw.ChatParam(r)
|
||||
httpapi.Write(ctx, w, http.StatusOK, db2sdk.Chat(chat))
|
||||
}
|
||||
|
||||
// chatMessages returns the messages of a chat.
|
||||
//
|
||||
// @Summary Get chat messages
|
||||
// @ID get-chat-messages
|
||||
// @Security CoderSessionToken
|
||||
// @Produce json
|
||||
// @Tags Chat
|
||||
// @Param chat path string true "Chat ID"
|
||||
// @Success 200 {array} aisdk.Message
|
||||
// @Router /chats/{chat}/messages [get]
|
||||
func (api *API) chatMessages(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
chat := httpmw.ChatParam(r)
|
||||
rawMessages, err := api.Database.GetChatMessagesByChatID(ctx, chat.ID)
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Failed to get chat messages",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
messages := make([]aisdk.Message, len(rawMessages))
|
||||
for i, message := range rawMessages {
|
||||
var msg aisdk.Message
|
||||
err = json.Unmarshal(message.Content, &msg)
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Failed to unmarshal chat message",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
messages[i] = msg
|
||||
}
|
||||
|
||||
httpapi.Write(ctx, w, http.StatusOK, messages)
|
||||
}
|
||||
|
||||
// postChatMessages creates a new chat message and streams the response.
|
||||
//
|
||||
// @Summary Create a chat message
|
||||
// @ID create-a-chat-message
|
||||
// @Security CoderSessionToken
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Tags Chat
|
||||
// @Param chat path string true "Chat ID"
|
||||
// @Param request body codersdk.CreateChatMessageRequest true "Request body"
|
||||
// @Success 200 {array} aisdk.DataStreamPart
|
||||
// @Router /chats/{chat}/messages [post]
|
||||
// postChatMessages handles POST /chats/{chat}/messages. It appends the
// user's message to the chat history, generates a title on the first
// message, then streams the model's response to the client as an AI
// data stream, looping while the model keeps requesting tool calls.
// Each completed model turn is persisted back to the database.
func (api *API) postChatMessages(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	chat := httpmw.ChatParam(r)
	var req codersdk.CreateChatMessageRequest
	err := json.NewDecoder(r.Body).Decode(&req)
	if err != nil {
		httpapi.Write(ctx, w, http.StatusBadRequest, codersdk.Response{
			Message: "Failed to decode chat message",
			Detail:  err.Error(),
		})
		return
	}

	// Load the existing conversation so the model sees full context.
	dbMessages, err := api.Database.GetChatMessagesByChatID(ctx, chat.ID)
	if err != nil {
		httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
			Message: "Failed to get chat messages",
			Detail:  err.Error(),
		})
		return
	}

	// Each DB row stores a JSON-encoded message; decode them in order
	// and append the incoming message at the end.
	messages := make([]codersdk.ChatMessage, 0)
	for _, dbMsg := range dbMessages {
		var msg codersdk.ChatMessage
		err = json.Unmarshal(dbMsg.Content, &msg)
		if err != nil {
			httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
				Message: "Failed to unmarshal chat message",
				Detail:  err.Error(),
			})
			return
		}
		messages = append(messages, msg)
	}
	messages = append(messages, req.Message)

	// Tool handlers act on behalf of the requesting user via a client
	// authenticated with the caller's own session token.
	client := codersdk.New(api.AccessURL)
	client.SetSessionToken(httpmw.APITokenFromRequest(r))

	tools := make([]aisdk.Tool, 0)
	handlers := map[string]toolsdk.GenericHandlerFunc{}
	for _, tool := range toolsdk.All {
		if tool.Name == "coder_report_task" {
			continue // This tool requires an agent to run.
		}
		tools = append(tools, tool.Tool)
		handlers[tool.Tool.Name] = tool.Handler
	}

	provider, ok := api.LanguageModels[req.Model]
	if !ok {
		httpapi.Write(ctx, w, http.StatusBadRequest, codersdk.Response{
			Message: "Model not found",
		})
		return
	}

	// If it's the user's first message, generate a title for the chat.
	if len(messages) == 1 {
		var acc aisdk.DataStreamAccumulator
		stream, err := provider.StreamFunc(ctx, ai.StreamOptions{
			Model: req.Model,
			SystemPrompt: `- You will generate a short title based on the user's message.
- It should be maximum of 40 characters.
- Do not use quotes, colons, special characters, or emojis.`,
			Messages: messages,
			Tools:    []aisdk.Tool{}, // This initial stream doesn't use tools.
		})
		if err != nil {
			httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
				Message: "Failed to create stream",
				Detail:  err.Error(),
			})
			return
		}
		stream = stream.WithAccumulator(&acc)
		// We only need the accumulated result, not the stream output.
		err = stream.Pipe(io.Discard)
		if err != nil {
			httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
				Message: "Failed to pipe stream",
				Detail:  err.Error(),
			})
			return
		}
		var newTitle string
		accMessages := acc.Messages()
		// If for some reason the stream didn't return any messages, use the
		// original message as the title.
		if len(accMessages) == 0 {
			newTitle = strings.Truncate(messages[0].Content, 40)
		} else {
			newTitle = strings.Truncate(accMessages[0].Content, 40)
		}
		err = api.Database.UpdateChatByID(ctx, database.UpdateChatByIDParams{
			ID:        chat.ID,
			Title:     newTitle,
			UpdatedAt: dbtime.Now(),
		})
		if err != nil {
			httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
				Message: "Failed to update chat title",
				Detail:  err.Error(),
			})
			return
		}
	}

	// Write headers for the data stream!
	// NOTE(review): any httpapi.Write below this point sends a JSON body
	// after streaming headers have already been written — confirm the
	// client tolerates this, or switch to logging only.
	aisdk.WriteDataStreamHeaders(w)

	// Insert the user-requested message into the database!
	raw, err := json.Marshal([]aisdk.Message{req.Message})
	if err != nil {
		httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
			Message: "Failed to marshal chat message",
			Detail:  err.Error(),
		})
		return
	}
	_, err = api.Database.InsertChatMessages(ctx, database.InsertChatMessagesParams{
		ChatID:    chat.ID,
		CreatedAt: dbtime.Now(),
		Model:     req.Model,
		Provider:  provider.Provider,
		Content:   raw,
	})
	if err != nil {
		httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
			Message: "Failed to insert chat messages",
			Detail:  err.Error(),
		})
		return
	}

	deps, err := toolsdk.NewDeps(client)
	if err != nil {
		httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
			Message: "Failed to create tool dependencies",
			Detail:  err.Error(),
		})
		return
	}

	// Agent loop: keep streaming model turns until the model stops
	// requesting tool calls.
	for {
		var acc aisdk.DataStreamAccumulator
		stream, err := provider.StreamFunc(ctx, ai.StreamOptions{
			Model:    req.Model,
			Messages: messages,
			Tools:    tools,
			SystemPrompt: `You are a chat assistant for Coder - an open-source platform for creating and managing cloud development environments on any infrastructure. You are expected to be precise, concise, and helpful.

You are running as an agent - please keep going until the user's query is completely resolved, before ending your turn and yielding back to the user. Only terminate your turn when you are sure that the problem is solved. Do NOT guess or make up an answer.`,
		})
		if err != nil {
			httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
				Message: "Failed to create stream",
				Detail:  err.Error(),
			})
			return
		}
		// Dispatch tool calls to the handlers collected above. Unknown
		// tools and marshal failures yield a nil result; handler errors
		// are surfaced to the model as an {"error": ...} payload.
		stream = stream.WithToolCalling(func(toolCall aisdk.ToolCall) aisdk.ToolCallResult {
			tool, ok := handlers[toolCall.Name]
			if !ok {
				return nil
			}
			toolArgs, err := json.Marshal(toolCall.Args)
			if err != nil {
				return nil
			}
			result, err := tool(ctx, deps, toolArgs)
			if err != nil {
				return map[string]any{
					"error": err.Error(),
				}
			}
			return result
		}).WithAccumulator(&acc)

		err = stream.Pipe(w)
		if err != nil {
			// The client disappeared!
			api.Logger.Error(ctx, "stream pipe error", "error", err)
			return
		}

		// acc.Messages() may sometimes return nil. Serializing this
		// will cause a pq error: "cannot extract elements from a scalar".
		newMessages := append([]aisdk.Message{}, acc.Messages()...)
		if len(newMessages) > 0 {
			raw, err := json.Marshal(newMessages)
			if err != nil {
				httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
					Message: "Failed to marshal chat message",
					Detail:  err.Error(),
				})
				return
			}
			messages = append(messages, newMessages...)

			// Insert these messages into the database!
			_, err = api.Database.InsertChatMessages(ctx, database.InsertChatMessagesParams{
				ChatID:    chat.ID,
				CreatedAt: dbtime.Now(),
				Model:     req.Model,
				Provider:  provider.Provider,
				Content:   raw,
			})
			if err != nil {
				httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
					Message: "Failed to insert chat messages",
					Detail:  err.Error(),
				})
				return
			}
		}

		// The model wants to call tools — run another turn with the
		// tool results appended to the conversation.
		if acc.FinishReason() == aisdk.FinishReasonToolCalls {
			continue
		}

		break
	}
}
|
||||
@@ -1,125 +0,0 @@
|
||||
package coderd_test
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
// TestChat exercises the chat API endpoints: experiment gating, plus
// basic CRUD over chats and chat messages against a seeded database.
func TestChat(t *testing.T) {
	t.Parallel()

	t.Run("ExperimentAgenticChatDisabled", func(t *testing.T) {
		t.Parallel()

		client, _ := coderdtest.NewWithDatabase(t, nil)
		owner := coderdtest.CreateFirstUser(t, client)
		memberClient, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)

		// Without the agentic-chat experiment enabled, the chat routes
		// should be rejected with a 403.
		ctx := testutil.Context(t, testutil.WaitShort)
		_, err := memberClient.ListChats(ctx)
		require.Error(t, err, "list chats should fail")
		var sdkErr *codersdk.Error
		require.ErrorAs(t, err, &sdkErr, "request should fail with an SDK error")
		require.Equal(t, http.StatusForbidden, sdkErr.StatusCode())
	})

	t.Run("ChatCRUD", func(t *testing.T) {
		t.Parallel()

		// Enable the experiment and register a fake AI provider so the
		// chat routes are reachable.
		dv := coderdtest.DeploymentValues(t)
		dv.Experiments = []string{string(codersdk.ExperimentAgenticChat)}
		dv.AI.Value = codersdk.AIConfig{
			Providers: []codersdk.AIProviderConfig{
				{
					Type:    "fake",
					APIKey:  "",
					BaseURL: "http://localhost",
					Models:  []string{"fake-model"},
				},
			},
		}
		client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{
			DeploymentValues: dv,
		})
		owner := coderdtest.CreateFirstUser(t, client)
		memberClient, memberUser := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)

		// Seed the database with some data.
		dbChat := dbgen.Chat(t, db, database.Chat{
			OwnerID:   memberUser.ID,
			CreatedAt: dbtime.Now().Add(-time.Hour),
			UpdatedAt: dbtime.Now().Add(-time.Hour),
			Title:     "This is a test chat",
		})
		_ = dbgen.ChatMessage(t, db, database.ChatMessage{
			ChatID:    dbChat.ID,
			CreatedAt: dbtime.Now().Add(-time.Hour),
			Content:   []byte(`[{"content": "Hello world"}]`),
			Model:     "fake model",
			Provider:  "fake",
		})

		ctx := testutil.Context(t, testutil.WaitShort)

		// Listing chats should return the chat we just inserted.
		chats, err := memberClient.ListChats(ctx)
		require.NoError(t, err, "list chats should succeed")
		require.Len(t, chats, 1, "response should have one chat")
		require.Equal(t, dbChat.ID, chats[0].ID, "unexpected chat ID")
		require.Equal(t, dbChat.Title, chats[0].Title, "unexpected chat title")
		require.Equal(t, dbChat.CreatedAt.UTC(), chats[0].CreatedAt.UTC(), "unexpected chat created at")
		require.Equal(t, dbChat.UpdatedAt.UTC(), chats[0].UpdatedAt.UTC(), "unexpected chat updated at")

		// Fetching a single chat by ID should return the same chat.
		chat, err := memberClient.Chat(ctx, dbChat.ID)
		require.NoError(t, err, "get chat should succeed")
		require.Equal(t, chats[0], chat, "get chat should return the same chat")

		// Listing chat messages should return the message we just inserted.
		messages, err := memberClient.ChatMessages(ctx, dbChat.ID)
		require.NoError(t, err, "list chat messages should succeed")
		require.Len(t, messages, 1, "response should have one message")
		require.Equal(t, "Hello world", messages[0].Content, "response should have the correct message content")

		// Creating a new chat will fail because the model does not exist.
		// TODO: Test the message streaming functionality with a mock model.
		// Inserting a chat message will fail due to the model not existing.
		_, err = memberClient.CreateChatMessage(ctx, dbChat.ID, codersdk.CreateChatMessageRequest{
			Model: "echo",
			Message: codersdk.ChatMessage{
				Role:    "user",
				Content: "Hello world",
			},
			Thinking: false,
		})
		require.Error(t, err, "create chat message should fail")
		var sdkErr *codersdk.Error
		require.ErrorAs(t, err, &sdkErr, "create chat should fail with an SDK error")
		require.Equal(t, http.StatusBadRequest, sdkErr.StatusCode(), "create chat should fail with a 400 when model does not exist")

		// Creating a new chat message with malformed content should fail.
		res, err := memberClient.Request(ctx, http.MethodPost, "/api/v2/chats/"+dbChat.ID.String()+"/messages", strings.NewReader(`{malformed json}`))
		require.NoError(t, err)
		defer res.Body.Close()
		apiErr := codersdk.ReadBodyAsError(res)
		require.Contains(t, apiErr.Error(), "Failed to decode chat message")

		// Creating a brand-new chat should succeed and show up in the list.
		_, err = memberClient.CreateChat(ctx)
		require.NoError(t, err, "create chat should succeed")
		chats, err = memberClient.ListChats(ctx)
		require.NoError(t, err, "list chats should succeed")
		require.Len(t, chats, 2, "response should have two chats")
	})
}
|
||||
+1
-19
@@ -45,7 +45,6 @@ import (
|
||||
|
||||
"github.com/coder/coder/v2/codersdk/drpcsdk"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/ai"
|
||||
"github.com/coder/coder/v2/coderd/cryptokeys"
|
||||
"github.com/coder/coder/v2/coderd/entitlements"
|
||||
"github.com/coder/coder/v2/coderd/files"
|
||||
@@ -160,7 +159,6 @@ type Options struct {
|
||||
Authorizer rbac.Authorizer
|
||||
AzureCertificates x509.VerifyOptions
|
||||
GoogleTokenValidator *idtoken.Validator
|
||||
LanguageModels ai.LanguageModels
|
||||
GithubOAuth2Config *GithubOAuth2Config
|
||||
OIDCConfig *OIDCConfig
|
||||
PrometheusRegistry *prometheus.Registry
|
||||
@@ -976,7 +974,6 @@ func New(options *Options) *API {
|
||||
r.Get("/config", api.deploymentValues)
|
||||
r.Get("/stats", api.deploymentStats)
|
||||
r.Get("/ssh", api.sshConfig)
|
||||
r.Get("/llms", api.deploymentLLMs)
|
||||
})
|
||||
r.Route("/experiments", func(r chi.Router) {
|
||||
r.Use(apiKeyMiddleware)
|
||||
@@ -1019,21 +1016,6 @@ func New(options *Options) *API {
|
||||
r.Get("/{fileID}", api.fileByID)
|
||||
r.Post("/", api.postFile)
|
||||
})
|
||||
// Chats are an experimental feature
|
||||
r.Route("/chats", func(r chi.Router) {
|
||||
r.Use(
|
||||
apiKeyMiddleware,
|
||||
httpmw.RequireExperiment(api.Experiments, codersdk.ExperimentAgenticChat),
|
||||
)
|
||||
r.Get("/", api.listChats)
|
||||
r.Post("/", api.postChats)
|
||||
r.Route("/{chat}", func(r chi.Router) {
|
||||
r.Use(httpmw.ExtractChatParam(options.Database))
|
||||
r.Get("/", api.chat)
|
||||
r.Get("/messages", api.chatMessages)
|
||||
r.Post("/messages", api.postChatMessages)
|
||||
})
|
||||
})
|
||||
r.Route("/external-auth", func(r chi.Router) {
|
||||
r.Use(
|
||||
apiKeyMiddleware,
|
||||
@@ -1332,7 +1314,7 @@ func New(options *Options) *API {
|
||||
r.Get("/listening-ports", api.workspaceAgentListeningPorts)
|
||||
r.Get("/connection", api.workspaceAgentConnection)
|
||||
r.Get("/containers", api.workspaceAgentListContainers)
|
||||
r.Post("/containers/devcontainers/container/{container}/recreate", api.workspaceAgentRecreateDevcontainer)
|
||||
r.Post("/containers/devcontainers/{devcontainer}/recreate", api.workspaceAgentRecreateDevcontainer)
|
||||
r.Get("/coordinate", api.workspaceAgentClientCoordinate)
|
||||
|
||||
// PTY is part of workspaceAppServer.
|
||||
|
||||
@@ -22,6 +22,9 @@ type DynamicParameterTemplateParams struct {
|
||||
|
||||
// StaticParams is used if the provisioner daemon version does not support dynamic parameters.
|
||||
StaticParams []*proto.RichParameter
|
||||
|
||||
// TemplateID is used to update an existing template instead of creating a new one.
|
||||
TemplateID uuid.UUID
|
||||
}
|
||||
|
||||
func DynamicParameterTemplate(t *testing.T, client *codersdk.Client, org uuid.UUID, args DynamicParameterTemplateParams) (codersdk.Template, codersdk.TemplateVersion) {
|
||||
@@ -40,16 +43,30 @@ func DynamicParameterTemplate(t *testing.T, client *codersdk.Client, org uuid.UU
|
||||
},
|
||||
}}
|
||||
|
||||
version := CreateTemplateVersion(t, client, org, files)
|
||||
version := CreateTemplateVersion(t, client, org, files, func(request *codersdk.CreateTemplateVersionRequest) {
|
||||
if args.TemplateID != uuid.Nil {
|
||||
request.TemplateID = args.TemplateID
|
||||
}
|
||||
})
|
||||
AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
tpl := CreateTemplate(t, client, org, version.ID)
|
||||
|
||||
tplID := args.TemplateID
|
||||
if args.TemplateID == uuid.Nil {
|
||||
tpl := CreateTemplate(t, client, org, version.ID)
|
||||
tplID = tpl.ID
|
||||
}
|
||||
|
||||
var err error
|
||||
tpl, err = client.UpdateTemplateMeta(t.Context(), tpl.ID, codersdk.UpdateTemplateMeta{
|
||||
tpl, err := client.UpdateTemplateMeta(t.Context(), tplID, codersdk.UpdateTemplateMeta{
|
||||
UseClassicParameterFlow: ptr.Ref(false),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = client.UpdateActiveTemplateVersion(t.Context(), tpl.ID, codersdk.UpdateActiveTemplateVersion{
|
||||
ID: version.ID,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
return tpl, version
|
||||
}
|
||||
|
||||
|
||||
@@ -16,6 +16,8 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
"tailscale.com/tailcfg"
|
||||
|
||||
previewtypes "github.com/coder/preview/types"
|
||||
|
||||
agentproto "github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/rbac"
|
||||
@@ -26,7 +28,6 @@ import (
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/provisionersdk/proto"
|
||||
"github.com/coder/coder/v2/tailnet"
|
||||
previewtypes "github.com/coder/preview/types"
|
||||
)
|
||||
|
||||
// List is a helper function to reduce boilerplate when converting slices of
|
||||
@@ -803,19 +804,6 @@ func AgentProtoConnectionActionToAuditAction(action database.AuditAction) (agent
|
||||
}
|
||||
}
|
||||
|
||||
func Chat(chat database.Chat) codersdk.Chat {
|
||||
return codersdk.Chat{
|
||||
ID: chat.ID,
|
||||
Title: chat.Title,
|
||||
CreatedAt: chat.CreatedAt,
|
||||
UpdatedAt: chat.UpdatedAt,
|
||||
}
|
||||
}
|
||||
|
||||
// Chats converts a slice of database chat rows using Chat for each
// element via the generic List helper.
func Chats(chats []database.Chat) []codersdk.Chat {
	return List(chats, Chat)
}
|
||||
|
||||
func PreviewParameter(param previewtypes.Parameter) codersdk.PreviewParameter {
|
||||
return codersdk.PreviewParameter{
|
||||
PreviewParameterData: codersdk.PreviewParameterData{
|
||||
|
||||
@@ -1301,6 +1301,22 @@ func (q *querier) CleanTailnetTunnels(ctx context.Context) error {
|
||||
return q.db.CleanTailnetTunnels(ctx)
|
||||
}
|
||||
|
||||
// CountAuditLogs returns the number of audit logs visible to the
// caller. Owners get an unfiltered count; everyone else is counted
// through an RBAC-prepared SQL filter.
func (q *querier) CountAuditLogs(ctx context.Context, arg database.CountAuditLogsParams) (int64, error) {
	// Shortcut if the user is an owner. The SQL filter is noticeable,
	// and this is an easy win for owners. Which is the common case.
	err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceAuditLog)
	if err == nil {
		return q.db.CountAuditLogs(ctx, arg)
	}

	// Fall back to the authorized variant with a compiled SQL filter.
	prep, err := prepareSQLFilter(ctx, q.auth, policy.ActionRead, rbac.ResourceAuditLog.Type)
	if err != nil {
		return 0, xerrors.Errorf("(dev error) prepare sql filter: %w", err)
	}

	return q.db.CountAuthorizedAuditLogs(ctx, arg, prep)
}
|
||||
|
||||
func (q *querier) CountInProgressPrebuilds(ctx context.Context) ([]database.CountInProgressPrebuildsRow, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceWorkspace.All()); err != nil {
|
||||
return nil, err
|
||||
@@ -1373,10 +1389,6 @@ func (q *querier) DeleteApplicationConnectAPIKeysByUserID(ctx context.Context, u
|
||||
return q.db.DeleteApplicationConnectAPIKeysByUserID(ctx, userID)
|
||||
}
|
||||
|
||||
// DeleteChat removes a chat by ID. deleteQ fetches the chat via
// GetChatByID and asserts ActionDelete against it before deleting.
func (q *querier) DeleteChat(ctx context.Context, id uuid.UUID) error {
	return deleteQ(q.log, q.auth, q.db.GetChatByID, q.db.DeleteChat)(ctx, id)
}
|
||||
|
||||
func (q *querier) DeleteCoordinator(ctx context.Context, id uuid.UUID) error {
|
||||
if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceTailnetCoordinator); err != nil {
|
||||
return err
|
||||
@@ -1640,6 +1652,13 @@ func (q *querier) EnqueueNotificationMessage(ctx context.Context, arg database.E
|
||||
return q.db.EnqueueNotificationMessage(ctx, arg)
|
||||
}
|
||||
|
||||
// ExpirePrebuildsAPIKeys expires API keys at the given time; requires
// delete permission on API keys. Presumably scoped to the prebuilds
// system user's keys by the underlying query — confirm against the SQL.
func (q *querier) ExpirePrebuildsAPIKeys(ctx context.Context, now time.Time) error {
	if err := q.authorizeContext(ctx, policy.ActionDelete, rbac.ResourceApiKey); err != nil {
		return err
	}
	return q.db.ExpirePrebuildsAPIKeys(ctx, now)
}
|
||||
|
||||
func (q *querier) FavoriteWorkspace(ctx context.Context, id uuid.UUID) error {
|
||||
fetch := func(ctx context.Context, id uuid.UUID) (database.Workspace, error) {
|
||||
return q.db.GetWorkspaceByID(ctx, id)
|
||||
@@ -1814,22 +1833,6 @@ func (q *querier) GetAuthorizationUserRoles(ctx context.Context, userID uuid.UUI
|
||||
return q.db.GetAuthorizationUserRoles(ctx, userID)
|
||||
}
|
||||
|
||||
// GetChatByID fetches a chat by ID; the fetch helper asserts
// ActionRead against the returned chat.
func (q *querier) GetChatByID(ctx context.Context, id uuid.UUID) (database.Chat, error) {
	return fetch(q.log, q.auth, q.db.GetChatByID)(ctx, id)
}
|
||||
|
||||
// GetChatMessagesByChatID returns all messages for a chat. Access is
// gated by the ActionRead check performed inside GetChatByID.
func (q *querier) GetChatMessagesByChatID(ctx context.Context, chatID uuid.UUID) ([]database.ChatMessage, error) {
	c, err := q.GetChatByID(ctx, chatID)
	if err != nil {
		return nil, err
	}
	return q.db.GetChatMessagesByChatID(ctx, c.ID)
}
|
||||
|
||||
// GetChatsByOwnerID lists chats owned by ownerID, post-filtering out
// any chat the caller is not authorized to read.
func (q *querier) GetChatsByOwnerID(ctx context.Context, ownerID uuid.UUID) ([]database.Chat, error) {
	return fetchWithPostFilter(q.auth, policy.ActionRead, q.db.GetChatsByOwnerID)(ctx, ownerID)
}
|
||||
|
||||
func (q *querier) GetCoordinatorResumeTokenSigningKey(ctx context.Context) (string, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionRead, rbac.ResourceSystem); err != nil {
|
||||
return "", err
|
||||
@@ -3511,6 +3514,14 @@ func (q *querier) HasTemplateVersionsWithAITask(ctx context.Context) (bool, erro
|
||||
}
|
||||
|
||||
func (q *querier) InsertAPIKey(ctx context.Context, arg database.InsertAPIKeyParams) (database.APIKey, error) {
|
||||
// TODO(Cian): ideally this would be encoded in the policy, but system users are just members and we
|
||||
// don't currently have a capability to conditionally deny creating resources by owner ID in a role.
|
||||
// We also need to enrich rbac.Actor with IsSystem so that we can distinguish all system users.
|
||||
// For now, there is only one system user (prebuilds).
|
||||
if act, ok := ActorFromContext(ctx); ok && act.ID == database.PrebuildsSystemUserID.String() {
|
||||
return database.APIKey{}, logNotAuthorizedError(ctx, q.log, NotAuthorizedError{Err: xerrors.Errorf("prebuild user may not create api keys")})
|
||||
}
|
||||
|
||||
return insert(q.log, q.auth,
|
||||
rbac.ResourceApiKey.WithOwner(arg.UserID.String()),
|
||||
q.db.InsertAPIKey)(ctx, arg)
|
||||
@@ -3525,21 +3536,6 @@ func (q *querier) InsertAuditLog(ctx context.Context, arg database.InsertAuditLo
|
||||
return insert(q.log, q.auth, rbac.ResourceAuditLog, q.db.InsertAuditLog)(ctx, arg)
|
||||
}
|
||||
|
||||
// InsertChat creates a new chat; requires ActionCreate on a chat
// resource owned by arg.OwnerID.
func (q *querier) InsertChat(ctx context.Context, arg database.InsertChatParams) (database.Chat, error) {
	return insert(q.log, q.auth, rbac.ResourceChat.WithOwner(arg.OwnerID.String()), q.db.InsertChat)(ctx, arg)
}
|
||||
|
||||
// InsertChatMessages appends messages to an existing chat. The chat is
// fetched directly from the store (no read check) so the RBAC
// ActionUpdate assertion runs against the actual chat resource.
func (q *querier) InsertChatMessages(ctx context.Context, arg database.InsertChatMessagesParams) ([]database.ChatMessage, error) {
	c, err := q.db.GetChatByID(ctx, arg.ChatID)
	if err != nil {
		return nil, err
	}
	if err := q.authorizeContext(ctx, policy.ActionUpdate, c); err != nil {
		return nil, err
	}
	return q.db.InsertChatMessages(ctx, arg)
}
|
||||
|
||||
func (q *querier) InsertCryptoKey(ctx context.Context, arg database.InsertCryptoKeyParams) (database.CryptoKey, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionCreate, rbac.ResourceCryptoKey); err != nil {
|
||||
return database.CryptoKey{}, err
|
||||
@@ -4201,13 +4197,6 @@ func (q *querier) UpdateAPIKeyByID(ctx context.Context, arg database.UpdateAPIKe
|
||||
return update(q.log, q.auth, fetch, q.db.UpdateAPIKeyByID)(ctx, arg)
|
||||
}
|
||||
|
||||
// UpdateChatByID updates a chat's mutable fields (title, updated-at).
// The update helper fetches the existing chat and asserts ActionUpdate
// against it before applying the change.
func (q *querier) UpdateChatByID(ctx context.Context, arg database.UpdateChatByIDParams) error {
	fetch := func(ctx context.Context, arg database.UpdateChatByIDParams) (database.Chat, error) {
		return q.db.GetChatByID(ctx, arg.ID)
	}
	return update(q.log, q.auth, fetch, q.db.UpdateChatByID)(ctx, arg)
}
|
||||
|
||||
func (q *querier) UpdateCryptoKeyDeletesAt(ctx context.Context, arg database.UpdateCryptoKeyDeletesAtParams) (database.CryptoKey, error) {
|
||||
if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceCryptoKey); err != nil {
|
||||
return database.CryptoKey{}, err
|
||||
@@ -5298,3 +5287,7 @@ func (q *querier) GetAuthorizedUsers(ctx context.Context, arg database.GetUsersP
|
||||
func (q *querier) GetAuthorizedAuditLogsOffset(ctx context.Context, arg database.GetAuditLogsOffsetParams, _ rbac.PreparedAuthorized) ([]database.GetAuditLogsOffsetRow, error) {
|
||||
return q.GetAuditLogsOffset(ctx, arg)
|
||||
}
|
||||
|
||||
// CountAuthorizedAuditLogs satisfies the authorized-query interface;
// the prepared filter argument is ignored because CountAuditLogs
// builds its own filter internally.
func (q *querier) CountAuthorizedAuditLogs(ctx context.Context, arg database.CountAuditLogsParams, _ rbac.PreparedAuthorized) (int64, error) {
	return q.CountAuditLogs(ctx, arg)
}
|
||||
|
||||
@@ -14,24 +14,26 @@ import (
|
||||
"github.com/google/uuid"
|
||||
"github.com/sqlc-dev/pqtype"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/mock/gomock"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database/db2sdk"
|
||||
"github.com/coder/coder/v2/coderd/database/dbmem"
|
||||
"github.com/coder/coder/v2/coderd/notifications"
|
||||
"github.com/coder/coder/v2/coderd/rbac/policy"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/db2sdk"
|
||||
"github.com/coder/coder/v2/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/dbmem"
|
||||
"github.com/coder/coder/v2/coderd/database/dbmock"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtestutil"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/coderd/notifications"
|
||||
"github.com/coder/coder/v2/coderd/rbac"
|
||||
"github.com/coder/coder/v2/coderd/rbac/policy"
|
||||
"github.com/coder/coder/v2/coderd/util/slice"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/provisionersdk"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
@@ -327,6 +329,16 @@ func (s *MethodTestSuite) TestAuditLogs() {
|
||||
LimitOpt: 10,
|
||||
}, emptyPreparedAuthorized{}).Asserts(rbac.ResourceAuditLog, policy.ActionRead)
|
||||
}))
|
||||
s.Run("CountAuditLogs", s.Subtest(func(db database.Store, check *expects) {
|
||||
_ = dbgen.AuditLog(s.T(), db, database.AuditLog{})
|
||||
_ = dbgen.AuditLog(s.T(), db, database.AuditLog{})
|
||||
check.Args(database.CountAuditLogsParams{}).Asserts(rbac.ResourceAuditLog, policy.ActionRead).WithNotAuthorized("nil")
|
||||
}))
|
||||
s.Run("CountAuthorizedAuditLogs", s.Subtest(func(db database.Store, check *expects) {
|
||||
_ = dbgen.AuditLog(s.T(), db, database.AuditLog{})
|
||||
_ = dbgen.AuditLog(s.T(), db, database.AuditLog{})
|
||||
check.Args(database.CountAuditLogsParams{}, emptyPreparedAuthorized{}).Asserts(rbac.ResourceAuditLog, policy.ActionRead)
|
||||
}))
|
||||
}
|
||||
|
||||
func (s *MethodTestSuite) TestFile() {
|
||||
@@ -1563,6 +1575,9 @@ func (s *MethodTestSuite) TestUser() {
|
||||
UserID: u.ID,
|
||||
OrganizationID: uuid.New(),
|
||||
}).Asserts(u, policy.ActionRead).Returns(int64(0))
|
||||
s.Run("ExpirePrebuildsAPIKeys", s.Subtest(func(db database.Store, check *expects) {
|
||||
check.Args(dbtime.Now()).Asserts(rbac.ResourceApiKey, policy.ActionDelete).Returns()
|
||||
}))
|
||||
}))
|
||||
s.Run("GetQuotaConsumedForUser", s.Subtest(func(db database.Store, check *expects) {
|
||||
u := dbgen.User(s.T(), db, database.User{})
|
||||
@@ -5059,8 +5074,7 @@ func (s *MethodTestSuite) TestPrebuilds() {
|
||||
}))
|
||||
s.Run("GetPrebuildMetrics", s.Subtest(func(_ database.Store, check *expects) {
|
||||
check.Args().
|
||||
Asserts(rbac.ResourceWorkspace.All(), policy.ActionRead).
|
||||
ErrorsWithInMemDB(dbmem.ErrUnimplemented)
|
||||
Asserts(rbac.ResourceWorkspace.All(), policy.ActionRead)
|
||||
}))
|
||||
s.Run("CountInProgressPrebuilds", s.Subtest(func(_ database.Store, check *expects) {
|
||||
check.Args().
|
||||
@@ -5549,80 +5563,6 @@ func (s *MethodTestSuite) TestResourcesProvisionerdserver() {
|
||||
}))
|
||||
}
|
||||
|
||||
func (s *MethodTestSuite) TestChat() {
|
||||
createChat := func(t *testing.T, db database.Store) (database.User, database.Chat, database.ChatMessage) {
|
||||
t.Helper()
|
||||
|
||||
usr := dbgen.User(t, db, database.User{})
|
||||
chat := dbgen.Chat(s.T(), db, database.Chat{
|
||||
OwnerID: usr.ID,
|
||||
})
|
||||
msg := dbgen.ChatMessage(s.T(), db, database.ChatMessage{
|
||||
ChatID: chat.ID,
|
||||
})
|
||||
|
||||
return usr, chat, msg
|
||||
}
|
||||
|
||||
s.Run("DeleteChat", s.Subtest(func(db database.Store, check *expects) {
|
||||
_, c, _ := createChat(s.T(), db)
|
||||
check.Args(c.ID).Asserts(c, policy.ActionDelete)
|
||||
}))
|
||||
|
||||
s.Run("GetChatByID", s.Subtest(func(db database.Store, check *expects) {
|
||||
_, c, _ := createChat(s.T(), db)
|
||||
check.Args(c.ID).Asserts(c, policy.ActionRead).Returns(c)
|
||||
}))
|
||||
|
||||
s.Run("GetChatMessagesByChatID", s.Subtest(func(db database.Store, check *expects) {
|
||||
_, c, m := createChat(s.T(), db)
|
||||
check.Args(c.ID).Asserts(c, policy.ActionRead).Returns([]database.ChatMessage{m})
|
||||
}))
|
||||
|
||||
s.Run("GetChatsByOwnerID", s.Subtest(func(db database.Store, check *expects) {
|
||||
u1, u1c1, _ := createChat(s.T(), db)
|
||||
u1c2 := dbgen.Chat(s.T(), db, database.Chat{
|
||||
OwnerID: u1.ID,
|
||||
CreatedAt: u1c1.CreatedAt.Add(time.Hour),
|
||||
})
|
||||
_, _, _ = createChat(s.T(), db) // other user's chat
|
||||
check.Args(u1.ID).Asserts(u1c2, policy.ActionRead, u1c1, policy.ActionRead).Returns([]database.Chat{u1c2, u1c1})
|
||||
}))
|
||||
|
||||
s.Run("InsertChat", s.Subtest(func(db database.Store, check *expects) {
|
||||
usr := dbgen.User(s.T(), db, database.User{})
|
||||
check.Args(database.InsertChatParams{
|
||||
OwnerID: usr.ID,
|
||||
Title: "test chat",
|
||||
CreatedAt: dbtime.Now(),
|
||||
UpdatedAt: dbtime.Now(),
|
||||
}).Asserts(rbac.ResourceChat.WithOwner(usr.ID.String()), policy.ActionCreate)
|
||||
}))
|
||||
|
||||
s.Run("InsertChatMessages", s.Subtest(func(db database.Store, check *expects) {
|
||||
usr := dbgen.User(s.T(), db, database.User{})
|
||||
chat := dbgen.Chat(s.T(), db, database.Chat{
|
||||
OwnerID: usr.ID,
|
||||
})
|
||||
check.Args(database.InsertChatMessagesParams{
|
||||
ChatID: chat.ID,
|
||||
CreatedAt: dbtime.Now(),
|
||||
Model: "test-model",
|
||||
Provider: "test-provider",
|
||||
Content: []byte(`[]`),
|
||||
}).Asserts(chat, policy.ActionUpdate)
|
||||
}))
|
||||
|
||||
s.Run("UpdateChatByID", s.Subtest(func(db database.Store, check *expects) {
|
||||
_, c, _ := createChat(s.T(), db)
|
||||
check.Args(database.UpdateChatByIDParams{
|
||||
ID: c.ID,
|
||||
Title: "new title",
|
||||
UpdatedAt: dbtime.Now(),
|
||||
}).Asserts(c, policy.ActionUpdate)
|
||||
}))
|
||||
}
|
||||
|
||||
func (s *MethodTestSuite) TestAuthorizePrebuiltWorkspace() {
|
||||
s.Run("PrebuildDelete/InsertWorkspaceBuild", s.Subtest(func(db database.Store, check *expects) {
|
||||
u := dbgen.User(s.T(), db, database.User{})
|
||||
@@ -5702,3 +5642,18 @@ func (s *MethodTestSuite) TestAuthorizePrebuiltWorkspace() {
|
||||
}).Asserts(w, policy.ActionUpdate, w.AsPrebuild(), policy.ActionUpdate)
|
||||
}))
|
||||
}
|
||||
|
||||
// Ensures that the prebuilds actor may never insert an api key.
|
||||
// Ensures that the prebuilds actor may never insert an api key.
func TestInsertAPIKey_AsPrebuildsUser(t *testing.T) {
	t.Parallel()
	// Impersonate the prebuilds system user directly on the context.
	prebuildsSubj := rbac.Subject{
		ID: database.PrebuildsSystemUserID.String(),
	}
	ctx := dbauthz.As(testutil.Context(t, testutil.WaitShort), prebuildsSubj)
	mDB := dbmock.NewMockStore(gomock.NewController(t))
	log := slogtest.Make(t, nil)
	mDB.EXPECT().Wrappers().Times(1).Return([]string{})
	dbz := dbauthz.New(mDB, nil, log, nil)
	// No InsertAPIKey expectation is registered on the mock: the call
	// must be rejected by dbauthz before ever reaching the store.
	_, err := dbz.InsertAPIKey(ctx, database.InsertAPIKeyParams{})
	require.True(t, dbauthz.IsNotAuthorizedError(err))
}
|
||||
|
||||
@@ -271,7 +271,7 @@ func (s *MethodTestSuite) NotAuthorizedErrorTest(ctx context.Context, az *coderd
|
||||
|
||||
// This is unfortunate, but if we are using `Filter` the error returned will be nil. So filter out
|
||||
// any case where the error is nil and the response is an empty slice.
|
||||
if err != nil || !hasEmptySliceResponse(resp) {
|
||||
if err != nil || !hasEmptyResponse(resp) {
|
||||
// Expect the default error
|
||||
if testCase.notAuthorizedExpect == "" {
|
||||
s.ErrorContainsf(err, "unauthorized", "error string should have a good message")
|
||||
@@ -296,8 +296,8 @@ func (s *MethodTestSuite) NotAuthorizedErrorTest(ctx context.Context, az *coderd
|
||||
resp, err := callMethod(ctx)
|
||||
|
||||
// This is unfortunate, but if we are using `Filter` the error returned will be nil. So filter out
|
||||
// any case where the error is nil and the response is an empty slice.
|
||||
if err != nil || !hasEmptySliceResponse(resp) {
|
||||
// any case where the error is nil and the response is an empty slice or int64(0).
|
||||
if err != nil || !hasEmptyResponse(resp) {
|
||||
if testCase.cancelledCtxExpect == "" {
|
||||
s.Errorf(err, "method should an error with cancellation")
|
||||
s.ErrorIsf(err, context.Canceled, "error should match context.Canceled")
|
||||
@@ -308,13 +308,20 @@ func (s *MethodTestSuite) NotAuthorizedErrorTest(ctx context.Context, az *coderd
|
||||
})
|
||||
}
|
||||
|
||||
func hasEmptySliceResponse(values []reflect.Value) bool {
|
||||
func hasEmptyResponse(values []reflect.Value) bool {
|
||||
for _, r := range values {
|
||||
if r.Kind() == reflect.Slice || r.Kind() == reflect.Array {
|
||||
if r.Len() == 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Special case for int64, as it's the return type for count query.
|
||||
if r.Kind() == reflect.Int64 {
|
||||
if r.Int() == 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -108,7 +108,7 @@ func Template(t testing.TB, db database.Store, seed database.Template) database.
|
||||
return template
|
||||
}
|
||||
|
||||
func APIKey(t testing.TB, db database.Store, seed database.APIKey) (key database.APIKey, token string) {
|
||||
func APIKey(t testing.TB, db database.Store, seed database.APIKey, munge ...func(*database.InsertAPIKeyParams)) (key database.APIKey, token string) {
|
||||
id, _ := cryptorand.String(10)
|
||||
secret, _ := cryptorand.String(22)
|
||||
hashed := sha256.Sum256([]byte(secret))
|
||||
@@ -124,7 +124,7 @@ func APIKey(t testing.TB, db database.Store, seed database.APIKey) (key database
|
||||
}
|
||||
}
|
||||
|
||||
key, err := db.InsertAPIKey(genCtx, database.InsertAPIKeyParams{
|
||||
params := database.InsertAPIKeyParams{
|
||||
ID: takeFirst(seed.ID, id),
|
||||
// 0 defaults to 86400 at the db layer
|
||||
LifetimeSeconds: takeFirst(seed.LifetimeSeconds, 0),
|
||||
@@ -138,35 +138,15 @@ func APIKey(t testing.TB, db database.Store, seed database.APIKey) (key database
|
||||
LoginType: takeFirst(seed.LoginType, database.LoginTypePassword),
|
||||
Scope: takeFirst(seed.Scope, database.APIKeyScopeAll),
|
||||
TokenName: takeFirst(seed.TokenName),
|
||||
})
|
||||
}
|
||||
for _, fn := range munge {
|
||||
fn(¶ms)
|
||||
}
|
||||
key, err := db.InsertAPIKey(genCtx, params)
|
||||
require.NoError(t, err, "insert api key")
|
||||
return key, fmt.Sprintf("%s-%s", key.ID, secret)
|
||||
}
|
||||
|
||||
func Chat(t testing.TB, db database.Store, seed database.Chat) database.Chat {
|
||||
chat, err := db.InsertChat(genCtx, database.InsertChatParams{
|
||||
OwnerID: takeFirst(seed.OwnerID, uuid.New()),
|
||||
CreatedAt: takeFirst(seed.CreatedAt, dbtime.Now()),
|
||||
UpdatedAt: takeFirst(seed.UpdatedAt, dbtime.Now()),
|
||||
Title: takeFirst(seed.Title, "Test Chat"),
|
||||
})
|
||||
require.NoError(t, err, "insert chat")
|
||||
return chat
|
||||
}
|
||||
|
||||
func ChatMessage(t testing.TB, db database.Store, seed database.ChatMessage) database.ChatMessage {
|
||||
msg, err := db.InsertChatMessages(genCtx, database.InsertChatMessagesParams{
|
||||
CreatedAt: takeFirst(seed.CreatedAt, dbtime.Now()),
|
||||
ChatID: takeFirst(seed.ChatID, uuid.New()),
|
||||
Model: takeFirst(seed.Model, "train"),
|
||||
Provider: takeFirst(seed.Provider, "thomas"),
|
||||
Content: takeFirstSlice(seed.Content, []byte(`[{"text": "Choo choo!"}]`)),
|
||||
})
|
||||
require.NoError(t, err, "insert chat message")
|
||||
require.Len(t, msg, 1, "insert one chat message did not return exactly one message")
|
||||
return msg[0]
|
||||
}
|
||||
|
||||
func WorkspaceAgentPortShare(t testing.TB, db database.Store, orig database.WorkspaceAgentPortShare) database.WorkspaceAgentPortShare {
|
||||
ps, err := db.UpsertWorkspaceAgentPortShare(genCtx, database.UpsertWorkspaceAgentPortShareParams{
|
||||
WorkspaceID: takeFirst(orig.WorkspaceID, uuid.New()),
|
||||
@@ -227,6 +207,17 @@ func WorkspaceAgent(t testing.TB, db database.Store, orig database.WorkspaceAgen
|
||||
require.NoError(t, err, "update workspace agent first connected at")
|
||||
}
|
||||
|
||||
// If the lifecycle state is "ready", update the agent with the corresponding timestamps
|
||||
if orig.LifecycleState == database.WorkspaceAgentLifecycleStateReady && orig.StartedAt.Valid && orig.ReadyAt.Valid {
|
||||
err := db.UpdateWorkspaceAgentLifecycleStateByID(genCtx, database.UpdateWorkspaceAgentLifecycleStateByIDParams{
|
||||
ID: agt.ID,
|
||||
LifecycleState: orig.LifecycleState,
|
||||
StartedAt: orig.StartedAt,
|
||||
ReadyAt: orig.ReadyAt,
|
||||
})
|
||||
require.NoError(t, err, "update workspace agent lifecycle state")
|
||||
}
|
||||
|
||||
if orig.ParentID.UUID == uuid.Nil {
|
||||
// Add a test antagonist. For every agent we add a deleted sub agent
|
||||
// to discover cases where deletion should be handled.
|
||||
@@ -1350,6 +1341,17 @@ func PresetParameter(t testing.TB, db database.Store, seed database.InsertPreset
|
||||
return parameters
|
||||
}
|
||||
|
||||
func ClaimPrebuild(t testing.TB, db database.Store, newUserID uuid.UUID, newName string, presetID uuid.UUID) database.ClaimPrebuiltWorkspaceRow {
|
||||
claimedWorkspace, err := db.ClaimPrebuiltWorkspace(genCtx, database.ClaimPrebuiltWorkspaceParams{
|
||||
NewUserID: newUserID,
|
||||
NewName: newName,
|
||||
PresetID: presetID,
|
||||
})
|
||||
require.NoError(t, err, "claim prebuilt workspace")
|
||||
|
||||
return claimedWorkspace
|
||||
}
|
||||
|
||||
func provisionerJobTiming(t testing.TB, db database.Store, seed database.ProvisionerJobTiming) database.ProvisionerJobTiming {
|
||||
timing, err := db.InsertProvisionerJobTimings(genCtx, database.InsertProvisionerJobTimingsParams{
|
||||
JobID: takeFirst(seed.JobID, uuid.New()),
|
||||
|
||||
+87
-144
@@ -215,8 +215,6 @@ type data struct {
|
||||
|
||||
// New tables
|
||||
auditLogs []database.AuditLog
|
||||
chats []database.Chat
|
||||
chatMessages []database.ChatMessage
|
||||
cryptoKeys []database.CryptoKey
|
||||
dbcryptKeys []database.DBCryptKey
|
||||
files []database.File
|
||||
@@ -1781,6 +1779,10 @@ func (*FakeQuerier) CleanTailnetTunnels(context.Context) error {
|
||||
return ErrUnimplemented
|
||||
}
|
||||
|
||||
func (q *FakeQuerier) CountAuditLogs(ctx context.Context, arg database.CountAuditLogsParams) (int64, error) {
|
||||
return q.CountAuthorizedAuditLogs(ctx, arg, nil)
|
||||
}
|
||||
|
||||
func (q *FakeQuerier) CountInProgressPrebuilds(ctx context.Context) ([]database.CountInProgressPrebuildsRow, error) {
|
||||
return nil, ErrUnimplemented
|
||||
}
|
||||
@@ -1909,19 +1911,6 @@ func (q *FakeQuerier) DeleteApplicationConnectAPIKeysByUserID(_ context.Context,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *FakeQuerier) DeleteChat(ctx context.Context, id uuid.UUID) error {
|
||||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
|
||||
for i, chat := range q.chats {
|
||||
if chat.ID == id {
|
||||
q.chats = append(q.chats[:i], q.chats[i+1:]...)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return sql.ErrNoRows
|
||||
}
|
||||
|
||||
func (*FakeQuerier) DeleteCoordinator(context.Context, uuid.UUID) error {
|
||||
return ErrUnimplemented
|
||||
}
|
||||
@@ -2608,6 +2597,11 @@ func (q *FakeQuerier) EnqueueNotificationMessage(_ context.Context, arg database
|
||||
return err
|
||||
}
|
||||
|
||||
func (*FakeQuerier) ExpirePrebuildsAPIKeys(_ context.Context, _ time.Time) error {
|
||||
// Implemented in postgres.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *FakeQuerier) FavoriteWorkspace(_ context.Context, arg uuid.UUID) error {
|
||||
err := validateDatabaseType(arg)
|
||||
if err != nil {
|
||||
@@ -2955,47 +2949,6 @@ func (q *FakeQuerier) GetAuthorizationUserRoles(_ context.Context, userID uuid.U
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (q *FakeQuerier) GetChatByID(ctx context.Context, id uuid.UUID) (database.Chat, error) {
|
||||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
for _, chat := range q.chats {
|
||||
if chat.ID == id {
|
||||
return chat, nil
|
||||
}
|
||||
}
|
||||
return database.Chat{}, sql.ErrNoRows
|
||||
}
|
||||
|
||||
func (q *FakeQuerier) GetChatMessagesByChatID(ctx context.Context, chatID uuid.UUID) ([]database.ChatMessage, error) {
|
||||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
messages := []database.ChatMessage{}
|
||||
for _, chatMessage := range q.chatMessages {
|
||||
if chatMessage.ChatID == chatID {
|
||||
messages = append(messages, chatMessage)
|
||||
}
|
||||
}
|
||||
return messages, nil
|
||||
}
|
||||
|
||||
func (q *FakeQuerier) GetChatsByOwnerID(ctx context.Context, ownerID uuid.UUID) ([]database.Chat, error) {
|
||||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
chats := []database.Chat{}
|
||||
for _, chat := range q.chats {
|
||||
if chat.OwnerID == ownerID {
|
||||
chats = append(chats, chat)
|
||||
}
|
||||
}
|
||||
sort.Slice(chats, func(i, j int) bool {
|
||||
return chats[i].CreatedAt.After(chats[j].CreatedAt)
|
||||
})
|
||||
return chats, nil
|
||||
}
|
||||
|
||||
func (q *FakeQuerier) GetCoordinatorResumeTokenSigningKey(_ context.Context) (string, error) {
|
||||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
@@ -4326,7 +4279,7 @@ func (q *FakeQuerier) GetParameterSchemasByJobID(_ context.Context, jobID uuid.U
|
||||
}
|
||||
|
||||
func (*FakeQuerier) GetPrebuildMetrics(_ context.Context) ([]database.GetPrebuildMetricsRow, error) {
|
||||
return nil, ErrUnimplemented
|
||||
return make([]database.GetPrebuildMetricsRow, 0), nil
|
||||
}
|
||||
|
||||
func (q *FakeQuerier) GetPresetByID(ctx context.Context, presetID uuid.UUID) (database.GetPresetByIDRow, error) {
|
||||
@@ -8630,66 +8583,6 @@ func (q *FakeQuerier) InsertAuditLog(_ context.Context, arg database.InsertAudit
|
||||
return alog, nil
|
||||
}
|
||||
|
||||
func (q *FakeQuerier) InsertChat(ctx context.Context, arg database.InsertChatParams) (database.Chat, error) {
|
||||
err := validateDatabaseType(arg)
|
||||
if err != nil {
|
||||
return database.Chat{}, err
|
||||
}
|
||||
|
||||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
|
||||
chat := database.Chat{
|
||||
ID: uuid.New(),
|
||||
CreatedAt: arg.CreatedAt,
|
||||
UpdatedAt: arg.UpdatedAt,
|
||||
OwnerID: arg.OwnerID,
|
||||
Title: arg.Title,
|
||||
}
|
||||
q.chats = append(q.chats, chat)
|
||||
|
||||
return chat, nil
|
||||
}
|
||||
|
||||
func (q *FakeQuerier) InsertChatMessages(ctx context.Context, arg database.InsertChatMessagesParams) ([]database.ChatMessage, error) {
|
||||
err := validateDatabaseType(arg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
|
||||
id := int64(0)
|
||||
if len(q.chatMessages) > 0 {
|
||||
id = q.chatMessages[len(q.chatMessages)-1].ID
|
||||
}
|
||||
|
||||
messages := make([]database.ChatMessage, 0)
|
||||
|
||||
rawMessages := make([]json.RawMessage, 0)
|
||||
err = json.Unmarshal(arg.Content, &rawMessages)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, content := range rawMessages {
|
||||
id++
|
||||
_ = content
|
||||
messages = append(messages, database.ChatMessage{
|
||||
ID: id,
|
||||
ChatID: arg.ChatID,
|
||||
CreatedAt: arg.CreatedAt,
|
||||
Model: arg.Model,
|
||||
Provider: arg.Provider,
|
||||
Content: content,
|
||||
})
|
||||
}
|
||||
|
||||
q.chatMessages = append(q.chatMessages, messages...)
|
||||
return messages, nil
|
||||
}
|
||||
|
||||
func (q *FakeQuerier) InsertCryptoKey(_ context.Context, arg database.InsertCryptoKeyParams) (database.CryptoKey, error) {
|
||||
err := validateDatabaseType(arg)
|
||||
if err != nil {
|
||||
@@ -10638,27 +10531,6 @@ func (q *FakeQuerier) UpdateAPIKeyByID(_ context.Context, arg database.UpdateAPI
|
||||
return sql.ErrNoRows
|
||||
}
|
||||
|
||||
func (q *FakeQuerier) UpdateChatByID(ctx context.Context, arg database.UpdateChatByIDParams) error {
|
||||
err := validateDatabaseType(arg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
|
||||
for i, chat := range q.chats {
|
||||
if chat.ID == arg.ID {
|
||||
q.chats[i].Title = arg.Title
|
||||
q.chats[i].UpdatedAt = arg.UpdatedAt
|
||||
q.chats[i] = chat
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return sql.ErrNoRows
|
||||
}
|
||||
|
||||
func (q *FakeQuerier) UpdateCryptoKeyDeletesAt(_ context.Context, arg database.UpdateCryptoKeyDeletesAtParams) (database.CryptoKey, error) {
|
||||
err := validateDatabaseType(arg)
|
||||
if err != nil {
|
||||
@@ -14067,7 +13939,6 @@ func (q *FakeQuerier) GetAuthorizedAuditLogsOffset(ctx context.Context, arg data
|
||||
UserQuietHoursSchedule: sql.NullString{String: user.QuietHoursSchedule, Valid: userValid},
|
||||
UserStatus: database.NullUserStatus{UserStatus: user.Status, Valid: userValid},
|
||||
UserRoles: user.RBACRoles,
|
||||
Count: 0,
|
||||
})
|
||||
|
||||
if len(logs) >= int(arg.LimitOpt) {
|
||||
@@ -14075,10 +13946,82 @@ func (q *FakeQuerier) GetAuthorizedAuditLogsOffset(ctx context.Context, arg data
|
||||
}
|
||||
}
|
||||
|
||||
count := int64(len(logs))
|
||||
for i := range logs {
|
||||
logs[i].Count = count
|
||||
}
|
||||
|
||||
return logs, nil
|
||||
}
|
||||
|
||||
func (q *FakeQuerier) CountAuthorizedAuditLogs(ctx context.Context, arg database.CountAuditLogsParams, prepared rbac.PreparedAuthorized) (int64, error) {
|
||||
if err := validateDatabaseType(arg); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Call this to match the same function calls as the SQL implementation.
|
||||
// It functionally does nothing for filtering.
|
||||
if prepared != nil {
|
||||
_, err := prepared.CompileToSQL(ctx, regosql.ConvertConfig{
|
||||
VariableConverter: regosql.AuditLogConverter(),
|
||||
})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
q.mutex.RLock()
|
||||
defer q.mutex.RUnlock()
|
||||
|
||||
var count int64
|
||||
|
||||
// q.auditLogs are already sorted by time DESC, so no need to sort after the fact.
|
||||
for _, alog := range q.auditLogs {
|
||||
if arg.RequestID != uuid.Nil && arg.RequestID != alog.RequestID {
|
||||
continue
|
||||
}
|
||||
if arg.OrganizationID != uuid.Nil && arg.OrganizationID != alog.OrganizationID {
|
||||
continue
|
||||
}
|
||||
if arg.Action != "" && string(alog.Action) != arg.Action {
|
||||
continue
|
||||
}
|
||||
if arg.ResourceType != "" && !strings.Contains(string(alog.ResourceType), arg.ResourceType) {
|
||||
continue
|
||||
}
|
||||
if arg.ResourceID != uuid.Nil && alog.ResourceID != arg.ResourceID {
|
||||
continue
|
||||
}
|
||||
if arg.Username != "" {
|
||||
user, err := q.getUserByIDNoLock(alog.UserID)
|
||||
if err == nil && !strings.EqualFold(arg.Username, user.Username) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if arg.Email != "" {
|
||||
user, err := q.getUserByIDNoLock(alog.UserID)
|
||||
if err == nil && !strings.EqualFold(arg.Email, user.Email) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if !arg.DateFrom.IsZero() {
|
||||
if alog.Time.Before(arg.DateFrom) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if !arg.DateTo.IsZero() {
|
||||
if alog.Time.After(arg.DateTo) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if arg.BuildReason != "" {
|
||||
workspaceBuild, err := q.getWorkspaceBuildByIDNoLock(context.Background(), alog.ResourceID)
|
||||
if err == nil && !strings.EqualFold(arg.BuildReason, string(workspaceBuild.Reason)) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
// If the filter exists, ensure the object is authorized.
|
||||
if prepared != nil && prepared.Authorize(ctx, alog.RBACObject()) != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
count++
|
||||
}
|
||||
|
||||
return count, nil
|
||||
}
|
||||
|
||||
@@ -186,6 +186,13 @@ func (m queryMetricsStore) CleanTailnetTunnels(ctx context.Context) error {
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) CountAuditLogs(ctx context.Context, arg database.CountAuditLogsParams) (int64, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.CountAuditLogs(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("CountAuditLogs").Observe(time.Since(start).Seconds())
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) CountInProgressPrebuilds(ctx context.Context) ([]database.CountInProgressPrebuildsRow, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.CountInProgressPrebuilds(ctx)
|
||||
@@ -249,13 +256,6 @@ func (m queryMetricsStore) DeleteApplicationConnectAPIKeysByUserID(ctx context.C
|
||||
return err
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) DeleteChat(ctx context.Context, id uuid.UUID) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.DeleteChat(ctx, id)
|
||||
m.queryLatencies.WithLabelValues("DeleteChat").Observe(time.Since(start).Seconds())
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) DeleteCoordinator(ctx context.Context, id uuid.UUID) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.DeleteCoordinator(ctx, id)
|
||||
@@ -487,6 +487,13 @@ func (m queryMetricsStore) EnqueueNotificationMessage(ctx context.Context, arg d
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) ExpirePrebuildsAPIKeys(ctx context.Context, now time.Time) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.ExpirePrebuildsAPIKeys(ctx, now)
|
||||
m.queryLatencies.WithLabelValues("ExpirePrebuildsAPIKeys").Observe(time.Since(start).Seconds())
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) FavoriteWorkspace(ctx context.Context, arg uuid.UUID) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.FavoriteWorkspace(ctx, arg)
|
||||
@@ -648,27 +655,6 @@ func (m queryMetricsStore) GetAuthorizationUserRoles(ctx context.Context, userID
|
||||
return row, err
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetChatByID(ctx context.Context, id uuid.UUID) (database.Chat, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetChatByID(ctx, id)
|
||||
m.queryLatencies.WithLabelValues("GetChatByID").Observe(time.Since(start).Seconds())
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetChatMessagesByChatID(ctx context.Context, chatID uuid.UUID) ([]database.ChatMessage, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetChatMessagesByChatID(ctx, chatID)
|
||||
m.queryLatencies.WithLabelValues("GetChatMessagesByChatID").Observe(time.Since(start).Seconds())
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetChatsByOwnerID(ctx context.Context, ownerID uuid.UUID) ([]database.Chat, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetChatsByOwnerID(ctx, ownerID)
|
||||
m.queryLatencies.WithLabelValues("GetChatsByOwnerID").Observe(time.Since(start).Seconds())
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) GetCoordinatorResumeTokenSigningKey(ctx context.Context) (string, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.GetCoordinatorResumeTokenSigningKey(ctx)
|
||||
@@ -2083,20 +2069,6 @@ func (m queryMetricsStore) InsertAuditLog(ctx context.Context, arg database.Inse
|
||||
return log, err
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) InsertChat(ctx context.Context, arg database.InsertChatParams) (database.Chat, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.InsertChat(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("InsertChat").Observe(time.Since(start).Seconds())
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) InsertChatMessages(ctx context.Context, arg database.InsertChatMessagesParams) ([]database.ChatMessage, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.InsertChatMessages(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("InsertChatMessages").Observe(time.Since(start).Seconds())
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) InsertCryptoKey(ctx context.Context, arg database.InsertCryptoKeyParams) (database.CryptoKey, error) {
|
||||
start := time.Now()
|
||||
key, err := m.s.InsertCryptoKey(ctx, arg)
|
||||
@@ -2622,13 +2594,6 @@ func (m queryMetricsStore) UpdateAPIKeyByID(ctx context.Context, arg database.Up
|
||||
return err
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) UpdateChatByID(ctx context.Context, arg database.UpdateChatByIDParams) error {
|
||||
start := time.Now()
|
||||
r0 := m.s.UpdateChatByID(ctx, arg)
|
||||
m.queryLatencies.WithLabelValues("UpdateChatByID").Observe(time.Since(start).Seconds())
|
||||
return r0
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) UpdateCryptoKeyDeletesAt(ctx context.Context, arg database.UpdateCryptoKeyDeletesAtParams) (database.CryptoKey, error) {
|
||||
start := time.Now()
|
||||
key, err := m.s.UpdateCryptoKeyDeletesAt(ctx, arg)
|
||||
@@ -3370,3 +3335,10 @@ func (m queryMetricsStore) GetAuthorizedAuditLogsOffset(ctx context.Context, arg
|
||||
m.queryLatencies.WithLabelValues("GetAuthorizedAuditLogsOffset").Observe(time.Since(start).Seconds())
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
func (m queryMetricsStore) CountAuthorizedAuditLogs(ctx context.Context, arg database.CountAuditLogsParams, prepared rbac.PreparedAuthorized) (int64, error) {
|
||||
start := time.Now()
|
||||
r0, r1 := m.s.CountAuthorizedAuditLogs(ctx, arg, prepared)
|
||||
m.queryLatencies.WithLabelValues("CountAuthorizedAuditLogs").Observe(time.Since(start).Seconds())
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
@@ -247,6 +247,36 @@ func (mr *MockStoreMockRecorder) CleanTailnetTunnels(ctx any) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanTailnetTunnels", reflect.TypeOf((*MockStore)(nil).CleanTailnetTunnels), ctx)
|
||||
}
|
||||
|
||||
// CountAuditLogs mocks base method.
|
||||
func (m *MockStore) CountAuditLogs(ctx context.Context, arg database.CountAuditLogsParams) (int64, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "CountAuditLogs", ctx, arg)
|
||||
ret0, _ := ret[0].(int64)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// CountAuditLogs indicates an expected call of CountAuditLogs.
|
||||
func (mr *MockStoreMockRecorder) CountAuditLogs(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountAuditLogs", reflect.TypeOf((*MockStore)(nil).CountAuditLogs), ctx, arg)
|
||||
}
|
||||
|
||||
// CountAuthorizedAuditLogs mocks base method.
|
||||
func (m *MockStore) CountAuthorizedAuditLogs(ctx context.Context, arg database.CountAuditLogsParams, prepared rbac.PreparedAuthorized) (int64, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "CountAuthorizedAuditLogs", ctx, arg, prepared)
|
||||
ret0, _ := ret[0].(int64)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// CountAuthorizedAuditLogs indicates an expected call of CountAuthorizedAuditLogs.
|
||||
func (mr *MockStoreMockRecorder) CountAuthorizedAuditLogs(ctx, arg, prepared any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountAuthorizedAuditLogs", reflect.TypeOf((*MockStore)(nil).CountAuthorizedAuditLogs), ctx, arg, prepared)
|
||||
}
|
||||
|
||||
// CountInProgressPrebuilds mocks base method.
|
||||
func (m *MockStore) CountInProgressPrebuilds(ctx context.Context) ([]database.CountInProgressPrebuildsRow, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -376,20 +406,6 @@ func (mr *MockStoreMockRecorder) DeleteApplicationConnectAPIKeysByUserID(ctx, us
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteApplicationConnectAPIKeysByUserID", reflect.TypeOf((*MockStore)(nil).DeleteApplicationConnectAPIKeysByUserID), ctx, userID)
|
||||
}
|
||||
|
||||
// DeleteChat mocks base method.
|
||||
func (m *MockStore) DeleteChat(ctx context.Context, id uuid.UUID) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DeleteChat", ctx, id)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// DeleteChat indicates an expected call of DeleteChat.
|
||||
func (mr *MockStoreMockRecorder) DeleteChat(ctx, id any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteChat", reflect.TypeOf((*MockStore)(nil).DeleteChat), ctx, id)
|
||||
}
|
||||
|
||||
// DeleteCoordinator mocks base method.
|
||||
func (m *MockStore) DeleteCoordinator(ctx context.Context, id uuid.UUID) error {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -858,6 +874,20 @@ func (mr *MockStoreMockRecorder) EnqueueNotificationMessage(ctx, arg any) *gomoc
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnqueueNotificationMessage", reflect.TypeOf((*MockStore)(nil).EnqueueNotificationMessage), ctx, arg)
|
||||
}
|
||||
|
||||
// ExpirePrebuildsAPIKeys mocks base method.
|
||||
func (m *MockStore) ExpirePrebuildsAPIKeys(ctx context.Context, now time.Time) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ExpirePrebuildsAPIKeys", ctx, now)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// ExpirePrebuildsAPIKeys indicates an expected call of ExpirePrebuildsAPIKeys.
|
||||
func (mr *MockStoreMockRecorder) ExpirePrebuildsAPIKeys(ctx, now any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExpirePrebuildsAPIKeys", reflect.TypeOf((*MockStore)(nil).ExpirePrebuildsAPIKeys), ctx, now)
|
||||
}
|
||||
|
||||
// FavoriteWorkspace mocks base method.
|
||||
func (m *MockStore) FavoriteWorkspace(ctx context.Context, id uuid.UUID) error {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -1292,51 +1322,6 @@ func (mr *MockStoreMockRecorder) GetAuthorizedWorkspacesAndAgentsByOwnerID(ctx,
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAuthorizedWorkspacesAndAgentsByOwnerID", reflect.TypeOf((*MockStore)(nil).GetAuthorizedWorkspacesAndAgentsByOwnerID), ctx, ownerID, prepared)
|
||||
}
|
||||
|
||||
// GetChatByID mocks base method.
|
||||
func (m *MockStore) GetChatByID(ctx context.Context, id uuid.UUID) (database.Chat, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetChatByID", ctx, id)
|
||||
ret0, _ := ret[0].(database.Chat)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetChatByID indicates an expected call of GetChatByID.
|
||||
func (mr *MockStoreMockRecorder) GetChatByID(ctx, id any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChatByID", reflect.TypeOf((*MockStore)(nil).GetChatByID), ctx, id)
|
||||
}
|
||||
|
||||
// GetChatMessagesByChatID mocks base method.
|
||||
func (m *MockStore) GetChatMessagesByChatID(ctx context.Context, chatID uuid.UUID) ([]database.ChatMessage, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetChatMessagesByChatID", ctx, chatID)
|
||||
ret0, _ := ret[0].([]database.ChatMessage)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetChatMessagesByChatID indicates an expected call of GetChatMessagesByChatID.
|
||||
func (mr *MockStoreMockRecorder) GetChatMessagesByChatID(ctx, chatID any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChatMessagesByChatID", reflect.TypeOf((*MockStore)(nil).GetChatMessagesByChatID), ctx, chatID)
|
||||
}
|
||||
|
||||
// GetChatsByOwnerID mocks base method.
|
||||
func (m *MockStore) GetChatsByOwnerID(ctx context.Context, ownerID uuid.UUID) ([]database.Chat, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetChatsByOwnerID", ctx, ownerID)
|
||||
ret0, _ := ret[0].([]database.Chat)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetChatsByOwnerID indicates an expected call of GetChatsByOwnerID.
|
||||
func (mr *MockStoreMockRecorder) GetChatsByOwnerID(ctx, ownerID any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChatsByOwnerID", reflect.TypeOf((*MockStore)(nil).GetChatsByOwnerID), ctx, ownerID)
|
||||
}
|
||||
|
||||
// GetCoordinatorResumeTokenSigningKey mocks base method.
|
||||
func (m *MockStore) GetCoordinatorResumeTokenSigningKey(ctx context.Context) (string, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -4411,36 +4396,6 @@ func (mr *MockStoreMockRecorder) InsertAuditLog(ctx, arg any) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertAuditLog", reflect.TypeOf((*MockStore)(nil).InsertAuditLog), ctx, arg)
|
||||
}
|
||||
|
||||
// InsertChat mocks base method.
|
||||
func (m *MockStore) InsertChat(ctx context.Context, arg database.InsertChatParams) (database.Chat, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "InsertChat", ctx, arg)
|
||||
ret0, _ := ret[0].(database.Chat)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// InsertChat indicates an expected call of InsertChat.
|
||||
func (mr *MockStoreMockRecorder) InsertChat(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertChat", reflect.TypeOf((*MockStore)(nil).InsertChat), ctx, arg)
|
||||
}
|
||||
|
||||
// InsertChatMessages mocks base method.
|
||||
func (m *MockStore) InsertChatMessages(ctx context.Context, arg database.InsertChatMessagesParams) ([]database.ChatMessage, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "InsertChatMessages", ctx, arg)
|
||||
ret0, _ := ret[0].([]database.ChatMessage)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// InsertChatMessages indicates an expected call of InsertChatMessages.
|
||||
func (mr *MockStoreMockRecorder) InsertChatMessages(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertChatMessages", reflect.TypeOf((*MockStore)(nil).InsertChatMessages), ctx, arg)
|
||||
}
|
||||
|
||||
// InsertCryptoKey mocks base method.
|
||||
func (m *MockStore) InsertCryptoKey(ctx context.Context, arg database.InsertCryptoKeyParams) (database.CryptoKey, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -5575,20 +5530,6 @@ func (mr *MockStoreMockRecorder) UpdateAPIKeyByID(ctx, arg any) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAPIKeyByID", reflect.TypeOf((*MockStore)(nil).UpdateAPIKeyByID), ctx, arg)
|
||||
}
|
||||
|
||||
// UpdateChatByID mocks base method.
|
||||
func (m *MockStore) UpdateChatByID(ctx context.Context, arg database.UpdateChatByIDParams) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "UpdateChatByID", ctx, arg)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// UpdateChatByID indicates an expected call of UpdateChatByID.
|
||||
func (mr *MockStoreMockRecorder) UpdateChatByID(ctx, arg any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateChatByID", reflect.TypeOf((*MockStore)(nil).UpdateChatByID), ctx, arg)
|
||||
}
|
||||
|
||||
// UpdateCryptoKeyDeletesAt mocks base method.
|
||||
func (m *MockStore) UpdateCryptoKeyDeletesAt(ctx context.Context, arg database.UpdateCryptoKeyDeletesAtParams) (database.CryptoKey, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
||||
@@ -62,6 +62,9 @@ func New(ctx context.Context, logger slog.Logger, db database.Store, clk quartz.
|
||||
if err := tx.DeleteOldNotificationMessages(ctx); err != nil {
|
||||
return xerrors.Errorf("failed to delete old notification messages: %w", err)
|
||||
}
|
||||
if err := tx.ExpirePrebuildsAPIKeys(ctx, dbtime.Time(start)); err != nil {
|
||||
return xerrors.Errorf("failed to expire prebuilds user api keys: %w", err)
|
||||
}
|
||||
|
||||
logger.Debug(ctx, "purged old database entries", slog.F("duration", clk.Since(start)))
|
||||
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/database/dbrollup"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtestutil"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/coderd/provisionerdserver"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/provisionerd/proto"
|
||||
"github.com/coder/coder/v2/provisionersdk"
|
||||
@@ -40,6 +41,9 @@ func TestMain(m *testing.M) {
|
||||
//
|
||||
//nolint:paralleltest // It uses LockIDDBPurge.
|
||||
func TestPurge(t *testing.T) {
|
||||
if !dbtestutil.WillUsePostgres() {
|
||||
t.Skip("requires postgres")
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
@@ -490,3 +494,71 @@ func containsProvisionerDaemon(daemons []database.ProvisionerDaemon, name string
|
||||
return d.Name == name
|
||||
})
|
||||
}
|
||||
|
||||
func TestExpireOldAPIKeys(t *testing.T) {
|
||||
t.Parallel()
|
||||
if !dbtestutil.WillUsePostgres() {
|
||||
t.Skip("only implemented in postgres")
|
||||
}
|
||||
|
||||
// Given: a number of workspaces and API keys owned by a regular user and the prebuilds system user.
|
||||
var (
|
||||
ctx = testutil.Context(t, testutil.WaitShort)
|
||||
now = dbtime.Now()
|
||||
db, _ = dbtestutil.NewDB(t, dbtestutil.WithDumpOnFailure())
|
||||
org = dbgen.Organization(t, db, database.Organization{})
|
||||
user = dbgen.User(t, db, database.User{})
|
||||
tpl = dbgen.Template(t, db, database.Template{OrganizationID: org.ID, CreatedBy: user.ID})
|
||||
userWs = dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: user.ID,
|
||||
TemplateID: tpl.ID,
|
||||
})
|
||||
prebuildsWs = dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OwnerID: database.PrebuildsSystemUserID,
|
||||
TemplateID: tpl.ID,
|
||||
})
|
||||
createAPIKey = func(userID uuid.UUID, name string) database.APIKey {
|
||||
k, _ := dbgen.APIKey(t, db, database.APIKey{UserID: userID, TokenName: name, ExpiresAt: now.Add(time.Hour)}, func(iap *database.InsertAPIKeyParams) {
|
||||
iap.TokenName = name
|
||||
})
|
||||
return k
|
||||
}
|
||||
assertKeyActive = func(kid string) {
|
||||
k, err := db.GetAPIKeyByID(ctx, kid)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, k.ExpiresAt.After(now))
|
||||
}
|
||||
assertKeyExpired = func(kid string) {
|
||||
k, err := db.GetAPIKeyByID(ctx, kid)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, k.ExpiresAt.Equal(now))
|
||||
}
|
||||
unnamedUserAPIKey = createAPIKey(user.ID, "")
|
||||
unnamedPrebuildsAPIKey = createAPIKey(database.PrebuildsSystemUserID, "")
|
||||
namedUserAPIKey = createAPIKey(user.ID, "my-token")
|
||||
namedPrebuildsAPIKey = createAPIKey(database.PrebuildsSystemUserID, "also-my-token")
|
||||
userWorkspaceAPIKey1 = createAPIKey(user.ID, provisionerdserver.WorkspaceSessionTokenName(user.ID, userWs.ID))
|
||||
userWorkspaceAPIKey2 = createAPIKey(user.ID, provisionerdserver.WorkspaceSessionTokenName(user.ID, prebuildsWs.ID))
|
||||
prebuildsWorkspaceAPIKey1 = createAPIKey(database.PrebuildsSystemUserID, provisionerdserver.WorkspaceSessionTokenName(database.PrebuildsSystemUserID, prebuildsWs.ID))
|
||||
prebuildsWorkspaceAPIKey2 = createAPIKey(database.PrebuildsSystemUserID, provisionerdserver.WorkspaceSessionTokenName(database.PrebuildsSystemUserID, userWs.ID))
|
||||
)
|
||||
|
||||
// When: we call ExpirePrebuildsAPIKeys
|
||||
err := db.ExpirePrebuildsAPIKeys(ctx, now)
|
||||
// Then: no errors is reported.
|
||||
require.NoError(t, err)
|
||||
|
||||
// We do not touch user API keys.
|
||||
assertKeyActive(unnamedUserAPIKey.ID)
|
||||
assertKeyActive(namedUserAPIKey.ID)
|
||||
assertKeyActive(userWorkspaceAPIKey1.ID)
|
||||
assertKeyActive(userWorkspaceAPIKey2.ID)
|
||||
// Unnamed prebuilds API keys get expired.
|
||||
assertKeyExpired(unnamedPrebuildsAPIKey.ID)
|
||||
// API keys for workspaces still owned by prebuilds user remain active until claimed.
|
||||
assertKeyActive(prebuildsWorkspaceAPIKey1.ID)
|
||||
// API keys for workspaces no longer owned by prebuilds user get expired.
|
||||
assertKeyExpired(prebuildsWorkspaceAPIKey2.ID)
|
||||
// Out of an abundance of caution, we do not expire explicitly named prebuilds API keys.
|
||||
assertKeyActive(namedPrebuildsAPIKey.ID)
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -254,26 +253,31 @@ func PGDump(dbURL string) ([]byte, error) {
|
||||
return stdout.Bytes(), nil
|
||||
}
|
||||
|
||||
const minimumPostgreSQLVersion = 13
|
||||
const (
|
||||
minimumPostgreSQLVersion = 13
|
||||
postgresImageSha = "sha256:467e7f2fb97b2f29d616e0be1d02218a7bbdfb94eb3cda7461fd80165edfd1f7"
|
||||
)
|
||||
|
||||
// PGDumpSchemaOnly is for use by gen/dump only.
|
||||
// It runs pg_dump against dbURL and sets a consistent timezone and encoding.
|
||||
func PGDumpSchemaOnly(dbURL string) ([]byte, error) {
|
||||
hasPGDump := false
|
||||
if _, err := exec.LookPath("pg_dump"); err == nil {
|
||||
out, err := exec.Command("pg_dump", "--version").Output()
|
||||
if err == nil {
|
||||
// Parse output:
|
||||
// pg_dump (PostgreSQL) 14.5 (Ubuntu 14.5-0ubuntu0.22.04.1)
|
||||
parts := strings.Split(string(out), " ")
|
||||
if len(parts) > 2 {
|
||||
version, err := strconv.Atoi(strings.Split(parts[2], ".")[0])
|
||||
if err == nil && version >= minimumPostgreSQLVersion {
|
||||
hasPGDump = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// TODO: Temporarily pin pg_dump to the docker image until
|
||||
// https://github.com/sqlc-dev/sqlc/issues/4065 is resolved.
|
||||
// if _, err := exec.LookPath("pg_dump"); err == nil {
|
||||
// out, err := exec.Command("pg_dump", "--version").Output()
|
||||
// if err == nil {
|
||||
// // Parse output:
|
||||
// // pg_dump (PostgreSQL) 14.5 (Ubuntu 14.5-0ubuntu0.22.04.1)
|
||||
// parts := strings.Split(string(out), " ")
|
||||
// if len(parts) > 2 {
|
||||
// version, err := strconv.Atoi(strings.Split(parts[2], ".")[0])
|
||||
// if err == nil && version >= minimumPostgreSQLVersion {
|
||||
// hasPGDump = true
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
cmdArgs := []string{
|
||||
"pg_dump",
|
||||
@@ -298,7 +302,7 @@ func PGDumpSchemaOnly(dbURL string) ([]byte, error) {
|
||||
"run",
|
||||
"--rm",
|
||||
"--network=host",
|
||||
fmt.Sprintf("%s:%d", postgresImage, minimumPostgreSQLVersion),
|
||||
fmt.Sprintf("%s:%d@%s", postgresImage, minimumPostgreSQLVersion, postgresImageSha),
|
||||
}, cmdArgs...)
|
||||
}
|
||||
cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...) //#nosec
|
||||
|
||||
Generated
-40
@@ -822,32 +822,6 @@ CREATE TABLE audit_logs (
|
||||
resource_icon text NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE chat_messages (
|
||||
id bigint NOT NULL,
|
||||
chat_id uuid NOT NULL,
|
||||
created_at timestamp with time zone DEFAULT now() NOT NULL,
|
||||
model text NOT NULL,
|
||||
provider text NOT NULL,
|
||||
content jsonb NOT NULL
|
||||
);
|
||||
|
||||
CREATE SEQUENCE chat_messages_id_seq
|
||||
START WITH 1
|
||||
INCREMENT BY 1
|
||||
NO MINVALUE
|
||||
NO MAXVALUE
|
||||
CACHE 1;
|
||||
|
||||
ALTER SEQUENCE chat_messages_id_seq OWNED BY chat_messages.id;
|
||||
|
||||
CREATE TABLE chats (
|
||||
id uuid DEFAULT gen_random_uuid() NOT NULL,
|
||||
owner_id uuid NOT NULL,
|
||||
created_at timestamp with time zone DEFAULT now() NOT NULL,
|
||||
updated_at timestamp with time zone DEFAULT now() NOT NULL,
|
||||
title text NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE crypto_keys (
|
||||
feature crypto_key_feature NOT NULL,
|
||||
sequence integer NOT NULL,
|
||||
@@ -2342,8 +2316,6 @@ CREATE VIEW workspaces_expanded AS
|
||||
|
||||
COMMENT ON VIEW workspaces_expanded IS 'Joins in the display name information such as username, avatar, and organization name.';
|
||||
|
||||
ALTER TABLE ONLY chat_messages ALTER COLUMN id SET DEFAULT nextval('chat_messages_id_seq'::regclass);
|
||||
|
||||
ALTER TABLE ONLY licenses ALTER COLUMN id SET DEFAULT nextval('licenses_id_seq'::regclass);
|
||||
|
||||
ALTER TABLE ONLY provisioner_job_logs ALTER COLUMN id SET DEFAULT nextval('provisioner_job_logs_id_seq'::regclass);
|
||||
@@ -2365,12 +2337,6 @@ ALTER TABLE ONLY api_keys
|
||||
ALTER TABLE ONLY audit_logs
|
||||
ADD CONSTRAINT audit_logs_pkey PRIMARY KEY (id);
|
||||
|
||||
ALTER TABLE ONLY chat_messages
|
||||
ADD CONSTRAINT chat_messages_pkey PRIMARY KEY (id);
|
||||
|
||||
ALTER TABLE ONLY chats
|
||||
ADD CONSTRAINT chats_pkey PRIMARY KEY (id);
|
||||
|
||||
ALTER TABLE ONLY crypto_keys
|
||||
ADD CONSTRAINT crypto_keys_pkey PRIMARY KEY (feature, sequence);
|
||||
|
||||
@@ -2867,12 +2833,6 @@ forward without requiring a migration to clean up historical data.';
|
||||
ALTER TABLE ONLY api_keys
|
||||
ADD CONSTRAINT api_keys_user_id_uuid_fkey FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE;
|
||||
|
||||
ALTER TABLE ONLY chat_messages
|
||||
ADD CONSTRAINT chat_messages_chat_id_fkey FOREIGN KEY (chat_id) REFERENCES chats(id) ON DELETE CASCADE;
|
||||
|
||||
ALTER TABLE ONLY chats
|
||||
ADD CONSTRAINT chats_owner_id_fkey FOREIGN KEY (owner_id) REFERENCES users(id) ON DELETE CASCADE;
|
||||
|
||||
ALTER TABLE ONLY crypto_keys
|
||||
ADD CONSTRAINT crypto_keys_secret_key_id_fkey FOREIGN KEY (secret_key_id) REFERENCES dbcrypt_keys(active_key_digest);
|
||||
|
||||
|
||||
@@ -7,8 +7,6 @@ type ForeignKeyConstraint string
|
||||
// ForeignKeyConstraint enums.
|
||||
const (
|
||||
ForeignKeyAPIKeysUserIDUUID ForeignKeyConstraint = "api_keys_user_id_uuid_fkey" // ALTER TABLE ONLY api_keys ADD CONSTRAINT api_keys_user_id_uuid_fkey FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE;
|
||||
ForeignKeyChatMessagesChatID ForeignKeyConstraint = "chat_messages_chat_id_fkey" // ALTER TABLE ONLY chat_messages ADD CONSTRAINT chat_messages_chat_id_fkey FOREIGN KEY (chat_id) REFERENCES chats(id) ON DELETE CASCADE;
|
||||
ForeignKeyChatsOwnerID ForeignKeyConstraint = "chats_owner_id_fkey" // ALTER TABLE ONLY chats ADD CONSTRAINT chats_owner_id_fkey FOREIGN KEY (owner_id) REFERENCES users(id) ON DELETE CASCADE;
|
||||
ForeignKeyCryptoKeysSecretKeyID ForeignKeyConstraint = "crypto_keys_secret_key_id_fkey" // ALTER TABLE ONLY crypto_keys ADD CONSTRAINT crypto_keys_secret_key_id_fkey FOREIGN KEY (secret_key_id) REFERENCES dbcrypt_keys(active_key_digest);
|
||||
ForeignKeyGitAuthLinksOauthAccessTokenKeyID ForeignKeyConstraint = "git_auth_links_oauth_access_token_key_id_fkey" // ALTER TABLE ONLY external_auth_links ADD CONSTRAINT git_auth_links_oauth_access_token_key_id_fkey FOREIGN KEY (oauth_access_token_key_id) REFERENCES dbcrypt_keys(active_key_digest);
|
||||
ForeignKeyGitAuthLinksOauthRefreshTokenKeyID ForeignKeyConstraint = "git_auth_links_oauth_refresh_token_key_id_fkey" // ALTER TABLE ONLY external_auth_links ADD CONSTRAINT git_auth_links_oauth_refresh_token_key_id_fkey FOREIGN KEY (oauth_refresh_token_key_id) REFERENCES dbcrypt_keys(active_key_digest);
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
-- noop
|
||||
@@ -0,0 +1,2 @@
|
||||
DROP TABLE IF EXISTS chat_messages;
|
||||
DROP TABLE IF EXISTS chats;
|
||||
@@ -611,8 +611,3 @@ func (m WorkspaceAgentVolumeResourceMonitor) Debounce(
|
||||
|
||||
return m.DebouncedUntil, false
|
||||
}
|
||||
|
||||
func (c Chat) RBACObject() rbac.Object {
|
||||
return rbac.ResourceChat.WithID(c.ID).
|
||||
WithOwner(c.OwnerID.String())
|
||||
}
|
||||
|
||||
@@ -478,6 +478,7 @@ func (q *sqlQuerier) GetAuthorizedUsers(ctx context.Context, arg GetUsersParams,
|
||||
|
||||
type auditLogQuerier interface {
|
||||
GetAuthorizedAuditLogsOffset(ctx context.Context, arg GetAuditLogsOffsetParams, prepared rbac.PreparedAuthorized) ([]GetAuditLogsOffsetRow, error)
|
||||
CountAuthorizedAuditLogs(ctx context.Context, arg CountAuditLogsParams, prepared rbac.PreparedAuthorized) (int64, error)
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) GetAuthorizedAuditLogsOffset(ctx context.Context, arg GetAuditLogsOffsetParams, prepared rbac.PreparedAuthorized) ([]GetAuditLogsOffsetRow, error) {
|
||||
@@ -548,7 +549,6 @@ func (q *sqlQuerier) GetAuthorizedAuditLogsOffset(ctx context.Context, arg GetAu
|
||||
&i.OrganizationName,
|
||||
&i.OrganizationDisplayName,
|
||||
&i.OrganizationIcon,
|
||||
&i.Count,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -563,6 +563,54 @@ func (q *sqlQuerier) GetAuthorizedAuditLogsOffset(ctx context.Context, arg GetAu
|
||||
return items, nil
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) CountAuthorizedAuditLogs(ctx context.Context, arg CountAuditLogsParams, prepared rbac.PreparedAuthorized) (int64, error) {
|
||||
authorizedFilter, err := prepared.CompileToSQL(ctx, regosql.ConvertConfig{
|
||||
VariableConverter: regosql.AuditLogConverter(),
|
||||
})
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("compile authorized filter: %w", err)
|
||||
}
|
||||
|
||||
filtered, err := insertAuthorizedFilter(countAuditLogs, fmt.Sprintf(" AND %s", authorizedFilter))
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("insert authorized filter: %w", err)
|
||||
}
|
||||
|
||||
query := fmt.Sprintf("-- name: CountAuthorizedAuditLogs :one\n%s", filtered)
|
||||
|
||||
rows, err := q.db.QueryContext(ctx, query,
|
||||
arg.ResourceType,
|
||||
arg.ResourceID,
|
||||
arg.OrganizationID,
|
||||
arg.ResourceTarget,
|
||||
arg.Action,
|
||||
arg.UserID,
|
||||
arg.Username,
|
||||
arg.Email,
|
||||
arg.DateFrom,
|
||||
arg.DateTo,
|
||||
arg.BuildReason,
|
||||
arg.RequestID,
|
||||
)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var count int64
|
||||
for rows.Next() {
|
||||
if err := rows.Scan(&count); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
if err := rows.Close(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return count, nil
|
||||
}
|
||||
|
||||
func insertAuthorizedFilter(query string, replaceWith string) (string, error) {
|
||||
if !strings.Contains(query, authorizedQueryPlaceholder) {
|
||||
return "", xerrors.Errorf("query does not contain authorized replace string, this is not an authorized query")
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
@@ -54,3 +57,41 @@ func TestWorkspaceTableConvert(t *testing.T) {
|
||||
"'workspace.WorkspaceTable()' is not missing at least 1 field when converting to 'WorkspaceTable'. "+
|
||||
"To resolve this, go to the 'func (w Workspace) WorkspaceTable()' and ensure all fields are converted.")
|
||||
}
|
||||
|
||||
// TestAuditLogsQueryConsistency ensures that GetAuditLogsOffset and CountAuditLogs
|
||||
// have identical WHERE clauses to prevent filtering inconsistencies.
|
||||
// This test is a guard rail to prevent developer oversight mistakes.
|
||||
func TestAuditLogsQueryConsistency(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
getWhereClause := extractWhereClause(getAuditLogsOffset)
|
||||
require.NotEmpty(t, getWhereClause, "failed to extract WHERE clause from GetAuditLogsOffset")
|
||||
|
||||
countWhereClause := extractWhereClause(countAuditLogs)
|
||||
require.NotEmpty(t, countWhereClause, "failed to extract WHERE clause from CountAuditLogs")
|
||||
|
||||
// Compare the WHERE clauses
|
||||
if diff := cmp.Diff(getWhereClause, countWhereClause); diff != "" {
|
||||
t.Errorf("GetAuditLogsOffset and CountAuditLogs WHERE clauses must be identical to ensure consistent filtering.\nDiff:\n%s", diff)
|
||||
}
|
||||
}
|
||||
|
||||
// extractWhereClause extracts the WHERE clause from a SQL query string
|
||||
func extractWhereClause(query string) string {
|
||||
// Find WHERE and get everything after it
|
||||
wherePattern := regexp.MustCompile(`(?is)WHERE\s+(.*)`)
|
||||
whereMatches := wherePattern.FindStringSubmatch(query)
|
||||
if len(whereMatches) < 2 {
|
||||
return ""
|
||||
}
|
||||
|
||||
whereClause := whereMatches[1]
|
||||
|
||||
// Remove ORDER BY, LIMIT, OFFSET clauses from the end
|
||||
whereClause = regexp.MustCompile(`(?is)\s+(ORDER BY|LIMIT|OFFSET).*$`).ReplaceAllString(whereClause, "")
|
||||
|
||||
// Remove SQL comments
|
||||
whereClause = regexp.MustCompile(`(?m)--.*$`).ReplaceAllString(whereClause, "")
|
||||
|
||||
return strings.TrimSpace(whereClause)
|
||||
}
|
||||
|
||||
@@ -2781,23 +2781,6 @@ type AuditLog struct {
|
||||
ResourceIcon string `db:"resource_icon" json:"resource_icon"`
|
||||
}
|
||||
|
||||
type Chat struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
OwnerID uuid.UUID `db:"owner_id" json:"owner_id"`
|
||||
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
||||
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
||||
Title string `db:"title" json:"title"`
|
||||
}
|
||||
|
||||
type ChatMessage struct {
|
||||
ID int64 `db:"id" json:"id"`
|
||||
ChatID uuid.UUID `db:"chat_id" json:"chat_id"`
|
||||
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
||||
Model string `db:"model" json:"model"`
|
||||
Provider string `db:"provider" json:"provider"`
|
||||
Content json.RawMessage `db:"content" json:"content"`
|
||||
}
|
||||
|
||||
type CryptoKey struct {
|
||||
Feature CryptoKeyFeature `db:"feature" json:"feature"`
|
||||
Sequence int32 `db:"sequence" json:"sequence"`
|
||||
|
||||
@@ -64,6 +64,7 @@ type sqlcQuerier interface {
|
||||
CleanTailnetCoordinators(ctx context.Context) error
|
||||
CleanTailnetLostPeers(ctx context.Context) error
|
||||
CleanTailnetTunnels(ctx context.Context) error
|
||||
CountAuditLogs(ctx context.Context, arg CountAuditLogsParams) (int64, error)
|
||||
// CountInProgressPrebuilds returns the number of in-progress prebuilds, grouped by preset ID and transition.
|
||||
// Prebuild considered in-progress if it's in the "starting", "stopping", or "deleting" state.
|
||||
CountInProgressPrebuilds(ctx context.Context) ([]CountInProgressPrebuildsRow, error)
|
||||
@@ -79,7 +80,6 @@ type sqlcQuerier interface {
|
||||
// be recreated.
|
||||
DeleteAllWebpushSubscriptions(ctx context.Context) error
|
||||
DeleteApplicationConnectAPIKeysByUserID(ctx context.Context, userID uuid.UUID) error
|
||||
DeleteChat(ctx context.Context, id uuid.UUID) error
|
||||
DeleteCoordinator(ctx context.Context, id uuid.UUID) error
|
||||
DeleteCryptoKey(ctx context.Context, arg DeleteCryptoKeyParams) (CryptoKey, error)
|
||||
DeleteCustomRole(ctx context.Context, arg DeleteCustomRoleParams) error
|
||||
@@ -124,6 +124,11 @@ type sqlcQuerier interface {
|
||||
// of the test-only in-memory database. Do not use this in new code.
|
||||
DisableForeignKeysAndTriggers(ctx context.Context) error
|
||||
EnqueueNotificationMessage(ctx context.Context, arg EnqueueNotificationMessageParams) error
|
||||
// Firstly, collect api_keys owned by the prebuilds user that correlate
|
||||
// to workspaces no longer owned by the prebuilds user.
|
||||
// Next, collect api_keys that belong to the prebuilds user but have no token name.
|
||||
// These were most likely created via 'coder login' as the prebuilds user.
|
||||
ExpirePrebuildsAPIKeys(ctx context.Context, now time.Time) error
|
||||
FavoriteWorkspace(ctx context.Context, id uuid.UUID) error
|
||||
FetchMemoryResourceMonitorsByAgentID(ctx context.Context, agentID uuid.UUID) (WorkspaceAgentMemoryResourceMonitor, error)
|
||||
FetchMemoryResourceMonitorsUpdatedAfter(ctx context.Context, updatedAt time.Time) ([]WorkspaceAgentMemoryResourceMonitor, error)
|
||||
@@ -154,9 +159,6 @@ type sqlcQuerier interface {
|
||||
// This function returns roles for authorization purposes. Implied member roles
|
||||
// are included.
|
||||
GetAuthorizationUserRoles(ctx context.Context, userID uuid.UUID) (GetAuthorizationUserRolesRow, error)
|
||||
GetChatByID(ctx context.Context, id uuid.UUID) (Chat, error)
|
||||
GetChatMessagesByChatID(ctx context.Context, chatID uuid.UUID) ([]ChatMessage, error)
|
||||
GetChatsByOwnerID(ctx context.Context, ownerID uuid.UUID) ([]Chat, error)
|
||||
GetCoordinatorResumeTokenSigningKey(ctx context.Context) (string, error)
|
||||
GetCryptoKeyByFeatureAndSequence(ctx context.Context, arg GetCryptoKeyByFeatureAndSequenceParams) (CryptoKey, error)
|
||||
GetCryptoKeys(ctx context.Context) ([]CryptoKey, error)
|
||||
@@ -472,8 +474,6 @@ type sqlcQuerier interface {
|
||||
// every member of the org.
|
||||
InsertAllUsersGroup(ctx context.Context, organizationID uuid.UUID) (Group, error)
|
||||
InsertAuditLog(ctx context.Context, arg InsertAuditLogParams) (AuditLog, error)
|
||||
InsertChat(ctx context.Context, arg InsertChatParams) (Chat, error)
|
||||
InsertChatMessages(ctx context.Context, arg InsertChatMessagesParams) ([]ChatMessage, error)
|
||||
InsertCryptoKey(ctx context.Context, arg InsertCryptoKeyParams) (CryptoKey, error)
|
||||
InsertCustomRole(ctx context.Context, arg InsertCustomRoleParams) (CustomRole, error)
|
||||
InsertDBCryptKey(ctx context.Context, arg InsertDBCryptKeyParams) error
|
||||
@@ -567,7 +567,6 @@ type sqlcQuerier interface {
|
||||
UnarchiveTemplateVersion(ctx context.Context, arg UnarchiveTemplateVersionParams) error
|
||||
UnfavoriteWorkspace(ctx context.Context, id uuid.UUID) error
|
||||
UpdateAPIKeyByID(ctx context.Context, arg UpdateAPIKeyByIDParams) error
|
||||
UpdateChatByID(ctx context.Context, arg UpdateChatByIDParams) error
|
||||
UpdateCryptoKeyDeletesAt(ctx context.Context, arg UpdateCryptoKeyDeletesAtParams) (CryptoKey, error)
|
||||
UpdateCustomRole(ctx context.Context, arg UpdateCustomRoleParams) (CustomRole, error)
|
||||
UpdateExternalAuthLink(ctx context.Context, arg UpdateExternalAuthLinkParams) (ExternalAuthLink, error)
|
||||
|
||||
@@ -1567,6 +1567,26 @@ func TestAuditLogDefaultLimit(t *testing.T) {
|
||||
require.Len(t, rows, 100)
|
||||
}
|
||||
|
||||
func TestAuditLogCount(t *testing.T) {
|
||||
t.Parallel()
|
||||
if testing.Short() {
|
||||
t.SkipNow()
|
||||
}
|
||||
|
||||
sqlDB := testSQLDB(t)
|
||||
err := migrations.Up(sqlDB)
|
||||
require.NoError(t, err)
|
||||
db := database.New(sqlDB)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
dbgen.AuditLog(t, db, database.AuditLog{})
|
||||
|
||||
count, err := db.CountAuditLogs(ctx, database.CountAuditLogsParams{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), count)
|
||||
}
|
||||
|
||||
func TestWorkspaceQuotas(t *testing.T) {
|
||||
t.Parallel()
|
||||
orgMemberIDs := func(o database.OrganizationMember) uuid.UUID {
|
||||
@@ -1947,9 +1967,13 @@ func TestAuthorizedAuditLogs(t *testing.T) {
|
||||
})
|
||||
|
||||
// When: The user queries for audit logs
|
||||
count, err := db.CountAuditLogs(memberCtx, database.CountAuditLogsParams{})
|
||||
require.NoError(t, err)
|
||||
logs, err := db.GetAuditLogsOffset(memberCtx, database.GetAuditLogsOffsetParams{})
|
||||
require.NoError(t, err)
|
||||
// Then: No logs returned
|
||||
|
||||
// Then: No logs returned and count is 0
|
||||
require.Equal(t, int64(0), count, "count should be 0")
|
||||
require.Len(t, logs, 0, "no logs should be returned")
|
||||
})
|
||||
|
||||
@@ -1965,10 +1989,14 @@ func TestAuthorizedAuditLogs(t *testing.T) {
|
||||
})
|
||||
|
||||
// When: the auditor queries for audit logs
|
||||
count, err := db.CountAuditLogs(siteAuditorCtx, database.CountAuditLogsParams{})
|
||||
require.NoError(t, err)
|
||||
logs, err := db.GetAuditLogsOffset(siteAuditorCtx, database.GetAuditLogsOffsetParams{})
|
||||
require.NoError(t, err)
|
||||
// Then: All logs are returned
|
||||
require.ElementsMatch(t, auditOnlyIDs(allLogs), auditOnlyIDs(logs))
|
||||
|
||||
// Then: All logs are returned and count matches
|
||||
require.Equal(t, int64(len(allLogs)), count, "count should match total number of logs")
|
||||
require.ElementsMatch(t, auditOnlyIDs(allLogs), auditOnlyIDs(logs), "all logs should be returned")
|
||||
})
|
||||
|
||||
t.Run("SingleOrgAuditor", func(t *testing.T) {
|
||||
@@ -1984,10 +2012,14 @@ func TestAuthorizedAuditLogs(t *testing.T) {
|
||||
})
|
||||
|
||||
// When: The auditor queries for audit logs
|
||||
count, err := db.CountAuditLogs(orgAuditCtx, database.CountAuditLogsParams{})
|
||||
require.NoError(t, err)
|
||||
logs, err := db.GetAuditLogsOffset(orgAuditCtx, database.GetAuditLogsOffsetParams{})
|
||||
require.NoError(t, err)
|
||||
// Then: Only the logs for the organization are returned
|
||||
require.ElementsMatch(t, orgAuditLogs[orgID], auditOnlyIDs(logs))
|
||||
|
||||
// Then: Only the logs for the organization are returned and count matches
|
||||
require.Equal(t, int64(len(orgAuditLogs[orgID])), count, "count should match organization logs")
|
||||
require.ElementsMatch(t, orgAuditLogs[orgID], auditOnlyIDs(logs), "only organization logs should be returned")
|
||||
})
|
||||
|
||||
t.Run("TwoOrgAuditors", func(t *testing.T) {
|
||||
@@ -2004,10 +2036,16 @@ func TestAuthorizedAuditLogs(t *testing.T) {
|
||||
})
|
||||
|
||||
// When: The user queries for audit logs
|
||||
count, err := db.CountAuditLogs(multiOrgAuditCtx, database.CountAuditLogsParams{})
|
||||
require.NoError(t, err)
|
||||
logs, err := db.GetAuditLogsOffset(multiOrgAuditCtx, database.GetAuditLogsOffsetParams{})
|
||||
require.NoError(t, err)
|
||||
// Then: All logs for both organizations are returned
|
||||
require.ElementsMatch(t, append(orgAuditLogs[first], orgAuditLogs[second]...), auditOnlyIDs(logs))
|
||||
|
||||
// Then: All logs for both organizations are returned and count matches
|
||||
expectedLogs := append([]uuid.UUID{}, orgAuditLogs[first]...)
|
||||
expectedLogs = append(expectedLogs, orgAuditLogs[second]...)
|
||||
require.Equal(t, int64(len(expectedLogs)), count, "count should match sum of both organizations")
|
||||
require.ElementsMatch(t, expectedLogs, auditOnlyIDs(logs), "logs from both organizations should be returned")
|
||||
})
|
||||
|
||||
t.Run("ErroneousOrg", func(t *testing.T) {
|
||||
@@ -2022,9 +2060,13 @@ func TestAuthorizedAuditLogs(t *testing.T) {
|
||||
})
|
||||
|
||||
// When: The user queries for audit logs
|
||||
count, err := db.CountAuditLogs(userCtx, database.CountAuditLogsParams{})
|
||||
require.NoError(t, err)
|
||||
logs, err := db.GetAuditLogsOffset(userCtx, database.GetAuditLogsOffsetParams{})
|
||||
require.NoError(t, err)
|
||||
// Then: No logs are returned
|
||||
|
||||
// Then: No logs are returned and count is 0
|
||||
require.Equal(t, int64(0), count, "count should be 0")
|
||||
require.Len(t, logs, 0, "no logs should be returned")
|
||||
})
|
||||
}
|
||||
|
||||
+269
-311
@@ -144,6 +144,46 @@ func (q *sqlQuerier) DeleteApplicationConnectAPIKeysByUserID(ctx context.Context
|
||||
return err
|
||||
}
|
||||
|
||||
const expirePrebuildsAPIKeys = `-- name: ExpirePrebuildsAPIKeys :exec
|
||||
WITH unexpired_prebuilds_workspace_session_tokens AS (
|
||||
SELECT id, SUBSTRING(token_name FROM 38 FOR 36)::uuid AS workspace_id
|
||||
FROM api_keys
|
||||
WHERE user_id = 'c42fdf75-3097-471c-8c33-fb52454d81c0'::uuid
|
||||
AND expires_at > $1::timestamptz
|
||||
AND token_name SIMILAR TO 'c42fdf75-3097-471c-8c33-fb52454d81c0_[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}_session_token'
|
||||
),
|
||||
stale_prebuilds_workspace_session_tokens AS (
|
||||
SELECT upwst.id
|
||||
FROM unexpired_prebuilds_workspace_session_tokens upwst
|
||||
LEFT JOIN workspaces w
|
||||
ON w.id = upwst.workspace_id
|
||||
WHERE w.owner_id <> 'c42fdf75-3097-471c-8c33-fb52454d81c0'::uuid
|
||||
),
|
||||
unnamed_prebuilds_api_keys AS (
|
||||
SELECT id
|
||||
FROM api_keys
|
||||
WHERE user_id = 'c42fdf75-3097-471c-8c33-fb52454d81c0'::uuid
|
||||
AND token_name = ''
|
||||
AND expires_at > $1::timestamptz
|
||||
)
|
||||
UPDATE api_keys
|
||||
SET expires_at = $1::timestamptz
|
||||
WHERE id IN (
|
||||
SELECT id FROM stale_prebuilds_workspace_session_tokens
|
||||
UNION
|
||||
SELECT id FROM unnamed_prebuilds_api_keys
|
||||
)
|
||||
`
|
||||
|
||||
// Firstly, collect api_keys owned by the prebuilds user that correlate
|
||||
// to workspaces no longer owned by the prebuilds user.
|
||||
// Next, collect api_keys that belong to the prebuilds user but have no token name.
|
||||
// These were most likely created via 'coder login' as the prebuilds user.
|
||||
func (q *sqlQuerier) ExpirePrebuildsAPIKeys(ctx context.Context, now time.Time) error {
|
||||
_, err := q.db.ExecContext(ctx, expirePrebuildsAPIKeys, now)
|
||||
return err
|
||||
}
|
||||
|
||||
const getAPIKeyByID = `-- name: GetAPIKeyByID :one
|
||||
SELECT
|
||||
id, hashed_secret, user_id, last_used, expires_at, created_at, updated_at, login_type, lifetime_seconds, ip_address, scope, token_name
|
||||
@@ -441,140 +481,241 @@ func (q *sqlQuerier) UpdateAPIKeyByID(ctx context.Context, arg UpdateAPIKeyByIDP
|
||||
return err
|
||||
}
|
||||
|
||||
const getAuditLogsOffset = `-- name: GetAuditLogsOffset :many
|
||||
SELECT
|
||||
audit_logs.id, audit_logs.time, audit_logs.user_id, audit_logs.organization_id, audit_logs.ip, audit_logs.user_agent, audit_logs.resource_type, audit_logs.resource_id, audit_logs.resource_target, audit_logs.action, audit_logs.diff, audit_logs.status_code, audit_logs.additional_fields, audit_logs.request_id, audit_logs.resource_icon,
|
||||
-- sqlc.embed(users) would be nice but it does not seem to play well with
|
||||
-- left joins.
|
||||
users.username AS user_username,
|
||||
users.name AS user_name,
|
||||
users.email AS user_email,
|
||||
users.created_at AS user_created_at,
|
||||
users.updated_at AS user_updated_at,
|
||||
users.last_seen_at AS user_last_seen_at,
|
||||
users.status AS user_status,
|
||||
users.login_type AS user_login_type,
|
||||
users.rbac_roles AS user_roles,
|
||||
users.avatar_url AS user_avatar_url,
|
||||
users.deleted AS user_deleted,
|
||||
users.quiet_hours_schedule AS user_quiet_hours_schedule,
|
||||
COALESCE(organizations.name, '') AS organization_name,
|
||||
COALESCE(organizations.display_name, '') AS organization_display_name,
|
||||
COALESCE(organizations.icon, '') AS organization_icon,
|
||||
COUNT(audit_logs.*) OVER () AS count
|
||||
FROM
|
||||
audit_logs
|
||||
LEFT JOIN users ON audit_logs.user_id = users.id
|
||||
LEFT JOIN
|
||||
-- First join on workspaces to get the initial workspace create
|
||||
-- to workspace build 1 id. This is because the first create is
|
||||
-- is a different audit log than subsequent starts.
|
||||
workspaces ON
|
||||
audit_logs.resource_type = 'workspace' AND
|
||||
audit_logs.resource_id = workspaces.id
|
||||
LEFT JOIN
|
||||
workspace_builds ON
|
||||
-- Get the reason from the build if the resource type
|
||||
-- is a workspace_build
|
||||
(
|
||||
audit_logs.resource_type = 'workspace_build'
|
||||
AND audit_logs.resource_id = workspace_builds.id
|
||||
)
|
||||
OR
|
||||
-- Get the reason from the build #1 if this is the first
|
||||
-- workspace create.
|
||||
(
|
||||
audit_logs.resource_type = 'workspace' AND
|
||||
audit_logs.action = 'create' AND
|
||||
workspaces.id = workspace_builds.workspace_id AND
|
||||
workspace_builds.build_number = 1
|
||||
)
|
||||
LEFT JOIN organizations ON audit_logs.organization_id = organizations.id
|
||||
const countAuditLogs = `-- name: CountAuditLogs :one
|
||||
SELECT COUNT(*)
|
||||
FROM audit_logs
|
||||
LEFT JOIN users ON audit_logs.user_id = users.id
|
||||
LEFT JOIN organizations ON audit_logs.organization_id = organizations.id
|
||||
-- First join on workspaces to get the initial workspace create
|
||||
-- to workspace build 1 id. This is because the first create is
|
||||
-- is a different audit log than subsequent starts.
|
||||
LEFT JOIN workspaces ON audit_logs.resource_type = 'workspace'
|
||||
AND audit_logs.resource_id = workspaces.id
|
||||
-- Get the reason from the build if the resource type
|
||||
-- is a workspace_build
|
||||
LEFT JOIN workspace_builds wb_build ON audit_logs.resource_type = 'workspace_build'
|
||||
AND audit_logs.resource_id = wb_build.id
|
||||
-- Get the reason from the build #1 if this is the first
|
||||
-- workspace create.
|
||||
LEFT JOIN workspace_builds wb_workspace ON audit_logs.resource_type = 'workspace'
|
||||
AND audit_logs.action = 'create'
|
||||
AND workspaces.id = wb_workspace.workspace_id
|
||||
AND wb_workspace.build_number = 1
|
||||
WHERE
|
||||
-- Filter resource_type
|
||||
-- Filter resource_type
|
||||
CASE
|
||||
WHEN $1 :: text != '' THEN
|
||||
resource_type = $1 :: resource_type
|
||||
WHEN $1::text != '' THEN resource_type = $1::resource_type
|
||||
ELSE true
|
||||
END
|
||||
-- Filter resource_id
|
||||
AND CASE
|
||||
WHEN $2 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
|
||||
resource_id = $2
|
||||
WHEN $2::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN resource_id = $2
|
||||
ELSE true
|
||||
END
|
||||
-- Filter organization_id
|
||||
AND CASE
|
||||
WHEN $3 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
|
||||
audit_logs.organization_id = $3
|
||||
-- Filter organization_id
|
||||
AND CASE
|
||||
WHEN $3::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN audit_logs.organization_id = $3
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by resource_target
|
||||
AND CASE
|
||||
WHEN $4 :: text != '' THEN
|
||||
resource_target = $4
|
||||
WHEN $4::text != '' THEN resource_target = $4
|
||||
ELSE true
|
||||
END
|
||||
-- Filter action
|
||||
AND CASE
|
||||
WHEN $5 :: text != '' THEN
|
||||
action = $5 :: audit_action
|
||||
WHEN $5::text != '' THEN action = $5::audit_action
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by user_id
|
||||
AND CASE
|
||||
WHEN $6 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
|
||||
user_id = $6
|
||||
WHEN $6::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN user_id = $6
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by username
|
||||
AND CASE
|
||||
WHEN $7 :: text != '' THEN
|
||||
user_id = (SELECT id FROM users WHERE lower(username) = lower($7) AND deleted = false)
|
||||
WHEN $7::text != '' THEN user_id = (
|
||||
SELECT id
|
||||
FROM users
|
||||
WHERE lower(username) = lower($7)
|
||||
AND deleted = false
|
||||
)
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by user_email
|
||||
AND CASE
|
||||
WHEN $8 :: text != '' THEN
|
||||
users.email = $8
|
||||
WHEN $8::text != '' THEN users.email = $8
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by date_from
|
||||
AND CASE
|
||||
WHEN $9 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN
|
||||
"time" >= $9
|
||||
WHEN $9::timestamp with time zone != '0001-01-01 00:00:00Z' THEN "time" >= $9
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by date_to
|
||||
AND CASE
|
||||
WHEN $10 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN
|
||||
"time" <= $10
|
||||
WHEN $10::timestamp with time zone != '0001-01-01 00:00:00Z' THEN "time" <= $10
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by build_reason
|
||||
AND CASE
|
||||
WHEN $11::text != '' THEN COALESCE(wb_build.reason::text, wb_workspace.reason::text) = $11
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by build_reason
|
||||
AND CASE
|
||||
WHEN $11::text != '' THEN
|
||||
workspace_builds.reason::text = $11
|
||||
ELSE true
|
||||
END
|
||||
-- Filter request_id
|
||||
AND CASE
|
||||
WHEN $12 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
|
||||
audit_logs.request_id = $12
|
||||
WHEN $12::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN audit_logs.request_id = $12
|
||||
ELSE true
|
||||
END
|
||||
-- Authorize Filter clause will be injected below in CountAuthorizedAuditLogs
|
||||
-- @authorize_filter
|
||||
`
|
||||
|
||||
type CountAuditLogsParams struct {
|
||||
ResourceType string `db:"resource_type" json:"resource_type"`
|
||||
ResourceID uuid.UUID `db:"resource_id" json:"resource_id"`
|
||||
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
|
||||
ResourceTarget string `db:"resource_target" json:"resource_target"`
|
||||
Action string `db:"action" json:"action"`
|
||||
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
||||
Username string `db:"username" json:"username"`
|
||||
Email string `db:"email" json:"email"`
|
||||
DateFrom time.Time `db:"date_from" json:"date_from"`
|
||||
DateTo time.Time `db:"date_to" json:"date_to"`
|
||||
BuildReason string `db:"build_reason" json:"build_reason"`
|
||||
RequestID uuid.UUID `db:"request_id" json:"request_id"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) CountAuditLogs(ctx context.Context, arg CountAuditLogsParams) (int64, error) {
|
||||
row := q.db.QueryRowContext(ctx, countAuditLogs,
|
||||
arg.ResourceType,
|
||||
arg.ResourceID,
|
||||
arg.OrganizationID,
|
||||
arg.ResourceTarget,
|
||||
arg.Action,
|
||||
arg.UserID,
|
||||
arg.Username,
|
||||
arg.Email,
|
||||
arg.DateFrom,
|
||||
arg.DateTo,
|
||||
arg.BuildReason,
|
||||
arg.RequestID,
|
||||
)
|
||||
var count int64
|
||||
err := row.Scan(&count)
|
||||
return count, err
|
||||
}
|
||||
|
||||
const getAuditLogsOffset = `-- name: GetAuditLogsOffset :many
|
||||
SELECT audit_logs.id, audit_logs.time, audit_logs.user_id, audit_logs.organization_id, audit_logs.ip, audit_logs.user_agent, audit_logs.resource_type, audit_logs.resource_id, audit_logs.resource_target, audit_logs.action, audit_logs.diff, audit_logs.status_code, audit_logs.additional_fields, audit_logs.request_id, audit_logs.resource_icon,
|
||||
-- sqlc.embed(users) would be nice but it does not seem to play well with
|
||||
-- left joins.
|
||||
users.username AS user_username,
|
||||
users.name AS user_name,
|
||||
users.email AS user_email,
|
||||
users.created_at AS user_created_at,
|
||||
users.updated_at AS user_updated_at,
|
||||
users.last_seen_at AS user_last_seen_at,
|
||||
users.status AS user_status,
|
||||
users.login_type AS user_login_type,
|
||||
users.rbac_roles AS user_roles,
|
||||
users.avatar_url AS user_avatar_url,
|
||||
users.deleted AS user_deleted,
|
||||
users.quiet_hours_schedule AS user_quiet_hours_schedule,
|
||||
COALESCE(organizations.name, '') AS organization_name,
|
||||
COALESCE(organizations.display_name, '') AS organization_display_name,
|
||||
COALESCE(organizations.icon, '') AS organization_icon
|
||||
FROM audit_logs
|
||||
LEFT JOIN users ON audit_logs.user_id = users.id
|
||||
LEFT JOIN organizations ON audit_logs.organization_id = organizations.id
|
||||
-- First join on workspaces to get the initial workspace create
|
||||
-- to workspace build 1 id. This is because the first create is
|
||||
-- is a different audit log than subsequent starts.
|
||||
LEFT JOIN workspaces ON audit_logs.resource_type = 'workspace'
|
||||
AND audit_logs.resource_id = workspaces.id
|
||||
-- Get the reason from the build if the resource type
|
||||
-- is a workspace_build
|
||||
LEFT JOIN workspace_builds wb_build ON audit_logs.resource_type = 'workspace_build'
|
||||
AND audit_logs.resource_id = wb_build.id
|
||||
-- Get the reason from the build #1 if this is the first
|
||||
-- workspace create.
|
||||
LEFT JOIN workspace_builds wb_workspace ON audit_logs.resource_type = 'workspace'
|
||||
AND audit_logs.action = 'create'
|
||||
AND workspaces.id = wb_workspace.workspace_id
|
||||
AND wb_workspace.build_number = 1
|
||||
WHERE
|
||||
-- Filter resource_type
|
||||
CASE
|
||||
WHEN $1::text != '' THEN resource_type = $1::resource_type
|
||||
ELSE true
|
||||
END
|
||||
-- Filter resource_id
|
||||
AND CASE
|
||||
WHEN $2::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN resource_id = $2
|
||||
ELSE true
|
||||
END
|
||||
-- Filter organization_id
|
||||
AND CASE
|
||||
WHEN $3::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN audit_logs.organization_id = $3
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by resource_target
|
||||
AND CASE
|
||||
WHEN $4::text != '' THEN resource_target = $4
|
||||
ELSE true
|
||||
END
|
||||
-- Filter action
|
||||
AND CASE
|
||||
WHEN $5::text != '' THEN action = $5::audit_action
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by user_id
|
||||
AND CASE
|
||||
WHEN $6::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN user_id = $6
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by username
|
||||
AND CASE
|
||||
WHEN $7::text != '' THEN user_id = (
|
||||
SELECT id
|
||||
FROM users
|
||||
WHERE lower(username) = lower($7)
|
||||
AND deleted = false
|
||||
)
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by user_email
|
||||
AND CASE
|
||||
WHEN $8::text != '' THEN users.email = $8
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by date_from
|
||||
AND CASE
|
||||
WHEN $9::timestamp with time zone != '0001-01-01 00:00:00Z' THEN "time" >= $9
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by date_to
|
||||
AND CASE
|
||||
WHEN $10::timestamp with time zone != '0001-01-01 00:00:00Z' THEN "time" <= $10
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by build_reason
|
||||
AND CASE
|
||||
WHEN $11::text != '' THEN COALESCE(wb_build.reason::text, wb_workspace.reason::text) = $11
|
||||
ELSE true
|
||||
END
|
||||
-- Filter request_id
|
||||
AND CASE
|
||||
WHEN $12::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN audit_logs.request_id = $12
|
||||
ELSE true
|
||||
END
|
||||
-- Authorize Filter clause will be injected below in GetAuthorizedAuditLogsOffset
|
||||
-- @authorize_filter
|
||||
ORDER BY
|
||||
"time" DESC
|
||||
LIMIT
|
||||
-- a limit of 0 means "no limit". The audit log table is unbounded
|
||||
ORDER BY "time" DESC
|
||||
LIMIT -- a limit of 0 means "no limit". The audit log table is unbounded
|
||||
-- in size, and is expected to be quite large. Implement a default
|
||||
-- limit of 100 to prevent accidental excessively large queries.
|
||||
COALESCE(NULLIF($14 :: int, 0), 100)
|
||||
OFFSET
|
||||
$13
|
||||
COALESCE(NULLIF($14::int, 0), 100) OFFSET $13
|
||||
`
|
||||
|
||||
type GetAuditLogsOffsetParams struct {
|
||||
@@ -611,7 +752,6 @@ type GetAuditLogsOffsetRow struct {
|
||||
OrganizationName string `db:"organization_name" json:"organization_name"`
|
||||
OrganizationDisplayName string `db:"organization_display_name" json:"organization_display_name"`
|
||||
OrganizationIcon string `db:"organization_icon" json:"organization_icon"`
|
||||
Count int64 `db:"count" json:"count"`
|
||||
}
|
||||
|
||||
// GetAuditLogsBefore retrieves `row_limit` number of audit logs before the provided
|
||||
@@ -671,7 +811,6 @@ func (q *sqlQuerier) GetAuditLogsOffset(ctx context.Context, arg GetAuditLogsOff
|
||||
&i.OrganizationName,
|
||||
&i.OrganizationDisplayName,
|
||||
&i.OrganizationIcon,
|
||||
&i.Count,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -687,26 +826,41 @@ func (q *sqlQuerier) GetAuditLogsOffset(ctx context.Context, arg GetAuditLogsOff
|
||||
}
|
||||
|
||||
const insertAuditLog = `-- name: InsertAuditLog :one
|
||||
INSERT INTO
|
||||
audit_logs (
|
||||
id,
|
||||
"time",
|
||||
user_id,
|
||||
organization_id,
|
||||
ip,
|
||||
user_agent,
|
||||
resource_type,
|
||||
resource_id,
|
||||
resource_target,
|
||||
action,
|
||||
diff,
|
||||
status_code,
|
||||
additional_fields,
|
||||
request_id,
|
||||
resource_icon
|
||||
)
|
||||
VALUES
|
||||
($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15) RETURNING id, time, user_id, organization_id, ip, user_agent, resource_type, resource_id, resource_target, action, diff, status_code, additional_fields, request_id, resource_icon
|
||||
INSERT INTO audit_logs (
|
||||
id,
|
||||
"time",
|
||||
user_id,
|
||||
organization_id,
|
||||
ip,
|
||||
user_agent,
|
||||
resource_type,
|
||||
resource_id,
|
||||
resource_target,
|
||||
action,
|
||||
diff,
|
||||
status_code,
|
||||
additional_fields,
|
||||
request_id,
|
||||
resource_icon
|
||||
)
|
||||
VALUES (
|
||||
$1,
|
||||
$2,
|
||||
$3,
|
||||
$4,
|
||||
$5,
|
||||
$6,
|
||||
$7,
|
||||
$8,
|
||||
$9,
|
||||
$10,
|
||||
$11,
|
||||
$12,
|
||||
$13,
|
||||
$14,
|
||||
$15
|
||||
)
|
||||
RETURNING id, time, user_id, organization_id, ip, user_agent, resource_type, resource_id, resource_target, action, diff, status_code, additional_fields, request_id, resource_icon
|
||||
`
|
||||
|
||||
type InsertAuditLogParams struct {
|
||||
@@ -766,207 +920,6 @@ func (q *sqlQuerier) InsertAuditLog(ctx context.Context, arg InsertAuditLogParam
|
||||
return i, err
|
||||
}
|
||||
|
||||
const deleteChat = `-- name: DeleteChat :exec
|
||||
DELETE FROM chats WHERE id = $1
|
||||
`
|
||||
|
||||
func (q *sqlQuerier) DeleteChat(ctx context.Context, id uuid.UUID) error {
|
||||
_, err := q.db.ExecContext(ctx, deleteChat, id)
|
||||
return err
|
||||
}
|
||||
|
||||
const getChatByID = `-- name: GetChatByID :one
|
||||
SELECT id, owner_id, created_at, updated_at, title FROM chats
|
||||
WHERE id = $1
|
||||
`
|
||||
|
||||
func (q *sqlQuerier) GetChatByID(ctx context.Context, id uuid.UUID) (Chat, error) {
|
||||
row := q.db.QueryRowContext(ctx, getChatByID, id)
|
||||
var i Chat
|
||||
err := row.Scan(
|
||||
&i.ID,
|
||||
&i.OwnerID,
|
||||
&i.CreatedAt,
|
||||
&i.UpdatedAt,
|
||||
&i.Title,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getChatMessagesByChatID = `-- name: GetChatMessagesByChatID :many
|
||||
SELECT id, chat_id, created_at, model, provider, content FROM chat_messages
|
||||
WHERE chat_id = $1
|
||||
ORDER BY created_at ASC
|
||||
`
|
||||
|
||||
func (q *sqlQuerier) GetChatMessagesByChatID(ctx context.Context, chatID uuid.UUID) ([]ChatMessage, error) {
|
||||
rows, err := q.db.QueryContext(ctx, getChatMessagesByChatID, chatID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []ChatMessage
|
||||
for rows.Next() {
|
||||
var i ChatMessage
|
||||
if err := rows.Scan(
|
||||
&i.ID,
|
||||
&i.ChatID,
|
||||
&i.CreatedAt,
|
||||
&i.Model,
|
||||
&i.Provider,
|
||||
&i.Content,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getChatsByOwnerID = `-- name: GetChatsByOwnerID :many
|
||||
SELECT id, owner_id, created_at, updated_at, title FROM chats
|
||||
WHERE owner_id = $1
|
||||
ORDER BY created_at DESC
|
||||
`
|
||||
|
||||
func (q *sqlQuerier) GetChatsByOwnerID(ctx context.Context, ownerID uuid.UUID) ([]Chat, error) {
|
||||
rows, err := q.db.QueryContext(ctx, getChatsByOwnerID, ownerID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []Chat
|
||||
for rows.Next() {
|
||||
var i Chat
|
||||
if err := rows.Scan(
|
||||
&i.ID,
|
||||
&i.OwnerID,
|
||||
&i.CreatedAt,
|
||||
&i.UpdatedAt,
|
||||
&i.Title,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const insertChat = `-- name: InsertChat :one
|
||||
INSERT INTO chats (owner_id, created_at, updated_at, title)
|
||||
VALUES ($1, $2, $3, $4)
|
||||
RETURNING id, owner_id, created_at, updated_at, title
|
||||
`
|
||||
|
||||
type InsertChatParams struct {
|
||||
OwnerID uuid.UUID `db:"owner_id" json:"owner_id"`
|
||||
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
||||
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
||||
Title string `db:"title" json:"title"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) InsertChat(ctx context.Context, arg InsertChatParams) (Chat, error) {
|
||||
row := q.db.QueryRowContext(ctx, insertChat,
|
||||
arg.OwnerID,
|
||||
arg.CreatedAt,
|
||||
arg.UpdatedAt,
|
||||
arg.Title,
|
||||
)
|
||||
var i Chat
|
||||
err := row.Scan(
|
||||
&i.ID,
|
||||
&i.OwnerID,
|
||||
&i.CreatedAt,
|
||||
&i.UpdatedAt,
|
||||
&i.Title,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const insertChatMessages = `-- name: InsertChatMessages :many
|
||||
INSERT INTO chat_messages (chat_id, created_at, model, provider, content)
|
||||
SELECT
|
||||
$1 :: uuid AS chat_id,
|
||||
$2 :: timestamptz AS created_at,
|
||||
$3 :: VARCHAR(127) AS model,
|
||||
$4 :: VARCHAR(127) AS provider,
|
||||
jsonb_array_elements($5 :: jsonb) AS content
|
||||
RETURNING chat_messages.id, chat_messages.chat_id, chat_messages.created_at, chat_messages.model, chat_messages.provider, chat_messages.content
|
||||
`
|
||||
|
||||
type InsertChatMessagesParams struct {
|
||||
ChatID uuid.UUID `db:"chat_id" json:"chat_id"`
|
||||
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
||||
Model string `db:"model" json:"model"`
|
||||
Provider string `db:"provider" json:"provider"`
|
||||
Content json.RawMessage `db:"content" json:"content"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) InsertChatMessages(ctx context.Context, arg InsertChatMessagesParams) ([]ChatMessage, error) {
|
||||
rows, err := q.db.QueryContext(ctx, insertChatMessages,
|
||||
arg.ChatID,
|
||||
arg.CreatedAt,
|
||||
arg.Model,
|
||||
arg.Provider,
|
||||
arg.Content,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []ChatMessage
|
||||
for rows.Next() {
|
||||
var i ChatMessage
|
||||
if err := rows.Scan(
|
||||
&i.ID,
|
||||
&i.ChatID,
|
||||
&i.CreatedAt,
|
||||
&i.Model,
|
||||
&i.Provider,
|
||||
&i.Content,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const updateChatByID = `-- name: UpdateChatByID :exec
|
||||
UPDATE chats
|
||||
SET title = $2, updated_at = $3
|
||||
WHERE id = $1
|
||||
`
|
||||
|
||||
type UpdateChatByIDParams struct {
|
||||
ID uuid.UUID `db:"id" json:"id"`
|
||||
Title string `db:"title" json:"title"`
|
||||
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
||||
}
|
||||
|
||||
func (q *sqlQuerier) UpdateChatByID(ctx context.Context, arg UpdateChatByIDParams) error {
|
||||
_, err := q.db.ExecContext(ctx, updateChatByID, arg.ID, arg.Title, arg.UpdatedAt)
|
||||
return err
|
||||
}
|
||||
|
||||
const deleteCryptoKey = `-- name: DeleteCryptoKey :one
|
||||
UPDATE crypto_keys
|
||||
SET secret = NULL, secret_key_id = NULL
|
||||
@@ -19559,7 +19512,12 @@ WHERE
|
||||
provisioner_jobs.completed_at IS NOT NULL AND
|
||||
($1 :: timestamptz) - provisioner_jobs.completed_at > (INTERVAL '1 millisecond' * (templates.failure_ttl / 1000000))
|
||||
)
|
||||
) AND workspaces.deleted = 'false'
|
||||
)
|
||||
AND workspaces.deleted = 'false'
|
||||
-- Prebuilt workspaces (identified by having the prebuilds system user as owner_id)
|
||||
-- should not be considered by the lifecycle executor, as they are handled by the
|
||||
-- prebuilds reconciliation loop.
|
||||
AND workspaces.owner_id != 'c42fdf75-3097-471c-8c33-fb52454d81c0'::UUID
|
||||
`
|
||||
|
||||
type GetWorkspacesEligibleForTransitionRow struct {
|
||||
|
||||
@@ -83,3 +83,37 @@ DELETE FROM
|
||||
api_keys
|
||||
WHERE
|
||||
user_id = $1;
|
||||
|
||||
-- name: ExpirePrebuildsAPIKeys :exec
|
||||
-- Firstly, collect api_keys owned by the prebuilds user that correlate
|
||||
-- to workspaces no longer owned by the prebuilds user.
|
||||
WITH unexpired_prebuilds_workspace_session_tokens AS (
|
||||
SELECT id, SUBSTRING(token_name FROM 38 FOR 36)::uuid AS workspace_id
|
||||
FROM api_keys
|
||||
WHERE user_id = 'c42fdf75-3097-471c-8c33-fb52454d81c0'::uuid
|
||||
AND expires_at > @now::timestamptz
|
||||
AND token_name SIMILAR TO 'c42fdf75-3097-471c-8c33-fb52454d81c0_[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}_session_token'
|
||||
),
|
||||
stale_prebuilds_workspace_session_tokens AS (
|
||||
SELECT upwst.id
|
||||
FROM unexpired_prebuilds_workspace_session_tokens upwst
|
||||
LEFT JOIN workspaces w
|
||||
ON w.id = upwst.workspace_id
|
||||
WHERE w.owner_id <> 'c42fdf75-3097-471c-8c33-fb52454d81c0'::uuid
|
||||
),
|
||||
-- Next, collect api_keys that belong to the prebuilds user but have no token name.
|
||||
-- These were most likely created via 'coder login' as the prebuilds user.
|
||||
unnamed_prebuilds_api_keys AS (
|
||||
SELECT id
|
||||
FROM api_keys
|
||||
WHERE user_id = 'c42fdf75-3097-471c-8c33-fb52454d81c0'::uuid
|
||||
AND token_name = ''
|
||||
AND expires_at > @now::timestamptz
|
||||
)
|
||||
UPDATE api_keys
|
||||
SET expires_at = @now::timestamptz
|
||||
WHERE id IN (
|
||||
SELECT id FROM stale_prebuilds_workspace_session_tokens
|
||||
UNION
|
||||
SELECT id FROM unnamed_prebuilds_api_keys
|
||||
);
|
||||
|
||||
@@ -1,158 +1,239 @@
|
||||
-- GetAuditLogsBefore retrieves `row_limit` number of audit logs before the provided
|
||||
-- ID.
|
||||
-- name: GetAuditLogsOffset :many
|
||||
SELECT
|
||||
sqlc.embed(audit_logs),
|
||||
-- sqlc.embed(users) would be nice but it does not seem to play well with
|
||||
-- left joins.
|
||||
users.username AS user_username,
|
||||
users.name AS user_name,
|
||||
users.email AS user_email,
|
||||
users.created_at AS user_created_at,
|
||||
users.updated_at AS user_updated_at,
|
||||
users.last_seen_at AS user_last_seen_at,
|
||||
users.status AS user_status,
|
||||
users.login_type AS user_login_type,
|
||||
users.rbac_roles AS user_roles,
|
||||
users.avatar_url AS user_avatar_url,
|
||||
users.deleted AS user_deleted,
|
||||
users.quiet_hours_schedule AS user_quiet_hours_schedule,
|
||||
COALESCE(organizations.name, '') AS organization_name,
|
||||
COALESCE(organizations.display_name, '') AS organization_display_name,
|
||||
COALESCE(organizations.icon, '') AS organization_icon,
|
||||
COUNT(audit_logs.*) OVER () AS count
|
||||
FROM
|
||||
audit_logs
|
||||
LEFT JOIN users ON audit_logs.user_id = users.id
|
||||
LEFT JOIN
|
||||
-- First join on workspaces to get the initial workspace create
|
||||
-- to workspace build 1 id. This is because the first create is
|
||||
-- is a different audit log than subsequent starts.
|
||||
workspaces ON
|
||||
audit_logs.resource_type = 'workspace' AND
|
||||
audit_logs.resource_id = workspaces.id
|
||||
LEFT JOIN
|
||||
workspace_builds ON
|
||||
-- Get the reason from the build if the resource type
|
||||
-- is a workspace_build
|
||||
(
|
||||
audit_logs.resource_type = 'workspace_build'
|
||||
AND audit_logs.resource_id = workspace_builds.id
|
||||
)
|
||||
OR
|
||||
-- Get the reason from the build #1 if this is the first
|
||||
-- workspace create.
|
||||
(
|
||||
audit_logs.resource_type = 'workspace' AND
|
||||
audit_logs.action = 'create' AND
|
||||
workspaces.id = workspace_builds.workspace_id AND
|
||||
workspace_builds.build_number = 1
|
||||
)
|
||||
LEFT JOIN organizations ON audit_logs.organization_id = organizations.id
|
||||
SELECT sqlc.embed(audit_logs),
|
||||
-- sqlc.embed(users) would be nice but it does not seem to play well with
|
||||
-- left joins.
|
||||
users.username AS user_username,
|
||||
users.name AS user_name,
|
||||
users.email AS user_email,
|
||||
users.created_at AS user_created_at,
|
||||
users.updated_at AS user_updated_at,
|
||||
users.last_seen_at AS user_last_seen_at,
|
||||
users.status AS user_status,
|
||||
users.login_type AS user_login_type,
|
||||
users.rbac_roles AS user_roles,
|
||||
users.avatar_url AS user_avatar_url,
|
||||
users.deleted AS user_deleted,
|
||||
users.quiet_hours_schedule AS user_quiet_hours_schedule,
|
||||
COALESCE(organizations.name, '') AS organization_name,
|
||||
COALESCE(organizations.display_name, '') AS organization_display_name,
|
||||
COALESCE(organizations.icon, '') AS organization_icon
|
||||
FROM audit_logs
|
||||
LEFT JOIN users ON audit_logs.user_id = users.id
|
||||
LEFT JOIN organizations ON audit_logs.organization_id = organizations.id
|
||||
-- First join on workspaces to get the initial workspace create
|
||||
-- to workspace build 1 id. This is because the first create is
|
||||
-- is a different audit log than subsequent starts.
|
||||
LEFT JOIN workspaces ON audit_logs.resource_type = 'workspace'
|
||||
AND audit_logs.resource_id = workspaces.id
|
||||
-- Get the reason from the build if the resource type
|
||||
-- is a workspace_build
|
||||
LEFT JOIN workspace_builds wb_build ON audit_logs.resource_type = 'workspace_build'
|
||||
AND audit_logs.resource_id = wb_build.id
|
||||
-- Get the reason from the build #1 if this is the first
|
||||
-- workspace create.
|
||||
LEFT JOIN workspace_builds wb_workspace ON audit_logs.resource_type = 'workspace'
|
||||
AND audit_logs.action = 'create'
|
||||
AND workspaces.id = wb_workspace.workspace_id
|
||||
AND wb_workspace.build_number = 1
|
||||
WHERE
|
||||
-- Filter resource_type
|
||||
-- Filter resource_type
|
||||
CASE
|
||||
WHEN @resource_type :: text != '' THEN
|
||||
resource_type = @resource_type :: resource_type
|
||||
WHEN @resource_type::text != '' THEN resource_type = @resource_type::resource_type
|
||||
ELSE true
|
||||
END
|
||||
-- Filter resource_id
|
||||
AND CASE
|
||||
WHEN @resource_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
|
||||
resource_id = @resource_id
|
||||
WHEN @resource_id::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN resource_id = @resource_id
|
||||
ELSE true
|
||||
END
|
||||
-- Filter organization_id
|
||||
AND CASE
|
||||
WHEN @organization_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
|
||||
audit_logs.organization_id = @organization_id
|
||||
-- Filter organization_id
|
||||
AND CASE
|
||||
WHEN @organization_id::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN audit_logs.organization_id = @organization_id
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by resource_target
|
||||
AND CASE
|
||||
WHEN @resource_target :: text != '' THEN
|
||||
resource_target = @resource_target
|
||||
WHEN @resource_target::text != '' THEN resource_target = @resource_target
|
||||
ELSE true
|
||||
END
|
||||
-- Filter action
|
||||
AND CASE
|
||||
WHEN @action :: text != '' THEN
|
||||
action = @action :: audit_action
|
||||
WHEN @action::text != '' THEN action = @action::audit_action
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by user_id
|
||||
AND CASE
|
||||
WHEN @user_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
|
||||
user_id = @user_id
|
||||
WHEN @user_id::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN user_id = @user_id
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by username
|
||||
AND CASE
|
||||
WHEN @username :: text != '' THEN
|
||||
user_id = (SELECT id FROM users WHERE lower(username) = lower(@username) AND deleted = false)
|
||||
WHEN @username::text != '' THEN user_id = (
|
||||
SELECT id
|
||||
FROM users
|
||||
WHERE lower(username) = lower(@username)
|
||||
AND deleted = false
|
||||
)
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by user_email
|
||||
AND CASE
|
||||
WHEN @email :: text != '' THEN
|
||||
users.email = @email
|
||||
WHEN @email::text != '' THEN users.email = @email
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by date_from
|
||||
AND CASE
|
||||
WHEN @date_from :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN
|
||||
"time" >= @date_from
|
||||
WHEN @date_from::timestamp with time zone != '0001-01-01 00:00:00Z' THEN "time" >= @date_from
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by date_to
|
||||
AND CASE
|
||||
WHEN @date_to :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN
|
||||
"time" <= @date_to
|
||||
WHEN @date_to::timestamp with time zone != '0001-01-01 00:00:00Z' THEN "time" <= @date_to
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by build_reason
|
||||
AND CASE
|
||||
WHEN @build_reason::text != '' THEN COALESCE(wb_build.reason::text, wb_workspace.reason::text) = @build_reason
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by build_reason
|
||||
AND CASE
|
||||
WHEN @build_reason::text != '' THEN
|
||||
workspace_builds.reason::text = @build_reason
|
||||
ELSE true
|
||||
END
|
||||
-- Filter request_id
|
||||
AND CASE
|
||||
WHEN @request_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
|
||||
audit_logs.request_id = @request_id
|
||||
WHEN @request_id::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN audit_logs.request_id = @request_id
|
||||
ELSE true
|
||||
END
|
||||
|
||||
-- Authorize Filter clause will be injected below in GetAuthorizedAuditLogsOffset
|
||||
-- @authorize_filter
|
||||
ORDER BY
|
||||
"time" DESC
|
||||
LIMIT
|
||||
-- a limit of 0 means "no limit". The audit log table is unbounded
|
||||
ORDER BY "time" DESC
|
||||
LIMIT -- a limit of 0 means "no limit". The audit log table is unbounded
|
||||
-- in size, and is expected to be quite large. Implement a default
|
||||
-- limit of 100 to prevent accidental excessively large queries.
|
||||
COALESCE(NULLIF(@limit_opt :: int, 0), 100)
|
||||
OFFSET
|
||||
@offset_opt;
|
||||
COALESCE(NULLIF(@limit_opt::int, 0), 100) OFFSET @offset_opt;
|
||||
|
||||
-- name: InsertAuditLog :one
|
||||
INSERT INTO
|
||||
audit_logs (
|
||||
id,
|
||||
"time",
|
||||
user_id,
|
||||
organization_id,
|
||||
ip,
|
||||
user_agent,
|
||||
resource_type,
|
||||
resource_id,
|
||||
resource_target,
|
||||
action,
|
||||
diff,
|
||||
status_code,
|
||||
additional_fields,
|
||||
request_id,
|
||||
resource_icon
|
||||
)
|
||||
VALUES
|
||||
($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15) RETURNING *;
|
||||
INSERT INTO audit_logs (
|
||||
id,
|
||||
"time",
|
||||
user_id,
|
||||
organization_id,
|
||||
ip,
|
||||
user_agent,
|
||||
resource_type,
|
||||
resource_id,
|
||||
resource_target,
|
||||
action,
|
||||
diff,
|
||||
status_code,
|
||||
additional_fields,
|
||||
request_id,
|
||||
resource_icon
|
||||
)
|
||||
VALUES (
|
||||
$1,
|
||||
$2,
|
||||
$3,
|
||||
$4,
|
||||
$5,
|
||||
$6,
|
||||
$7,
|
||||
$8,
|
||||
$9,
|
||||
$10,
|
||||
$11,
|
||||
$12,
|
||||
$13,
|
||||
$14,
|
||||
$15
|
||||
)
|
||||
RETURNING *;
|
||||
|
||||
-- name: CountAuditLogs :one
|
||||
SELECT COUNT(*)
|
||||
FROM audit_logs
|
||||
LEFT JOIN users ON audit_logs.user_id = users.id
|
||||
LEFT JOIN organizations ON audit_logs.organization_id = organizations.id
|
||||
-- First join on workspaces to get the initial workspace create
|
||||
-- to workspace build 1 id. This is because the first create is
|
||||
-- is a different audit log than subsequent starts.
|
||||
LEFT JOIN workspaces ON audit_logs.resource_type = 'workspace'
|
||||
AND audit_logs.resource_id = workspaces.id
|
||||
-- Get the reason from the build if the resource type
|
||||
-- is a workspace_build
|
||||
LEFT JOIN workspace_builds wb_build ON audit_logs.resource_type = 'workspace_build'
|
||||
AND audit_logs.resource_id = wb_build.id
|
||||
-- Get the reason from the build #1 if this is the first
|
||||
-- workspace create.
|
||||
LEFT JOIN workspace_builds wb_workspace ON audit_logs.resource_type = 'workspace'
|
||||
AND audit_logs.action = 'create'
|
||||
AND workspaces.id = wb_workspace.workspace_id
|
||||
AND wb_workspace.build_number = 1
|
||||
WHERE
|
||||
-- Filter resource_type
|
||||
CASE
|
||||
WHEN @resource_type::text != '' THEN resource_type = @resource_type::resource_type
|
||||
ELSE true
|
||||
END
|
||||
-- Filter resource_id
|
||||
AND CASE
|
||||
WHEN @resource_id::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN resource_id = @resource_id
|
||||
ELSE true
|
||||
END
|
||||
-- Filter organization_id
|
||||
AND CASE
|
||||
WHEN @organization_id::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN audit_logs.organization_id = @organization_id
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by resource_target
|
||||
AND CASE
|
||||
WHEN @resource_target::text != '' THEN resource_target = @resource_target
|
||||
ELSE true
|
||||
END
|
||||
-- Filter action
|
||||
AND CASE
|
||||
WHEN @action::text != '' THEN action = @action::audit_action
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by user_id
|
||||
AND CASE
|
||||
WHEN @user_id::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN user_id = @user_id
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by username
|
||||
AND CASE
|
||||
WHEN @username::text != '' THEN user_id = (
|
||||
SELECT id
|
||||
FROM users
|
||||
WHERE lower(username) = lower(@username)
|
||||
AND deleted = false
|
||||
)
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by user_email
|
||||
AND CASE
|
||||
WHEN @email::text != '' THEN users.email = @email
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by date_from
|
||||
AND CASE
|
||||
WHEN @date_from::timestamp with time zone != '0001-01-01 00:00:00Z' THEN "time" >= @date_from
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by date_to
|
||||
AND CASE
|
||||
WHEN @date_to::timestamp with time zone != '0001-01-01 00:00:00Z' THEN "time" <= @date_to
|
||||
ELSE true
|
||||
END
|
||||
-- Filter by build_reason
|
||||
AND CASE
|
||||
WHEN @build_reason::text != '' THEN COALESCE(wb_build.reason::text, wb_workspace.reason::text) = @build_reason
|
||||
ELSE true
|
||||
END
|
||||
-- Filter request_id
|
||||
AND CASE
|
||||
WHEN @request_id::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN audit_logs.request_id = @request_id
|
||||
ELSE true
|
||||
END
|
||||
-- Authorize Filter clause will be injected below in CountAuthorizedAuditLogs
|
||||
-- @authorize_filter
|
||||
;
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
-- name: InsertChat :one
|
||||
INSERT INTO chats (owner_id, created_at, updated_at, title)
|
||||
VALUES ($1, $2, $3, $4)
|
||||
RETURNING *;
|
||||
|
||||
-- name: UpdateChatByID :exec
|
||||
UPDATE chats
|
||||
SET title = $2, updated_at = $3
|
||||
WHERE id = $1;
|
||||
|
||||
-- name: GetChatsByOwnerID :many
|
||||
SELECT * FROM chats
|
||||
WHERE owner_id = $1
|
||||
ORDER BY created_at DESC;
|
||||
|
||||
-- name: GetChatByID :one
|
||||
SELECT * FROM chats
|
||||
WHERE id = $1;
|
||||
|
||||
-- name: InsertChatMessages :many
|
||||
INSERT INTO chat_messages (chat_id, created_at, model, provider, content)
|
||||
SELECT
|
||||
@chat_id :: uuid AS chat_id,
|
||||
@created_at :: timestamptz AS created_at,
|
||||
@model :: VARCHAR(127) AS model,
|
||||
@provider :: VARCHAR(127) AS provider,
|
||||
jsonb_array_elements(@content :: jsonb) AS content
|
||||
RETURNING chat_messages.*;
|
||||
|
||||
-- name: GetChatMessagesByChatID :many
|
||||
SELECT * FROM chat_messages
|
||||
WHERE chat_id = $1
|
||||
ORDER BY created_at ASC;
|
||||
|
||||
-- name: DeleteChat :exec
|
||||
DELETE FROM chats WHERE id = $1;
|
||||
@@ -758,7 +758,12 @@ WHERE
|
||||
provisioner_jobs.completed_at IS NOT NULL AND
|
||||
(@now :: timestamptz) - provisioner_jobs.completed_at > (INTERVAL '1 millisecond' * (templates.failure_ttl / 1000000))
|
||||
)
|
||||
) AND workspaces.deleted = 'false';
|
||||
)
|
||||
AND workspaces.deleted = 'false'
|
||||
-- Prebuilt workspaces (identified by having the prebuilds system user as owner_id)
|
||||
-- should not be considered by the lifecycle executor, as they are handled by the
|
||||
-- prebuilds reconciliation loop.
|
||||
AND workspaces.owner_id != 'c42fdf75-3097-471c-8c33-fb52454d81c0'::UUID;
|
||||
|
||||
-- name: UpdateWorkspaceDormantDeletingAt :one
|
||||
UPDATE
|
||||
|
||||
@@ -9,8 +9,6 @@ const (
|
||||
UniqueAgentStatsPkey UniqueConstraint = "agent_stats_pkey" // ALTER TABLE ONLY workspace_agent_stats ADD CONSTRAINT agent_stats_pkey PRIMARY KEY (id);
|
||||
UniqueAPIKeysPkey UniqueConstraint = "api_keys_pkey" // ALTER TABLE ONLY api_keys ADD CONSTRAINT api_keys_pkey PRIMARY KEY (id);
|
||||
UniqueAuditLogsPkey UniqueConstraint = "audit_logs_pkey" // ALTER TABLE ONLY audit_logs ADD CONSTRAINT audit_logs_pkey PRIMARY KEY (id);
|
||||
UniqueChatMessagesPkey UniqueConstraint = "chat_messages_pkey" // ALTER TABLE ONLY chat_messages ADD CONSTRAINT chat_messages_pkey PRIMARY KEY (id);
|
||||
UniqueChatsPkey UniqueConstraint = "chats_pkey" // ALTER TABLE ONLY chats ADD CONSTRAINT chats_pkey PRIMARY KEY (id);
|
||||
UniqueCryptoKeysPkey UniqueConstraint = "crypto_keys_pkey" // ALTER TABLE ONLY crypto_keys ADD CONSTRAINT crypto_keys_pkey PRIMARY KEY (feature, sequence);
|
||||
UniqueCustomRolesUniqueKey UniqueConstraint = "custom_roles_unique_key" // ALTER TABLE ONLY custom_roles ADD CONSTRAINT custom_roles_unique_key UNIQUE (name, organization_id);
|
||||
UniqueDbcryptKeysActiveKeyDigestKey UniqueConstraint = "dbcrypt_keys_active_key_digest_key" // ALTER TABLE ONLY dbcrypt_keys ADD CONSTRAINT dbcrypt_keys_active_key_digest_key UNIQUE (active_key_digest);
|
||||
|
||||
@@ -1,11 +1,8 @@
|
||||
package coderd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/kylecarbs/aisdk-go"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
"github.com/coder/coder/v2/coderd/rbac"
|
||||
"github.com/coder/coder/v2/coderd/rbac/policy"
|
||||
@@ -87,25 +84,3 @@ func buildInfoHandler(resp codersdk.BuildInfoResponse) http.HandlerFunc {
|
||||
func (api *API) sshConfig(rw http.ResponseWriter, r *http.Request) {
|
||||
httpapi.Write(r.Context(), rw, http.StatusOK, api.SSHConfig)
|
||||
}
|
||||
|
||||
type LanguageModel struct {
|
||||
codersdk.LanguageModel
|
||||
Provider func(ctx context.Context, messages []aisdk.Message, thinking bool) (aisdk.DataStream, error)
|
||||
}
|
||||
|
||||
// @Summary Get language models
|
||||
// @ID get-language-models
|
||||
// @Security CoderSessionToken
|
||||
// @Produce json
|
||||
// @Tags General
|
||||
// @Success 200 {object} codersdk.LanguageModelConfig
|
||||
// @Router /deployment/llms [get]
|
||||
func (api *API) deploymentLLMs(rw http.ResponseWriter, r *http.Request) {
|
||||
models := make([]codersdk.LanguageModel, 0, len(api.LanguageModels))
|
||||
for _, model := range api.LanguageModels {
|
||||
models = append(models, model.LanguageModel)
|
||||
}
|
||||
httpapi.Write(r.Context(), rw, http.StatusOK, codersdk.LanguageModelConfig{
|
||||
Models: models,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -0,0 +1,2 @@
|
||||
//go:generate mockgen -destination ./rendermock.go -package rendermock github.com/coder/coder/v2/coderd/dynamicparameters Renderer
|
||||
package rendermock
|
||||
@@ -0,0 +1,71 @@
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: github.com/coder/coder/v2/coderd/dynamicparameters (interfaces: Renderer)
|
||||
//
|
||||
// Generated by this command:
|
||||
//
|
||||
// mockgen -destination ./rendermock.go -package rendermock github.com/coder/coder/v2/coderd/dynamicparameters Renderer
|
||||
//
|
||||
|
||||
// Package rendermock is a generated GoMock package.
|
||||
package rendermock
|
||||
|
||||
import (
|
||||
context "context"
|
||||
reflect "reflect"
|
||||
|
||||
preview "github.com/coder/preview"
|
||||
uuid "github.com/google/uuid"
|
||||
hcl "github.com/hashicorp/hcl/v2"
|
||||
gomock "go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
// MockRenderer is a mock of Renderer interface.
|
||||
type MockRenderer struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockRendererMockRecorder
|
||||
isgomock struct{}
|
||||
}
|
||||
|
||||
// MockRendererMockRecorder is the mock recorder for MockRenderer.
|
||||
type MockRendererMockRecorder struct {
|
||||
mock *MockRenderer
|
||||
}
|
||||
|
||||
// NewMockRenderer creates a new mock instance.
|
||||
func NewMockRenderer(ctrl *gomock.Controller) *MockRenderer {
|
||||
mock := &MockRenderer{ctrl: ctrl}
|
||||
mock.recorder = &MockRendererMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockRenderer) EXPECT() *MockRendererMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// Close mocks base method.
|
||||
func (m *MockRenderer) Close() {
|
||||
m.ctrl.T.Helper()
|
||||
m.ctrl.Call(m, "Close")
|
||||
}
|
||||
|
||||
// Close indicates an expected call of Close.
|
||||
func (mr *MockRendererMockRecorder) Close() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockRenderer)(nil).Close))
|
||||
}
|
||||
|
||||
// Render mocks base method.
|
||||
func (m *MockRenderer) Render(ctx context.Context, ownerID uuid.UUID, values map[string]string) (*preview.Output, hcl.Diagnostics) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Render", ctx, ownerID, values)
|
||||
ret0, _ := ret[0].(*preview.Output)
|
||||
ret1, _ := ret[1].(hcl.Diagnostics)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// Render indicates an expected call of Render.
|
||||
func (mr *MockRendererMockRecorder) Render(ctx, ownerID, values any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Render", reflect.TypeOf((*MockRenderer)(nil).Render), ctx, ownerID, values)
|
||||
}
|
||||
@@ -169,9 +169,15 @@ func ResolveParameters(
|
||||
parameterNames[parameter.Name] = struct{}{}
|
||||
|
||||
if !firstBuild && !parameter.Mutable {
|
||||
originalValue, ok := originalValues[parameter.Name]
|
||||
// Immutable parameters should not be changed after the first build.
|
||||
// They can match the original value though!
|
||||
if parameter.Value.AsString() != originalValues[parameter.Name].Value {
|
||||
// If the value matches the original value, that is fine.
|
||||
//
|
||||
// If the original value is not set, that means this is a new parameter. New
|
||||
// immutable parameters are allowed. This is an opinionated choice to prevent
|
||||
// workspaces failing to update or delete. Ideally we would block this, as
|
||||
// immutable parameters should only be able to be set at creation time.
|
||||
if ok && parameter.Value.AsString() != originalValue.Value {
|
||||
var src *hcl.Range
|
||||
if parameter.Source != nil {
|
||||
src = ¶meter.Source.HCLBlock().TypeRange
|
||||
|
||||
@@ -0,0 +1,59 @@
|
||||
package dynamicparameters_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/mock/gomock"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/dynamicparameters"
|
||||
"github.com/coder/coder/v2/coderd/dynamicparameters/rendermock"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
"github.com/coder/preview"
|
||||
previewtypes "github.com/coder/preview/types"
|
||||
"github.com/coder/terraform-provider-coder/v2/provider"
|
||||
)
|
||||
|
||||
func TestResolveParameters(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("NewImmutable", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
render := rendermock.NewMockRenderer(ctrl)
|
||||
|
||||
// A single immutable parameter with no previous value.
|
||||
render.EXPECT().
|
||||
Render(gomock.Any(), gomock.Any(), gomock.Any()).
|
||||
AnyTimes().
|
||||
Return(&preview.Output{
|
||||
Parameters: []previewtypes.Parameter{
|
||||
{
|
||||
ParameterData: previewtypes.ParameterData{
|
||||
Name: "immutable",
|
||||
Type: previewtypes.ParameterTypeString,
|
||||
FormType: provider.ParameterFormTypeInput,
|
||||
Mutable: false,
|
||||
DefaultValue: previewtypes.StringLiteral("foo"),
|
||||
Required: true,
|
||||
},
|
||||
Value: previewtypes.StringLiteral("foo"),
|
||||
Diagnostics: nil,
|
||||
},
|
||||
},
|
||||
}, nil)
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
values, err := dynamicparameters.ResolveParameters(ctx, uuid.New(), render, false,
|
||||
[]database.WorkspaceBuildParameter{}, // No previous values
|
||||
[]codersdk.WorkspaceBuildParameter{}, // No new build values
|
||||
[]database.TemplateVersionPresetParameter{}, // No preset values
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, map[string]string{"immutable": "foo"}, values)
|
||||
})
|
||||
}
|
||||
@@ -1,59 +0,0 @@
|
||||
package httpmw
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/google/uuid"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/httpapi"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
type chatContextKey struct{}
|
||||
|
||||
func ChatParam(r *http.Request) database.Chat {
|
||||
chat, ok := r.Context().Value(chatContextKey{}).(database.Chat)
|
||||
if !ok {
|
||||
panic("developer error: chat param middleware not provided")
|
||||
}
|
||||
return chat
|
||||
}
|
||||
|
||||
func ExtractChatParam(db database.Store) func(http.Handler) http.Handler {
|
||||
return func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
arg := chi.URLParam(r, "chat")
|
||||
if arg == "" {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "\"chat\" must be provided.",
|
||||
})
|
||||
return
|
||||
}
|
||||
chatID, err := uuid.Parse(arg)
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Invalid chat ID.",
|
||||
})
|
||||
return
|
||||
}
|
||||
chat, err := db.GetChatByID(ctx, chatID)
|
||||
if httpapi.Is404Error(err) {
|
||||
httpapi.ResourceNotFound(rw)
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
|
||||
Message: "Failed to get chat.",
|
||||
Detail: err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
ctx = context.WithValue(ctx, chatContextKey{}, chat)
|
||||
next.ServeHTTP(rw, r.WithContext(ctx))
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,150 +0,0 @@
|
||||
package httpmw_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbgen"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtestutil"
|
||||
"github.com/coder/coder/v2/coderd/database/dbtime"
|
||||
"github.com/coder/coder/v2/coderd/httpmw"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
func TestExtractChat(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
setupAuthentication := func(db database.Store) (*http.Request, database.User) {
|
||||
r := httptest.NewRequest("GET", "/", nil)
|
||||
|
||||
user := dbgen.User(t, db, database.User{
|
||||
ID: uuid.New(),
|
||||
})
|
||||
_, token := dbgen.APIKey(t, db, database.APIKey{
|
||||
UserID: user.ID,
|
||||
})
|
||||
r.Header.Set(codersdk.SessionTokenHeader, token)
|
||||
r = r.WithContext(context.WithValue(r.Context(), chi.RouteCtxKey, chi.NewRouteContext()))
|
||||
return r, user
|
||||
}
|
||||
|
||||
t.Run("None", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
var (
|
||||
db, _ = dbtestutil.NewDB(t)
|
||||
rw = httptest.NewRecorder()
|
||||
r, _ = setupAuthentication(db)
|
||||
rtr = chi.NewRouter()
|
||||
)
|
||||
rtr.Use(
|
||||
httpmw.ExtractAPIKeyMW(httpmw.ExtractAPIKeyConfig{
|
||||
DB: db,
|
||||
RedirectToLogin: false,
|
||||
}),
|
||||
httpmw.ExtractChatParam(db),
|
||||
)
|
||||
rtr.Get("/", nil)
|
||||
rtr.ServeHTTP(rw, r)
|
||||
res := rw.Result()
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusBadRequest, res.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("InvalidUUID", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
var (
|
||||
db, _ = dbtestutil.NewDB(t)
|
||||
rw = httptest.NewRecorder()
|
||||
r, _ = setupAuthentication(db)
|
||||
rtr = chi.NewRouter()
|
||||
)
|
||||
chi.RouteContext(r.Context()).URLParams.Add("chat", "not-a-uuid")
|
||||
rtr.Use(
|
||||
httpmw.ExtractAPIKeyMW(httpmw.ExtractAPIKeyConfig{
|
||||
DB: db,
|
||||
RedirectToLogin: false,
|
||||
}),
|
||||
httpmw.ExtractChatParam(db),
|
||||
)
|
||||
rtr.Get("/", nil)
|
||||
rtr.ServeHTTP(rw, r)
|
||||
res := rw.Result()
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusBadRequest, res.StatusCode) // Changed from NotFound in org test to BadRequest as per chat.go
|
||||
})
|
||||
|
||||
t.Run("NotFound", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
var (
|
||||
db, _ = dbtestutil.NewDB(t)
|
||||
rw = httptest.NewRecorder()
|
||||
r, _ = setupAuthentication(db)
|
||||
rtr = chi.NewRouter()
|
||||
)
|
||||
chi.RouteContext(r.Context()).URLParams.Add("chat", uuid.NewString())
|
||||
rtr.Use(
|
||||
httpmw.ExtractAPIKeyMW(httpmw.ExtractAPIKeyConfig{
|
||||
DB: db,
|
||||
RedirectToLogin: false,
|
||||
}),
|
||||
httpmw.ExtractChatParam(db),
|
||||
)
|
||||
rtr.Get("/", nil)
|
||||
rtr.ServeHTTP(rw, r)
|
||||
res := rw.Result()
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusNotFound, res.StatusCode)
|
||||
})
|
||||
|
||||
t.Run("Success", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
var (
|
||||
db, _ = dbtestutil.NewDB(t)
|
||||
rw = httptest.NewRecorder()
|
||||
r, user = setupAuthentication(db)
|
||||
rtr = chi.NewRouter()
|
||||
)
|
||||
|
||||
// Create a test chat
|
||||
testChat := dbgen.Chat(t, db, database.Chat{
|
||||
ID: uuid.New(),
|
||||
OwnerID: user.ID,
|
||||
CreatedAt: dbtime.Now(),
|
||||
UpdatedAt: dbtime.Now(),
|
||||
Title: "Test Chat",
|
||||
})
|
||||
|
||||
rtr.Use(
|
||||
httpmw.ExtractAPIKeyMW(httpmw.ExtractAPIKeyConfig{
|
||||
DB: db,
|
||||
RedirectToLogin: false,
|
||||
}),
|
||||
httpmw.ExtractChatParam(db),
|
||||
)
|
||||
rtr.Get("/", func(rw http.ResponseWriter, r *http.Request) {
|
||||
chat := httpmw.ChatParam(r)
|
||||
require.NotZero(t, chat)
|
||||
assert.Equal(t, testChat.ID, chat.ID)
|
||||
assert.WithinDuration(t, testChat.CreatedAt, chat.CreatedAt, time.Second)
|
||||
assert.WithinDuration(t, testChat.UpdatedAt, chat.UpdatedAt, time.Second)
|
||||
assert.Equal(t, testChat.Title, chat.Title)
|
||||
rw.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
// Try by ID
|
||||
chi.RouteContext(r.Context()).URLParams.Add("chat", testChat.ID.String())
|
||||
rtr.ServeHTTP(rw, r)
|
||||
res := rw.Result()
|
||||
defer res.Body.Close()
|
||||
require.Equal(t, http.StatusOK, res.StatusCode, "by id")
|
||||
})
|
||||
}
|
||||
@@ -2708,15 +2708,23 @@ func InsertWorkspaceResource(ctx context.Context, db database.Store, jobID uuid.
|
||||
return nil
|
||||
}
|
||||
|
||||
func workspaceSessionTokenName(workspace database.Workspace) string {
|
||||
return fmt.Sprintf("%s_%s_session_token", workspace.OwnerID, workspace.ID)
|
||||
func WorkspaceSessionTokenName(ownerID, workspaceID uuid.UUID) string {
|
||||
return fmt.Sprintf("%s_%s_session_token", ownerID, workspaceID)
|
||||
}
|
||||
|
||||
func (s *server) regenerateSessionToken(ctx context.Context, user database.User, workspace database.Workspace) (string, error) {
|
||||
// NOTE(Cian): Once a workspace is claimed, there's no reason for the session token to be valid any longer.
|
||||
// Not generating any session token at all for a system user may unintentionally break existing templates,
|
||||
// which we want to avoid. If there's no session token for the workspace belonging to the prebuilds user,
|
||||
// then there's nothing for us to worry about here.
|
||||
// TODO(Cian): Update this to handle _all_ system users. At the time of writing, only one system user exists.
|
||||
if err := deleteSessionTokenForUserAndWorkspace(ctx, s.Database, database.PrebuildsSystemUserID, workspace.ID); err != nil && !errors.Is(err, sql.ErrNoRows) {
|
||||
s.Logger.Error(ctx, "failed to delete prebuilds session token", slog.Error(err), slog.F("workspace_id", workspace.ID))
|
||||
}
|
||||
newkey, sessionToken, err := apikey.Generate(apikey.CreateParams{
|
||||
UserID: user.ID,
|
||||
LoginType: user.LoginType,
|
||||
TokenName: workspaceSessionTokenName(workspace),
|
||||
TokenName: WorkspaceSessionTokenName(workspace.OwnerID, workspace.ID),
|
||||
DefaultLifetime: s.DeploymentValues.Sessions.DefaultTokenDuration.Value(),
|
||||
LifetimeSeconds: int64(s.DeploymentValues.Sessions.MaximumTokenDuration.Value().Seconds()),
|
||||
})
|
||||
@@ -2744,10 +2752,14 @@ func (s *server) regenerateSessionToken(ctx context.Context, user database.User,
|
||||
}
|
||||
|
||||
func deleteSessionToken(ctx context.Context, db database.Store, workspace database.Workspace) error {
|
||||
return deleteSessionTokenForUserAndWorkspace(ctx, db, workspace.OwnerID, workspace.ID)
|
||||
}
|
||||
|
||||
func deleteSessionTokenForUserAndWorkspace(ctx context.Context, db database.Store, userID, workspaceID uuid.UUID) error {
|
||||
err := db.InTx(func(tx database.Store) error {
|
||||
key, err := tx.GetAPIKeyByName(ctx, database.GetAPIKeyByNameParams{
|
||||
UserID: workspace.OwnerID,
|
||||
TokenName: workspaceSessionTokenName(workspace),
|
||||
UserID: userID,
|
||||
TokenName: WorkspaceSessionTokenName(userID, workspaceID),
|
||||
})
|
||||
if err == nil {
|
||||
err = tx.DeleteAPIKeyByID(ctx, key.ID)
|
||||
|
||||
@@ -3576,6 +3576,70 @@ func TestNotifications(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_ExpirePrebuildsSessionToken(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Given: a prebuilt workspace where an API key was previously created for the prebuilds user.
|
||||
var (
|
||||
ctx = testutil.Context(t, testutil.WaitShort)
|
||||
srv, db, ps, pd = setup(t, false, nil)
|
||||
user = dbgen.User(t, db, database.User{})
|
||||
template = dbgen.Template(t, db, database.Template{
|
||||
OrganizationID: pd.OrganizationID,
|
||||
CreatedBy: user.ID,
|
||||
})
|
||||
version = dbgen.TemplateVersion(t, db, database.TemplateVersion{
|
||||
TemplateID: uuid.NullUUID{UUID: template.ID, Valid: true},
|
||||
OrganizationID: pd.OrganizationID,
|
||||
CreatedBy: user.ID,
|
||||
})
|
||||
workspace = dbgen.Workspace(t, db, database.WorkspaceTable{
|
||||
OrganizationID: pd.OrganizationID,
|
||||
TemplateID: template.ID,
|
||||
OwnerID: database.PrebuildsSystemUserID,
|
||||
})
|
||||
workspaceBuildID = uuid.New()
|
||||
buildJob = dbgen.ProvisionerJob(t, db, ps, database.ProvisionerJob{
|
||||
OrganizationID: pd.OrganizationID,
|
||||
FileID: dbgen.File(t, db, database.File{CreatedBy: user.ID}).ID,
|
||||
Type: database.ProvisionerJobTypeWorkspaceBuild,
|
||||
Input: must(json.Marshal(provisionerdserver.WorkspaceProvisionJob{
|
||||
WorkspaceBuildID: workspaceBuildID,
|
||||
})),
|
||||
InitiatorID: database.PrebuildsSystemUserID,
|
||||
Tags: pd.Tags,
|
||||
})
|
||||
_ = dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
|
||||
ID: workspaceBuildID,
|
||||
WorkspaceID: workspace.ID,
|
||||
TemplateVersionID: version.ID,
|
||||
JobID: buildJob.ID,
|
||||
Transition: database.WorkspaceTransitionStart,
|
||||
InitiatorID: database.PrebuildsSystemUserID,
|
||||
})
|
||||
existingKey, _ = dbgen.APIKey(t, db, database.APIKey{
|
||||
UserID: database.PrebuildsSystemUserID,
|
||||
TokenName: provisionerdserver.WorkspaceSessionTokenName(database.PrebuildsSystemUserID, workspace.ID),
|
||||
})
|
||||
)
|
||||
|
||||
// When: the prebuild claim job is acquired
|
||||
fs := newFakeStream(ctx)
|
||||
err := srv.AcquireJobWithCancel(fs)
|
||||
require.NoError(t, err)
|
||||
job, err := fs.waitForJob()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, job)
|
||||
workspaceBuildJob := job.Type.(*proto.AcquiredJob_WorkspaceBuild_).WorkspaceBuild
|
||||
require.NotNil(t, workspaceBuildJob.Metadata)
|
||||
|
||||
// Assert test invariant: we acquired the expected build job
|
||||
require.Equal(t, workspaceBuildID.String(), workspaceBuildJob.WorkspaceBuildId)
|
||||
// Then: The session token should be deleted
|
||||
_, err = db.GetAPIKeyByID(ctx, existingKey.ID)
|
||||
require.ErrorIs(t, err, sql.ErrNoRows, "api key for prebuilds user should be deleted")
|
||||
}
|
||||
|
||||
type overrides struct {
|
||||
ctx context.Context
|
||||
deploymentValues *codersdk.DeploymentValues
|
||||
|
||||
@@ -54,16 +54,6 @@ var (
|
||||
Type: "audit_log",
|
||||
}
|
||||
|
||||
// ResourceChat
|
||||
// Valid Actions
|
||||
// - "ActionCreate" :: create a chat
|
||||
// - "ActionDelete" :: delete a chat
|
||||
// - "ActionRead" :: read a chat
|
||||
// - "ActionUpdate" :: update a chat
|
||||
ResourceChat = Object{
|
||||
Type: "chat",
|
||||
}
|
||||
|
||||
// ResourceCryptoKey
|
||||
// Valid Actions
|
||||
// - "ActionCreate" :: create crypto keys
|
||||
@@ -378,7 +368,6 @@ func AllResources() []Objecter {
|
||||
ResourceAssignOrgRole,
|
||||
ResourceAssignRole,
|
||||
ResourceAuditLog,
|
||||
ResourceChat,
|
||||
ResourceCryptoKey,
|
||||
ResourceDebugInfo,
|
||||
ResourceDeploymentConfig,
|
||||
|
||||
@@ -124,14 +124,6 @@ var RBACPermissions = map[string]PermissionDefinition{
|
||||
ActionRead: actDef("read and use a workspace proxy"),
|
||||
},
|
||||
},
|
||||
"chat": {
|
||||
Actions: map[Action]ActionDefinition{
|
||||
ActionCreate: actDef("create a chat"),
|
||||
ActionRead: actDef("read a chat"),
|
||||
ActionDelete: actDef("delete a chat"),
|
||||
ActionUpdate: actDef("update a chat"),
|
||||
},
|
||||
},
|
||||
"license": {
|
||||
Actions: map[Action]ActionDefinition{
|
||||
ActionCreate: actDef("create a license"),
|
||||
|
||||
@@ -305,8 +305,6 @@ func ReloadBuiltinRoles(opts *RoleOptions) {
|
||||
ResourceOrganizationMember.Type: {policy.ActionRead},
|
||||
// Users can create provisioner daemons scoped to themselves.
|
||||
ResourceProvisionerDaemon.Type: {policy.ActionRead, policy.ActionCreate, policy.ActionRead, policy.ActionUpdate},
|
||||
// Users can create, read, update, and delete their own agentic chat messages.
|
||||
ResourceChat.Type: {policy.ActionCreate, policy.ActionRead, policy.ActionUpdate, policy.ActionDelete},
|
||||
})...,
|
||||
),
|
||||
}.withCachedRegoValue()
|
||||
|
||||
@@ -849,37 +849,6 @@ func TestRolePermissions(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
// Members may read their own chats.
|
||||
{
|
||||
Name: "CreateReadUpdateDeleteMyChats",
|
||||
Actions: []policy.Action{policy.ActionCreate, policy.ActionRead, policy.ActionUpdate, policy.ActionDelete},
|
||||
Resource: rbac.ResourceChat.WithOwner(currentUser.String()),
|
||||
AuthorizeMap: map[bool][]hasAuthSubjects{
|
||||
true: {memberMe, orgMemberMe, owner},
|
||||
false: {
|
||||
userAdmin, orgUserAdmin, templateAdmin,
|
||||
orgAuditor, orgTemplateAdmin,
|
||||
otherOrgMember, otherOrgAuditor, otherOrgUserAdmin, otherOrgTemplateAdmin,
|
||||
orgAdmin, otherOrgAdmin,
|
||||
},
|
||||
},
|
||||
},
|
||||
// Only owners can create, read, update, and delete other users' chats.
|
||||
{
|
||||
Name: "CreateReadUpdateDeleteOtherUserChats",
|
||||
Actions: []policy.Action{policy.ActionCreate, policy.ActionRead, policy.ActionUpdate, policy.ActionDelete},
|
||||
Resource: rbac.ResourceChat.WithOwner(uuid.NewString()), // some other user
|
||||
AuthorizeMap: map[bool][]hasAuthSubjects{
|
||||
true: {owner},
|
||||
false: {
|
||||
memberMe, orgMemberMe,
|
||||
userAdmin, orgUserAdmin, templateAdmin,
|
||||
orgAuditor, orgTemplateAdmin,
|
||||
otherOrgMember, otherOrgAuditor, otherOrgUserAdmin, otherOrgTemplateAdmin,
|
||||
orgAdmin, otherOrgAdmin,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// We expect every permission to be tested above.
|
||||
|
||||
@@ -33,6 +33,8 @@ func NextAutostart(at time.Time, wsSchedule string, templateSchedule TemplateSch
|
||||
return zonedTransition, allowed
|
||||
}
|
||||
|
||||
// NextAllowedAutostart returns the next valid autostart time after 'at', based on the workspace's
|
||||
// cron schedule and the template's allowed days. It searches up to 7 days ahead to find a match.
|
||||
func NextAllowedAutostart(at time.Time, wsSchedule string, templateSchedule TemplateScheduleOptions) (time.Time, error) {
|
||||
next := at
|
||||
|
||||
|
||||
@@ -33,7 +33,9 @@ import (
|
||||
// - resource_type: string (enum)
|
||||
// - action: string (enum)
|
||||
// - build_reason: string (enum)
|
||||
func AuditLogs(ctx context.Context, db database.Store, query string) (database.GetAuditLogsOffsetParams, []codersdk.ValidationError) {
|
||||
func AuditLogs(ctx context.Context, db database.Store, query string) (database.GetAuditLogsOffsetParams,
|
||||
database.CountAuditLogsParams, []codersdk.ValidationError,
|
||||
) {
|
||||
// Always lowercase for all searches.
|
||||
query = strings.ToLower(query)
|
||||
values, errors := searchTerms(query, func(term string, values url.Values) error {
|
||||
@@ -41,7 +43,8 @@ func AuditLogs(ctx context.Context, db database.Store, query string) (database.G
|
||||
return nil
|
||||
})
|
||||
if len(errors) > 0 {
|
||||
return database.GetAuditLogsOffsetParams{}, errors
|
||||
// nolint:exhaustruct // We don't need to initialize these structs because we return an error.
|
||||
return database.GetAuditLogsOffsetParams{}, database.CountAuditLogsParams{}, errors
|
||||
}
|
||||
|
||||
const dateLayout = "2006-01-02"
|
||||
@@ -63,8 +66,24 @@ func AuditLogs(ctx context.Context, db database.Store, query string) (database.G
|
||||
filter.DateTo = filter.DateTo.Add(23*time.Hour + 59*time.Minute + 59*time.Second)
|
||||
}
|
||||
|
||||
// Prepare the count filter, which uses the same parameters as the GetAuditLogsOffsetParams.
|
||||
// nolint:exhaustruct // UserID is not obtained from the query parameters.
|
||||
countFilter := database.CountAuditLogsParams{
|
||||
RequestID: filter.RequestID,
|
||||
ResourceID: filter.ResourceID,
|
||||
ResourceTarget: filter.ResourceTarget,
|
||||
Username: filter.Username,
|
||||
Email: filter.Email,
|
||||
DateFrom: filter.DateFrom,
|
||||
DateTo: filter.DateTo,
|
||||
OrganizationID: filter.OrganizationID,
|
||||
ResourceType: filter.ResourceType,
|
||||
Action: filter.Action,
|
||||
BuildReason: filter.BuildReason,
|
||||
}
|
||||
|
||||
parser.ErrorExcessParams(values)
|
||||
return filter, parser.Errors
|
||||
return filter, countFilter, parser.Errors
|
||||
}
|
||||
|
||||
func Users(query string) (database.GetUsersParams, []codersdk.ValidationError) {
|
||||
|
||||
@@ -343,6 +343,7 @@ func TestSearchAudit(t *testing.T) {
|
||||
Name string
|
||||
Query string
|
||||
Expected database.GetAuditLogsOffsetParams
|
||||
ExpectedCountParams database.CountAuditLogsParams
|
||||
ExpectedErrorContains string
|
||||
}{
|
||||
{
|
||||
@@ -372,6 +373,9 @@ func TestSearchAudit(t *testing.T) {
|
||||
Expected: database.GetAuditLogsOffsetParams{
|
||||
ResourceTarget: "foo",
|
||||
},
|
||||
ExpectedCountParams: database.CountAuditLogsParams{
|
||||
ResourceTarget: "foo",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "RequestID",
|
||||
@@ -386,7 +390,7 @@ func TestSearchAudit(t *testing.T) {
|
||||
// Do not use a real database, this is only used for an
|
||||
// organization lookup.
|
||||
db := dbmem.New()
|
||||
values, errs := searchquery.AuditLogs(context.Background(), db, c.Query)
|
||||
values, countValues, errs := searchquery.AuditLogs(context.Background(), db, c.Query)
|
||||
if c.ExpectedErrorContains != "" {
|
||||
require.True(t, len(errs) > 0, "expect some errors")
|
||||
var s strings.Builder
|
||||
@@ -397,6 +401,7 @@ func TestSearchAudit(t *testing.T) {
|
||||
} else {
|
||||
require.Len(t, errs, 0, "expected no error")
|
||||
require.Equal(t, c.Expected, values, "expected values")
|
||||
require.Equal(t, c.ExpectedCountParams, countValues, "expected count values")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -687,10 +687,6 @@ func (r *remoteReporter) createSnapshot() (*Snapshot, error) {
|
||||
return nil
|
||||
})
|
||||
eg.Go(func() error {
|
||||
if !r.options.Experiments.Enabled(codersdk.ExperimentWorkspacePrebuilds) {
|
||||
return nil
|
||||
}
|
||||
|
||||
metrics, err := r.options.Database.GetPrebuildMetrics(ctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get prebuild metrics: %w", err)
|
||||
|
||||
@@ -408,7 +408,6 @@ func TestPrebuiltWorkspacesTelemetry(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
experimentEnabled bool
|
||||
storeFn func(store database.Store) database.Store
|
||||
expectedSnapshotEntries int
|
||||
expectedCreated int
|
||||
@@ -416,8 +415,7 @@ func TestPrebuiltWorkspacesTelemetry(t *testing.T) {
|
||||
expectedClaimed int
|
||||
}{
|
||||
{
|
||||
name: "experiment enabled",
|
||||
experimentEnabled: true,
|
||||
name: "prebuilds enabled",
|
||||
storeFn: func(store database.Store) database.Store {
|
||||
return &mockDB{Store: store}
|
||||
},
|
||||
@@ -427,19 +425,11 @@ func TestPrebuiltWorkspacesTelemetry(t *testing.T) {
|
||||
expectedClaimed: 3,
|
||||
},
|
||||
{
|
||||
name: "experiment enabled, prebuilds not used",
|
||||
experimentEnabled: true,
|
||||
name: "prebuilds not used",
|
||||
storeFn: func(store database.Store) database.Store {
|
||||
return &emptyMockDB{Store: store}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "experiment disabled",
|
||||
experimentEnabled: false,
|
||||
storeFn: func(store database.Store) database.Store {
|
||||
return &mockDB{Store: store}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
@@ -448,11 +438,6 @@ func TestPrebuiltWorkspacesTelemetry(t *testing.T) {
|
||||
|
||||
deployment, snapshot := collectSnapshot(ctx, t, db, func(opts telemetry.Options) telemetry.Options {
|
||||
opts.Database = tc.storeFn(db)
|
||||
if tc.experimentEnabled {
|
||||
opts.Experiments = codersdk.Experiments{
|
||||
codersdk.ExperimentWorkspacePrebuilds,
|
||||
}
|
||||
}
|
||||
return opts
|
||||
})
|
||||
|
||||
|
||||
+4
-1
@@ -542,7 +542,10 @@ func (api *API) deleteUser(rw http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
workspaces, err := api.Database.GetWorkspaces(ctx, database.GetWorkspacesParams{
|
||||
// This query is ONLY done to get the workspace count, so we use a system
|
||||
// context to return ALL workspaces. Not just workspaces the user can view.
|
||||
// nolint:gocritic
|
||||
workspaces, err := api.Database.GetWorkspaces(dbauthz.AsSystemRestricted(ctx), database.GetWorkspacesParams{
|
||||
OwnerID: user.ID,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -378,6 +378,43 @@ func TestDeleteUser(t *testing.T) {
|
||||
require.ErrorAs(t, err, &apiErr, "should be a coderd error")
|
||||
require.Equal(t, http.StatusForbidden, apiErr.StatusCode(), "should be forbidden")
|
||||
})
|
||||
t.Run("CountCheckIncludesAllWorkspaces", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client, _ := coderdtest.NewWithProvisionerCloser(t, nil)
|
||||
firstUser := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
// Create a target user who will own a workspace
|
||||
targetUserClient, targetUser := coderdtest.CreateAnotherUser(t, client, firstUser.OrganizationID)
|
||||
|
||||
// Create a User Admin who should not have permission to see the target user's workspace
|
||||
userAdminClient, userAdmin := coderdtest.CreateAnotherUser(t, client, firstUser.OrganizationID)
|
||||
|
||||
// Grant User Admin role to the userAdmin
|
||||
userAdmin, err := client.UpdateUserRoles(context.Background(), userAdmin.ID.String(), codersdk.UpdateRoles{
|
||||
Roles: []string{rbac.RoleUserAdmin().String()},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a template and workspace owned by the target user
|
||||
version := coderdtest.CreateTemplateVersion(t, client, firstUser.OrganizationID, nil)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, firstUser.OrganizationID, version.ID)
|
||||
_ = coderdtest.CreateWorkspace(t, targetUserClient, template.ID)
|
||||
|
||||
workspaces, err := userAdminClient.Workspaces(context.Background(), codersdk.WorkspaceFilter{
|
||||
Owner: targetUser.Username,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, workspaces.Workspaces, 0)
|
||||
|
||||
// Attempt to delete the target user - this should fail because the
|
||||
// user has a workspace not visible to the deleting user.
|
||||
err = userAdminClient.DeleteUser(context.Background(), targetUser.ID)
|
||||
var apiErr *codersdk.Error
|
||||
require.ErrorAs(t, err, &apiErr)
|
||||
require.Equal(t, http.StatusExpectationFailed, apiErr.StatusCode())
|
||||
require.Contains(t, apiErr.Message, "has workspaces")
|
||||
})
|
||||
}
|
||||
|
||||
func TestNotifyUserStatusChanged(t *testing.T) {
|
||||
|
||||
@@ -905,19 +905,19 @@ func (api *API) workspaceAgentListContainers(rw http.ResponseWriter, r *http.Req
|
||||
// @Tags Agents
|
||||
// @Produce json
|
||||
// @Param workspaceagent path string true "Workspace agent ID" format(uuid)
|
||||
// @Param container path string true "Container ID or name"
|
||||
// @Param devcontainer path string true "Devcontainer ID"
|
||||
// @Success 202 {object} codersdk.Response
|
||||
// @Router /workspaceagents/{workspaceagent}/containers/devcontainers/container/{container}/recreate [post]
|
||||
// @Router /workspaceagents/{workspaceagent}/containers/devcontainers/{devcontainer}/recreate [post]
|
||||
func (api *API) workspaceAgentRecreateDevcontainer(rw http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
workspaceAgent := httpmw.WorkspaceAgentParam(r)
|
||||
|
||||
container := chi.URLParam(r, "container")
|
||||
if container == "" {
|
||||
devcontainer := chi.URLParam(r, "devcontainer")
|
||||
if devcontainer == "" {
|
||||
httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
|
||||
Message: "Container ID or name is required.",
|
||||
Message: "Devcontainer ID is required.",
|
||||
Validations: []codersdk.ValidationError{
|
||||
{Field: "container", Detail: "Container ID or name is required."},
|
||||
{Field: "devcontainer", Detail: "Devcontainer ID is required."},
|
||||
},
|
||||
})
|
||||
return
|
||||
@@ -961,7 +961,7 @@ func (api *API) workspaceAgentRecreateDevcontainer(rw http.ResponseWriter, r *ht
|
||||
}
|
||||
defer release()
|
||||
|
||||
m, err := agentConn.RecreateDevcontainer(ctx, container)
|
||||
m, err := agentConn.RecreateDevcontainer(ctx, devcontainer)
|
||||
if err != nil {
|
||||
if errors.Is(err, context.Canceled) {
|
||||
httpapi.Write(ctx, rw, http.StatusRequestTimeout, codersdk.Response{
|
||||
|
||||
@@ -1396,63 +1396,62 @@ func TestWorkspaceAgentRecreateDevcontainer(t *testing.T) {
|
||||
var (
|
||||
workspaceFolder = t.TempDir()
|
||||
configFile = filepath.Join(workspaceFolder, ".devcontainer", "devcontainer.json")
|
||||
dcLabels = map[string]string{
|
||||
agentcontainers.DevcontainerLocalFolderLabel: workspaceFolder,
|
||||
agentcontainers.DevcontainerConfigFileLabel: configFile,
|
||||
}
|
||||
devcontainerID = uuid.New()
|
||||
|
||||
// Create a container that would be associated with the devcontainer
|
||||
devContainer = codersdk.WorkspaceAgentContainer{
|
||||
ID: uuid.NewString(),
|
||||
CreatedAt: dbtime.Now(),
|
||||
FriendlyName: testutil.GetRandomName(t),
|
||||
Image: "busybox:latest",
|
||||
Labels: dcLabels,
|
||||
Running: true,
|
||||
Status: "running",
|
||||
Labels: map[string]string{
|
||||
agentcontainers.DevcontainerLocalFolderLabel: workspaceFolder,
|
||||
agentcontainers.DevcontainerConfigFileLabel: configFile,
|
||||
},
|
||||
Running: true,
|
||||
Status: "running",
|
||||
}
|
||||
plainContainer = codersdk.WorkspaceAgentContainer{
|
||||
ID: uuid.NewString(),
|
||||
CreatedAt: dbtime.Now(),
|
||||
FriendlyName: testutil.GetRandomName(t),
|
||||
Image: "busybox:latest",
|
||||
Labels: map[string]string{},
|
||||
Running: true,
|
||||
Status: "running",
|
||||
|
||||
devcontainer = codersdk.WorkspaceAgentDevcontainer{
|
||||
ID: devcontainerID,
|
||||
Name: "test-devcontainer",
|
||||
WorkspaceFolder: workspaceFolder,
|
||||
ConfigPath: configFile,
|
||||
Status: codersdk.WorkspaceAgentDevcontainerStatusRunning,
|
||||
Container: &devContainer,
|
||||
}
|
||||
)
|
||||
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
setupMock func(mccli *acmock.MockContainerCLI, mdccli *acmock.MockDevcontainerCLI) (status int)
|
||||
name string
|
||||
devcontainerID string
|
||||
setupDevcontainers []codersdk.WorkspaceAgentDevcontainer
|
||||
setupMock func(mccli *acmock.MockContainerCLI, mdccli *acmock.MockDevcontainerCLI) (status int)
|
||||
}{
|
||||
{
|
||||
name: "Recreate",
|
||||
name: "Recreate",
|
||||
devcontainerID: devcontainerID.String(),
|
||||
setupDevcontainers: []codersdk.WorkspaceAgentDevcontainer{devcontainer},
|
||||
setupMock: func(mccli *acmock.MockContainerCLI, mdccli *acmock.MockDevcontainerCLI) int {
|
||||
mccli.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{
|
||||
Containers: []codersdk.WorkspaceAgentContainer{devContainer},
|
||||
}, nil).AnyTimes()
|
||||
// DetectArchitecture always returns "<none>" for this test to disable agent injection.
|
||||
mccli.EXPECT().DetectArchitecture(gomock.Any(), devContainer.ID).Return("<none>", nil).AnyTimes()
|
||||
mdccli.EXPECT().ReadConfig(gomock.Any(), workspaceFolder, configFile, gomock.Any()).Return(agentcontainers.DevcontainerConfig{}, nil).Times(1)
|
||||
mdccli.EXPECT().ReadConfig(gomock.Any(), workspaceFolder, configFile, gomock.Any()).Return(agentcontainers.DevcontainerConfig{}, nil).AnyTimes()
|
||||
mdccli.EXPECT().Up(gomock.Any(), workspaceFolder, configFile, gomock.Any()).Return("someid", nil).Times(1)
|
||||
return 0
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Container does not exist",
|
||||
name: "Devcontainer does not exist",
|
||||
devcontainerID: uuid.NewString(),
|
||||
setupDevcontainers: nil,
|
||||
setupMock: func(mccli *acmock.MockContainerCLI, mdccli *acmock.MockDevcontainerCLI) int {
|
||||
mccli.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{}, nil).AnyTimes()
|
||||
return http.StatusNotFound
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Not a devcontainer",
|
||||
setupMock: func(mccli *acmock.MockContainerCLI, mdccli *acmock.MockDevcontainerCLI) int {
|
||||
mccli.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{
|
||||
Containers: []codersdk.WorkspaceAgentContainer{plainContainer},
|
||||
}, nil).AnyTimes()
|
||||
return http.StatusNotFound
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
@@ -1472,16 +1471,21 @@ func TestWorkspaceAgentRecreateDevcontainer(t *testing.T) {
|
||||
}).WithAgent(func(agents []*proto.Agent) []*proto.Agent {
|
||||
return agents
|
||||
}).Do()
|
||||
|
||||
devcontainerAPIOptions := []agentcontainers.Option{
|
||||
agentcontainers.WithContainerCLI(mccli),
|
||||
agentcontainers.WithDevcontainerCLI(mdccli),
|
||||
agentcontainers.WithWatcher(watcher.NewNoop()),
|
||||
}
|
||||
if tc.setupDevcontainers != nil {
|
||||
devcontainerAPIOptions = append(devcontainerAPIOptions,
|
||||
agentcontainers.WithDevcontainers(tc.setupDevcontainers, nil))
|
||||
}
|
||||
|
||||
_ = agenttest.New(t, client.URL, r.AgentToken, func(o *agent.Options) {
|
||||
o.Logger = logger.Named("agent")
|
||||
o.Devcontainers = true
|
||||
o.DevcontainerAPIOptions = append(
|
||||
o.DevcontainerAPIOptions,
|
||||
agentcontainers.WithContainerCLI(mccli),
|
||||
agentcontainers.WithDevcontainerCLI(mdccli),
|
||||
agentcontainers.WithWatcher(watcher.NewNoop()),
|
||||
agentcontainers.WithContainerLabelIncludeFilter(agentcontainers.DevcontainerLocalFolderLabel, workspaceFolder),
|
||||
)
|
||||
o.DevcontainerAPIOptions = devcontainerAPIOptions
|
||||
})
|
||||
resources := coderdtest.NewWorkspaceAgentWaiter(t, client, r.Workspace.ID).Wait()
|
||||
require.Len(t, resources, 1, "expected one resource")
|
||||
@@ -1490,7 +1494,7 @@ func TestWorkspaceAgentRecreateDevcontainer(t *testing.T) {
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
_, err := client.WorkspaceAgentRecreateDevcontainer(ctx, agentID, devContainer.ID)
|
||||
_, err := client.WorkspaceAgentRecreateDevcontainer(ctx, agentID, tc.devcontainerID)
|
||||
if wantStatus > 0 {
|
||||
cerr, ok := codersdk.AsError(err)
|
||||
require.True(t, ok, "expected error to be a coder error")
|
||||
|
||||
@@ -1,153 +0,0 @@
|
||||
package codersdk
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/kylecarbs/aisdk-go"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// CreateChat creates a new chat.
|
||||
func (c *Client) CreateChat(ctx context.Context) (Chat, error) {
|
||||
res, err := c.Request(ctx, http.MethodPost, "/api/v2/chats", nil)
|
||||
if err != nil {
|
||||
return Chat{}, xerrors.Errorf("execute request: %w", err)
|
||||
}
|
||||
if res.StatusCode != http.StatusCreated {
|
||||
return Chat{}, ReadBodyAsError(res)
|
||||
}
|
||||
defer res.Body.Close()
|
||||
var chat Chat
|
||||
return chat, json.NewDecoder(res.Body).Decode(&chat)
|
||||
}
|
||||
|
||||
type Chat struct {
|
||||
ID uuid.UUID `json:"id" format:"uuid"`
|
||||
CreatedAt time.Time `json:"created_at" format:"date-time"`
|
||||
UpdatedAt time.Time `json:"updated_at" format:"date-time"`
|
||||
Title string `json:"title"`
|
||||
}
|
||||
|
||||
// ListChats lists all chats.
|
||||
func (c *Client) ListChats(ctx context.Context) ([]Chat, error) {
|
||||
res, err := c.Request(ctx, http.MethodGet, "/api/v2/chats", nil)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("execute request: %w", err)
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusOK {
|
||||
return nil, ReadBodyAsError(res)
|
||||
}
|
||||
|
||||
var chats []Chat
|
||||
return chats, json.NewDecoder(res.Body).Decode(&chats)
|
||||
}
|
||||
|
||||
// Chat returns a chat by ID.
|
||||
func (c *Client) Chat(ctx context.Context, id uuid.UUID) (Chat, error) {
|
||||
res, err := c.Request(ctx, http.MethodGet, fmt.Sprintf("/api/v2/chats/%s", id), nil)
|
||||
if err != nil {
|
||||
return Chat{}, xerrors.Errorf("execute request: %w", err)
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusOK {
|
||||
return Chat{}, ReadBodyAsError(res)
|
||||
}
|
||||
var chat Chat
|
||||
return chat, json.NewDecoder(res.Body).Decode(&chat)
|
||||
}
|
||||
|
||||
// ChatMessages returns the messages of a chat.
|
||||
func (c *Client) ChatMessages(ctx context.Context, id uuid.UUID) ([]ChatMessage, error) {
|
||||
res, err := c.Request(ctx, http.MethodGet, fmt.Sprintf("/api/v2/chats/%s/messages", id), nil)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("execute request: %w", err)
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusOK {
|
||||
return nil, ReadBodyAsError(res)
|
||||
}
|
||||
var messages []ChatMessage
|
||||
return messages, json.NewDecoder(res.Body).Decode(&messages)
|
||||
}
|
||||
|
||||
type ChatMessage = aisdk.Message
|
||||
|
||||
type CreateChatMessageRequest struct {
|
||||
Model string `json:"model"`
|
||||
Message ChatMessage `json:"message"`
|
||||
Thinking bool `json:"thinking"`
|
||||
}
|
||||
|
||||
// CreateChatMessage creates a new chat message and streams the response.
|
||||
// If the provided message has a conflicting ID with an existing message,
|
||||
// it will be overwritten.
|
||||
func (c *Client) CreateChatMessage(ctx context.Context, id uuid.UUID, req CreateChatMessageRequest) (<-chan aisdk.DataStreamPart, error) {
|
||||
res, err := c.Request(ctx, http.MethodPost, fmt.Sprintf("/api/v2/chats/%s/messages", id), req)
|
||||
defer func() {
|
||||
if res != nil && res.Body != nil {
|
||||
_ = res.Body.Close()
|
||||
}
|
||||
}()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("execute request: %w", err)
|
||||
}
|
||||
if res.StatusCode != http.StatusOK {
|
||||
return nil, ReadBodyAsError(res)
|
||||
}
|
||||
nextEvent := ServerSentEventReader(ctx, res.Body)
|
||||
|
||||
wc := make(chan aisdk.DataStreamPart, 256)
|
||||
go func() {
|
||||
defer close(wc)
|
||||
defer res.Body.Close()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
sse, err := nextEvent()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if sse.Type != ServerSentEventTypeData {
|
||||
continue
|
||||
}
|
||||
var part aisdk.DataStreamPart
|
||||
b, ok := sse.Data.([]byte)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
err = json.Unmarshal(b, &part)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case wc <- part:
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return wc, nil
|
||||
}
|
||||
|
||||
func (c *Client) DeleteChat(ctx context.Context, id uuid.UUID) error {
|
||||
res, err := c.Request(ctx, http.MethodDelete, fmt.Sprintf("/api/v2/chats/%s", id), nil)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("execute request: %w", err)
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusNoContent {
|
||||
return ReadBodyAsError(res)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
+1
-59
@@ -383,7 +383,6 @@ type DeploymentValues struct {
|
||||
DisablePasswordAuth serpent.Bool `json:"disable_password_auth,omitempty" typescript:",notnull"`
|
||||
Support SupportConfig `json:"support,omitempty" typescript:",notnull"`
|
||||
ExternalAuthConfigs serpent.Struct[[]ExternalAuthConfig] `json:"external_auth,omitempty" typescript:",notnull"`
|
||||
AI serpent.Struct[AIConfig] `json:"ai,omitempty" typescript:",notnull"`
|
||||
SSHConfig SSHConfig `json:"config_ssh,omitempty" typescript:",notnull"`
|
||||
WgtunnelHost serpent.String `json:"wgtunnel_host,omitempty" typescript:",notnull"`
|
||||
DisableOwnerWorkspaceExec serpent.Bool `json:"disable_owner_workspace_exec,omitempty" typescript:",notnull"`
|
||||
@@ -2681,15 +2680,6 @@ Write out the current server config as YAML to stdout.`,
|
||||
Value: &c.Support.Links,
|
||||
Hidden: false,
|
||||
},
|
||||
{
|
||||
// Env handling is done in cli.ReadAIProvidersFromEnv
|
||||
Name: "AI",
|
||||
Description: "Configure AI providers.",
|
||||
YAML: "ai",
|
||||
Value: &c.AI,
|
||||
// Hidden because this is experimental.
|
||||
Hidden: true,
|
||||
},
|
||||
{
|
||||
// Env handling is done in cli.ReadGitAuthFromEnvironment
|
||||
Name: "External Auth Providers",
|
||||
@@ -3080,7 +3070,6 @@ Write out the current server config as YAML to stdout.`,
|
||||
Group: &deploymentGroupPrebuilds,
|
||||
YAML: "reconciliation_interval",
|
||||
Annotations: serpent.Annotations{}.Mark(annotationFormatDuration, "true"),
|
||||
Hidden: ExperimentsSafe.Enabled(ExperimentWorkspacePrebuilds), // Hide setting while this feature is experimental.
|
||||
},
|
||||
{
|
||||
Name: "Reconciliation Backoff Interval",
|
||||
@@ -3132,21 +3121,6 @@ Write out the current server config as YAML to stdout.`,
|
||||
return opts
|
||||
}
|
||||
|
||||
type AIProviderConfig struct {
|
||||
// Type is the type of the API provider.
|
||||
Type string `json:"type" yaml:"type"`
|
||||
// APIKey is the API key to use for the API provider.
|
||||
APIKey string `json:"-" yaml:"api_key"`
|
||||
// Models is the list of models to use for the API provider.
|
||||
Models []string `json:"models" yaml:"models"`
|
||||
// BaseURL is the base URL to use for the API provider.
|
||||
BaseURL string `json:"base_url" yaml:"base_url"`
|
||||
}
|
||||
|
||||
type AIConfig struct {
|
||||
Providers []AIProviderConfig `json:"providers,omitempty" yaml:"providers,omitempty"`
|
||||
}
|
||||
|
||||
type SupportConfig struct {
|
||||
Links serpent.Struct[[]LinkConfig] `json:"links" typescript:",notnull"`
|
||||
}
|
||||
@@ -3367,8 +3341,6 @@ const (
|
||||
ExperimentNotifications Experiment = "notifications" // Sends notifications via SMTP and webhooks following certain events.
|
||||
ExperimentWorkspaceUsage Experiment = "workspace-usage" // Enables the new workspace usage tracking.
|
||||
ExperimentWebPush Experiment = "web-push" // Enables web push notifications through the browser.
|
||||
ExperimentWorkspacePrebuilds Experiment = "workspace-prebuilds" // Enables the new workspace prebuilds feature.
|
||||
ExperimentAgenticChat Experiment = "agentic-chat" // Enables the new agentic AI chat feature.
|
||||
)
|
||||
|
||||
// ExperimentsKnown should include all experiments defined above.
|
||||
@@ -3378,17 +3350,13 @@ var ExperimentsKnown = Experiments{
|
||||
ExperimentNotifications,
|
||||
ExperimentWorkspaceUsage,
|
||||
ExperimentWebPush,
|
||||
ExperimentWorkspacePrebuilds,
|
||||
ExperimentAgenticChat,
|
||||
}
|
||||
|
||||
// ExperimentsSafe should include all experiments that are safe for
|
||||
// users to opt-in to via --experimental='*'.
|
||||
// Experiments that are not ready for consumption by all users should
|
||||
// not be included here and will be essentially hidden.
|
||||
var ExperimentsSafe = Experiments{
|
||||
ExperimentWorkspacePrebuilds,
|
||||
}
|
||||
var ExperimentsSafe = Experiments{}
|
||||
|
||||
// Experiments is a list of experiments.
|
||||
// Multiple experiments may be enabled at the same time.
|
||||
@@ -3597,32 +3565,6 @@ func (c *Client) SSHConfiguration(ctx context.Context) (SSHConfigResponse, error
|
||||
return sshConfig, json.NewDecoder(res.Body).Decode(&sshConfig)
|
||||
}
|
||||
|
||||
type LanguageModelConfig struct {
|
||||
Models []LanguageModel `json:"models"`
|
||||
}
|
||||
|
||||
// LanguageModel is a language model that can be used for chat.
|
||||
type LanguageModel struct {
|
||||
// ID is used by the provider to identify the LLM.
|
||||
ID string `json:"id"`
|
||||
DisplayName string `json:"display_name"`
|
||||
// Provider is the provider of the LLM. e.g. openai, anthropic, etc.
|
||||
Provider string `json:"provider"`
|
||||
}
|
||||
|
||||
func (c *Client) LanguageModelConfig(ctx context.Context) (LanguageModelConfig, error) {
|
||||
res, err := c.Request(ctx, http.MethodGet, "/api/v2/deployment/llms", nil)
|
||||
if err != nil {
|
||||
return LanguageModelConfig{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusOK {
|
||||
return LanguageModelConfig{}, ReadBodyAsError(res)
|
||||
}
|
||||
var llms LanguageModelConfig
|
||||
return llms, json.NewDecoder(res.Body).Decode(&llms)
|
||||
}
|
||||
|
||||
type CryptoKeyFeature string
|
||||
|
||||
const (
|
||||
|
||||
@@ -9,7 +9,6 @@ const (
|
||||
ResourceAssignOrgRole RBACResource = "assign_org_role"
|
||||
ResourceAssignRole RBACResource = "assign_role"
|
||||
ResourceAuditLog RBACResource = "audit_log"
|
||||
ResourceChat RBACResource = "chat"
|
||||
ResourceCryptoKey RBACResource = "crypto_key"
|
||||
ResourceDebugInfo RBACResource = "debug_info"
|
||||
ResourceDeploymentConfig RBACResource = "deployment_config"
|
||||
@@ -73,7 +72,6 @@ var RBACResourceActions = map[RBACResource][]RBACAction{
|
||||
ResourceAssignOrgRole: {ActionAssign, ActionCreate, ActionDelete, ActionRead, ActionUnassign, ActionUpdate},
|
||||
ResourceAssignRole: {ActionAssign, ActionRead, ActionUnassign},
|
||||
ResourceAuditLog: {ActionCreate, ActionRead},
|
||||
ResourceChat: {ActionCreate, ActionDelete, ActionRead, ActionUpdate},
|
||||
ResourceCryptoKey: {ActionCreate, ActionDelete, ActionRead, ActionUpdate},
|
||||
ResourceDebugInfo: {ActionRead},
|
||||
ResourceDeploymentConfig: {ActionRead, ActionUpdate},
|
||||
|
||||
@@ -8,9 +8,10 @@ import (
|
||||
"io"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/kylecarbs/aisdk-go"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/aisdk-go"
|
||||
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
|
||||
@@ -10,11 +10,12 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/kylecarbs/aisdk-go"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/goleak"
|
||||
|
||||
"github.com/coder/aisdk-go"
|
||||
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
|
||||
@@ -519,8 +519,8 @@ func (c *Client) WorkspaceAgentListContainers(ctx context.Context, agentID uuid.
|
||||
}
|
||||
|
||||
// WorkspaceAgentRecreateDevcontainer recreates the devcontainer with the given ID.
|
||||
func (c *Client) WorkspaceAgentRecreateDevcontainer(ctx context.Context, agentID uuid.UUID, containerIDOrName string) (Response, error) {
|
||||
res, err := c.Request(ctx, http.MethodPost, fmt.Sprintf("/api/v2/workspaceagents/%s/containers/devcontainers/container/%s/recreate", agentID, containerIDOrName), nil)
|
||||
func (c *Client) WorkspaceAgentRecreateDevcontainer(ctx context.Context, agentID uuid.UUID, devcontainerID string) (Response, error) {
|
||||
res, err := c.Request(ctx, http.MethodPost, fmt.Sprintf("/api/v2/workspaceagents/%s/containers/devcontainers/%s/recreate", agentID, devcontainerID), nil)
|
||||
if err != nil {
|
||||
return Response{}, err
|
||||
}
|
||||
|
||||
@@ -37,15 +37,18 @@ const (
|
||||
type BuildReason string
|
||||
|
||||
const (
|
||||
// "initiator" is used when a workspace build is triggered by a user.
|
||||
// BuildReasonInitiator "initiator" is used when a workspace build is triggered by a user.
|
||||
// Combined with the initiator id/username, it indicates which user initiated the build.
|
||||
BuildReasonInitiator BuildReason = "initiator"
|
||||
// "autostart" is used when a build to start a workspace is triggered by Autostart.
|
||||
// BuildReasonAutostart "autostart" is used when a build to start a workspace is triggered by Autostart.
|
||||
// The initiator id/username in this case is the workspace owner and can be ignored.
|
||||
BuildReasonAutostart BuildReason = "autostart"
|
||||
// "autostop" is used when a build to stop a workspace is triggered by Autostop.
|
||||
// BuildReasonAutostop "autostop" is used when a build to stop a workspace is triggered by Autostop.
|
||||
// The initiator id/username in this case is the workspace owner and can be ignored.
|
||||
BuildReasonAutostop BuildReason = "autostop"
|
||||
// BuildReasonDormancy "dormancy" is used when a build to stop a workspace is triggered due to inactivity (dormancy).
|
||||
// The initiator id/username in this case is the workspace owner and can be ignored.
|
||||
BuildReasonDormancy BuildReason = "dormancy"
|
||||
)
|
||||
|
||||
// WorkspaceBuild is an at-point representation of a workspace state.
|
||||
|
||||
@@ -389,10 +389,10 @@ func (c *AgentConn) ListContainers(ctx context.Context) (codersdk.WorkspaceAgent
|
||||
|
||||
// RecreateDevcontainer recreates a devcontainer with the given container.
|
||||
// This is a blocking call and will wait for the container to be recreated.
|
||||
func (c *AgentConn) RecreateDevcontainer(ctx context.Context, containerIDOrName string) (codersdk.Response, error) {
|
||||
func (c *AgentConn) RecreateDevcontainer(ctx context.Context, devcontainerID string) (codersdk.Response, error) {
|
||||
ctx, span := tracing.StartSpan(ctx)
|
||||
defer span.End()
|
||||
res, err := c.apiRequest(ctx, http.MethodPost, "/api/v0/containers/devcontainers/container/"+containerIDOrName+"/recreate", nil)
|
||||
res, err := c.apiRequest(ctx, http.MethodPost, "/api/v0/containers/devcontainers/"+devcontainerID+"/recreate", nil)
|
||||
if err != nil {
|
||||
return codersdk.Response{}, xerrors.Errorf("do request: %w", err)
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user