Compare commits
18 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 630ed643cb | |
| | 129dd40781 | |
| | d448c49652 | |
| | 5359fb31f5 | |
| | b655768798 | |
| | 2d37eb42e7 | |
| | e7033b34dc | |
| | 01c6266e3e | |
| | f9011dcba2 | |
| | ae1be27ba6 | |
| | c4a01a42ce | |
| | c0aeb2fc2e | |
| | 908d236a19 | |
| | f519db88fb | |
| | e996e8b7e8 | |
| | da60671b33 | |
| | 963a1404c0 | |
| | 002110228c | |
Generated (+82)

```
@@ -397,6 +397,66 @@ const docTemplate = `{
}
}
},
"/debug/derp/traffic": {
"get": {
"security": [
{
"CoderSessionToken": []
}
],
"produces": [
"application/json"
],
"tags": [
"Debug"
],
"summary": "Debug DERP traffic",
"operationId": "debug-derp-traffic",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/derp.BytesSentRecv"
}
}
}
},
"x-apidocgen": {
"skip": true
}
}
},
"/debug/expvar": {
"get": {
"security": [
{
"CoderSessionToken": []
}
],
"produces": [
"application/json"
],
"tags": [
"Debug"
],
"summary": "Debug expvar",
"operationId": "debug-expvar",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "object",
"additionalProperties": true
}
}
},
"x-apidocgen": {
"skip": true
}
}
},
"/debug/health": {
"get": {
"security": [
@@ -13019,6 +13079,25 @@ const docTemplate = `{
}
}
},
"derp.BytesSentRecv": {
"type": "object",
"properties": {
"key": {
"description": "Key is the public key of the client which sent/received these bytes.",
"allOf": [
{
"$ref": "#/definitions/key.NodePublic"
}
]
},
"recv": {
"type": "integer"
},
"sent": {
"type": "integer"
}
}
},
"derp.ServerInfoMessage": {
"type": "object",
"properties": {
@@ -13524,6 +13603,9 @@ const docTemplate = `{
}
}
},
"key.NodePublic": {
"type": "object"
},
"netcheck.Report": {
"type": "object",
"properties": {
```
Generated (+74)

```
@@ -337,6 +337,58 @@
}
}
},
"/debug/derp/traffic": {
"get": {
"security": [
{
"CoderSessionToken": []
}
],
"produces": ["application/json"],
"tags": ["Debug"],
"summary": "Debug DERP traffic",
"operationId": "debug-derp-traffic",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/derp.BytesSentRecv"
}
}
}
},
"x-apidocgen": {
"skip": true
}
}
},
"/debug/expvar": {
"get": {
"security": [
{
"CoderSessionToken": []
}
],
"produces": ["application/json"],
"tags": ["Debug"],
"summary": "Debug expvar",
"operationId": "debug-expvar",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "object",
"additionalProperties": true
}
}
},
"x-apidocgen": {
"skip": true
}
}
},
"/debug/health": {
"get": {
"security": [
@@ -11852,6 +11904,25 @@
}
}
},
"derp.BytesSentRecv": {
"type": "object",
"properties": {
"key": {
"description": "Key is the public key of the client which sent/received these bytes.",
"allOf": [
{
"$ref": "#/definitions/key.NodePublic"
}
]
},
"recv": {
"type": "integer"
},
"sent": {
"type": "integer"
}
}
},
"derp.ServerInfoMessage": {
"type": "object",
"properties": {
@@ -12321,6 +12392,9 @@
}
}
},
"key.NodePublic": {
"type": "object"
},
"netcheck.Report": {
"type": "object",
"properties": {
```
```
@@ -5,6 +5,7 @@ import (
"crypto/tls"
"crypto/x509"
"database/sql"
"expvar"
"flag"
"fmt"
"io"
@@ -84,6 +85,8 @@ func init() {
globalHTTPSwaggerHandler = httpSwagger.Handler(httpSwagger.URL("/swagger/doc.json"))
}

var expDERPOnce = sync.Once{}

// Options are requires parameters for Coder to start.
type Options struct {
AccessURL *url.URL
@@ -559,6 +562,16 @@ func New(options *Options) *API {

derpHandler := derphttp.Handler(api.DERPServer)
derpHandler, api.derpCloseFunc = tailnet.WithWebsocketSupport(api.DERPServer, derpHandler)
// Register DERP on expvar HTTP handler, which we serve below in the router, c.f. expvar.Handler()
// These are the metrics the DERP server exposes.
// TODO: export via prometheus
expDERPOnce.Do(func() {
// We need to do this via a global Once because expvar registry is global and panics if we
// register multiple times. In production there is only one Coderd and one DERP server per
// process, but in testing, we create multiple of both, so the Once protects us from
// panicking.
expvar.Publish("derp", api.DERPServer.ExpVar())
})
cors := httpmw.Cors(options.DeploymentValues.Dangerous.AllowAllCors.Value())
prometheusMW := httpmw.Prometheus(options.PrometheusRegistry)

@@ -1028,6 +1041,10 @@ func New(options *Options) *API {
r.Use(httpmw.ExtractUserParam(options.Database))
r.Get("/debug-link", api.userDebugOIDC)
})
r.Route("/derp", func(r chi.Router) {
r.Get("/traffic", options.DERPServer.ServeDebugTraffic)
})
r.Method("GET", "/expvar", expvar.Handler()) // contains DERP metrics as well as cmdline and memstats
})
})
```
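The hunk above both publishes the DERP server's metrics into the process-global `expvar` registry and mounts `expvar.Handler()` on the debug router. As a standalone illustration of that standard-library pattern (the variable names, metric payload, and port below are made up for the sketch, not taken from the Coder codebase), a once-guarded publish plus the handler looks roughly like this:

```go
package main

import (
	"expvar"
	"net/http"
	"sync"
)

// published guards expvar.Publish: the expvar registry is process-global and
// panics on duplicate names, so code that may be constructed repeatedly
// (for example in tests) must only publish once.
var published sync.Once

// fakeMetrics is a stand-in for a real expvar.Var source such as a DERP server.
type fakeMetrics struct{}

func (fakeMetrics) String() string { return `{"bytes_sent": 0}` }

func main() {
	published.Do(func() {
		expvar.Publish("derp", fakeMetrics{})
	})
	// expvar.Handler() serves every published var (plus cmdline and memstats)
	// as a single JSON document.
	http.Handle("/debug/expvar", expvar.Handler())
	_ = http.ListenAndServe("127.0.0.1:8099", nil)
}
```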
+21 -1

```
@@ -276,7 +276,7 @@ func validateHealthSettings(settings codersdk.HealthSettings) error {
}

// For some reason the swagger docs need to be attached to a function.
//

// @Summary Debug Info Websocket Test
// @ID debug-info-websocket-test
// @Security CoderSessionToken
@@ -287,6 +287,26 @@ func validateHealthSettings(settings codersdk.HealthSettings) error {
// @x-apidocgen {"skip": true}
func _debugws(http.ResponseWriter, *http.Request) {} //nolint:unused

// @Summary Debug DERP traffic
// @ID debug-derp-traffic
// @Security CoderSessionToken
// @Produce json
// @Success 200 {array} derp.BytesSentRecv
// @Tags Debug
// @Router /debug/derp/traffic [get]
// @x-apidocgen {"skip": true}
func _debugDERPTraffic(http.ResponseWriter, *http.Request) {} //nolint:unused

// @Summary Debug expvar
// @ID debug-expvar
// @Security CoderSessionToken
// @Produce json
// @Tags Debug
// @Success 200 {object} map[string]any
// @Router /debug/expvar [get]
// @x-apidocgen {"skip": true}
func _debugExpVar(http.ResponseWriter, *http.Request) {} //nolint:unused

func loadDismissedHealthchecks(ctx context.Context, db database.Store, logger slog.Logger) []codersdk.HealthSection {
dismissedHealthchecks := []codersdk.HealthSection{}
settingsJSON, err := db.GetHealthSettings(ctx)
```
+17 -22

```
@@ -136,28 +136,8 @@ func NewServerTailnet(
return nil, xerrors.Errorf("get initial multi agent: %w", err)
}
tn.agentConn.Store(&agentConn)

pn, err := tailnet.NodeToProto(conn.Node())
if err != nil {
tn.logger.Critical(context.Background(), "failed to convert node", slog.Error(err))
} else {
err = tn.getAgentConn().UpdateSelf(pn)
if err != nil {
tn.logger.Warn(context.Background(), "server tailnet update self", slog.Error(err))
}
}

conn.SetNodeCallback(func(node *tailnet.Node) {
pn, err := tailnet.NodeToProto(node)
if err != nil {
tn.logger.Critical(context.Background(), "failed to convert node", slog.Error(err))
return
}
err = tn.getAgentConn().UpdateSelf(pn)
if err != nil {
tn.logger.Warn(context.Background(), "broadcast server node to agents", slog.Error(err))
}
})
// registering the callback also triggers send of the initial node
tn.coordinatee.SetNodeCallback(tn.nodeCallback)

// This is set to allow local DERP traffic to be proxied through memory
// instead of needing to hit the external access URL. Don't use the ctx
@@ -183,6 +163,18 @@ func NewServerTailnet(
return tn, nil
}

func (s *ServerTailnet) nodeCallback(node *tailnet.Node) {
pn, err := tailnet.NodeToProto(node)
if err != nil {
s.logger.Critical(context.Background(), "failed to convert node", slog.Error(err))
return
}
err = s.getAgentConn().UpdateSelf(pn)
if err != nil {
s.logger.Warn(context.Background(), "broadcast server node to agents", slog.Error(err))
}
}

func (s *ServerTailnet) Describe(descs chan<- *prometheus.Desc) {
s.connsPerAgent.Describe(descs)
s.totalConns.Describe(descs)
@@ -285,6 +277,9 @@ func (s *ServerTailnet) reinitCoordinator() {
continue
}
s.agentConn.Store(&agentConn)
// reset the Node callback, which triggers the conn to send the node immediately, and also
// register for updates
s.coordinatee.SetNodeCallback(s.nodeCallback)

// Resubscribe to all of the agents we're tracking.
for agentID := range s.agentConnectionTimes {

@@ -48,6 +48,7 @@ func TestServerTailnet_Reconnect(t *testing.T) {
agentConnectionTimes: make(map[uuid.UUID]time.Time),
}
// reinit the Coordinator once, to load mMultiAgent0
mCoord.EXPECT().SetNodeCallback(gomock.Any()).Times(1)
uut.reinitCoordinator()

mMultiAgent0.EXPECT().NextUpdate(gomock.Any()).
@@ -57,6 +58,7 @@ func TestServerTailnet_Reconnect(t *testing.T) {
Times(1).
Return(true) // this triggers reconnect
setLost := mCoord.EXPECT().SetAllPeersLost().Times(1).After(closed0)
mCoord.EXPECT().SetNodeCallback(gomock.Any()).Times(1).After(closed0)
mMultiAgent1.EXPECT().NextUpdate(gomock.Any()).
Times(1).
After(setLost).
```
+10 -2

```
@@ -928,15 +928,23 @@ func (api *API) userOIDC(rw http.ResponseWriter, r *http.Request) {

if len(api.OIDCConfig.EmailDomain) > 0 {
ok = false
emailSp := strings.Split(email, "@")
if len(emailSp) == 1 {
httpapi.Write(ctx, rw, http.StatusForbidden, codersdk.Response{
Message: fmt.Sprintf("Your email %q is not in domains %q!", email, api.OIDCConfig.EmailDomain),
})
return
}
userEmailDomain := emailSp[len(emailSp)-1]
for _, domain := range api.OIDCConfig.EmailDomain {
if strings.HasSuffix(strings.ToLower(email), strings.ToLower(domain)) {
if strings.EqualFold(userEmailDomain, domain) {
ok = true
break
}
}
if !ok {
httpapi.Write(ctx, rw, http.StatusForbidden, codersdk.Response{
Message: fmt.Sprintf("Your email %q is not in domains %q !", email, api.OIDCConfig.EmailDomain),
Message: fmt.Sprintf("Your email %q is not in domains %q!", email, api.OIDCConfig.EmailDomain),
})
return
}
```
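This hunk appears to be the email-domain check tightened for GHSA-7cc2-r658-7xpf (referenced in the v2.8.4 changelog later in this compare): the old check used a suffix match on the whole address, so an email whose text merely ends in an allowed domain would pass. A minimal sketch of the difference, with illustrative function names that are not Coder's API:

```go
package main

import (
	"fmt"
	"strings"
)

// suffixMatch mirrors the old, vulnerable behaviour: any address whose text
// ends in the allowed domain passes, even if the real domain differs.
func suffixMatch(email, domain string) bool {
	return strings.HasSuffix(strings.ToLower(email), strings.ToLower(domain))
}

// exactDomainMatch mirrors the new behaviour: only the part after the last
// "@" is compared, case-insensitively, against the allowed domain.
func exactDomainMatch(email, domain string) bool {
	parts := strings.Split(email, "@")
	if len(parts) < 2 {
		return false
	}
	return strings.EqualFold(parts[len(parts)-1], domain)
}

func main() {
	fmt.Println(suffixMatch("attacker@evilgmail.com", "gmail.com"))      // true: slips through
	fmt.Println(exactDomainMatch("attacker@evilgmail.com", "gmail.com")) // false
	fmt.Println(exactDomainMatch("colin@gmail.com", "gmail.com"))        // true
}
```

The new `EmailDomainSubset` test case in the next hunk exercises exactly this: `colin@gmail.com` must now be rejected when only `mail.com` is allowed.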
```
@@ -757,6 +757,17 @@ func TestUserOIDC(t *testing.T) {
"kwc.io",
},
StatusCode: http.StatusOK,
}, {
Name: "EmailDomainSubset",
IDTokenClaims: jwt.MapClaims{
"email": "colin@gmail.com",
"email_verified": true,
},
AllowSignups: true,
EmailDomain: []string{
"mail.com",
},
StatusCode: http.StatusForbidden,
}, {
Name: "EmptyClaims",
IDTokenClaims: jwt.MapClaims{},
```
```
@@ -0,0 +1,50 @@
package coderd_test

import (
"testing"

"github.com/stretchr/testify/require"

agentproto "github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/coderd/coderdtest"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbfake"
"github.com/coder/coder/v2/codersdk/agentsdk"
"github.com/coder/coder/v2/provisionersdk/proto"
"github.com/coder/coder/v2/testutil"
)

func TestAgentAPI_LargeManifest(t *testing.T) {
t.Parallel()
ctx := testutil.Context(t, testutil.WaitLong)
client, store := coderdtest.NewWithDatabase(t, nil)
adminUser := coderdtest.CreateFirstUser(t, client)
n := 512000
longScript := make([]byte, n)
for i := range longScript {
longScript[i] = 'q'
}
r := dbfake.WorkspaceBuild(t, store, database.Workspace{
OrganizationID: adminUser.OrganizationID,
OwnerID: adminUser.UserID,
}).WithAgent(func(agents []*proto.Agent) []*proto.Agent {
agents[0].Scripts = []*proto.Script{
{
Script: string(longScript),
},
}
return agents
}).Do()
ac := agentsdk.New(client.URL)
ac.SetSessionToken(r.AgentToken)
conn, err := ac.ConnectRPC(ctx)
defer func() {
_ = conn.Close()
}()
require.NoError(t, err)
agentAPI := agentproto.NewDRPCAgentClient(conn)
manifest, err := agentAPI.GetManifest(ctx, &agentproto.GetManifestRequest{})
require.NoError(t, err)
require.Len(t, manifest.Scripts, 1)
require.Len(t, manifest.Scripts[0].Script, n)
}
```
```
@@ -688,6 +688,9 @@ func (c *wsNetConn) Close() error {
// during read or write will cancel the context, but not close the
// conn. Close should be called to release context resources.
func websocketNetConn(ctx context.Context, conn *websocket.Conn, msgType websocket.MessageType) (context.Context, net.Conn) {
// Set the read limit to 4 MiB -- about the limit for protobufs. This needs to be larger than
// the default because some of our protocols can include large messages like startup scripts.
conn.SetReadLimit(1 << 22)
ctx, cancel := context.WithCancel(ctx)
nc := websocket.NetConn(ctx, conn, msgType)
return ctx, &wsNetConn{
```
```
@@ -286,8 +286,11 @@ func ProtoFromAppHealthsRequest(req PostAppHealthsRequest) (*proto.BatchUpdateAp
if !ok {
return nil, xerrors.Errorf("unknown app health: %s", h)
}

var idCopy uuid.UUID
copy(idCopy[:], id[:])
pReq.Updates = append(pReq.Updates, &proto.BatchUpdateAppHealthRequest_HealthUpdate{
Id: id[:],
Id: idCopy[:],
Health: proto.AppHealth(hp),
})
}
```
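The `idCopy` line above matters because slicing a range-loop variable aliases it: with the loop-variable semantics used before Go 1.22 (which I assume applies to this code), every appended update would point at the same backing array and end up carrying the last iteration's ID. Copying into a fresh variable per iteration gives each update its own bytes. A minimal sketch of the pitfall with simplified types (not the Coder structs):

```go
package main

import "fmt"

func main() {
	ids := [][4]byte{{1}, {2}, {3}}

	// Aliased: id is a single reused loop variable (pre-Go-1.22 semantics),
	// so every stored slice points at the same array and shows the last value.
	var aliased [][]byte
	for _, id := range ids {
		aliased = append(aliased, id[:])
	}

	// Copied: a fresh array per iteration gives each stored slice its own bytes.
	var copied [][]byte
	for _, id := range ids {
		var idCopy [4]byte
		copy(idCopy[:], id[:])
		copied = append(copied, idCopy[:])
	}

	fmt.Println(aliased) // pre-Go-1.22: [[3 0 0 0] [3 0 0 0] [3 0 0 0]]
	fmt.Println(copied)  // [[1 0 0 0] [2 0 0 0] [3 0 0 0]]
}
```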
Generated (+28)

````
@@ -7419,6 +7419,24 @@ If the schedule is empty, the user will be updated to use the default schedule.|
| `count` | integer | false | | |
| `workspaces` | array of [codersdk.Workspace](#codersdkworkspace) | false | | |

## derp.BytesSentRecv

```json
{
"key": {},
"recv": 0,
"sent": 0
}
```

### Properties

| Name | Type | Required | Restrictions | Description |
| ------ | -------------------------------- | -------- | ------------ | -------------------------------------------------------------------- |
| `key` | [key.NodePublic](#keynodepublic) | false | | Key is the public key of the client which sent/received these bytes. |
| `recv` | integer | false | | |
| `sent` | integer | false | | |

## derp.ServerInfoMessage

```json
@@ -8532,6 +8550,16 @@ If the schedule is empty, the user will be updated to use the default schedule.|
| `warnings` | array of [health.Message](#healthmessage) | false | | |
| `workspace_proxies` | [codersdk.RegionsResponse-codersdk_WorkspaceProxy](#codersdkregionsresponse-codersdk_workspaceproxy) | false | | |

## key.NodePublic

```json
{}
```

### Properties

_None_

## netcheck.Report

```json
````
````
@@ -13,8 +13,8 @@ git checkout main; git pull; git fetch --all
export CODER_IGNORE_MISSING_COMMIT_METADATA=1
export BRANCH=main
./scripts/release/generate_release_notes.sh \
--old-version=v2.7.1 \
--new-version=v2.7.2 \
--old-version=v2.8.0 \
--new-version=v2.9.0 \
--ref=$(git rev-parse --short "${ref:-origin/$BRANCH}") \
> ./docs/changelogs/v2.7.2.md
> ./docs/changelogs/v2.9.0.md
```
````
Binary file not shown. After: 155 KiB
Binary file not shown. After: 58 KiB
```
@@ -0,0 +1,107 @@
## Changelog

### Features

- Coder will now autofill parameters in the "create workspace" page based on the previous value (#11731) (@ammario)

- Change default Terraform version to v1.6 and relax max version constraint (#12027) (@johnstcn)
- Show workspace name suggestions below the name field (#12001) (@aslilac)
- Add favorite/unfavorite commands (@johnstcn)
- Add .zip support for Coder templates (#11839) (#12032) (@mtojek)
- Add support for `--wait` and scripts that block login (#10473) (@mafredri)
- Users can mark workspaces as favorite to show up at the top of their list (#11875) (#11793) (#11878) (#11791) (@johnstcn)

- Add Prometheus metrics to servertailnet (#11988) (@coadler)
- Add support for concurrent scenarios (#11753) (@mafredri)
- Display user avatar on workspace page (#11893) (@BrunoQuaresma)
- Do not show popover on update deadline (#11921) (@BrunoQuaresma)
- Simplify "Create Template" form by removing advanced settings (#11918) (@BrunoQuaresma)
- Show deprecation message on template page (#11996) (@BrunoQuaresma)
- Check agent API version on connection (#11696)
- Add "updated" search param to workspaces (#11714)
- Add option to speedtest to dump a pcap of network traffic (#11848) (@coadler)
- Add logging to client tailnet yamux (#11908) (@spikecurtis)
- Add customizable upgrade message on client/server version mismatch (#11587) (@sreya)
- Add logging to pgcoord subscribe/unsubscribe (#11952) (@spikecurtis)
- Add logging to pgPubsub (#11953) (@spikecurtis)
- Add logging to agent yamux session (#11912) (@spikecurtis)
- Move agent v2 API connection monitoring to yamux layer (#11910) (@spikecurtis)
- Add statsReporter for reporting stats on agent v2 API (#11920) (@spikecurtis)
- Add metrics to PGPubsub (#11971) (@spikecurtis)
- Add custom error message on signups disabled page (#11959) (@mtojek)

### Bug fixes

- Make ServerTailnet set peers lost when it reconnects to the coordinator (#11682)
- Properly auto-update workspace on SSH if template policy is set (#11773)
- Allow template name length of 32 in template push and create (#11915) (@mafredri)
- Alter return signature of convertWorkspace, add check for requesterID (#11796) (@johnstcn)
- Fix limit in `GetUserWorkspaceBuildParameters` (#11954) (@mafredri)
- Fix test flake in TestHeartbeat (#11808) (@johnstcn)
- Do not cache context cancellation errors (#11840) (@johnstcn)
- Fix startup script on workspace creation (#11958) (@matifali)
- Fix startup script looping (#11972) (@code-asher)
- Add ID to default columns in licenses list output (#11823) (@johnstcn)
- Handle query canceled error in sendBeat() (#11794) (@johnstcn)
- Fix proxy settings link (#11817) (@BrunoQuaresma)
- Disable autostart and autostop according to template settings (#11809) (@Kira-Pilot)
- Fix capitalized username (#11891) (@BrunoQuaresma)
- Fix parameters' request upon template variables update (#11898) (@BrunoQuaresma)
- Fix text overflow on batch ws deletion (#11981) (@BrunoQuaresma)
- Fix parameter input icon shrink (#11995) (@BrunoQuaresma)
- Use TSMP ping for reachability, not latency (#11749)
- Display error when fetching OAuth2 provider apps (#11713)
- Fix code-server path based forwarding, defer to code-server (#11759)
- Use correct logger for lifecycle_executor (#11763)
- Disable keepalives in workspaceapps transport (#11789) (@coadler)
- Accept agent RPC connection without version query parameter (#11790) (@spikecurtis)
- Stop spamming DERP map updates for equivalent maps (#11792) (@spikecurtis)
- Check update permission to start workspace (#11798) (@mtojek)
- Always attempt external auth refresh when fetching (#11762) (@Emyrk)
- Stop running tests that exec sh scripts in parallel (#11834) (@spikecurtis)
- Fix type error from theme update (#11844) (@aslilac)
- Use new context after t.Parallel in TestOAuthAppSecrets (@spikecurtis)
- Always attempt external auth refresh when fetching (#11762) (#11830) (@Emyrk)
- Wait for new template version before promoting (#11874) (@spikecurtis)
- Respect wait flag on ping (#11896) (@f0ssel)
- Fix cliui prompt styling (#11899) (@aslilac)
- Fix prevent agent_test.go from failing on error logs (#11909) (@spikecurtis)
- Add timeout to listening ports request (#11935) (@coadler)
- Only delete expired agents on success (#11940) (@coadler)
- Close MultiAgentConn when coordinator closes (#11941) (@spikecurtis)
- Strip timezone information from a date in dau response (#11962) (@Emyrk)
- Improve click UX and styling for Auth Token page (#11863) (@Parkreiner)
- Rewrite url to agent ip in single tailnet (#11810) (@coadler)
- Avoid race in TestPGPubsub_Metrics by using Eventually (#11973) (@spikecurtis)
- Always return a clean http client for promoauth (#11963) (@Emyrk)
- Change build status colors (#11985) (@aslilac)
- Only display xray results if vulns > 0 (#11989) (@sreya)
- Use dark background in terminal, even when a light theme is selected (#12004) (@aslilac)
- Fix graceful disconnect in DialWorkspaceAgent (#11993) (@spikecurtis)
- Stop logging error on query canceled (#12017) (@spikecurtis)
- Only display quota if it is higher than 0 (#11979) (@BrunoQuaresma)

### Documentation

- Using coder modules in offline deployments (#11788) (@matifali)
- Simplify JFrog integration docs (#11787) (@matifali)
- Add guide for azure federation (#11864) (@ericpaulsen)
- Fix example template README 404s and semantics (#11903) (@ericpaulsen)
- Update remote docker host docs (#11919) (@matifali)
- Add FAQ for gateway reconnects (#12007) (@ericpaulsen)

### Code refactoring

- Apply cosmetic changes and remove ExternalAuth from settings page (#11756)
- Minor improvements create workspace form (#11771)
- Verify external auth before displaying workspace form (#11777) (@BrunoQuaresma)

Compare: [`v2.7.2...v2.7.3`](https://github.com/coder/coder/compare/v2.7.2...v2.7.3)

## Container image

- `docker pull ghcr.io/coder/coder:v2.7.3`

## Install/upgrade

Refer to our docs to [install](https://coder.com/docs/v2/latest/install) or [upgrade](https://coder.com/docs/v2/latest/admin/upgrade) Coder, or use a release asset below.
```
```
@@ -0,0 +1,15 @@
## Changelog

### Bug fixes

- Fixed an issue where workspace apps may never be marked as healthy.

Compare: [`v2.8.1...v2.8.2`](https://github.com/coder/coder/compare/v2.8.1...v2.8.2)

## Container image

- `docker pull ghcr.io/coder/coder:v2.8.2`

## Install/upgrade

Refer to our docs to [install](https://coder.com/docs/v2/latest/install) or [upgrade](https://coder.com/docs/v2/latest/admin/upgrade) Coder, or use a release asset below.
```
```
@@ -0,0 +1,15 @@
## Changelog

### Bug fixes

- Fixed an issue where some workspace apps were inaccessible.

Compare: [`v2.8.2...v2.8.3`](https://github.com/coder/coder/compare/v2.8.1...v2.8.2)

## Container image

- `docker pull ghcr.io/coder/coder:v2.8.3`

## Install/upgrade

Refer to our docs to [install](https://coder.com/docs/v2/latest/install) or [upgrade](https://coder.com/docs/v2/latest/admin/upgrade) Coder, or use a release asset below.
```
```
@@ -0,0 +1,20 @@
## Changelog

All users are recommended to upgrade to a version that patches
[GHSA-7cc2-r658-7xpf](https://github.com/coder/coder/security/advisories/GHSA-7cc2-r658-7xpf)
as soon as possible if they are using OIDC authentication with the
`CODER_OIDC_EMAIL_DOMAIN` setting.

### Security

- Fixes [GHSA-7cc2-r658-7xpf](https://github.com/coder/coder/security/advisories/GHSA-7cc2-r658-7xpf)

Compare: [`v2.8.3...v2.8.4`](https://github.com/coder/coder/compare/v2.8.3...v2.8.4)

## Container image

- `docker pull ghcr.io/coder/coder:v2.8.4`

## Install/upgrade

Refer to our docs to [install](https://coder.com/docs/v2/latest/install) or [upgrade](https://coder.com/docs/v2/latest/admin/upgrade) Coder, or use a release asset below.
```
```
@@ -0,0 +1,16 @@
## Changelog

- Fixed an where some template updates caused workspaces to stop after 2 hours, even while in use (#12446)
- Add DERP mesh health checking to workspace proxies (#12220) (#12222)
- Improve clipboard support on HTTP connections and older browsers (#12178)

Compare: [`v2.8.4...v2.8.5`](https://github.com/coder/coder/compare/v2.8.4...v2.8.5)

## Container image

- `docker pull ghcr.io/coder/coder:v2.8.4`

## Install/upgrade

Refer to our docs to [install](https://coder.com/docs/v2/latest/install) or [upgrade](https://coder.com/docs/v2/latest/admin/upgrade) Coder, or use a release asset below.
```
```
@@ -244,7 +244,7 @@ func (r *RootCmd) proxyServer() *clibase.Cmd {
closers.Add(closeFunc)
}

proxy, err := wsproxy.New(ctx, &wsproxy.Options{
options := &wsproxy.Options{
Logger: logger,
Experiments: coderd.ReadExperiments(logger, cfg.Experiments.Value()),
HTTPClient: httpClient,
@@ -263,7 +263,12 @@ func (r *RootCmd) proxyServer() *clibase.Cmd {
DERPEnabled: cfg.DERP.Server.Enable.Value(),
DERPOnly: derpOnly.Value(),
DERPServerRelayAddress: cfg.DERP.Server.RelayURL.String(),
})
}
if httpServers.TLSConfig != nil {
options.TLSCertificates = httpServers.TLSConfig.Certificates
}

proxy, err := wsproxy.New(ctx, options)
if err != nil {
return xerrors.Errorf("create workspace proxy: %w", err)
}
```
```
@@ -3,8 +3,6 @@ package coderd
import (
"context"
"crypto/ed25519"
"crypto/tls"
"crypto/x509"
"fmt"
"math"
"net/http"
@@ -365,27 +363,9 @@ func New(ctx context.Context, options *Options) (_ *API, err error) {
})
}

meshRootCA := x509.NewCertPool()
for _, certificate := range options.TLSCertificates {
for _, certificatePart := range certificate.Certificate {
certificate, err := x509.ParseCertificate(certificatePart)
if err != nil {
return nil, xerrors.Errorf("parse certificate %s: %w", certificate.Subject.CommonName, err)
}
meshRootCA.AddCert(certificate)
}
}
// This TLS configuration spoofs access from the access URL hostname
// assuming that the certificates provided will cover that hostname.
//
// Replica sync and DERP meshing require accessing replicas via their
// internal IP addresses, and if TLS is configured we use the same
// certificates.
meshTLSConfig := &tls.Config{
MinVersion: tls.VersionTLS12,
Certificates: options.TLSCertificates,
RootCAs: meshRootCA,
ServerName: options.AccessURL.Hostname(),
meshTLSConfig, err := replicasync.CreateDERPMeshTLSConfig(options.AccessURL.Hostname(), options.TLSCertificates)
if err != nil {
return nil, xerrors.Errorf("create DERP mesh TLS config: %w", err)
}
api.replicaManager, err = replicasync.New(ctx, options.Logger, options.Database, options.Pubsub, &replicasync.Options{
ID: api.AGPL.ID,
```
```
@@ -38,15 +38,32 @@ type ProxyOptions struct {
// ProxyURL is optional
ProxyURL *url.URL

// Token is optional. If specified, a new workspace proxy region will not be
// created, and the proxy will become a replica of the existing proxy
// region.
Token string

// ReplicaPingCallback is optional.
ReplicaPingCallback func(replicas []codersdk.Replica, err string)

// FlushStats is optional
FlushStats chan chan<- struct{}
}

// NewWorkspaceProxy will configure a wsproxy.Server with the given options.
// The new wsproxy will register itself with the given coderd.API instance.
// The first user owner client is required to create the wsproxy on the coderd
// api server.
func NewWorkspaceProxy(t *testing.T, coderdAPI *coderd.API, owner *codersdk.Client, options *ProxyOptions) *wsproxy.Server {
type WorkspaceProxy struct {
*wsproxy.Server

ServerURL *url.URL
}

// NewWorkspaceProxyReplica will configure a wsproxy.Server with the given
// options. The new wsproxy replica will register itself with the given
// coderd.API instance.
//
// If a token is not provided, a new workspace proxy region is created using the
// owner client. If a token is provided, the proxy will become a replica of the
// existing proxy region.
func NewWorkspaceProxyReplica(t *testing.T, coderdAPI *coderd.API, owner *codersdk.Client, options *ProxyOptions) WorkspaceProxy {
ctx, cancelFunc := context.WithCancel(context.Background())
t.Cleanup(cancelFunc)

@@ -107,11 +124,15 @@ func NewWorkspaceProxy(t *testing.T, coderdAPI *coderd.API, owner *codersdk.Clie
options.Name = namesgenerator.GetRandomName(1)
}

proxyRes, err := owner.CreateWorkspaceProxy(ctx, codersdk.CreateWorkspaceProxyRequest{
Name: options.Name,
Icon: "/emojis/flag.png",
})
require.NoError(t, err, "failed to create workspace proxy")
token := options.Token
if token == "" {
proxyRes, err := owner.CreateWorkspaceProxy(ctx, codersdk.CreateWorkspaceProxyRequest{
Name: options.Name,
Icon: "/emojis/flag.png",
})
require.NoError(t, err, "failed to create workspace proxy")
token = proxyRes.ProxyToken
}

// Inherit collector options from coderd, but keep the wsproxy reporter.
statsCollectorOptions := coderdAPI.Options.WorkspaceAppsStatsCollectorOptions
@@ -121,7 +142,7 @@ func NewWorkspaceProxy(t *testing.T, coderdAPI *coderd.API, owner *codersdk.Clie
}

wssrv, err := wsproxy.New(ctx, &wsproxy.Options{
Logger: slogtest.Make(t, nil).Leveled(slog.LevelDebug),
Logger: slogtest.Make(t, nil).Leveled(slog.LevelDebug).With(slog.F("server_url", serverURL.String())),
Experiments: options.Experiments,
DashboardURL: coderdAPI.AccessURL,
AccessURL: accessURL,
@@ -131,14 +152,15 @@ func NewWorkspaceProxy(t *testing.T, coderdAPI *coderd.API, owner *codersdk.Clie
Tracing: coderdAPI.TracerProvider,
APIRateLimit: coderdAPI.APIRateLimit,
SecureAuthCookie: coderdAPI.SecureAuthCookie,
ProxySessionToken: proxyRes.ProxyToken,
ProxySessionToken: token,
DisablePathApps: options.DisablePathApps,
// We need a new registry to not conflict with the coderd internal
// proxy metrics.
PrometheusRegistry: prometheus.NewRegistry(),
DERPEnabled: !options.DerpDisabled,
DERPOnly: options.DerpOnly,
DERPServerRelayAddress: accessURL.String(),
DERPServerRelayAddress: serverURL.String(),
ReplicaErrCallback: options.ReplicaPingCallback,
StatsCollectorOptions: statsCollectorOptions,
})
require.NoError(t, err)
@@ -151,5 +173,8 @@ func NewWorkspaceProxy(t *testing.T, coderdAPI *coderd.API, owner *codersdk.Clie
handler = wssrv.Handler
mutex.Unlock()

return wssrv
return WorkspaceProxy{
Server: wssrv,
ServerURL: serverURL,
}
}
```
```
@@ -274,15 +274,24 @@ func (s *EnterpriseTemplateScheduleStore) updateWorkspaceBuild(ctx context.Conte
}

// If max deadline is before now()+2h, then set it to that.
// This is intended to give ample warning to this workspace about an upcoming auto-stop.
// If we were to omit this "grace" period, then this workspace could be set to be stopped "now".
// The "2 hours" was an arbitrary decision for this window.
now := s.now()
if autostop.MaxDeadline.Before(now.Add(2 * time.Hour)) {
if !autostop.MaxDeadline.IsZero() && autostop.MaxDeadline.Before(now.Add(2*time.Hour)) {
autostop.MaxDeadline = now.Add(time.Hour * 2)
}

// If the current deadline on the build is after the new max_deadline, then
// set it to the max_deadline.
autostop.Deadline = build.Deadline
if autostop.Deadline.After(autostop.MaxDeadline) {
if !autostop.MaxDeadline.IsZero() && autostop.Deadline.After(autostop.MaxDeadline) {
autostop.Deadline = autostop.MaxDeadline
}

// If there's a max_deadline but the deadline is 0, then set the deadline to
// the max_deadline.
if !autostop.MaxDeadline.IsZero() && autostop.Deadline.IsZero() {
autostop.Deadline = autostop.MaxDeadline
}
```
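Read together, the guards above now only fire when a max deadline actually exists: a zero `MaxDeadline` means "no max" and must leave both times untouched. A rough standalone restatement of the three clamping rules, assuming simplified values rather than the actual `EnterpriseTemplateScheduleStore` code (the helper name and signature are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// clampAutostop applies the same three rules the hunk above encodes.
// A zero maxDeadline means "no max deadline", so nothing is changed.
func clampAutostop(now, deadline, maxDeadline time.Time) (time.Time, time.Time) {
	if maxDeadline.IsZero() {
		return deadline, maxDeadline
	}
	// Rule 1: keep at least a 2-hour grace window before the max deadline.
	if maxDeadline.Before(now.Add(2 * time.Hour)) {
		maxDeadline = now.Add(2 * time.Hour)
	}
	// Rule 2: the deadline may never exceed the max deadline.
	if deadline.After(maxDeadline) {
		deadline = maxDeadline
	}
	// Rule 3: an unset deadline inherits the max deadline.
	if deadline.IsZero() {
		deadline = maxDeadline
	}
	return deadline, maxDeadline
}

func main() {
	now := time.Date(2024, 2, 1, 12, 0, 0, 0, time.UTC)
	d, max := clampAutostop(now, now.Add(8*time.Hour), now.Add(1*time.Hour))
	fmt.Println(d, max) // deadline clamped to the raised max: now+2h, now+2h
}
```

The `MaxDeadlineShouldBeUnset` and `NoDeadline` cases added to the test below cover the previously buggy zero-value path.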
```
@@ -16,6 +16,7 @@ import (
"github.com/coder/coder/v2/coderd/database/dbgen"
"github.com/coder/coder/v2/coderd/database/dbtestutil"
agplschedule "github.com/coder/coder/v2/coderd/schedule"
"github.com/coder/coder/v2/coderd/util/ptr"
"github.com/coder/coder/v2/cryptorand"
"github.com/coder/coder/v2/enterprise/coderd/schedule"
"github.com/coder/coder/v2/testutil"
@@ -27,30 +28,35 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) {
db, _ := dbtestutil.NewDB(t)

var (
org = dbgen.Organization(t, db, database.Organization{})
user = dbgen.User(t, db, database.User{})
org = dbgen.Organization(t, db, database.Organization{})
quietUser = dbgen.User(t, db, database.User{
Username: "quiet",
})
noQuietUser = dbgen.User(t, db, database.User{
Username: "no-quiet",
})
file = dbgen.File(t, db, database.File{
CreatedBy: user.ID,
CreatedBy: quietUser.ID,
})
templateJob = dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
OrganizationID: org.ID,
FileID: file.ID,
InitiatorID: user.ID,
InitiatorID: quietUser.ID,
Tags: database.StringMap{
"foo": "bar",
},
})
templateVersion = dbgen.TemplateVersion(t, db, database.TemplateVersion{
OrganizationID: org.ID,
CreatedBy: user.ID,
CreatedBy: quietUser.ID,
JobID: templateJob.ID,
})
)

const userQuietHoursSchedule = "CRON_TZ=UTC 0 0 * * *" // midnight UTC
ctx := testutil.Context(t, testutil.WaitLong)
user, err := db.UpdateUserQuietHoursSchedule(ctx, database.UpdateUserQuietHoursScheduleParams{
ID: user.ID,
quietUser, err := db.UpdateUserQuietHoursSchedule(ctx, database.UpdateUserQuietHoursScheduleParams{
ID: quietUser.ID,
QuietHoursSchedule: userQuietHoursSchedule,
})
require.NoError(t, err)
@@ -62,12 +68,15 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) {

// Workspace old max_deadline too soon
cases := []struct {
name string
now time.Time
deadline time.Time
maxDeadline time.Time
newDeadline time.Time // 0 for no change
name string
now time.Time
deadline time.Time
maxDeadline time.Time
// Set to nil for no change.
newDeadline *time.Time
newMaxDeadline time.Time
noQuietHours bool
autostopReq *agplschedule.TemplateAutostopRequirement
}{
{
name: "SkippedWorkspaceMaxDeadlineTooSoon",
@@ -75,7 +84,7 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) {
deadline: buildTime,
maxDeadline: buildTime.Add(1 * time.Hour),
// Unchanged since the max deadline is too soon.
newDeadline: time.Time{},
newDeadline: nil,
newMaxDeadline: buildTime.Add(1 * time.Hour),
},
{
@@ -85,7 +94,7 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) {
deadline: buildTime,
// Far into the future...
maxDeadline: nextQuietHours.Add(24 * time.Hour),
newDeadline: time.Time{},
newDeadline: nil,
// We will use now() + 2 hours if the newly calculated max deadline
// from the workspace build time is before now.
newMaxDeadline: nextQuietHours.Add(8 * time.Hour),
@@ -97,7 +106,7 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) {
deadline: buildTime,
// Far into the future...
maxDeadline: nextQuietHours.Add(24 * time.Hour),
newDeadline: time.Time{},
newDeadline: nil,
// We will use now() + 2 hours if the newly calculated max deadline
// from the workspace build time is within the next 2 hours.
newMaxDeadline: nextQuietHours.Add(1 * time.Hour),
@@ -109,7 +118,7 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) {
deadline: buildTime,
// Far into the future...
maxDeadline: nextQuietHours.Add(24 * time.Hour),
newDeadline: time.Time{},
newDeadline: nil,
newMaxDeadline: nextQuietHours,
},
{
@@ -120,7 +129,56 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) {
deadline: nextQuietHours.Add(24 * time.Hour),
maxDeadline: nextQuietHours.Add(24 * time.Hour),
// The deadline should match since it is after the new max deadline.
newDeadline: nextQuietHours,
newDeadline: ptr.Ref(nextQuietHours),
newMaxDeadline: nextQuietHours,
},
{
// There was a bug if a user has no quiet hours set, and autostop
// req is not turned on, then the max deadline is set to `time.Time{}`.
// This zero value was "in the past", so the workspace deadline would
// be set to "now" + 2 hours.
// This is a mistake because the max deadline being zero means
// there is no max deadline.
name: "MaxDeadlineShouldBeUnset",
now: buildTime,
deadline: buildTime.Add(time.Hour * 8),
maxDeadline: time.Time{}, // No max set
// Should be unchanged
newDeadline: ptr.Ref(buildTime.Add(time.Hour * 8)),
newMaxDeadline: time.Time{},
noQuietHours: true,
autostopReq: &agplschedule.TemplateAutostopRequirement{
DaysOfWeek: 0,
Weeks: 0,
},
},
{
// A bug existed where MaxDeadline could be set, but deadline was
// `time.Time{}`. This is a logical inconsistency because the "max"
// deadline was ignored.
name: "NoDeadline",
now: buildTime,
deadline: time.Time{},
maxDeadline: time.Time{}, // No max set
// Should be unchanged
newDeadline: ptr.Ref(time.Time{}),
newMaxDeadline: time.Time{},
noQuietHours: true,
autostopReq: &agplschedule.TemplateAutostopRequirement{
DaysOfWeek: 0,
Weeks: 0,
},
},

{
// Similar to 'NoDeadline' test. This has a MaxDeadline set, so
// the deadline of the workspace should now be set.
name: "WorkspaceDeadlineNowSet",
now: nextQuietHours.Add(-6 * time.Hour),
// Start with unset times
deadline: time.Time{},
maxDeadline: time.Time{},
newDeadline: ptr.Ref(nextQuietHours),
newMaxDeadline: nextQuietHours,
},
}
@@ -131,6 +189,11 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) {
t.Run(c.name, func(t *testing.T) {
t.Parallel()

user := quietUser
if c.noQuietHours {
user = noQuietUser
}

t.Log("buildTime", buildTime)
t.Log("nextQuietHours", nextQuietHours)
t.Log("now", c.now)
@@ -217,17 +280,22 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) {
templateScheduleStore.TimeNowFn = func() time.Time {
return c.now
}

autostopReq := agplschedule.TemplateAutostopRequirement{
// Every day
DaysOfWeek: 0b01111111,
Weeks: 0,
}
if c.autostopReq != nil {
autostopReq = *c.autostopReq
}
_, err = templateScheduleStore.Set(ctx, db, template, agplschedule.TemplateScheduleOptions{
UserAutostartEnabled: false,
UserAutostopEnabled: false,
DefaultTTL: 0,
MaxTTL: 0,
UseMaxTTL: false,
AutostopRequirement: agplschedule.TemplateAutostopRequirement{
// Every day
DaysOfWeek: 0b01111111,
Weeks: 0,
},
UserAutostartEnabled: false,
UserAutostopEnabled: false,
DefaultTTL: 0,
MaxTTL: 0,
UseMaxTTL: false,
AutostopRequirement: autostopReq,
FailureTTL: 0,
TimeTilDormant: 0,
TimeTilDormantAutoDelete: 0,
@@ -238,10 +306,10 @@ func TestTemplateUpdateBuildDeadlines(t *testing.T) {
newBuild, err := db.GetWorkspaceBuildByID(ctx, wsBuild.ID)
require.NoError(t, err)

if c.newDeadline.IsZero() {
c.newDeadline = wsBuild.Deadline
if c.newDeadline == nil {
c.newDeadline = &wsBuild.Deadline
}
require.WithinDuration(t, c.newDeadline, newBuild.Deadline, time.Second)
require.WithinDuration(t, *c.newDeadline, newBuild.Deadline, time.Second)
require.WithinDuration(t, c.newMaxDeadline, newBuild.MaxDeadline, time.Second)

// Check that the new build has the same state as before.
```
```
@@ -99,7 +99,7 @@ func TestRegions(t *testing.T) {
require.NoError(t, err)

const proxyName = "hello"
_ = coderdenttest.NewWorkspaceProxy(t, api, client, &coderdenttest.ProxyOptions{
_ = coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{
Name: proxyName,
AppHostname: appHostname + ".proxy",
})
@@ -734,7 +734,7 @@ func TestReconnectingPTYSignedToken(t *testing.T) {
proxyURL, err := url.Parse(fmt.Sprintf("https://%s.com", namesgenerator.GetRandomName(1)))
require.NoError(t, err)

_ = coderdenttest.NewWorkspaceProxy(t, api, client, &coderdenttest.ProxyOptions{
_ = coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{
Name: namesgenerator.GetRandomName(1),
ProxyURL: proxyURL,
AppHostname: "*.sub.example.com",
```
```
@@ -3,6 +3,7 @@ package replicasync
import (
"context"
"crypto/tls"
"crypto/x509"
"database/sql"
"errors"
"fmt"
@@ -265,45 +266,35 @@ func (m *Manager) syncReplicas(ctx context.Context) error {
},
}
defer client.CloseIdleConnections()
var wg sync.WaitGroup
var mu sync.Mutex
failed := make([]string, 0)
for _, peer := range m.Regional() {
wg.Add(1)

peers := m.Regional()
errs := make(chan error, len(peers))
for _, peer := range peers {
go func(peer database.Replica) {
defer wg.Done()
ra, err := url.Parse(peer.RelayAddress)
err := PingPeerReplica(ctx, client, peer.RelayAddress)
if err != nil {
m.logger.Warn(ctx, "could not parse relay address",
slog.F("relay_address", peer.RelayAddress), slog.Error(err))
errs <- xerrors.Errorf("ping sibling replica %s (%s): %w", peer.Hostname, peer.RelayAddress, err)
m.logger.Warn(ctx, "failed to ping sibling replica, this could happen if the replica has shutdown",
slog.F("replica_hostname", peer.Hostname),
slog.F("replica_relay_address", peer.RelayAddress),
slog.Error(err),
)
return
}
target, err := ra.Parse("/derp/latency-check")
if err != nil {
m.logger.Warn(ctx, "could not resolve /derp/latency-check endpoint",
slog.F("relay_address", peer.RelayAddress), slog.Error(err))
return
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, target.String(), nil)
if err != nil {
m.logger.Warn(ctx, "create http request for relay probe",
slog.F("relay_address", peer.RelayAddress), slog.Error(err))
return
}
res, err := client.Do(req)
if err != nil {
mu.Lock()
failed = append(failed, fmt.Sprintf("relay %s (%s): %s", peer.Hostname, peer.RelayAddress, err))
mu.Unlock()
return
}
_ = res.Body.Close()
errs <- nil
}(peer)
}
wg.Wait()

replicaErrs := make([]string, 0, len(peers))
for i := 0; i < len(peers); i++ {
err := <-errs
if err != nil {
replicaErrs = append(replicaErrs, err.Error())
}
}
replicaError := ""
if len(failed) > 0 {
replicaError = fmt.Sprintf("Failed to dial peers: %s", strings.Join(failed, ", "))
if len(replicaErrs) > 0 {
replicaError = fmt.Sprintf("Failed to dial peers: %s", strings.Join(replicaErrs, ", "))
}

databaseLatency, err := m.db.Ping(ctx)
@@ -363,6 +354,32 @@ func (m *Manager) syncReplicas(ctx context.Context) error {
return nil
}

// PingPeerReplica pings a peer replica over it's internal relay address to
// ensure it's reachable and alive for health purposes.
func PingPeerReplica(ctx context.Context, client http.Client, relayAddress string) error {
ra, err := url.Parse(relayAddress)
if err != nil {
return xerrors.Errorf("parse relay address %q: %w", relayAddress, err)
}
target, err := ra.Parse("/derp/latency-check")
if err != nil {
return xerrors.Errorf("parse latency-check URL: %w", err)
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, target.String(), nil)
if err != nil {
return xerrors.Errorf("create request: %w", err)
}
res, err := client.Do(req)
if err != nil {
return xerrors.Errorf("do probe: %w", err)
}
_ = res.Body.Close()
if res.StatusCode != http.StatusOK {
return xerrors.Errorf("unexpected status code: %d", res.StatusCode)
}
return nil
}

// Self represents the current replica.
func (m *Manager) Self() database.Replica {
m.mutex.Lock()
@@ -466,3 +483,29 @@ func (m *Manager) Close() error {
}
return nil
}

// CreateDERPMeshTLSConfig creates a TLS configuration for connecting to peers
// in the DERP mesh over private networking. It overrides the ServerName to be
// the expected public hostname of the peer, and trusts all of the TLS server
// certificates used by this replica (as we expect all replicas to use the same
// TLS certificates).
func CreateDERPMeshTLSConfig(hostname string, tlsCertificates []tls.Certificate) (*tls.Config, error) {
meshRootCA := x509.NewCertPool()
for _, certificate := range tlsCertificates {
for _, certificatePart := range certificate.Certificate {
parsedCert, err := x509.ParseCertificate(certificatePart)
if err != nil {
return nil, xerrors.Errorf("parse certificate %s: %w", parsedCert.Subject.CommonName, err)
}
meshRootCA.AddCert(parsedCert)
}
}

// This TLS configuration trusts the built-in TLS certificates and forces
// the server name to be the public hostname.
return &tls.Config{
MinVersion: tls.VersionTLS12,
RootCAs: meshRootCA,
ServerName: hostname,
}, nil
}
```
```
@@ -286,7 +286,7 @@ func (d *derpyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
d.Add(1)
return
}
w.WriteHeader(http.StatusUpgradeRequired)
w.WriteHeader(http.StatusOK)
}

func (d *derpyHandler) requireOnlyDERPPaths(t *testing.T) {
```
+118
-45
@@ -3,14 +3,15 @@ package wsproxy
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
@@ -19,6 +20,7 @@ import (
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"golang.org/x/sync/singleflight"
|
||||
"golang.org/x/xerrors"
|
||||
"tailscale.com/derp"
|
||||
"tailscale.com/derp/derphttp"
|
||||
@@ -35,6 +37,7 @@ import (
|
||||
"github.com/coder/coder/v2/coderd/workspaceapps"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/enterprise/derpmesh"
|
||||
"github.com/coder/coder/v2/enterprise/replicasync"
|
||||
"github.com/coder/coder/v2/enterprise/wsproxy/wsproxysdk"
|
||||
"github.com/coder/coder/v2/site"
|
||||
"github.com/coder/coder/v2/tailnet"
|
||||
@@ -76,6 +79,10 @@ type Options struct {
|
||||
// provide access to workspace apps/terminal.
|
||||
DERPOnly bool
|
||||
|
||||
// ReplicaErrCallback is called when the proxy replica successfully or
|
||||
// unsuccessfully pings its peers in the mesh.
|
||||
ReplicaErrCallback func(replicas []codersdk.Replica, err string)
|
||||
|
||||
ProxySessionToken string
|
||||
// AllowAllCors will set all CORs headers to '*'.
|
||||
// By default, CORs is set to accept external requests
|
||||
@@ -121,14 +128,18 @@ type Server struct {
|
||||
SDKClient *wsproxysdk.Client
|
||||
|
||||
// DERP
|
||||
derpMesh *derpmesh.Mesh
|
||||
latestDERPMap atomic.Pointer[tailcfg.DERPMap]
|
||||
derpMesh *derpmesh.Mesh
|
||||
derpMeshTLSConfig *tls.Config
|
||||
replicaPingSingleflight singleflight.Group
|
||||
replicaErrMut sync.Mutex
|
||||
replicaErr string
|
||||
latestDERPMap atomic.Pointer[tailcfg.DERPMap]
|
||||
|
||||
// Used for graceful shutdown. Required for the dialer.
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
derpCloseFunc func()
|
||||
registerDone <-chan struct{}
|
||||
registerLoop *wsproxysdk.RegisterWorkspaceProxyLoop
|
||||
}
|
||||
|
||||
// New creates a new workspace proxy server. This requires a primary coderd
|
||||
@@ -166,29 +177,10 @@ func New(ctx context.Context, opts *Options) (*Server, error) {
|
||||
return nil, xerrors.Errorf("%q is a workspace proxy, not a primary coderd instance", opts.DashboardURL)
|
||||
}
|
||||
|
||||
meshRootCA := x509.NewCertPool()
|
||||
for _, certificate := range opts.TLSCertificates {
|
||||
for _, certificatePart := range certificate.Certificate {
|
||||
certificate, err := x509.ParseCertificate(certificatePart)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse certificate %s: %w", certificate.Subject.CommonName, err)
|
||||
}
|
||||
meshRootCA.AddCert(certificate)
|
||||
}
|
||||
meshTLSConfig, err := replicasync.CreateDERPMeshTLSConfig(opts.AccessURL.Hostname(), opts.TLSCertificates)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("create DERP mesh tls config: %w", err)
|
||||
}
|
||||
// This TLS configuration spoofs access from the access URL hostname
|
||||
// assuming that the certificates provided will cover that hostname.
|
||||
//
|
||||
// Replica sync and DERP meshing require accessing replicas via their
|
||||
// internal IP addresses, and if TLS is configured we use the same
|
||||
// certificates.
|
||||
meshTLSConfig := &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
Certificates: opts.TLSCertificates,
|
||||
RootCAs: meshRootCA,
|
||||
ServerName: opts.AccessURL.Hostname(),
|
||||
}
|
||||
|
||||
derpServer := derp.NewServer(key.NewNode(), tailnet.Logger(opts.Logger.Named("net.derp")))
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
@@ -202,23 +194,22 @@ func New(ctx context.Context, opts *Options) (*Server, error) {
|
||||
PrometheusRegistry: opts.PrometheusRegistry,
|
||||
SDKClient: client,
|
||||
derpMesh: derpmesh.New(opts.Logger.Named("net.derpmesh"), derpServer, meshTLSConfig),
|
||||
derpMeshTLSConfig: meshTLSConfig,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
}
|
||||
|
||||
// Register the workspace proxy with the primary coderd instance and start a
|
||||
// goroutine to periodically re-register.
|
||||
replicaID := uuid.New()
|
||||
osHostname := cliutil.Hostname()
|
||||
regResp, registerDone, err := client.RegisterWorkspaceProxyLoop(ctx, wsproxysdk.RegisterWorkspaceProxyLoopOpts{
|
||||
registerLoop, regResp, err := client.RegisterWorkspaceProxyLoop(ctx, wsproxysdk.RegisterWorkspaceProxyLoopOpts{
|
||||
Logger: opts.Logger,
|
||||
Request: wsproxysdk.RegisterWorkspaceProxyRequest{
|
||||
AccessURL: opts.AccessURL.String(),
|
||||
WildcardHostname: opts.AppHostname,
|
||||
DerpEnabled: opts.DERPEnabled,
|
||||
DerpOnly: opts.DERPOnly,
|
||||
ReplicaID: replicaID,
|
||||
ReplicaHostname: osHostname,
|
||||
ReplicaID: uuid.New(),
|
||||
ReplicaHostname: cliutil.Hostname(),
|
||||
ReplicaError: "",
|
||||
ReplicaRelayAddress: opts.DERPServerRelayAddress,
|
||||
Version: buildinfo.Version(),
|
||||
@@ -230,12 +221,13 @@ func New(ctx context.Context, opts *Options) (*Server, error) {
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("register proxy: %w", err)
|
||||
}
|
||||
s.registerDone = registerDone
|
||||
err = s.handleRegister(ctx, regResp)
|
||||
s.registerLoop = registerLoop
|
||||
|
||||
derpServer.SetMeshKey(regResp.DERPMeshKey)
|
||||
err = s.handleRegister(regResp)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("handle register: %w", err)
|
||||
}
|
||||
derpServer.SetMeshKey(regResp.DERPMeshKey)
|
||||
|
||||
secKey, err := workspaceapps.KeyFromString(regResp.AppSecurityKey)
|
||||
if err != nil {
|
||||
@@ -409,16 +401,16 @@ func New(ctx context.Context, opts *Options) (*Server, error) {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s *Server) RegisterNow() error {
|
||||
_, err := s.registerLoop.RegisterNow()
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Server) Close() error {
|
||||
s.cancel()
|
||||
|
||||
var err error
|
||||
registerDoneWaitTicker := time.NewTicker(11 * time.Second) // the attempt timeout is 10s
|
||||
select {
|
||||
case <-registerDoneWaitTicker.C:
|
||||
err = multierror.Append(err, xerrors.New("timed out waiting for registerDone"))
|
||||
case <-s.registerDone:
|
||||
}
|
||||
s.registerLoop.Close()
|
||||
s.derpCloseFunc()
|
||||
appServerErr := s.AppServer.Close()
|
||||
if appServerErr != nil {
|
||||
@@ -432,23 +424,98 @@ func (s *Server) Close() error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (*Server) mutateRegister(_ *wsproxysdk.RegisterWorkspaceProxyRequest) {
// TODO: we should probably ping replicas similarly to the replicasync
// package in the primary and update req.ReplicaError accordingly.
func (s *Server) mutateRegister(req *wsproxysdk.RegisterWorkspaceProxyRequest) {
	s.replicaErrMut.Lock()
	defer s.replicaErrMut.Unlock()
	req.ReplicaError = s.replicaErr
}
|
||||
|
||||
func (s *Server) handleRegister(_ context.Context, res wsproxysdk.RegisterWorkspaceProxyResponse) error {
func (s *Server) handleRegister(res wsproxysdk.RegisterWorkspaceProxyResponse) error {
	addresses := make([]string, len(res.SiblingReplicas))
	for i, replica := range res.SiblingReplicas {
		addresses[i] = replica.RelayAddress
	}
	s.Logger.Debug(s.ctx, "setting DERP mesh sibling addresses", slog.F("addresses", addresses))
	s.derpMesh.SetAddresses(addresses, false)

	s.latestDERPMap.Store(res.DERPMap)

	go s.pingSiblingReplicas(res.SiblingReplicas)
	return nil
}
|
||||
|
||||
func (s *Server) pingSiblingReplicas(replicas []codersdk.Replica) {
|
||||
if len(replicas) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Avoid pinging multiple times at once if the list hasn't changed.
|
||||
relayURLs := make([]string, len(replicas))
|
||||
for i, r := range replicas {
|
||||
relayURLs[i] = r.RelayAddress
|
||||
}
|
||||
slices.Sort(relayURLs)
|
||||
singleflightStr := strings.Join(relayURLs, " ") // URLs can't contain spaces.
|
||||
|
||||
//nolint:dogsled
|
||||
_, _, _ = s.replicaPingSingleflight.Do(singleflightStr, func() (any, error) {
|
||||
const (
|
||||
perReplicaTimeout = 3 * time.Second
|
||||
fullTimeout = 10 * time.Second
|
||||
)
|
||||
ctx, cancel := context.WithTimeout(s.ctx, fullTimeout)
|
||||
defer cancel()
|
||||
|
||||
client := http.Client{
|
||||
Timeout: perReplicaTimeout,
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: s.derpMeshTLSConfig,
|
||||
DisableKeepAlives: true,
|
||||
},
|
||||
}
|
||||
defer client.CloseIdleConnections()
|
||||
|
||||
errs := make(chan error, len(replicas))
|
||||
for _, peer := range replicas {
|
||||
go func(peer codersdk.Replica) {
|
||||
err := replicasync.PingPeerReplica(ctx, client, peer.RelayAddress)
if err != nil {
	errs <- xerrors.Errorf("ping sibling replica %s (%s): %w", peer.Hostname, peer.RelayAddress, err)
	s.Logger.Warn(ctx, "failed to ping sibling replica, this could happen if the replica has shut down",
		slog.F("replica_hostname", peer.Hostname),
		slog.F("replica_relay_address", peer.RelayAddress),
		slog.Error(err),
	)
	return
}
|
||||
errs <- nil
|
||||
}(peer)
|
||||
}
|
||||
|
||||
replicaErrs := make([]string, 0, len(replicas))
|
||||
for i := 0; i < len(replicas); i++ {
|
||||
err := <-errs
|
||||
if err != nil {
|
||||
replicaErrs = append(replicaErrs, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
s.replicaErrMut.Lock()
|
||||
defer s.replicaErrMut.Unlock()
|
||||
s.replicaErr = ""
|
||||
if len(replicaErrs) > 0 {
|
||||
s.replicaErr = fmt.Sprintf("Failed to dial peers: %s", strings.Join(replicaErrs, ", "))
|
||||
}
|
||||
if s.Options.ReplicaErrCallback != nil {
|
||||
s.Options.ReplicaErrCallback(replicas, s.replicaErr)
|
||||
}
|
||||
|
||||
//nolint:nilnil // we don't actually use the return value of the
|
||||
// singleflight here
|
||||
return nil, nil
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Server) handleRegisterFailure(err error) {
|
||||
if s.ctx.Err() != nil {
|
||||
return
|
||||
@@ -517,8 +584,14 @@ func (s *Server) healthReport(rw http.ResponseWriter, r *http.Request) {
|
||||
fmt.Sprintf("version mismatch: primary coderd (%s) != workspace proxy (%s)", primaryBuild.Version, buildinfo.Version()))
|
||||
}
|
||||
|
||||
s.replicaErrMut.Lock()
|
||||
if s.replicaErr != "" {
|
||||
report.Errors = append(report.Errors, "High availability networking: it appears you are running more than one replica of the proxy, but the replicas are unable to establish a mesh for networking: "+s.replicaErr)
|
||||
}
|
||||
s.replicaErrMut.Unlock()
|
||||
|
||||
// TODO: We should hit the deployment config endpoint and do some config
|
||||
// checks. We can check the version from the X-CODER-BUILD-VERSION header
|
||||
// checks.
|
||||
|
||||
httpapi.Write(r.Context(), rw, http.StatusOK, report)
|
||||
}
|
||||
|
||||
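The ping path above dedupes concurrent sibling probes by keying a singleflight group on the sorted, space-joined relay URLs, so overlapping register callbacks for the same replica set share a single probe run. Below is a minimal, self-contained sketch of that dedup pattern (illustration only, not part of the change; pingOnce is a hypothetical stand-in for replicasync.PingPeerReplica):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
	"sync"

	"golang.org/x/sync/singleflight"
)

// pinger collapses concurrent ping requests for the same set of relay URLs
// into a single probe run, mirroring the dedup used by pingSiblingReplicas.
type pinger struct {
	group singleflight.Group
}

func (p *pinger) pingAll(relayURLs []string) error {
	// Sort and join so the same set always produces the same singleflight key.
	urls := append([]string(nil), relayURLs...)
	sort.Strings(urls)
	key := strings.Join(urls, " ") // URLs can't contain spaces.

	_, err, _ := p.group.Do(key, func() (any, error) {
		var (
			wg   sync.WaitGroup
			mu   sync.Mutex
			errs []string
		)
		for _, u := range urls {
			u := u
			wg.Add(1)
			go func() {
				defer wg.Done()
				if err := pingOnce(u); err != nil {
					mu.Lock()
					errs = append(errs, err.Error())
					mu.Unlock()
				}
			}()
		}
		wg.Wait()
		if len(errs) > 0 {
			return nil, fmt.Errorf("failed to dial peers: %s", strings.Join(errs, ", "))
		}
		return nil, nil
	})
	return err
}

// pingOnce is a placeholder for a real probe such as replicasync.PingPeerReplica.
func pingOnce(relayURL string) error { return nil }

func main() {
	p := &pinger{}
	fmt.Println(p.pingAll([]string{"https://b/derp", "https://a/derp"}))
}
```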
@@ -1,14 +1,21 @@
|
||||
package wsproxy_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"tailscale.com/derp"
|
||||
"tailscale.com/derp/derphttp"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
@@ -16,14 +23,17 @@ import (
|
||||
"cdr.dev/slog"
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
"github.com/coder/coder/v2/agent/agenttest"
|
||||
"github.com/coder/coder/v2/buildinfo"
|
||||
"github.com/coder/coder/v2/cli/clibase"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/healthcheck/derphealth"
|
||||
"github.com/coder/coder/v2/coderd/httpmw"
|
||||
"github.com/coder/coder/v2/coderd/workspaceapps/apptest"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/cryptorand"
|
||||
"github.com/coder/coder/v2/enterprise/coderd/coderdenttest"
|
||||
"github.com/coder/coder/v2/enterprise/coderd/license"
|
||||
"github.com/coder/coder/v2/enterprise/wsproxy/wsproxysdk"
|
||||
"github.com/coder/coder/v2/provisioner/echo"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
@@ -62,7 +72,7 @@ func TestDERPOnly(t *testing.T) {
|
||||
})
|
||||
|
||||
// Create an external proxy.
|
||||
_ = coderdenttest.NewWorkspaceProxy(t, api, client, &coderdenttest.ProxyOptions{
|
||||
_ = coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{
|
||||
Name: "best-proxy",
|
||||
DerpOnly: true,
|
||||
})
|
||||
@@ -109,35 +119,34 @@ func TestDERP(t *testing.T) {
|
||||
})
|
||||
|
||||
// Create two running external proxies.
|
||||
proxyAPI1 := coderdenttest.NewWorkspaceProxy(t, api, client, &coderdenttest.ProxyOptions{
|
||||
proxyAPI1 := coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{
|
||||
Name: "best-proxy",
|
||||
})
|
||||
proxyAPI2 := coderdenttest.NewWorkspaceProxy(t, api, client, &coderdenttest.ProxyOptions{
|
||||
proxyAPI2 := coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{
|
||||
Name: "worst-proxy",
|
||||
})
|
||||
|
||||
// Create a running external proxy with DERP disabled.
|
||||
proxyAPI3 := coderdenttest.NewWorkspaceProxy(t, api, client, &coderdenttest.ProxyOptions{
|
||||
proxyAPI3 := coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{
|
||||
Name: "no-derp-proxy",
|
||||
DerpDisabled: true,
|
||||
})
|
||||
|
||||
// Create a proxy that is never started.
|
||||
createProxyCtx := testutil.Context(t, testutil.WaitLong)
|
||||
_, err := client.CreateWorkspaceProxy(createProxyCtx, codersdk.CreateWorkspaceProxyRequest{
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
_, err := client.CreateWorkspaceProxy(ctx, codersdk.CreateWorkspaceProxyRequest{
|
||||
Name: "never-started-proxy",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for both running proxies to become healthy.
|
||||
require.Eventually(t, func() bool {
|
||||
healthCtx := testutil.Context(t, testutil.WaitLong)
|
||||
err := api.ProxyHealth.ForceUpdate(healthCtx)
|
||||
err := api.ProxyHealth.ForceUpdate(ctx)
|
||||
if !assert.NoError(t, err) {
|
||||
return false
|
||||
}
|
||||
|
||||
regions, err := client.Regions(healthCtx)
|
||||
regions, err := client.Regions(ctx)
|
||||
if !assert.NoError(t, err) {
|
||||
return false
|
||||
}
|
||||
@@ -259,7 +268,8 @@ resourceLoop:
|
||||
t.Run("ConnectDERP", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
connInfo, err := client.WorkspaceAgentConnectionInfo(testutil.Context(t, testutil.WaitLong), agentID)
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
connInfo, err := client.WorkspaceAgentConnectionInfo(ctx, agentID)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, connInfo.DERPMap)
|
||||
require.Len(t, connInfo.DERPMap.Regions, 3+len(api.DeploymentValues.DERP.Server.STUNAddresses.Value()))
|
||||
@@ -282,7 +292,6 @@ resourceLoop:
|
||||
OmitDefaultRegions: true,
|
||||
}
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
report := derphealth.Report{}
|
||||
report.Run(ctx, &derphealth.ReportOptions{
|
||||
DERPMap: derpMap,
|
||||
@@ -340,19 +349,19 @@ func TestDERPEndToEnd(t *testing.T) {
|
||||
_ = closer.Close()
|
||||
})
|
||||
|
||||
coderdenttest.NewWorkspaceProxy(t, api, client, &coderdenttest.ProxyOptions{
|
||||
coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{
|
||||
Name: "best-proxy",
|
||||
})
|
||||
|
||||
// Wait for the proxy to become healthy.
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
require.Eventually(t, func() bool {
|
||||
healthCtx := testutil.Context(t, testutil.WaitLong)
|
||||
err := api.ProxyHealth.ForceUpdate(healthCtx)
|
||||
err := api.ProxyHealth.ForceUpdate(ctx)
|
||||
if !assert.NoError(t, err) {
|
||||
return false
|
||||
}
|
||||
|
||||
regions, err := client.Regions(healthCtx)
|
||||
regions, err := client.Regions(ctx)
|
||||
if !assert.NoError(t, err) {
|
||||
return false
|
||||
}
|
||||
@@ -408,7 +417,6 @@ resourceLoop:
|
||||
_ = coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
|
||||
// Connect to the workspace agent.
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
conn, err := client.DialWorkspaceAgent(ctx, agentID, &codersdk.DialWorkspaceAgentOptions{
|
||||
Logger: slogtest.Make(t, &slogtest.Options{
|
||||
IgnoreErrors: true,
|
||||
@@ -430,6 +438,342 @@ resourceLoop:
|
||||
require.False(t, p2p)
|
||||
}
|
||||
|
||||
// TestDERPMesh spawns 6 workspace proxy replicas and tries to connect to a
|
||||
// single DERP peer via every single one.
|
||||
func TestDERPMesh(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
deploymentValues := coderdtest.DeploymentValues(t)
|
||||
deploymentValues.Experiments = []string{
|
||||
"*",
|
||||
}
|
||||
|
||||
client, closer, api, _ := coderdenttest.NewWithAPI(t, &coderdenttest.Options{
|
||||
Options: &coderdtest.Options{
|
||||
DeploymentValues: deploymentValues,
|
||||
AppHostname: "*.primary.test.coder.com",
|
||||
IncludeProvisionerDaemon: true,
|
||||
RealIPConfig: &httpmw.RealIPConfig{
|
||||
TrustedOrigins: []*net.IPNet{{
|
||||
IP: net.ParseIP("127.0.0.1"),
|
||||
Mask: net.CIDRMask(8, 32),
|
||||
}},
|
||||
TrustedHeaders: []string{
|
||||
"CF-Connecting-IP",
|
||||
},
|
||||
},
|
||||
},
|
||||
LicenseOptions: &coderdenttest.LicenseOptions{
|
||||
Features: license.Features{
|
||||
codersdk.FeatureWorkspaceProxy: 1,
|
||||
},
|
||||
},
|
||||
})
|
||||
t.Cleanup(func() {
|
||||
_ = closer.Close()
|
||||
})
|
||||
|
||||
proxyURL, err := url.Parse("https://proxy.test.coder.com")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create 6 proxy replicas.
|
||||
const count = 6
|
||||
var (
|
||||
sessionToken = ""
|
||||
proxies = [count]coderdenttest.WorkspaceProxy{}
|
||||
derpURLs = [count]string{}
|
||||
)
|
||||
for i := range proxies {
|
||||
proxies[i] = coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{
|
||||
Name: "best-proxy",
|
||||
Token: sessionToken,
|
||||
ProxyURL: proxyURL,
|
||||
})
|
||||
if i == 0 {
|
||||
sessionToken = proxies[i].Options.ProxySessionToken
|
||||
}
|
||||
|
||||
derpURL := *proxies[i].ServerURL
|
||||
derpURL.Path = "/derp"
|
||||
derpURLs[i] = derpURL.String()
|
||||
}
|
||||
|
||||
// Force all proxies to re-register immediately. This ensures the DERP mesh
|
||||
// is up-to-date. In production this will happen automatically after about
|
||||
// 15 seconds.
|
||||
for i, proxy := range proxies {
|
||||
err := proxy.RegisterNow()
|
||||
require.NoErrorf(t, err, "failed to force proxy %d to re-register", i)
|
||||
}
|
||||
|
||||
// Generate cases. We have a case for:
|
||||
// - Each proxy to itself.
|
||||
// - Each proxy to each other proxy (one way, no duplicates).
|
||||
cases := [][2]string{}
|
||||
for i, derpURL := range derpURLs {
|
||||
cases = append(cases, [2]string{derpURL, derpURL})
|
||||
for j := i + 1; j < len(derpURLs); j++ {
|
||||
cases = append(cases, [2]string{derpURL, derpURLs[j]})
|
||||
}
|
||||
}
|
||||
require.Len(t, cases, (count*(count+1))/2) // triangle number
|
||||
|
||||
for i, c := range cases {
|
||||
i, c := i, c
|
||||
t.Run(fmt.Sprintf("Proxy%d", i), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Logf("derp1=%s, derp2=%s", c[0], c[1])
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
client1, client1Recv := createDERPClient(t, ctx, "client1", c[0])
|
||||
client2, client2Recv := createDERPClient(t, ctx, "client2", c[1])
|
||||
|
||||
// Send a packet from client 1 to client 2.
|
||||
testDERPSend(t, ctx, client2.SelfPublicKey(), client2Recv, client1)
|
||||
|
||||
// Send a packet from client 2 to client 1.
|
||||
testDERPSend(t, ctx, client1.SelfPublicKey(), client1Recv, client2)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestWorkspaceProxyDERPMeshProbe ensures that each replica pings every other
|
||||
// replica in the same region as itself periodically.
|
||||
func TestWorkspaceProxyDERPMeshProbe(t *testing.T) {
|
||||
t.Parallel()
|
||||
createProxyRegion := func(ctx context.Context, t *testing.T, client *codersdk.Client, name string) codersdk.UpdateWorkspaceProxyResponse {
|
||||
t.Helper()
|
||||
proxyRes, err := client.CreateWorkspaceProxy(ctx, codersdk.CreateWorkspaceProxyRequest{
|
||||
Name: name,
|
||||
Icon: "/emojis/flag.png",
|
||||
})
|
||||
require.NoError(t, err, "failed to create workspace proxy")
|
||||
return proxyRes
|
||||
}
|
||||
|
||||
registerBrokenProxy := func(ctx context.Context, t *testing.T, primaryAccessURL *url.URL, accessURL, token string) {
	t.Helper()
	// Create an HTTP server that always replies with 500.
	srv := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		rw.WriteHeader(http.StatusInternalServerError)
	}))
	t.Cleanup(srv.Close)
|
||||
|
||||
// Register a proxy.
|
||||
wsproxyClient := wsproxysdk.New(primaryAccessURL)
|
||||
wsproxyClient.SetSessionToken(token)
|
||||
|
||||
hostname, err := cryptorand.String(6)
|
||||
require.NoError(t, err)
|
||||
_, err = wsproxyClient.RegisterWorkspaceProxy(ctx, wsproxysdk.RegisterWorkspaceProxyRequest{
|
||||
AccessURL: accessURL,
|
||||
WildcardHostname: "",
|
||||
DerpEnabled: true,
|
||||
DerpOnly: false,
|
||||
ReplicaID: uuid.New(),
|
||||
ReplicaHostname: hostname,
|
||||
ReplicaError: "",
|
||||
ReplicaRelayAddress: srv.URL,
|
||||
Version: buildinfo.Version(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
t.Run("ProbeOK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
deploymentValues := coderdtest.DeploymentValues(t)
|
||||
deploymentValues.Experiments = []string{
|
||||
"*",
|
||||
}
|
||||
|
||||
client, closer, api, _ := coderdenttest.NewWithAPI(t, &coderdenttest.Options{
|
||||
Options: &coderdtest.Options{
|
||||
DeploymentValues: deploymentValues,
|
||||
AppHostname: "*.primary.test.coder.com",
|
||||
IncludeProvisionerDaemon: true,
|
||||
RealIPConfig: &httpmw.RealIPConfig{
|
||||
TrustedOrigins: []*net.IPNet{{
|
||||
IP: net.ParseIP("127.0.0.1"),
|
||||
Mask: net.CIDRMask(8, 32),
|
||||
}},
|
||||
TrustedHeaders: []string{
|
||||
"CF-Connecting-IP",
|
||||
},
|
||||
},
|
||||
},
|
||||
LicenseOptions: &coderdenttest.LicenseOptions{
|
||||
Features: license.Features{
|
||||
codersdk.FeatureWorkspaceProxy: 1,
|
||||
},
|
||||
},
|
||||
})
|
||||
t.Cleanup(func() {
|
||||
_ = closer.Close()
|
||||
})
|
||||
|
||||
// Register but don't start a proxy in a different region. This
|
||||
// shouldn't affect the mesh since it's in a different region.
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
fakeProxyRes := createProxyRegion(ctx, t, client, "fake-proxy")
|
||||
registerBrokenProxy(ctx, t, api.AccessURL, "https://fake-proxy.test.coder.com", fakeProxyRes.ProxyToken)
|
||||
|
||||
proxyURL, err := url.Parse("https://proxy1.test.coder.com")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create 6 proxy replicas.
|
||||
const count = 6
|
||||
var (
|
||||
sessionToken = ""
|
||||
proxies = [count]coderdenttest.WorkspaceProxy{}
|
||||
replicaPingDone = [count]bool{}
|
||||
)
|
||||
for i := range proxies {
|
||||
i := i
|
||||
proxies[i] = coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{
|
||||
Name: "proxy-1",
|
||||
Token: sessionToken,
|
||||
ProxyURL: proxyURL,
|
||||
ReplicaPingCallback: func(replicas []codersdk.Replica, err string) {
|
||||
if len(replicas) != count-1 {
|
||||
// Still warming up...
|
||||
return
|
||||
}
|
||||
replicaPingDone[i] = true
|
||||
assert.Emptyf(t, err, "replica %d ping callback error", i)
|
||||
},
|
||||
})
|
||||
if i == 0 {
|
||||
sessionToken = proxies[i].Options.ProxySessionToken
|
||||
}
|
||||
}
|
||||
|
||||
// Force all proxies to re-register immediately. This ensures the DERP
|
||||
// mesh is up-to-date. In production this will happen automatically
|
||||
// after about 15 seconds.
|
||||
for i, proxy := range proxies {
|
||||
err := proxy.RegisterNow()
|
||||
require.NoErrorf(t, err, "failed to force proxy %d to re-register", i)
|
||||
}
|
||||
|
||||
// Ensure that all proxies have pinged.
|
||||
require.Eventually(t, func() bool {
|
||||
ok := true
|
||||
for i := range proxies {
|
||||
if !replicaPingDone[i] {
|
||||
t.Logf("replica %d has not pinged yet", i)
|
||||
ok = false
|
||||
}
|
||||
}
|
||||
return ok
|
||||
}, testutil.WaitLong, testutil.IntervalSlow)
|
||||
t.Log("all replicas have pinged")
|
||||
|
||||
// Check they're all healthy according to /healthz-report.
|
||||
for _, proxy := range proxies {
|
||||
// GET /healthz-report
|
||||
u := proxy.ServerURL.ResolveReference(&url.URL{Path: "/healthz-report"})
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
|
||||
require.NoError(t, err)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
|
||||
var respJSON codersdk.ProxyHealthReport
|
||||
err = json.NewDecoder(resp.Body).Decode(&respJSON)
|
||||
resp.Body.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Empty(t, respJSON.Errors, "proxy is not healthy")
|
||||
}
|
||||
})
|
||||
|
||||
// Register one proxy, then pretend to register 5 others. This should cause
|
||||
// the mesh to fail and return an error.
|
||||
t.Run("ProbeFail", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
deploymentValues := coderdtest.DeploymentValues(t)
|
||||
deploymentValues.Experiments = []string{
|
||||
"*",
|
||||
}
|
||||
|
||||
client, closer, api, _ := coderdenttest.NewWithAPI(t, &coderdenttest.Options{
|
||||
Options: &coderdtest.Options{
|
||||
DeploymentValues: deploymentValues,
|
||||
AppHostname: "*.primary.test.coder.com",
|
||||
IncludeProvisionerDaemon: true,
|
||||
RealIPConfig: &httpmw.RealIPConfig{
|
||||
TrustedOrigins: []*net.IPNet{{
|
||||
IP: net.ParseIP("127.0.0.1"),
|
||||
Mask: net.CIDRMask(8, 32),
|
||||
}},
|
||||
TrustedHeaders: []string{
|
||||
"CF-Connecting-IP",
|
||||
},
|
||||
},
|
||||
},
|
||||
LicenseOptions: &coderdenttest.LicenseOptions{
|
||||
Features: license.Features{
|
||||
codersdk.FeatureWorkspaceProxy: 1,
|
||||
},
|
||||
},
|
||||
})
|
||||
t.Cleanup(func() {
|
||||
_ = closer.Close()
|
||||
})
|
||||
|
||||
proxyURL, err := url.Parse("https://proxy2.test.coder.com")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create 1 real proxy replica.
|
||||
const fakeCount = 5
|
||||
replicaPingErr := make(chan string, 4)
|
||||
proxy := coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{
|
||||
Name: "proxy-2",
|
||||
ProxyURL: proxyURL,
|
||||
ReplicaPingCallback: func(replicas []codersdk.Replica, err string) {
|
||||
if len(replicas) != fakeCount {
|
||||
// Still warming up...
|
||||
return
|
||||
}
|
||||
replicaPingErr <- err
|
||||
},
|
||||
})
|
||||
|
||||
// Register (but don't start wsproxy.Server) 5 other proxies in the same
// region. Since they registered recently, they should be included in the
// mesh. We create an HTTP server on the relay address that always
// responds with 500 so probes fail.
ctx := testutil.Context(t, testutil.WaitLong)
for i := 0; i < fakeCount; i++ {
	registerBrokenProxy(ctx, t, api.AccessURL, proxyURL.String(), proxy.Options.ProxySessionToken)
}
|
||||
|
||||
// Force the proxy to re-register immediately.
|
||||
err = proxy.RegisterNow()
|
||||
require.NoError(t, err, "failed to force proxy to re-register")
|
||||
|
||||
// Wait for the ping to fail.
|
||||
replicaErr := testutil.RequireRecvCtx(ctx, t, replicaPingErr)
|
||||
require.NotEmpty(t, replicaErr, "replica ping error")
|
||||
|
||||
// GET /healthz-report
|
||||
u := proxy.ServerURL.ResolveReference(&url.URL{Path: "/healthz-report"})
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
|
||||
require.NoError(t, err)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
|
||||
var respJSON codersdk.ProxyHealthReport
|
||||
err = json.NewDecoder(resp.Body).Decode(&respJSON)
|
||||
resp.Body.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, respJSON.Errors, 1, "proxy is healthy")
|
||||
require.Contains(t, respJSON.Errors[0], "High availability networking")
|
||||
})
|
||||
}
|
||||
|
||||
func TestWorkspaceProxyWorkspaceApps(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -482,7 +826,7 @@ func TestWorkspaceProxyWorkspaceApps(t *testing.T) {
|
||||
if opts.DisableSubdomainApps {
|
||||
opts.AppHost = ""
|
||||
}
|
||||
proxyAPI := coderdenttest.NewWorkspaceProxy(t, api, client, &coderdenttest.ProxyOptions{
|
||||
proxyAPI := coderdenttest.NewWorkspaceProxyReplica(t, api, client, &coderdenttest.ProxyOptions{
|
||||
Name: "best-proxy",
|
||||
AppHostname: opts.AppHost,
|
||||
DisablePathApps: opts.DisablePathApps,
|
||||
@@ -498,3 +842,84 @@ func TestWorkspaceProxyWorkspaceApps(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// createDERPClient creates a DERP client and spawns a goroutine that reads from
|
||||
// the client and sends the received packets to a channel.
|
||||
//
|
||||
//nolint:revive
|
||||
func createDERPClient(t *testing.T, ctx context.Context, name string, derpURL string) (*derphttp.Client, <-chan derp.ReceivedPacket) {
|
||||
t.Helper()
|
||||
|
||||
client, err := derphttp.NewClient(key.NewNode(), derpURL, func(format string, args ...any) {
|
||||
t.Logf(name+": "+format, args...)
|
||||
})
|
||||
require.NoError(t, err, "create client")
|
||||
t.Cleanup(func() {
|
||||
_ = client.Close()
|
||||
})
|
||||
err = client.Connect(ctx)
|
||||
require.NoError(t, err, "connect to DERP server")
|
||||
|
||||
ch := make(chan derp.ReceivedPacket, 1)
|
||||
go func() {
|
||||
defer close(ch)
|
||||
for {
|
||||
msg, err := client.Recv()
|
||||
if err != nil {
|
||||
t.Logf("Recv error: %v", err)
|
||||
return
|
||||
}
|
||||
switch msg := msg.(type) {
|
||||
case derp.ReceivedPacket:
|
||||
ch <- msg
|
||||
return
|
||||
default:
|
||||
// We don't care about other messages.
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return client, ch
|
||||
}
|
||||
|
||||
// testDERPSend sends a message from src to dstKey and waits for it to be
|
||||
// received on dstCh.
|
||||
//
|
||||
// If the packet doesn't arrive within 500ms, it will try to send it again until
|
||||
// testutil.WaitLong is reached.
|
||||
//
|
||||
//nolint:revive
|
||||
func testDERPSend(t *testing.T, ctx context.Context, dstKey key.NodePublic, dstCh <-chan derp.ReceivedPacket, src *derphttp.Client) {
|
||||
t.Helper()
|
||||
|
||||
// The prefix helps identify where the packet starts if you get garbled data
|
||||
// in logs.
|
||||
const msgStrPrefix = "test_packet_"
|
||||
msgStr, err := cryptorand.String(64 - len(msgStrPrefix))
|
||||
require.NoError(t, err, "generate random msg string")
|
||||
msg := []byte(msgStrPrefix + msgStr)
|
||||
|
||||
err = src.Send(dstKey, msg)
|
||||
require.NoError(t, err, "send message via DERP")
|
||||
|
||||
ticker := time.NewTicker(time.Millisecond * 500)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case pkt := <-dstCh:
|
||||
require.Equal(t, src.SelfPublicKey(), pkt.Source, "packet came from wrong source")
|
||||
require.Equal(t, msg, pkt.Data, "packet data is wrong")
|
||||
return
|
||||
case <-ctx.Done():
|
||||
t.Fatal("timed out waiting for packet")
|
||||
return
|
||||
case <-ticker.C:
|
||||
}
|
||||
|
||||
// Send another packet. Since we're sending packets immediately
|
||||
// after opening the clients, they might not be meshed together
|
||||
// properly yet.
|
||||
err = src.Send(dstKey, msg)
|
||||
require.NoError(t, err, "send message via DERP")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -277,135 +277,214 @@ type RegisterWorkspaceProxyLoopOpts struct {
|
||||
// called in a blocking manner, so it should avoid blocking for too long. If
|
||||
// the callback returns an error, the loop will stop immediately and the
|
||||
// error will be returned to the FailureFn.
|
||||
CallbackFn func(ctx context.Context, res RegisterWorkspaceProxyResponse) error
|
||||
CallbackFn func(res RegisterWorkspaceProxyResponse) error
|
||||
// FailureFn is called with the last error returned from the server if the
|
||||
// context is canceled, registration fails for more than MaxFailureCount,
|
||||
// or if any permanent values in the response change.
|
||||
FailureFn func(err error)
|
||||
}
|
||||
|
||||
// RegisterWorkspaceProxyLoop will register the workspace proxy and then start a
|
||||
// goroutine to keep registering periodically in the background.
|
||||
//
|
||||
// The first response is returned immediately, and subsequent responses will be
|
||||
// notified to the given CallbackFn. When the context is canceled the loop will
|
||||
// stop immediately and the context error will be returned to the FailureFn.
|
||||
//
|
||||
// The returned channel will be closed when the loop stops and can be used to
|
||||
// ensure the loop is dead before continuing. When a fatal error is encountered,
|
||||
// the proxy will be deregistered (with the same ReplicaID and AttemptTimeout)
|
||||
// before calling the FailureFn.
|
||||
func (c *Client) RegisterWorkspaceProxyLoop(ctx context.Context, opts RegisterWorkspaceProxyLoopOpts) (RegisterWorkspaceProxyResponse, <-chan struct{}, error) {
|
||||
if opts.Interval == 0 {
|
||||
opts.Interval = 30 * time.Second
|
||||
}
|
||||
if opts.MaxFailureCount == 0 {
|
||||
opts.MaxFailureCount = 10
|
||||
}
|
||||
if opts.AttemptTimeout == 0 {
|
||||
opts.AttemptTimeout = 10 * time.Second
|
||||
}
|
||||
if opts.MutateFn == nil {
|
||||
opts.MutateFn = func(_ *RegisterWorkspaceProxyRequest) {}
|
||||
}
|
||||
if opts.CallbackFn == nil {
|
||||
opts.CallbackFn = func(_ context.Context, _ RegisterWorkspaceProxyResponse) error {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
type RegisterWorkspaceProxyLoop struct {
|
||||
opts RegisterWorkspaceProxyLoopOpts
|
||||
c *Client
|
||||
|
||||
failureFn := func(err error) {
|
||||
// We have to use background context here because the original context
|
||||
// may be canceled.
|
||||
deregisterCtx, cancel := context.WithTimeout(context.Background(), opts.AttemptTimeout)
|
||||
defer cancel()
|
||||
deregisterErr := c.DeregisterWorkspaceProxy(deregisterCtx, DeregisterWorkspaceProxyRequest{
|
||||
ReplicaID: opts.Request.ReplicaID,
|
||||
})
|
||||
if deregisterErr != nil {
|
||||
opts.Logger.Error(ctx,
|
||||
"failed to deregister workspace proxy with Coder primary (it will be automatically deregistered shortly)",
|
||||
slog.Error(deregisterErr),
|
||||
)
|
||||
}
|
||||
// runLoopNow takes a response channel to send the response to and triggers
|
||||
// the loop to run immediately if it's waiting.
|
||||
runLoopNow chan chan RegisterWorkspaceProxyResponse
|
||||
closedCtx context.Context
|
||||
close context.CancelFunc
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
if opts.FailureFn != nil {
|
||||
opts.FailureFn(err)
|
||||
}
|
||||
}
|
||||
|
||||
originalRes, err := c.RegisterWorkspaceProxy(ctx, opts.Request)
|
||||
func (l *RegisterWorkspaceProxyLoop) register(ctx context.Context) (RegisterWorkspaceProxyResponse, error) {
|
||||
registerCtx, registerCancel := context.WithTimeout(ctx, l.opts.AttemptTimeout)
|
||||
res, err := l.c.RegisterWorkspaceProxy(registerCtx, l.opts.Request)
|
||||
registerCancel()
|
||||
if err != nil {
|
||||
return RegisterWorkspaceProxyResponse{}, nil, xerrors.Errorf("register workspace proxy: %w", err)
|
||||
return RegisterWorkspaceProxyResponse{}, xerrors.Errorf("register workspace proxy: %w", err)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Start starts the proxy registration loop. The provided context is only used
|
||||
// for the initial registration. Use Close() to stop.
|
||||
func (l *RegisterWorkspaceProxyLoop) Start(ctx context.Context) (RegisterWorkspaceProxyResponse, error) {
|
||||
if l.opts.Interval == 0 {
|
||||
l.opts.Interval = 15 * time.Second
|
||||
}
|
||||
if l.opts.MaxFailureCount == 0 {
|
||||
l.opts.MaxFailureCount = 10
|
||||
}
|
||||
if l.opts.AttemptTimeout == 0 {
|
||||
l.opts.AttemptTimeout = 10 * time.Second
|
||||
}
|
||||
|
||||
var err error
|
||||
originalRes, err := l.register(ctx)
|
||||
if err != nil {
|
||||
return RegisterWorkspaceProxyResponse{}, xerrors.Errorf("initial registration: %w", err)
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
defer close(l.done)
|
||||
|
||||
var (
|
||||
failedAttempts = 0
|
||||
ticker = time.NewTicker(opts.Interval)
|
||||
ticker = time.NewTicker(l.opts.Interval)
|
||||
)
|
||||
for {
|
||||
var respCh chan RegisterWorkspaceProxyResponse
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
failureFn(ctx.Err())
|
||||
case <-l.closedCtx.Done():
|
||||
l.failureFn(xerrors.Errorf("proxy registration loop closed"))
|
||||
return
|
||||
case respCh = <-l.runLoopNow:
|
||||
case <-ticker.C:
|
||||
}
|
||||
|
||||
opts.Logger.Debug(ctx,
|
||||
l.opts.Logger.Debug(context.Background(),
|
||||
"re-registering workspace proxy with Coder primary",
|
||||
slog.F("req", opts.Request),
|
||||
slog.F("timeout", opts.AttemptTimeout),
|
||||
slog.F("req", l.opts.Request),
|
||||
slog.F("timeout", l.opts.AttemptTimeout),
|
||||
slog.F("failed_attempts", failedAttempts),
|
||||
)
|
||||
opts.MutateFn(&opts.Request)
|
||||
registerCtx, cancel := context.WithTimeout(ctx, opts.AttemptTimeout)
|
||||
res, err := c.RegisterWorkspaceProxy(registerCtx, opts.Request)
|
||||
cancel()
|
||||
|
||||
l.mutateFn(&l.opts.Request)
|
||||
resp, err := l.register(l.closedCtx)
|
||||
if err != nil {
|
||||
failedAttempts++
|
||||
opts.Logger.Warn(ctx,
|
||||
l.opts.Logger.Warn(context.Background(),
|
||||
"failed to re-register workspace proxy with Coder primary",
|
||||
slog.F("req", opts.Request),
|
||||
slog.F("timeout", opts.AttemptTimeout),
|
||||
slog.F("req", l.opts.Request),
|
||||
slog.F("timeout", l.opts.AttemptTimeout),
|
||||
slog.F("failed_attempts", failedAttempts),
|
||||
slog.Error(err),
|
||||
)
|
||||
|
||||
if failedAttempts > opts.MaxFailureCount {
|
||||
failureFn(xerrors.Errorf("exceeded re-registration failure count of %d: last error: %w", opts.MaxFailureCount, err))
|
||||
if failedAttempts > l.opts.MaxFailureCount {
|
||||
l.failureFn(xerrors.Errorf("exceeded re-registration failure count of %d: last error: %w", l.opts.MaxFailureCount, err))
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
failedAttempts = 0
|
||||
|
||||
if res.AppSecurityKey != originalRes.AppSecurityKey {
|
||||
failureFn(xerrors.New("app security key has changed, proxy must be restarted"))
|
||||
// Check for consistency.
|
||||
if originalRes.AppSecurityKey != resp.AppSecurityKey {
|
||||
l.failureFn(xerrors.New("app security key has changed, proxy must be restarted"))
|
||||
return
|
||||
}
|
||||
if res.DERPMeshKey != originalRes.DERPMeshKey {
|
||||
failureFn(xerrors.New("DERP mesh key has changed, proxy must be restarted"))
|
||||
if originalRes.DERPMeshKey != resp.DERPMeshKey {
|
||||
l.failureFn(xerrors.New("DERP mesh key has changed, proxy must be restarted"))
|
||||
return
|
||||
}
|
||||
if res.DERPRegionID != originalRes.DERPRegionID {
|
||||
failureFn(xerrors.New("DERP region ID has changed, proxy must be restarted"))
|
||||
if originalRes.DERPRegionID != resp.DERPRegionID {
|
||||
l.failureFn(xerrors.New("DERP region ID has changed, proxy must be restarted"))
|
||||
return
|
||||
}
|
||||
|
||||
err = opts.CallbackFn(ctx, res)
|
||||
err = l.callbackFn(resp)
|
||||
if err != nil {
|
||||
failureFn(xerrors.Errorf("callback fn returned error: %w", err))
|
||||
l.failureFn(xerrors.Errorf("callback function returned an error: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
ticker.Reset(opts.Interval)
|
||||
// If we were triggered by RegisterNow(), send the response back.
|
||||
if respCh != nil {
|
||||
respCh <- resp
|
||||
close(respCh)
|
||||
}
|
||||
|
||||
ticker.Reset(l.opts.Interval)
|
||||
}
|
||||
}()
|
||||
|
||||
return originalRes, done, nil
|
||||
return originalRes, nil
|
||||
}
|
||||
|
||||
// RegisterNow asks the registration loop to register immediately. A timeout of
|
||||
// 2x the attempt timeout is used to wait for the response.
|
||||
func (l *RegisterWorkspaceProxyLoop) RegisterNow() (RegisterWorkspaceProxyResponse, error) {
|
||||
// The channel is closed by the loop after sending the response.
|
||||
respCh := make(chan RegisterWorkspaceProxyResponse, 1)
|
||||
select {
|
||||
case <-l.done:
|
||||
return RegisterWorkspaceProxyResponse{}, xerrors.New("proxy registration loop closed")
|
||||
case l.runLoopNow <- respCh:
|
||||
}
|
||||
select {
|
||||
case <-l.done:
|
||||
return RegisterWorkspaceProxyResponse{}, xerrors.New("proxy registration loop closed")
|
||||
case resp := <-respCh:
|
||||
return resp, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (l *RegisterWorkspaceProxyLoop) Close() {
|
||||
l.close()
|
||||
<-l.done
|
||||
}
|
||||
|
||||
func (l *RegisterWorkspaceProxyLoop) mutateFn(req *RegisterWorkspaceProxyRequest) {
|
||||
if l.opts.MutateFn != nil {
|
||||
l.opts.MutateFn(req)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *RegisterWorkspaceProxyLoop) callbackFn(res RegisterWorkspaceProxyResponse) error {
|
||||
if l.opts.CallbackFn != nil {
|
||||
return l.opts.CallbackFn(res)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *RegisterWorkspaceProxyLoop) failureFn(err error) {
|
||||
// We have to use background context here because the original context may
|
||||
// be canceled.
|
||||
deregisterCtx, cancel := context.WithTimeout(context.Background(), l.opts.AttemptTimeout)
|
||||
defer cancel()
|
||||
deregisterErr := l.c.DeregisterWorkspaceProxy(deregisterCtx, DeregisterWorkspaceProxyRequest{
|
||||
ReplicaID: l.opts.Request.ReplicaID,
|
||||
})
|
||||
if deregisterErr != nil {
|
||||
l.opts.Logger.Error(context.Background(),
|
||||
"failed to deregister workspace proxy with Coder primary (it will be automatically deregistered shortly)",
|
||||
slog.Error(deregisterErr),
|
||||
)
|
||||
}
|
||||
|
||||
if l.opts.FailureFn != nil {
|
||||
l.opts.FailureFn(err)
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterWorkspaceProxyLoop will register the workspace proxy and then start a
// goroutine to keep registering periodically in the background.
//
// The first response is returned immediately, and subsequent responses will be
// notified to the given CallbackFn. When the loop is Close()d it will stop
// immediately and an error will be returned to the FailureFn.
//
// When a fatal error is encountered (or the proxy is closed), the proxy will be
// deregistered (with the same ReplicaID and AttemptTimeout) before calling the
// FailureFn.
func (c *Client) RegisterWorkspaceProxyLoop(ctx context.Context, opts RegisterWorkspaceProxyLoopOpts) (*RegisterWorkspaceProxyLoop, RegisterWorkspaceProxyResponse, error) {
	closedCtx, closeFn := context.WithCancel(context.Background())
	loop := &RegisterWorkspaceProxyLoop{
		opts:       opts,
		c:          c,
		runLoopNow: make(chan chan RegisterWorkspaceProxyResponse),
		closedCtx:  closedCtx,
		close:      closeFn,
		done:       make(chan struct{}),
	}

	regResp, err := loop.Start(ctx)
	if err != nil {
		return nil, RegisterWorkspaceProxyResponse{}, xerrors.Errorf("start loop: %w", err)
	}
	return loop, regResp, nil
}
|
||||
|
||||
type CoordinateMessageType int
|
||||
|
||||
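For orientation only (this sketch is not part of the diff): roughly how a caller might drive the new registration loop, based on the signatures shown above. The client, logger, and request contents are assumed to be configured elsewhere.

```go
package example

import (
	"context"

	"cdr.dev/slog"

	"github.com/coder/coder/v2/enterprise/wsproxy/wsproxysdk"
)

// runRegistration starts the registration loop, forces one immediate
// re-registration, and deregisters on return via Close().
func runRegistration(ctx context.Context, client *wsproxysdk.Client, logger slog.Logger, req wsproxysdk.RegisterWorkspaceProxyRequest) error {
	loop, regResp, err := client.RegisterWorkspaceProxyLoop(ctx, wsproxysdk.RegisterWorkspaceProxyLoopOpts{
		Logger:  logger,
		Request: req,
		CallbackFn: func(res wsproxysdk.RegisterWorkspaceProxyResponse) error {
			// React to refreshed sibling replicas / DERP map here.
			return nil
		},
		FailureFn: func(err error) {
			logger.Error(context.Background(), "registration loop failed", slog.Error(err))
		},
	})
	if err != nil {
		return err
	}
	// Close() stops the background goroutine and deregisters the replica.
	defer loop.Close()

	_ = regResp // initial registration response

	// Force an immediate re-registration, e.g. after local state changes.
	_, err = loop.RegisterNow()
	return err
}
```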
@@ -1,4 +1,9 @@
|
||||
import type { PropsWithChildren, FC } from "react";
|
||||
import {
|
||||
type PropsWithChildren,
|
||||
type FC,
|
||||
forwardRef,
|
||||
HTMLAttributes,
|
||||
} from "react";
|
||||
import Tooltip from "@mui/material/Tooltip";
|
||||
import { type Interpolation, type Theme } from "@emotion/react";
|
||||
import { Stack } from "components/Stack/Stack";
|
||||
@@ -74,9 +79,14 @@ export const NotReachableBadge: FC = () => {
|
||||
);
|
||||
};
|
||||
|
||||
export const DisabledBadge: FC = () => {
|
||||
export const DisabledBadge: FC = forwardRef<
|
||||
HTMLSpanElement,
|
||||
HTMLAttributes<HTMLSpanElement>
|
||||
>((props, ref) => {
|
||||
return (
|
||||
<span
|
||||
{...props}
|
||||
ref={ref}
|
||||
css={[
|
||||
styles.badge,
|
||||
(theme) => ({
|
||||
@@ -89,7 +99,7 @@ export const DisabledBadge: FC = () => {
|
||||
Disabled
|
||||
</span>
|
||||
);
|
||||
};
|
||||
});
|
||||
|
||||
export const EnterpriseBadge: FC = () => {
|
||||
return (
|
||||
|
||||
@@ -22,7 +22,7 @@ export const useClipboard = (textToCopy: string): UseClipboardResult => {
|
||||
setIsCopied(false);
|
||||
}, 1000);
|
||||
} catch (err) {
|
||||
const isCopied = simulateClipboardWrite();
|
||||
const isCopied = simulateClipboardWrite(textToCopy);
|
||||
if (isCopied) {
|
||||
setIsCopied(true);
|
||||
timeoutIdRef.current = window.setTimeout(() => {
|
||||
@@ -44,12 +44,16 @@ export const useClipboard = (textToCopy: string): UseClipboardResult => {
|
||||
};
|
||||
|
||||
/**
 * Provides a fallback clipboard method for when browsers do not have access
 * to the clipboard API (the browser is older, or the deployment is only running
 * on HTTP, since the clipboard API is only available in secure contexts).
 *
 * It feels silly that you have to make a whole dummy input just to simulate a
 * clipboard, but that's really the recommended approach for older browsers.
 *
 * @see {@link https://web.dev/patterns/clipboard/copy-text?hl=en}
 */
function simulateClipboardWrite(): boolean {
function simulateClipboardWrite(textToCopy: string): boolean {
|
||||
const previousFocusTarget = document.activeElement;
|
||||
const dummyInput = document.createElement("input");
|
||||
|
||||
@@ -70,12 +74,26 @@ function simulateClipboardWrite(): boolean {
|
||||
style.border = "0";
|
||||
|
||||
document.body.appendChild(dummyInput);
|
||||
dummyInput.value = textToCopy;
|
||||
dummyInput.focus();
|
||||
dummyInput.select();
|
||||
|
||||
const isCopied = document.execCommand("copy");
|
||||
dummyInput.remove();
|
||||
/**
 * The document.execCommand method is officially deprecated. Browsers are free
 * to remove the method entirely or choose to turn it into a no-op function
 * that always returns false. You cannot make any assumptions about how or
 * when its functionality will be removed.
 *
 * @see {@link https://developer.mozilla.org/en-US/docs/Web/API/Clipboard}
 */
let isCopied: boolean;
try {
  isCopied = document?.execCommand("copy") ?? false;
} catch {
  isCopied = false;
}
|
||||
|
||||
dummyInput.remove();
|
||||
if (previousFocusTarget instanceof HTMLElement) {
|
||||
previousFocusTarget.focus();
|
||||
}
|
||||
|
||||
+10
@@ -105,3 +105,13 @@ export const Loading: Story = {
|
||||
isLoading: true,
|
||||
},
|
||||
};
|
||||
|
||||
export const AutoStopAndStartOff: Story = {
|
||||
args: {
|
||||
initialValues: {
|
||||
...defaultInitialValues,
|
||||
autostartEnabled: false,
|
||||
autostopEnabled: false,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
+34
@@ -379,3 +379,37 @@ describe("templateInheritance", () => {
|
||||
expect(ttlInput).toBeDisabled();
|
||||
});
|
||||
});
|
||||
|
||||
test("form should be enabled when both auto stop and auto start features are disabled, given that the template permits these actions", async () => {
|
||||
jest.spyOn(API, "getTemplateByName").mockResolvedValue(MockTemplate);
|
||||
render(
|
||||
<WorkspaceScheduleForm
|
||||
{...defaultFormProps}
|
||||
initialValues={{
|
||||
...defaultFormProps.initialValues,
|
||||
autostopEnabled: false,
|
||||
autostartEnabled: false,
|
||||
}}
|
||||
/>,
|
||||
);
|
||||
|
||||
const submitButton = await screen.findByRole("button", { name: "Submit" });
|
||||
expect(submitButton).toBeEnabled();
|
||||
});
|
||||
|
||||
test("form should be disabled when both auto stop and auto start features are disabled at template level", async () => {
|
||||
jest.spyOn(API, "getTemplateByName").mockResolvedValue(MockTemplate);
|
||||
render(
|
||||
<WorkspaceScheduleForm
|
||||
{...defaultFormProps}
|
||||
allowTemplateAutoStart={false}
|
||||
allowTemplateAutoStop={false}
|
||||
initialValues={{
|
||||
...defaultFormProps.initialValues,
|
||||
}}
|
||||
/>,
|
||||
);
|
||||
|
||||
const submitButton = await screen.findByRole("button", { name: "Submit" });
|
||||
expect(submitButton).toBeDisabled();
|
||||
});
|
||||
|
||||
+3
-4
@@ -441,10 +441,9 @@ export const WorkspaceScheduleForm: FC<WorkspaceScheduleFormProps> = ({
|
||||
<FormFooter
|
||||
onCancel={onCancel}
|
||||
isLoading={isLoading}
|
||||
submitDisabled={
|
||||
(!allowTemplateAutoStart && !allowTemplateAutoStop) ||
|
||||
(!form.values.autostartEnabled && !form.values.autostopEnabled)
|
||||
}
|
||||
// If both options, autostart and autostop, are disabled at the template
|
||||
// level, the form is disabled.
|
||||
submitDisabled={!allowTemplateAutoStart && !allowTemplateAutoStop}
|
||||
/>
|
||||
</HorizontalForm>
|
||||
);
|
||||
|
||||