Compare commits
14 Commits
| Author | SHA1 | Date |
|---|---|---|
| e2485b1c2b | |||
| 37de9f3f45 | |||
| 0156f0f375 | |||
| a9b1ccd680 | |||
| 4fe7ee2d66 | |||
| 8708d81222 | |||
| 32f093ef59 | |||
| b568aa7416 | |||
| 4a70bdc20e | |||
| ffccfb9249 | |||
| 3a5c2d7754 | |||
| 2e96160c30 | |||
| e4558e2c54 | |||
| 816a4edd06 |
@@ -1,218 +0,0 @@
|
||||
# Database Development Patterns
|
||||
|
||||
## Database Work Overview
|
||||
|
||||
### Database Generation Process
|
||||
|
||||
1. Modify SQL files in `coderd/database/queries/`
|
||||
2. Run `make gen`
|
||||
3. If errors about audit table, update `enterprise/audit/table.go`
|
||||
4. Run `make gen` again
|
||||
5. Run `make lint` to catch any remaining issues
|
||||
|
||||
## Migration Guidelines
|
||||
|
||||
### Creating Migration Files
|
||||
|
||||
**Location**: `coderd/database/migrations/`
|
||||
**Format**: `{number}_{description}.{up|down}.sql`
|
||||
|
||||
- Number must be unique and sequential
|
||||
- Always include both up and down migrations
|
||||
|
||||
### Helper Scripts
|
||||
|
||||
| Script | Purpose |
|
||||
|---------------------------------------------------------------------|-----------------------------------------|
|
||||
| `./coderd/database/migrations/create_migration.sh "migration name"` | Creates new migration files |
|
||||
| `./coderd/database/migrations/fix_migration_numbers.sh` | Renumbers migrations to avoid conflicts |
|
||||
| `./coderd/database/migrations/create_fixture.sh "fixture name"` | Creates test fixtures for migrations |
|
||||
|
||||
### Database Query Organization
|
||||
|
||||
- **MUST DO**: Any changes to database - adding queries, modifying queries should be done in the `coderd/database/queries/*.sql` files
|
||||
- **MUST DO**: Queries are grouped in files relating to context - e.g. `prebuilds.sql`, `users.sql`, `oauth2.sql`
|
||||
- After making changes to any `coderd/database/queries/*.sql` files you must run `make gen` to generate respective ORM changes
|
||||
|
||||
## Handling Nullable Fields
|
||||
|
||||
Use `sql.NullString`, `sql.NullBool`, etc. for optional database fields:
|
||||
|
||||
```go
|
||||
CodeChallenge: sql.NullString{
|
||||
String: params.codeChallenge,
|
||||
Valid: params.codeChallenge != "",
|
||||
}
|
||||
```
|
||||
|
||||
Set `.Valid = true` when providing values.
|
||||
|
||||
## Audit Table Updates
|
||||
|
||||
If adding fields to auditable types:
|
||||
|
||||
1. Update `enterprise/audit/table.go`
|
||||
2. Add each new field with appropriate action:
|
||||
- `ActionTrack`: Field should be tracked in audit logs
|
||||
- `ActionIgnore`: Field should be ignored in audit logs
|
||||
- `ActionSecret`: Field contains sensitive data
|
||||
3. Run `make gen` to verify no audit errors
|
||||
|
||||
## Database Architecture
|
||||
|
||||
### Core Components
|
||||
|
||||
- **PostgreSQL 13+** recommended for production
|
||||
- **Migrations** managed with `migrate`
|
||||
- **Database authorization** through `dbauthz` package
|
||||
|
||||
### Authorization Patterns
|
||||
|
||||
```go
|
||||
// Public endpoints needing system access (OAuth2 registration)
|
||||
app, err := api.Database.GetOAuth2ProviderAppByClientID(dbauthz.AsSystemRestricted(ctx), clientID)
|
||||
|
||||
// Authenticated endpoints with user context
|
||||
app, err := api.Database.GetOAuth2ProviderAppByClientID(ctx, clientID)
|
||||
|
||||
// System operations in middleware
|
||||
roles, err := db.GetAuthorizationUserRoles(dbauthz.AsSystemRestricted(ctx), userID)
|
||||
```
|
||||
|
||||
## Common Database Issues
|
||||
|
||||
### Migration Issues
|
||||
|
||||
1. **Migration conflicts**: Use `fix_migration_numbers.sh` to renumber
|
||||
2. **Missing down migration**: Always create both up and down files
|
||||
3. **Schema inconsistencies**: Verify against existing schema
|
||||
|
||||
### Field Handling Issues
|
||||
|
||||
1. **Nullable field errors**: Use `sql.Null*` types consistently
|
||||
2. **Missing audit entries**: Update `enterprise/audit/table.go`
|
||||
|
||||
### Query Issues
|
||||
|
||||
1. **Query organization**: Group related queries in appropriate files
|
||||
2. **Generated code errors**: Run `make gen` after query changes
|
||||
3. **Performance issues**: Add appropriate indexes in migrations
|
||||
|
||||
## Database Testing
|
||||
|
||||
### Test Database Setup
|
||||
|
||||
```go
|
||||
func TestDatabaseFunction(t *testing.T) {
|
||||
db := dbtestutil.NewDB(t)
|
||||
|
||||
// Test with real database
|
||||
result, err := db.GetSomething(ctx, param)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, result)
|
||||
}
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### Schema Design
|
||||
|
||||
1. **Use appropriate data types**: VARCHAR for strings, TIMESTAMP for times
|
||||
2. **Add constraints**: NOT NULL, UNIQUE, FOREIGN KEY as appropriate
|
||||
3. **Create indexes**: For frequently queried columns
|
||||
4. **Consider performance**: Normalize appropriately but avoid over-normalization
|
||||
|
||||
### Query Writing
|
||||
|
||||
1. **Use parameterized queries**: Prevent SQL injection
|
||||
2. **Handle errors appropriately**: Check for specific error types
|
||||
3. **Use transactions**: For related operations that must succeed together
|
||||
4. **Optimize queries**: Use EXPLAIN to understand query performance
|
||||
|
||||
### Migration Writing
|
||||
|
||||
1. **Make migrations reversible**: Always include down migration
|
||||
2. **Test migrations**: On copy of production data if possible
|
||||
3. **Keep migrations small**: One logical change per migration
|
||||
4. **Document complex changes**: Add comments explaining rationale
|
||||
|
||||
## Advanced Patterns
|
||||
|
||||
### Complex Queries
|
||||
|
||||
```sql
|
||||
-- Example: Complex join with aggregation
|
||||
SELECT
|
||||
u.id,
|
||||
u.username,
|
||||
COUNT(w.id) as workspace_count
|
||||
FROM users u
|
||||
LEFT JOIN workspaces w ON u.id = w.owner_id
|
||||
WHERE u.created_at > $1
|
||||
GROUP BY u.id, u.username
|
||||
ORDER BY workspace_count DESC;
|
||||
```
|
||||
|
||||
### Conditional Queries
|
||||
|
||||
```sql
|
||||
-- Example: Dynamic filtering
|
||||
SELECT * FROM oauth2_provider_apps
|
||||
WHERE
|
||||
($1::text IS NULL OR name ILIKE '%' || $1 || '%')
|
||||
AND ($2::uuid IS NULL OR organization_id = $2)
|
||||
ORDER BY created_at DESC;
|
||||
```
|
||||
|
||||
### Audit Patterns
|
||||
|
||||
```go
|
||||
// Example: Auditable database operation
|
||||
func (q *sqlQuerier) UpdateUser(ctx context.Context, arg UpdateUserParams) (User, error) {
|
||||
// Implementation here
|
||||
|
||||
// Audit the change
|
||||
if auditor := audit.FromContext(ctx); auditor != nil {
|
||||
auditor.Record(audit.UserUpdate{
|
||||
UserID: arg.ID,
|
||||
Old: oldUser,
|
||||
New: newUser,
|
||||
})
|
||||
}
|
||||
|
||||
return newUser, nil
|
||||
}
|
||||
```
|
||||
|
||||
## Debugging Database Issues
|
||||
|
||||
### Common Debug Commands
|
||||
|
||||
```bash
|
||||
# Check database connection
|
||||
make test-postgres
|
||||
|
||||
# Run specific database tests
|
||||
go test ./coderd/database/... -run TestSpecificFunction
|
||||
|
||||
# Check query generation
|
||||
make gen
|
||||
|
||||
# Verify audit table
|
||||
make lint
|
||||
```
|
||||
|
||||
### Debug Techniques
|
||||
|
||||
1. **Enable query logging**: Set appropriate log levels
|
||||
2. **Use database tools**: pgAdmin, psql for direct inspection
|
||||
3. **Check constraints**: UNIQUE, FOREIGN KEY violations
|
||||
4. **Analyze performance**: Use EXPLAIN ANALYZE for slow queries
|
||||
|
||||
### Troubleshooting Checklist
|
||||
|
||||
- [ ] Migration files exist (both up and down)
|
||||
- [ ] `make gen` run after query changes
|
||||
- [ ] Audit table updated for new fields
|
||||
- [ ] Nullable fields use `sql.Null*` types
|
||||
- [ ] Authorization context appropriate for endpoint type
|
||||
@@ -1,157 +0,0 @@
|
||||
# OAuth2 Development Guide
|
||||
|
||||
## RFC Compliance Development
|
||||
|
||||
### Implementing Standard Protocols
|
||||
|
||||
When implementing standard protocols (OAuth2, OpenID Connect, etc.):
|
||||
|
||||
1. **Fetch and Analyze Official RFCs**:
|
||||
- Always read the actual RFC specifications before implementation
|
||||
- Use WebFetch tool to get current RFC content for compliance verification
|
||||
- Document RFC requirements in code comments
|
||||
|
||||
2. **Default Values Matter**:
|
||||
- Pay close attention to RFC-specified default values
|
||||
- Example: RFC 7591 specifies `client_secret_basic` as default, not `client_secret_post`
|
||||
- Ensure consistency between database migrations and application code
|
||||
|
||||
3. **Security Requirements**:
|
||||
- Follow RFC security considerations precisely
|
||||
- Example: RFC 7592 prohibits returning registration access tokens in GET responses
|
||||
- Implement proper error responses per protocol specifications
|
||||
|
||||
4. **Validation Compliance**:
|
||||
- Implement comprehensive validation per RFC requirements
|
||||
- Support protocol-specific features (e.g., custom schemes for native OAuth2 apps)
|
||||
- Test edge cases defined in specifications
|
||||
|
||||
## OAuth2 Provider Implementation
|
||||
|
||||
### OAuth2 Spec Compliance
|
||||
|
||||
1. **Follow RFC 6749 for token responses**
|
||||
- Use `expires_in` (seconds) not `expiry` (timestamp) in token responses
|
||||
- Return proper OAuth2 error format: `{"error": "code", "error_description": "details"}`
|
||||
|
||||
2. **Error Response Format**
|
||||
- Create OAuth2-compliant error responses for token endpoint
|
||||
- Use standard error codes: `invalid_client`, `invalid_grant`, `invalid_request`
|
||||
- Avoid generic error responses for OAuth2 endpoints
|
||||
|
||||
### PKCE Implementation
|
||||
|
||||
- Support both with and without PKCE for backward compatibility
|
||||
- Use S256 method for code challenge
|
||||
- Properly validate code_verifier against stored code_challenge
|
||||
|
||||
### UI Authorization Flow
|
||||
|
||||
- Use POST requests for consent, not GET with links
|
||||
- Avoid dependency on referer headers for security decisions
|
||||
- Support proper state parameter validation
|
||||
|
||||
### RFC 8707 Resource Indicators
|
||||
|
||||
- Store resource parameters in database for server-side validation (opaque tokens)
|
||||
- Validate resource consistency between authorization and token requests
|
||||
- Support audience validation in refresh token flows
|
||||
- Resource parameter is optional but must be consistent when provided
|
||||
|
||||
## OAuth2 Error Handling Pattern
|
||||
|
||||
```go
|
||||
// Define specific OAuth2 errors
|
||||
var (
|
||||
errInvalidPKCE = xerrors.New("invalid code_verifier")
|
||||
)
|
||||
|
||||
// Use OAuth2-compliant error responses
|
||||
type OAuth2Error struct {
|
||||
Error string `json:"error"`
|
||||
ErrorDescription string `json:"error_description,omitempty"`
|
||||
}
|
||||
|
||||
// Return proper OAuth2 errors
|
||||
if errors.Is(err, errInvalidPKCE) {
|
||||
writeOAuth2Error(ctx, rw, http.StatusBadRequest, "invalid_grant", "The PKCE code verifier is invalid")
|
||||
return
|
||||
}
|
||||
```
|
||||
|
||||
## Testing OAuth2 Features
|
||||
|
||||
### Test Scripts
|
||||
|
||||
Located in `./scripts/oauth2/`:
|
||||
|
||||
- `test-mcp-oauth2.sh` - Full automated test suite
|
||||
- `setup-test-app.sh` - Create test OAuth2 app
|
||||
- `cleanup-test-app.sh` - Remove test app
|
||||
- `generate-pkce.sh` - Generate PKCE parameters
|
||||
- `test-manual-flow.sh` - Manual browser testing
|
||||
|
||||
Always run the full test suite after OAuth2 changes:
|
||||
|
||||
```bash
|
||||
./scripts/oauth2/test-mcp-oauth2.sh
|
||||
```
|
||||
|
||||
### RFC Protocol Testing
|
||||
|
||||
1. **Compliance Test Coverage**:
|
||||
- Test all RFC-defined error codes and responses
|
||||
- Validate proper HTTP status codes for different scenarios
|
||||
- Test protocol-specific edge cases (URI formats, token formats, etc.)
|
||||
|
||||
2. **Security Boundary Testing**:
|
||||
- Test client isolation and privilege separation
|
||||
- Verify information disclosure protections
|
||||
- Test token security and proper invalidation
|
||||
|
||||
## Common OAuth2 Issues
|
||||
|
||||
1. **OAuth2 endpoints returning wrong error format** - Ensure OAuth2 endpoints return RFC 6749 compliant errors
|
||||
2. **Resource indicator validation failing** - Ensure database stores and retrieves resource parameters correctly
|
||||
3. **PKCE tests failing** - Verify both authorization code storage and token exchange handle PKCE fields
|
||||
4. **RFC compliance failures** - Verify against actual RFC specifications, not assumptions
|
||||
5. **Authorization context errors in public endpoints** - Use `dbauthz.AsSystemRestricted(ctx)` pattern
|
||||
6. **Default value mismatches** - Ensure database migrations match application code defaults
|
||||
7. **Bearer token authentication issues** - Check token extraction precedence and format validation
|
||||
8. **URI validation failures** - Support both standard schemes and custom schemes per protocol requirements
|
||||
|
||||
## Authorization Context Patterns
|
||||
|
||||
```go
|
||||
// Public endpoints needing system access (OAuth2 registration)
|
||||
app, err := api.Database.GetOAuth2ProviderAppByClientID(dbauthz.AsSystemRestricted(ctx), clientID)
|
||||
|
||||
// Authenticated endpoints with user context
|
||||
app, err := api.Database.GetOAuth2ProviderAppByClientID(ctx, clientID)
|
||||
|
||||
// System operations in middleware
|
||||
roles, err := db.GetAuthorizationUserRoles(dbauthz.AsSystemRestricted(ctx), userID)
|
||||
```
|
||||
|
||||
## OAuth2/Authentication Work Patterns
|
||||
|
||||
- Types go in `codersdk/oauth2.go` or similar
|
||||
- Handlers go in `coderd/oauth2.go` or `coderd/identityprovider/`
|
||||
- Database fields need migration + audit table updates
|
||||
- Always support backward compatibility
|
||||
|
||||
## Protocol Implementation Checklist
|
||||
|
||||
Before completing OAuth2 or authentication feature work:
|
||||
|
||||
- [ ] Verify RFC compliance by reading actual specifications
|
||||
- [ ] Implement proper error response formats per protocol
|
||||
- [ ] Add comprehensive validation for all protocol fields
|
||||
- [ ] Test security boundaries and token handling
|
||||
- [ ] Update RBAC permissions for new resources
|
||||
- [ ] Add audit logging support if applicable
|
||||
- [ ] Create database migrations with proper defaults
|
||||
- [ ] Add comprehensive test coverage including edge cases
|
||||
- [ ] Verify linting compliance
|
||||
- [ ] Test both positive and negative scenarios
|
||||
- [ ] Document protocol-specific patterns and requirements
|
||||
@@ -1,212 +0,0 @@
|
||||
# Testing Patterns and Best Practices
|
||||
|
||||
## Testing Best Practices
|
||||
|
||||
### Avoiding Race Conditions
|
||||
|
||||
1. **Unique Test Identifiers**:
|
||||
- Never use hardcoded names in concurrent tests
|
||||
- Use `time.Now().UnixNano()` or similar for unique identifiers
|
||||
- Example: `fmt.Sprintf("test-client-%s-%d", t.Name(), time.Now().UnixNano())`
|
||||
|
||||
2. **Database Constraint Awareness**:
|
||||
- Understand unique constraints that can cause test conflicts
|
||||
- Generate unique values for all constrained fields
|
||||
- Test name isolation prevents cross-test interference
|
||||
|
||||
### Testing Patterns
|
||||
|
||||
- Use table-driven tests for comprehensive coverage
|
||||
- Mock external dependencies
|
||||
- Test both positive and negative cases
|
||||
- Use `testutil.WaitLong` for timeouts in tests
|
||||
|
||||
### Test Package Naming
|
||||
|
||||
- **Test packages**: Use `package_test` naming (e.g., `identityprovider_test`) for black-box testing
|
||||
|
||||
## RFC Protocol Testing
|
||||
|
||||
### Compliance Test Coverage
|
||||
|
||||
1. **Test all RFC-defined error codes and responses**
|
||||
2. **Validate proper HTTP status codes for different scenarios**
|
||||
3. **Test protocol-specific edge cases** (URI formats, token formats, etc.)
|
||||
|
||||
### Security Boundary Testing
|
||||
|
||||
1. **Test client isolation and privilege separation**
|
||||
2. **Verify information disclosure protections**
|
||||
3. **Test token security and proper invalidation**
|
||||
|
||||
## Test Organization
|
||||
|
||||
### Test File Structure
|
||||
|
||||
```
|
||||
coderd/
|
||||
├── oauth2.go # Implementation
|
||||
├── oauth2_test.go # Main tests
|
||||
├── oauth2_test_helpers.go # Test utilities
|
||||
└── oauth2_validation.go # Validation logic
|
||||
```
|
||||
|
||||
### Test Categories
|
||||
|
||||
1. **Unit Tests**: Test individual functions in isolation
|
||||
2. **Integration Tests**: Test API endpoints with database
|
||||
3. **End-to-End Tests**: Full workflow testing
|
||||
4. **Race Tests**: Concurrent access testing
|
||||
|
||||
## Test Commands
|
||||
|
||||
### Running Tests
|
||||
|
||||
| Command | Purpose |
|
||||
|---------|---------|
|
||||
| `make test` | Run all Go tests |
|
||||
| `make test RUN=TestFunctionName` | Run specific test |
|
||||
| `go test -v ./path/to/package -run TestFunctionName` | Run test with verbose output |
|
||||
| `make test-postgres` | Run tests with Postgres database |
|
||||
| `make test-race` | Run tests with Go race detector |
|
||||
| `make test-e2e` | Run end-to-end tests |
|
||||
|
||||
### Frontend Testing
|
||||
|
||||
| Command | Purpose |
|
||||
|---------|---------|
|
||||
| `pnpm test` | Run frontend tests |
|
||||
| `pnpm check` | Run code checks |
|
||||
|
||||
## Common Testing Issues
|
||||
|
||||
### Database-Related
|
||||
|
||||
1. **SQL type errors** - Use `sql.Null*` types for nullable fields
|
||||
2. **Race conditions in tests** - Use unique identifiers instead of hardcoded names
|
||||
|
||||
### OAuth2 Testing
|
||||
|
||||
1. **PKCE tests failing** - Verify both authorization code storage and token exchange handle PKCE fields
|
||||
2. **Resource indicator validation failing** - Ensure database stores and retrieves resource parameters correctly
|
||||
|
||||
### General Issues
|
||||
|
||||
1. **Missing newlines** - Ensure files end with a newline character
|
||||
2. **Package naming errors** - Use `package_test` naming for test files
|
||||
3. **Log message formatting errors** - Use lowercase, descriptive messages without special characters
|
||||
|
||||
## Systematic Testing Approach
|
||||
|
||||
### Multi-Issue Problem Solving
|
||||
|
||||
When facing multiple failing tests or complex integration issues:
|
||||
|
||||
1. **Identify Root Causes**:
|
||||
- Run failing tests individually to isolate issues
|
||||
- Use LSP tools to trace through call chains
|
||||
- Check both compilation and runtime errors
|
||||
|
||||
2. **Fix in Logical Order**:
|
||||
- Address compilation issues first (imports, syntax)
|
||||
- Fix authorization and RBAC issues next
|
||||
- Resolve business logic and validation issues
|
||||
- Handle edge cases and race conditions last
|
||||
|
||||
3. **Verification Strategy**:
|
||||
   - Test each fix individually before moving to the next issue
|
||||
- Use `make lint` and `make gen` after database changes
|
||||
- Verify RFC compliance with actual specifications
|
||||
- Run comprehensive test suites before considering complete
|
||||
|
||||
## Test Data Management
|
||||
|
||||
### Unique Test Data
|
||||
|
||||
```go
|
||||
// Good: Unique identifiers prevent conflicts
|
||||
clientName := fmt.Sprintf("test-client-%s-%d", t.Name(), time.Now().UnixNano())
|
||||
|
||||
// Bad: Hardcoded names cause race conditions
|
||||
clientName := "test-client"
|
||||
```
|
||||
|
||||
### Test Cleanup
|
||||
|
||||
```go
|
||||
func TestSomething(t *testing.T) {
|
||||
// Setup
|
||||
client := coderdtest.New(t, nil)
|
||||
|
||||
// Test code here
|
||||
|
||||
// Cleanup happens automatically via t.Cleanup() in coderdtest
|
||||
}
|
||||
```
|
||||
|
||||
## Test Utilities
|
||||
|
||||
### Common Test Patterns
|
||||
|
||||
```go
|
||||
// Table-driven tests
|
||||
tests := []struct {
|
||||
name string
|
||||
input InputType
|
||||
expected OutputType
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "valid input",
|
||||
input: validInput,
|
||||
expected: expectedOutput,
|
||||
wantErr: false,
|
||||
},
|
||||
// ... more test cases
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := functionUnderTest(tt.input)
|
||||
if tt.wantErr {
|
||||
require.Error(t, err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
### Test Assertions
|
||||
|
||||
```go
|
||||
// Use testify/require for assertions
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, actual)
|
||||
require.NotNil(t, result)
|
||||
require.True(t, condition)
|
||||
```
|
||||
|
||||
## Performance Testing
|
||||
|
||||
### Load Testing
|
||||
|
||||
- Use `scaletest/` directory for load testing scenarios
|
||||
- Run `./scaletest/scaletest.sh` for performance testing
|
||||
|
||||
### Benchmarking
|
||||
|
||||
```go
|
||||
func BenchmarkFunction(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
// Function call to benchmark
|
||||
_ = functionUnderTest(input)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Run benchmarks with:
|
||||
```bash
|
||||
go test -bench=. -benchmem ./package/path
|
||||
```
|
||||
@@ -1,239 +0,0 @@
|
||||
# Troubleshooting Guide
|
||||
|
||||
## Common Issues
|
||||
|
||||
### Database Issues
|
||||
|
||||
1. **"Audit table entry missing action"**
|
||||
- **Solution**: Update `enterprise/audit/table.go`
|
||||
- Add each new field with appropriate action (ActionTrack, ActionIgnore, ActionSecret)
|
||||
- Run `make gen` to verify no audit errors
|
||||
|
||||
2. **SQL type errors**
|
||||
- **Solution**: Use `sql.Null*` types for nullable fields
|
||||
- Set `.Valid = true` when providing values
|
||||
- Example:
|
||||
|
||||
```go
|
||||
CodeChallenge: sql.NullString{
|
||||
String: params.codeChallenge,
|
||||
Valid: params.codeChallenge != "",
|
||||
}
|
||||
```
|
||||
|
||||
### Testing Issues
|
||||
|
||||
3. **"package should be X_test"**
|
||||
- **Solution**: Use `package_test` naming for test files
|
||||
- Example: `identityprovider_test` for black-box testing
|
||||
|
||||
4. **Race conditions in tests**
|
||||
- **Solution**: Use unique identifiers instead of hardcoded names
|
||||
- Example: `fmt.Sprintf("test-client-%s-%d", t.Name(), time.Now().UnixNano())`
|
||||
- Never use hardcoded names in concurrent tests
|
||||
|
||||
5. **Missing newlines**
|
||||
    - **Solution**: Ensure files end with a newline character
|
||||
- Most editors can be configured to add this automatically
|
||||
|
||||
### OAuth2 Issues
|
||||
|
||||
6. **OAuth2 endpoints returning wrong error format**
|
||||
- **Solution**: Ensure OAuth2 endpoints return RFC 6749 compliant errors
|
||||
- Use standard error codes: `invalid_client`, `invalid_grant`, `invalid_request`
|
||||
- Format: `{"error": "code", "error_description": "details"}`
|
||||
|
||||
7. **Resource indicator validation failing**
|
||||
- **Solution**: Ensure database stores and retrieves resource parameters correctly
|
||||
- Check both authorization code storage and token exchange handling
|
||||
|
||||
8. **PKCE tests failing**
|
||||
- **Solution**: Verify both authorization code storage and token exchange handle PKCE fields
|
||||
- Check `CodeChallenge` and `CodeChallengeMethod` field handling
|
||||
|
||||
### RFC Compliance Issues
|
||||
|
||||
9. **RFC compliance failures**
|
||||
- **Solution**: Verify against actual RFC specifications, not assumptions
|
||||
- Use WebFetch tool to get current RFC content for compliance verification
|
||||
- Read the actual RFC specifications before implementation
|
||||
|
||||
10. **Default value mismatches**
|
||||
- **Solution**: Ensure database migrations match application code defaults
|
||||
- Example: RFC 7591 specifies `client_secret_basic` as default, not `client_secret_post`
|
||||
|
||||
### Authorization Issues
|
||||
|
||||
11. **Authorization context errors in public endpoints**
|
||||
- **Solution**: Use `dbauthz.AsSystemRestricted(ctx)` pattern
|
||||
- Example:
|
||||
|
||||
```go
|
||||
// Public endpoints needing system access
|
||||
app, err := api.Database.GetOAuth2ProviderAppByClientID(dbauthz.AsSystemRestricted(ctx), clientID)
|
||||
```
|
||||
|
||||
### Authentication Issues
|
||||
|
||||
12. **Bearer token authentication issues**
|
||||
- **Solution**: Check token extraction precedence and format validation
|
||||
- Ensure proper RFC 6750 Bearer Token Support implementation
|
||||
|
||||
13. **URI validation failures**
|
||||
- **Solution**: Support both standard schemes and custom schemes per protocol requirements
|
||||
- Native OAuth2 apps may use custom schemes
|
||||
|
||||
### General Development Issues
|
||||
|
||||
14. **Log message formatting errors**
|
||||
- **Solution**: Use lowercase, descriptive messages without special characters
|
||||
- Follow Go logging conventions
|
||||
|
||||
## Systematic Debugging Approach
|
||||
|
||||
YOU MUST ALWAYS find the root cause of any issue you are debugging.
|
||||
YOU MUST NEVER fix a symptom or add a workaround instead of finding a root cause, even if it is faster.
|
||||
|
||||
### Multi-Issue Problem Solving
|
||||
|
||||
When facing multiple failing tests or complex integration issues:
|
||||
|
||||
1. **Identify Root Causes**:
|
||||
- Run failing tests individually to isolate issues
|
||||
- Use LSP tools to trace through call chains
|
||||
- Read Error Messages Carefully: Check both compilation and runtime errors
|
||||
- Reproduce Consistently: Ensure you can reliably reproduce the issue before investigating
|
||||
- Check Recent Changes: What changed that could have caused this? Git diff, recent commits, etc.
|
||||
- When You Don't Know: Say "I don't understand X" rather than pretending to know
|
||||
|
||||
2. **Fix in Logical Order**:
|
||||
- Address compilation issues first (imports, syntax)
|
||||
- Fix authorization and RBAC issues next
|
||||
- Resolve business logic and validation issues
|
||||
- Handle edge cases and race conditions last
|
||||
- IF your first fix doesn't work, STOP and re-analyze rather than adding more fixes
|
||||
|
||||
3. **Verification Strategy**:
|
||||
   - Always test each fix individually before moving to the next issue
|
||||
   - Verify before continuing: Did your test work? If not, form a new hypothesis - don't add more fixes
|
||||
- Use `make lint` and `make gen` after database changes
|
||||
- Verify RFC compliance with actual specifications
|
||||
- Run comprehensive test suites before considering complete
|
||||
|
||||
## Debug Commands
|
||||
|
||||
### Useful Debug Commands
|
||||
|
||||
| Command | Purpose |
|
||||
|----------------------------------------------|---------------------------------------|
|
||||
| `make lint` | Run all linters |
|
||||
| `make gen` | Generate mocks, database queries |
|
||||
| `go test -v ./path/to/package -run TestName` | Run specific test with verbose output |
|
||||
| `go test -race ./...` | Run tests with race detector |
|
||||
|
||||
### LSP Debugging
|
||||
|
||||
#### Go LSP (Backend)
|
||||
|
||||
| Command | Purpose |
|
||||
|----------------------------------------------------|------------------------------|
|
||||
| `mcp__go-language-server__definition symbolName` | Find function definition |
|
||||
| `mcp__go-language-server__references symbolName` | Find all references |
|
||||
| `mcp__go-language-server__diagnostics filePath` | Check for compilation errors |
|
||||
| `mcp__go-language-server__hover filePath line col` | Get type information |
|
||||
|
||||
#### TypeScript LSP (Frontend)
|
||||
|
||||
| Command | Purpose |
|
||||
|----------------------------------------------------------------------------|------------------------------------|
|
||||
| `mcp__typescript-language-server__definition symbolName` | Find component/function definition |
|
||||
| `mcp__typescript-language-server__references symbolName` | Find all component/type usages |
|
||||
| `mcp__typescript-language-server__diagnostics filePath` | Check for TypeScript errors |
|
||||
| `mcp__typescript-language-server__hover filePath line col` | Get type information |
|
||||
| `mcp__typescript-language-server__rename_symbol filePath line col newName` | Rename across codebase |
|
||||
|
||||
## Common Error Messages
|
||||
|
||||
### Database Errors
|
||||
|
||||
**Error**: `pq: relation "oauth2_provider_app_codes" does not exist`
|
||||
|
||||
- **Cause**: Missing database migration
|
||||
- **Solution**: Run database migrations, check migration files
|
||||
|
||||
**Error**: `audit table entry missing action for field X`
|
||||
|
||||
- **Cause**: New field added without audit table update
|
||||
- **Solution**: Update `enterprise/audit/table.go`
|
||||
|
||||
### Go Compilation Errors
|
||||
|
||||
**Error**: `package should be identityprovider_test`
|
||||
|
||||
- **Cause**: Test package naming convention violation
|
||||
- **Solution**: Use `package_test` naming for black-box tests
|
||||
|
||||
**Error**: `cannot use X (type Y) as type Z`
|
||||
|
||||
- **Cause**: Type mismatch, often with nullable fields
|
||||
- **Solution**: Use appropriate `sql.Null*` types
|
||||
|
||||
### OAuth2 Errors
|
||||
|
||||
**Error**: `invalid_client` but client exists
|
||||
|
||||
- **Cause**: Authorization context issue
|
||||
- **Solution**: Use `dbauthz.AsSystemRestricted(ctx)` for public endpoints
|
||||
|
||||
**Error**: PKCE validation failing
|
||||
|
||||
- **Cause**: Missing PKCE fields in database operations
|
||||
- **Solution**: Ensure `CodeChallenge` and `CodeChallengeMethod` are handled
|
||||
|
||||
## Prevention Strategies
|
||||
|
||||
### Before Making Changes
|
||||
|
||||
1. **Read the relevant documentation**
|
||||
2. **Check if similar patterns exist in codebase**
|
||||
3. **Understand the authorization context requirements**
|
||||
4. **Plan database changes carefully**
|
||||
|
||||
### During Development
|
||||
|
||||
1. **Run tests frequently**: `make test`
|
||||
2. **Use LSP tools for navigation**: Avoid manual searching
|
||||
3. **Follow RFC specifications precisely**
|
||||
4. **Update audit tables when adding database fields**
|
||||
|
||||
### Before Committing
|
||||
|
||||
1. **Run full test suite**: `make test`
|
||||
2. **Check linting**: `make lint`
|
||||
3. **Test with race detector**: `make test-race`
|
||||
|
||||
## Getting Help
|
||||
|
||||
### Internal Resources
|
||||
|
||||
- Check existing similar implementations in codebase
|
||||
- Use LSP tools to understand code relationships
|
||||
- For Go code: Use `mcp__go-language-server__*` commands
|
||||
- For TypeScript/React code: Use `mcp__typescript-language-server__*` commands
|
||||
- Read related test files for expected behavior
|
||||
|
||||
### External Resources
|
||||
|
||||
- Official RFC specifications for protocol compliance
|
||||
- Go documentation for language features
|
||||
- PostgreSQL documentation for database issues
|
||||
|
||||
### Debug Information Collection
|
||||
|
||||
When reporting issues, include:
|
||||
|
||||
1. **Exact error message**
|
||||
2. **Steps to reproduce**
|
||||
3. **Relevant code snippets**
|
||||
4. **Test output (if applicable)**
|
||||
5. **Environment information** (OS, Go version, etc.)
|
||||
@@ -1,227 +0,0 @@
|
||||
# Development Workflows and Guidelines
|
||||
|
||||
## Quick Start Checklist for New Features
|
||||
|
||||
### Before Starting
|
||||
|
||||
- [ ] Run `git pull` to ensure you're on latest code
|
||||
- [ ] Check if feature touches database - you'll need migrations
|
||||
- [ ] Check if feature touches audit logs - update `enterprise/audit/table.go`
|
||||
|
||||
## Development Server
|
||||
|
||||
### Starting Development Mode
|
||||
|
||||
- **Use `./scripts/develop.sh` to start Coder in development mode**
|
||||
- This automatically builds and runs with `--dev` flag and proper access URL
|
||||
- **⚠️ Do NOT manually run `make build && ./coder server --dev` - use the script instead**
|
||||
|
||||
### Development Workflow
|
||||
|
||||
1. **Always start with the development script**: `./scripts/develop.sh`
|
||||
2. **Make changes** to your code
|
||||
3. **The script will automatically rebuild** and restart as needed
|
||||
4. **Access the development server** at the URL provided by the script
|
||||
|
||||
## Code Style Guidelines
|
||||
|
||||
### Go Style
|
||||
|
||||
- Follow [Effective Go](https://go.dev/doc/effective_go) and [Go's Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments)
|
||||
- Create packages when used during implementation
|
||||
- Validate abstractions against implementations
|
||||
- **Test packages**: Use `package_test` naming (e.g., `identityprovider_test`) for black-box testing
|
||||
|
||||
### Error Handling
|
||||
|
||||
- Use descriptive error messages
|
||||
- Wrap errors with context
|
||||
- Propagate errors appropriately
|
||||
- Use proper error types
|
||||
- Pattern: `xerrors.Errorf("failed to X: %w", err)`
|
||||
|
||||
## Naming Conventions
|
||||
|
||||
- Names MUST tell what code does, not how it's implemented or its history
|
||||
- Follow Go and TypeScript naming conventions
|
||||
- When changing code, never document the old behavior or the behavior change
|
||||
- NEVER use implementation details in names (e.g., "ZodValidator", "MCPWrapper", "JSONParser")
|
||||
- NEVER use temporal/historical context in names (e.g., "LegacyHandler", "UnifiedTool", "ImprovedInterface", "EnhancedParser")
|
||||
- NEVER use pattern names unless they add clarity (e.g., prefer "Tool" over "ToolFactory")
|
||||
- Abbreviate only when obvious
|
||||
|
||||
### Comments
|
||||
|
||||
- Document exported functions, types, and non-obvious logic
|
||||
- Follow JSDoc format for TypeScript
|
||||
- Use godoc format for Go code
|
||||
|
||||
## Database Migration Workflows
|
||||
|
||||
### Migration Guidelines
|
||||
|
||||
1. **Create migration files**:
|
||||
- Location: `coderd/database/migrations/`
|
||||
- Format: `{number}_{description}.{up|down}.sql`
|
||||
- Number must be unique and sequential
|
||||
- Always include both up and down migrations
|
||||
|
||||
2. **Use helper scripts**:
|
||||
- `./coderd/database/migrations/create_migration.sh "migration name"` - Creates new migration files
|
||||
- `./coderd/database/migrations/fix_migration_numbers.sh` - Renumbers migrations to avoid conflicts
|
||||
- `./coderd/database/migrations/create_fixture.sh "fixture name"` - Creates test fixtures for migrations
|
||||
|
||||
3. **Update database queries**:
|
||||
- **MUST DO**: Any changes to database - adding queries, modifying queries should be done in the `coderd/database/queries/*.sql` files
|
||||
- **MUST DO**: Queries are grouped in files relating to context - e.g. `prebuilds.sql`, `users.sql`, `oauth2.sql`
|
||||
- After making changes to any `coderd/database/queries/*.sql` files you must run `make gen` to generate respective ORM changes
|
||||
|
||||
4. **Handle nullable fields**:
|
||||
- Use `sql.NullString`, `sql.NullBool`, etc. for optional database fields
|
||||
- Set `.Valid = true` when providing values
|
||||
|
||||
5. **Audit table updates**:
|
||||
- If adding fields to auditable types, update `enterprise/audit/table.go`
|
||||
- Add each new field with appropriate action (ActionTrack, ActionIgnore, ActionSecret)
|
||||
- Run `make gen` to verify no audit errors
|
||||
|
||||
### Database Generation Process
|
||||
|
||||
1. Modify SQL files in `coderd/database/queries/`
|
||||
2. Run `make gen`
|
||||
3. If errors about audit table, update `enterprise/audit/table.go`
|
||||
4. Run `make gen` again
|
||||
5. Run `make lint` to catch any remaining issues
|
||||
|
||||
## API Development Workflow
|
||||
|
||||
### Adding New API Endpoints
|
||||
|
||||
1. **Define types** in `codersdk/` package
|
||||
2. **Add handler** in appropriate `coderd/` file
|
||||
3. **Register route** in `coderd/coderd.go`
|
||||
4. **Add tests** in `coderd/*_test.go` files
|
||||
5. **Update OpenAPI** by running `make gen`
|
||||
|
||||
## Testing Workflows
|
||||
|
||||
### Test Execution
|
||||
|
||||
- Run full test suite: `make test`
|
||||
- Run specific test: `make test RUN=TestFunctionName`
|
||||
- Run with Postgres: `make test-postgres`
|
||||
- Run with race detector: `make test-race`
|
||||
- Run end-to-end tests: `make test-e2e`
|
||||
|
||||
### Test Development
|
||||
|
||||
- Use table-driven tests for comprehensive coverage
|
||||
- Mock external dependencies
|
||||
- Test both positive and negative cases
|
||||
- Use `testutil.WaitLong` for timeouts in tests
|
||||
- Always use `t.Parallel()` in tests
|
||||
|
||||
## Commit Style
|
||||
|
||||
- Follow [Conventional Commits 1.0.0](https://www.conventionalcommits.org/en/v1.0.0/)
|
||||
- Format: `type(scope): message`
|
||||
- Types: `feat`, `fix`, `docs`, `style`, `refactor`, `test`, `chore`
|
||||
- Keep message titles concise (~70 characters)
|
||||
- Use imperative, present tense in commit titles
|
||||
|
||||
## Code Navigation and Investigation
|
||||
|
||||
### Using LSP Tools (STRONGLY RECOMMENDED)
|
||||
|
||||
**IMPORTANT**: Always use LSP tools for code navigation and understanding. These tools provide accurate, real-time analysis of the codebase and should be your first choice for code investigation.
|
||||
|
||||
#### Go LSP Tools (for backend code)
|
||||
|
||||
1. **Find function definitions** (USE THIS FREQUENTLY):
|
||||
- `mcp__go-language-server__definition symbolName`
|
||||
- Example: `mcp__go-language-server__definition getOAuth2ProviderAppAuthorize`
|
||||
- Quickly jump to function implementations across packages
|
||||
|
||||
2. **Find symbol references** (ESSENTIAL FOR UNDERSTANDING IMPACT):
|
||||
- `mcp__go-language-server__references symbolName`
|
||||
- Locate all usages of functions, types, or variables
|
||||
- Critical for refactoring and understanding data flow
|
||||
|
||||
3. **Get symbol information**:
|
||||
- `mcp__go-language-server__hover filePath line column`
|
||||
- Get type information and documentation at specific positions
|
||||
|
||||
#### TypeScript LSP Tools (for frontend code in site/)
|
||||
|
||||
1. **Find component/function definitions** (USE THIS FREQUENTLY):
|
||||
- `mcp__typescript-language-server__definition symbolName`
|
||||
- Example: `mcp__typescript-language-server__definition LoginPage`
|
||||
- Quickly navigate to React components, hooks, and utility functions
|
||||
|
||||
2. **Find symbol references** (ESSENTIAL FOR UNDERSTANDING IMPACT):
|
||||
- `mcp__typescript-language-server__references symbolName`
|
||||
- Locate all usages of components, types, or functions
|
||||
- Critical for refactoring React components and understanding prop usage
|
||||
|
||||
3. **Get type information**:
|
||||
- `mcp__typescript-language-server__hover filePath line column`
|
||||
- Get TypeScript type information and JSDoc documentation
|
||||
|
||||
4. **Rename symbols safely**:
|
||||
- `mcp__typescript-language-server__rename_symbol filePath line column newName`
|
||||
- Rename components, props, or functions across the entire codebase
|
||||
|
||||
5. **Check for TypeScript errors**:
|
||||
- `mcp__typescript-language-server__diagnostics filePath`
|
||||
- Get compilation errors and warnings for a specific file
|
||||
|
||||
### Investigation Strategy (LSP-First Approach)
|
||||
|
||||
#### Backend Investigation (Go)
|
||||
|
||||
1. **Start with route registration** in `coderd/coderd.go` to understand API endpoints
|
||||
2. **Use Go LSP `definition` lookup** to trace from route handlers to actual implementations
|
||||
3. **Use Go LSP `references`** to understand how functions are called throughout the codebase
|
||||
4. **Follow the middleware chain** using LSP tools to understand request processing flow
|
||||
5. **Check test files** for expected behavior and error patterns
|
||||
|
||||
#### Frontend Investigation (TypeScript/React)
|
||||
|
||||
1. **Start with route definitions** in `site/src/App.tsx` or router configuration
|
||||
2. **Use TypeScript LSP `definition`** to navigate to React components and hooks
|
||||
3. **Use TypeScript LSP `references`** to find all component usages and prop drilling
|
||||
4. **Follow the component hierarchy** using LSP tools to understand data flow
|
||||
5. **Check for TypeScript errors** with `diagnostics` before making changes
|
||||
6. **Examine test files** (`.test.tsx`) for component behavior and expected props
|
||||
|
||||
## Troubleshooting Development Issues
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **Development server won't start** - Use `./scripts/develop.sh` instead of manual commands
|
||||
2. **Database migration errors** - Check migration file format and use helper scripts
|
||||
3. **Audit table errors** - Update `enterprise/audit/table.go` with new fields
|
||||
4. **OAuth2 compliance issues** - Ensure RFC-compliant error responses
|
||||
|
||||
### Debug Commands
|
||||
|
||||
- Check linting: `make lint`
|
||||
- Generate code: `make gen`
|
||||
- Clean build: `make clean`
|
||||
|
||||
## Development Environment Setup
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Go (version specified in go.mod)
|
||||
- Node.js and pnpm for frontend development
|
||||
- PostgreSQL for database testing
|
||||
- Docker for containerized testing
|
||||
|
||||
### First Time Setup
|
||||
|
||||
1. Clone the repository
|
||||
2. Run `./scripts/develop.sh` to start development server
|
||||
3. Access the development URL provided
|
||||
4. Create admin user as prompted
|
||||
5. Begin development
|
||||
@@ -1,133 +0,0 @@
|
||||
#!/bin/bash

# Claude Code hook script for file formatting
# This script integrates with the centralized Makefile formatting targets
# and supports the Claude Code hooks system for automatic file formatting.

set -euo pipefail

# Memoized name of the command used for canonicalizing paths.
# Holds "realpath", "readlink", or "none" once probed; empty until then.
_CANONICALIZE_CMD=""

# canonicalize_path resolves a path to its absolute, canonical form.
# It tries 'realpath' and 'readlink -f' in that order, memoizing whichever
# is available so the probe only happens once per script run.
# Prints the resolved path, or an empty string when no resolver exists
# (or resolution fails).
canonicalize_path() {
	local target="$1"

	# Probe for a usable resolver on the first call only.
	if [[ -z "$_CANONICALIZE_CMD" ]]; then
		if command -v realpath >/dev/null 2>&1; then
			_CANONICALIZE_CMD="realpath"
		elif command -v readlink >/dev/null 2>&1 && readlink -f . >/dev/null 2>&1; then
			_CANONICALIZE_CMD="readlink"
		else
			# Nothing usable found; remember that so we never re-probe.
			_CANONICALIZE_CMD="none"
		fi
	fi

	case "$_CANONICALIZE_CMD" in
	realpath)
		realpath "$target" 2>/dev/null
		;;
	readlink)
		readlink -f "$target" 2>/dev/null
		;;
	*)
		# Covers the "none" marker or any unexpected value: signal
		# failure to the caller with an empty string.
		echo ""
		;;
	esac
}
|
||||
|
||||
# Read JSON input from stdin
input=$(cat)

# Extract the file path from the JSON input
# Expected format: {"tool_input": {"file_path": "/absolute/path/to/file"}} or {"tool_response": {"filePath": "/absolute/path/to/file"}}
file_path=$(echo "$input" | jq -r '.tool_input.file_path // .tool_response.filePath // empty')

# Fail fast when no path was provided. This check must run BEFORE path
# resolution: previously it ran at the end, so an empty path resolved to
# the repository root and was silently skipped (exit 0) instead of
# reported as an error.
if [[ -z "$file_path" ]]; then
	echo "Error: No file path provided in input" >&2
	exit 1
fi

# Secure path canonicalization to prevent path traversal attacks
# Resolve repo root to an absolute, canonical path.
repo_root_raw="$(cd "$(dirname "$0")/../.." && pwd)"
repo_root="$(canonicalize_path "$repo_root_raw")"
if [[ -z "$repo_root" ]]; then
	# Fallback if canonicalization fails
	repo_root="$repo_root_raw"
fi

# Resolve the input path to an absolute path
if [[ "$file_path" = /* ]]; then
	# Already absolute
	abs_file_path="$file_path"
else
	# Make relative paths absolute from repo root
	abs_file_path="$repo_root/$file_path"
fi

# Canonicalize the path (resolve symlinks and ".." segments)
canonical_file_path="$(canonicalize_path "$abs_file_path")"

# Check if canonicalization failed or if the resolved path is outside the repo
if [[ -z "$canonical_file_path" ]] || { [[ "$canonical_file_path" != "$repo_root" ]] && [[ "$canonical_file_path" != "$repo_root"/* ]]; }; then
	echo "Error: File path is outside repository or invalid: $file_path" >&2
	exit 1
fi

# Handle the case where the file path is the repository root itself.
if [[ "$canonical_file_path" == "$repo_root" ]]; then
	echo "Warning: Formatting the repository root is not a supported operation. Skipping." >&2
	exit 0
fi

# Convert back to relative path from repo root for consistency
file_path="${canonical_file_path#"$repo_root"/}"

# Check if file exists
if [[ ! -f "$file_path" ]]; then
	echo "Error: File does not exist: $file_path" >&2
	exit 1
fi

# Get the file extension to determine the appropriate formatter
file_ext="${file_path##*.}"

# Change to the project root directory (where the Makefile is located)
cd "$(dirname "$0")/../.."

# Call the appropriate Makefile target based on file extension
case "$file_ext" in
go)
	make fmt/go FILE="$file_path"
	echo "✓ Formatted Go file: $file_path"
	;;
js | jsx | ts | tsx)
	make fmt/ts FILE="$file_path"
	echo "✓ Formatted TypeScript/JavaScript file: $file_path"
	;;
tf | tfvars)
	make fmt/terraform FILE="$file_path"
	echo "✓ Formatted Terraform file: $file_path"
	;;
sh)
	make fmt/shfmt FILE="$file_path"
	echo "✓ Formatted shell script: $file_path"
	;;
md)
	make fmt/markdown FILE="$file_path"
	echo "✓ Formatted Markdown file: $file_path"
	;;
*)
	echo "No formatter available for file extension: $file_ext"
	exit 0
	;;
esac
|
||||
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"hooks": {
|
||||
"PostToolUse": [
|
||||
{
|
||||
"matcher": "Edit|Write|MultiEdit",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": ".claude/scripts/format.sh"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
+13
-15
@@ -4,7 +4,7 @@ This project is called "Coder" - an application for managing remote development
|
||||
|
||||
Coder provides a platform for creating, managing, and using remote development environments (also known as Cloud Development Environments or CDEs). It leverages Terraform to define and provision these environments, which are referred to as "workspaces" within the project. The system is designed to be extensible, secure, and provide developers with a seamless remote development experience.
|
||||
|
||||
## Core Architecture
|
||||
# Core Architecture
|
||||
|
||||
The heart of Coder is a control plane that orchestrates the creation and management of workspaces. This control plane interacts with separate Provisioner processes over gRPC to handle workspace builds. The Provisioners consume workspace definitions and use Terraform to create the actual infrastructure.
|
||||
|
||||
@@ -12,17 +12,17 @@ The CLI package serves dual purposes - it can be used to launch the control plan
|
||||
|
||||
The database layer uses PostgreSQL with SQLC for generating type-safe database code. Database migrations are carefully managed to ensure both forward and backward compatibility through paired `.up.sql` and `.down.sql` files.
|
||||
|
||||
## API Design
|
||||
# API Design
|
||||
|
||||
Coder's API architecture combines REST and gRPC approaches. The REST API is defined in `coderd/coderd.go` and uses Chi for HTTP routing. This provides the primary interface for the frontend and external integrations.
|
||||
|
||||
Internal communication with Provisioners occurs over gRPC, with service definitions maintained in `.proto` files. This separation allows for efficient binary communication with the components responsible for infrastructure management while providing a standard REST interface for human-facing applications.
|
||||
|
||||
## Network Architecture
|
||||
# Network Architecture
|
||||
|
||||
Coder implements a secure networking layer based on Tailscale's Wireguard implementation. The `tailnet` package provides connectivity between workspace agents and clients through DERP (Designated Encrypted Relay for Packets) servers when direct connections aren't possible. This creates a secure overlay network allowing access to workspaces regardless of network topology, firewalls, or NAT configurations.
|
||||
|
||||
### Tailnet and DERP System
|
||||
## Tailnet and DERP System
|
||||
|
||||
The networking system has three key components:
|
||||
|
||||
@@ -35,7 +35,7 @@ The networking system has three key components:
|
||||
|
||||
3. **Direct Connections**: When possible, the system establishes peer-to-peer connections between clients and workspaces using STUN for NAT traversal. This requires both endpoints to send UDP traffic on ephemeral ports.
|
||||
|
||||
### Workspace Proxies
|
||||
## Workspace Proxies
|
||||
|
||||
Workspace proxies (in the Enterprise edition) provide regional relay points for browser-based connections, reducing latency for geo-distributed teams. Key characteristics:
|
||||
|
||||
@@ -45,10 +45,9 @@ Workspace proxies (in the Enterprise edition) provide regional relay points for
|
||||
- Managed through the `coder wsproxy` commands
|
||||
- Implemented primarily in the `enterprise/wsproxy/` package
|
||||
|
||||
## Agent System
|
||||
# Agent System
|
||||
|
||||
The workspace agent runs within each provisioned workspace and provides core functionality including:
|
||||
|
||||
- SSH access to workspaces via the `agentssh` package
|
||||
- Port forwarding
|
||||
- Terminal connectivity via the `pty` package for pseudo-terminal support
|
||||
@@ -58,7 +57,7 @@ The workspace agent runs within each provisioned workspace and provides core fun
|
||||
|
||||
Agents communicate with the control plane using the tailnet system and authenticate using secure tokens.
|
||||
|
||||
## Workspace Applications
|
||||
# Workspace Applications
|
||||
|
||||
Workspace applications (or "apps") provide browser-based access to services running within workspaces. The system supports:
|
||||
|
||||
@@ -70,17 +69,17 @@ Workspace applications (or "apps") provide browser-based access to services runn
|
||||
|
||||
The implementation is primarily in the `coderd/workspaceapps/` directory with components for URL generation, proxying connections, and managing application state.
|
||||
|
||||
## Implementation Details
|
||||
# Implementation Details
|
||||
|
||||
The project structure separates frontend and backend concerns. React components and pages are organized in the `site/src/` directory, with Jest used for testing. The backend is primarily written in Go, with a strong emphasis on error handling patterns and test coverage.
|
||||
|
||||
Database interactions are carefully managed through migrations in `coderd/database/migrations/` and queries in `coderd/database/queries/`. All new queries require proper database authorization (dbauthz) implementation to ensure that only users with appropriate permissions can access specific resources.
|
||||
|
||||
## Authorization System
|
||||
# Authorization System
|
||||
|
||||
The database authorization (dbauthz) system enforces fine-grained access control across all database operations. It uses role-based access control (RBAC) to validate user permissions before executing database operations. The `dbauthz` package wraps the database store and performs authorization checks before returning data. All database operations must pass through this layer to ensure security.
|
||||
|
||||
## Testing Framework
|
||||
# Testing Framework
|
||||
|
||||
The codebase has a comprehensive testing approach with several key components:
|
||||
|
||||
@@ -92,7 +91,7 @@ The codebase has a comprehensive testing approach with several key components:
|
||||
|
||||
4. **Enterprise Testing**: Enterprise features have dedicated test utilities in the `coderdenttest` package.
|
||||
|
||||
## Open Source and Enterprise Components
|
||||
# Open Source and Enterprise Components
|
||||
|
||||
The repository contains both open source and enterprise components:
|
||||
|
||||
@@ -101,10 +100,9 @@ The repository contains both open source and enterprise components:
|
||||
- The boundary between open source and enterprise is managed through a licensing system
|
||||
- The same core codebase supports both editions, with enterprise features conditionally enabled
|
||||
|
||||
## Development Philosophy
|
||||
# Development Philosophy
|
||||
|
||||
Coder emphasizes clear error handling, with specific patterns required:
|
||||
|
||||
- Concise error messages that avoid phrases like "failed to"
|
||||
- Wrapping errors with `%w` to maintain error chains
|
||||
- Using sentinel errors with the "err" prefix (e.g., `errNotFound`)
|
||||
@@ -113,7 +111,7 @@ All tests should run in parallel using `t.Parallel()` to ensure efficient testin
|
||||
|
||||
Git contributions follow a standard format with commit messages structured as `type: <message>`, where type is one of `feat`, `fix`, or `chore`.
|
||||
|
||||
## Development Workflow
|
||||
# Development Workflow
|
||||
|
||||
Development can be initiated using `scripts/develop.sh` to start the application after making changes. Database schema updates should be performed through the migration system using `create_migration.sh <name>` to generate migration files, with each `.up.sql` migration paired with a corresponding `.down.sql` that properly reverts all changes.
|
||||
|
||||
|
||||
@@ -1,16 +1,11 @@
|
||||
{
|
||||
"name": "Development environments on your infrastructure",
|
||||
"image": "codercom/oss-dogfood:latest",
|
||||
|
||||
"features": {
|
||||
// See all possible options here https://github.com/devcontainers/features/tree/main/src/docker-in-docker
|
||||
"ghcr.io/devcontainers/features/docker-in-docker:2": {
|
||||
"moby": "false"
|
||||
},
|
||||
"ghcr.io/coder/devcontainer-features/code-server:1": {
|
||||
"auth": "none",
|
||||
"port": 13337
|
||||
},
|
||||
"./filebrowser": {
|
||||
"folder": "${containerWorkspaceFolder}"
|
||||
}
|
||||
},
|
||||
// SYS_PTRACE to enable go debugging
|
||||
@@ -18,65 +13,6 @@
|
||||
"customizations": {
|
||||
"vscode": {
|
||||
"extensions": ["biomejs.biome"]
|
||||
},
|
||||
"coder": {
|
||||
"apps": [
|
||||
{
|
||||
"slug": "cursor",
|
||||
"displayName": "Cursor Desktop",
|
||||
"url": "cursor://coder.coder-remote/openDevContainer?owner=${localEnv:CODER_WORKSPACE_OWNER_NAME}&workspace=${localEnv:CODER_WORKSPACE_NAME}&agent=${localEnv:CODER_WORKSPACE_PARENT_AGENT_NAME}&url=${localEnv:CODER_URL}&token=$SESSION_TOKEN&devContainerName=${localEnv:CONTAINER_ID}&devContainerFolder=${containerWorkspaceFolder}&localWorkspaceFolder=${localWorkspaceFolder}",
|
||||
"external": true,
|
||||
"icon": "/icon/cursor.svg",
|
||||
"order": 1
|
||||
},
|
||||
{
|
||||
"slug": "windsurf",
|
||||
"displayName": "Windsurf Editor",
|
||||
"url": "windsurf://coder.coder-remote/openDevContainer?owner=${localEnv:CODER_WORKSPACE_OWNER_NAME}&workspace=${localEnv:CODER_WORKSPACE_NAME}&agent=${localEnv:CODER_WORKSPACE_PARENT_AGENT_NAME}&url=${localEnv:CODER_URL}&token=$SESSION_TOKEN&devContainerName=${localEnv:CONTAINER_ID}&devContainerFolder=${containerWorkspaceFolder}&localWorkspaceFolder=${localWorkspaceFolder}",
|
||||
"external": true,
|
||||
"icon": "/icon/windsurf.svg",
|
||||
"order": 4
|
||||
},
|
||||
{
|
||||
"slug": "zed",
|
||||
"displayName": "Zed Editor",
|
||||
"url": "zed://ssh/${localEnv:CODER_WORKSPACE_AGENT_NAME}.${localEnv:CODER_WORKSPACE_NAME}.${localEnv:CODER_WORKSPACE_OWNER_NAME}.coder${containerWorkspaceFolder}",
|
||||
"external": true,
|
||||
"icon": "/icon/zed.svg",
|
||||
"order": 5
|
||||
},
|
||||
// Reproduce `code-server` app here from the code-server
|
||||
// feature so that we can set the correct folder and order.
|
||||
// Currently, the order cannot be specified via option because
|
||||
// we parse it as a number whereas variable interpolation
|
||||
// results in a string. Additionally we set health check which
|
||||
// is not yet set in the feature.
|
||||
{
|
||||
"slug": "code-server",
|
||||
"displayName": "code-server",
|
||||
"url": "http://${localEnv:FEATURE_CODE_SERVER_OPTION_HOST:127.0.0.1}:${localEnv:FEATURE_CODE_SERVER_OPTION_PORT:8080}/?folder=${containerWorkspaceFolder}",
|
||||
"openIn": "${localEnv:FEATURE_CODE_SERVER_OPTION_APPOPENIN:slim-window}",
|
||||
"share": "${localEnv:FEATURE_CODE_SERVER_OPTION_APPSHARE:owner}",
|
||||
"icon": "/icon/code.svg",
|
||||
"group": "${localEnv:FEATURE_CODE_SERVER_OPTION_APPGROUP:Web Editors}",
|
||||
"order": 3,
|
||||
"healthCheck": {
|
||||
"url": "http://${localEnv:FEATURE_CODE_SERVER_OPTION_HOST:127.0.0.1}:${localEnv:FEATURE_CODE_SERVER_OPTION_PORT:8080}/healthz",
|
||||
"interval": 5,
|
||||
"threshold": 2
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"mounts": [
|
||||
// Add a volume for the Coder home directory to persist shell history,
|
||||
// and speed up dotfiles init and/or personalization.
|
||||
"source=coder-coder-devcontainer-home,target=/home/coder,type=volume",
|
||||
// Mount the entire home because conditional mounts are not supported.
|
||||
// See: https://github.com/devcontainers/spec/issues/132
|
||||
"source=${localEnv:HOME},target=/mnt/home/coder,type=bind,readonly"
|
||||
],
|
||||
"postCreateCommand": ["./.devcontainer/scripts/post_create.sh"],
|
||||
"postStartCommand": ["./.devcontainer/scripts/post_start.sh"]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
{
|
||||
"id": "filebrowser",
|
||||
"version": "0.0.1",
|
||||
"name": "File Browser",
|
||||
"description": "A web-based file browser for your development container",
|
||||
"options": {
|
||||
"port": {
|
||||
"type": "string",
|
||||
"default": "13339",
|
||||
"description": "The port to run filebrowser on"
|
||||
},
|
||||
"folder": {
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"description": "The root directory for filebrowser to serve"
|
||||
},
|
||||
"baseUrl": {
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"description": "The base URL for filebrowser (e.g., /filebrowser)"
|
||||
}
|
||||
},
|
||||
"entrypoint": "/usr/local/bin/filebrowser-entrypoint",
|
||||
"dependsOn": {
|
||||
"ghcr.io/devcontainers/features/common-utils:2": {}
|
||||
},
|
||||
"customizations": {
|
||||
"coder": {
|
||||
"apps": [
|
||||
{
|
||||
"slug": "filebrowser",
|
||||
"displayName": "File Browser",
|
||||
"url": "http://localhost:${localEnv:FEATURE_FILEBROWSER_OPTION_PORT:13339}",
|
||||
"icon": "/icon/filebrowser.svg",
|
||||
"order": 3,
|
||||
"subdomain": true,
|
||||
"healthcheck": {
|
||||
"url": "http://localhost:${localEnv:FEATURE_FILEBROWSER_OPTION_PORT:13339}/health",
|
||||
"interval": 5,
|
||||
"threshold": 2
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,54 +0,0 @@
|
||||
#!/usr/bin/env bash

set -euo pipefail

BOLD='\033[0;1m'

printf "%sInstalling filebrowser\n\n" "${BOLD}"

# Install the filebrowser binary only when it is not already on PATH.
# The download is pinned to a specific release and verified against a
# known SHA-256 before being moved into place.
if ! command -v filebrowser >/dev/null 2>&1; then
	FB_VERSION="v2.42.1"
	FB_SHA256="7d83c0f077df10a8ec9bfd9bf6e745da5d172c3c768a322b0e50583a6bc1d3cc"

	curl -fsSL "https://github.com/filebrowser/filebrowser/releases/download/${FB_VERSION}/linux-amd64-filebrowser.tar.gz" -o /tmp/filebrowser.tar.gz
	echo "${FB_SHA256} /tmp/filebrowser.tar.gz" | sha256sum -c
	tar -xzf /tmp/filebrowser.tar.gz -C /tmp
	sudo mv /tmp/filebrowser /usr/local/bin/
	sudo chmod +x /usr/local/bin/filebrowser
	rm /tmp/filebrowser.tar.gz
fi

# Generate the runtime entrypoint. The heredoc is intentionally unquoted:
# ${PORT}/${FOLDER}/${BASEURL} are baked in from the feature's install-time
# environment, while the escaped \$ expansions are deferred until the
# entrypoint actually runs.
# NOTE(review): this write has no sudo while the binary moves above do —
# presumably the feature installer runs as root; confirm.
cat >/usr/local/bin/filebrowser-entrypoint <<EOF
#!/usr/bin/env bash

PORT="${PORT}"
FOLDER="${FOLDER:-}"
FOLDER="\${FOLDER:-\$(pwd)}"
BASEURL="${BASEURL:-}"
LOG_PATH=/tmp/filebrowser.log
export FB_DATABASE="\${HOME}/.filebrowser.db"

printf "🛠️ Configuring filebrowser\n\n"

# Check if filebrowser db exists.
if [[ ! -f "\${FB_DATABASE}" ]]; then
	filebrowser config init >>\${LOG_PATH} 2>&1
	filebrowser users add admin "" --perm.admin=true --viewMode=mosaic >>\${LOG_PATH} 2>&1
fi

filebrowser config set --baseurl=\${BASEURL} --port=\${PORT} --auth.method=noauth --root=\${FOLDER} >>\${LOG_PATH} 2>&1

printf "👷 Starting filebrowser...\n\n"

printf "📂 Serving \${FOLDER} at http://localhost:\${PORT}\n\n"

filebrowser >>\${LOG_PATH} 2>&1 &

printf "📝 Logs at \${LOG_PATH}\n\n"
EOF

chmod +x /usr/local/bin/filebrowser-entrypoint

printf "🥳 Installation complete!\n\n"
|
||||
@@ -1,67 +0,0 @@
|
||||
#!/bin/sh

# Post-create setup for the dev container: installs the DevContainer CLI
# and copies SSH, Git, and dotfiles configuration from the read-only
# host home mounted at /mnt/home/coder.

install_devcontainer_cli() {
	set -e
	echo "🔧 Installing DevContainer CLI..."
	cd "$(dirname "$0")/../tools/devcontainer-cli"
	npm ci --omit=dev
	ln -sf "$(pwd)/node_modules/.bin/devcontainer" "$(npm config get prefix)/bin/devcontainer"
}

install_ssh_config() {
	echo "🔑 Installing SSH configuration..."
	if [ -d /mnt/home/coder/.ssh ]; then
		rsync -a /mnt/home/coder/.ssh/ ~/.ssh/
		chmod 0700 ~/.ssh
	else
		echo "⚠️ SSH directory not found."
	fi
}

install_git_config() {
	echo "📂 Installing Git configuration..."
	if [ -f /mnt/home/coder/git/config ]; then
		rsync -a /mnt/home/coder/git/ ~/.config/git/
	elif [ -f /mnt/home/coder/.gitconfig ]; then
		# .gitconfig is a regular file, so test it with -f. The previous
		# -d test could never match, leaving this branch unreachable.
		rsync -a /mnt/home/coder/.gitconfig ~/.gitconfig
	else
		echo "⚠️ Git configuration directory not found."
	fi
}

install_dotfiles() {
	if [ ! -d /mnt/home/coder/.config/coderv2/dotfiles ]; then
		echo "⚠️ Dotfiles directory not found."
		return
	fi

	cd /mnt/home/coder/.config/coderv2/dotfiles || return
	# Try the conventional bootstrap entry points in order; the first
	# executable one wins.
	for script in install.sh install bootstrap.sh bootstrap script/bootstrap setup.sh setup script/setup; do
		if [ -x "$script" ]; then
			echo "📦 Installing dotfiles..."
			./"$script" || {
				echo "❌ Error running $script. Please check the script for issues."
				return
			}
			echo "✅ Dotfiles installed successfully."
			return
		fi
	done
	echo "⚠️ No install script found in dotfiles directory."
}

personalize() {
	# Allow script to continue as Coder dogfood utilizes a hack to
	# synchronize startup script execution.
	touch /tmp/.coder-startup-script.done

	if [ -x /mnt/home/coder/personalize ]; then
		echo "🎨 Personalizing environment..."
		/mnt/home/coder/personalize
	fi
}

install_devcontainer_cli
install_ssh_config
# install_git_config was previously defined but never invoked; call it so
# the host Git configuration is actually copied in.
install_git_config
install_dotfiles
personalize
|
||||
@@ -1,4 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Start Docker service if not already running.
|
||||
sudo service docker start
|
||||
@@ -1,26 +0,0 @@
|
||||
{
|
||||
"name": "devcontainer-cli",
|
||||
"version": "1.0.0",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "devcontainer-cli",
|
||||
"version": "1.0.0",
|
||||
"dependencies": {
|
||||
"@devcontainers/cli": "^0.80.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@devcontainers/cli": {
|
||||
"version": "0.80.0",
|
||||
"resolved": "https://registry.npmjs.org/@devcontainers/cli/-/cli-0.80.0.tgz",
|
||||
"integrity": "sha512-w2EaxgjyeVGyzfA/KUEZBhyXqu/5PyWNXcnrXsZOBrt3aN2zyGiHrXoG54TF6K0b5DSCF01Rt5fnIyrCeFzFKw==",
|
||||
"bin": {
|
||||
"devcontainer": "devcontainer.js"
|
||||
},
|
||||
"engines": {
|
||||
"node": "^16.13.0 || >=18.0.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
{
|
||||
"name": "devcontainer-cli",
|
||||
"private": true,
|
||||
"version": "1.0.0",
|
||||
"dependencies": {
|
||||
"@devcontainers/cli": "^0.80.0"
|
||||
}
|
||||
}
|
||||
+1
-13
@@ -7,22 +7,10 @@ trim_trailing_whitespace = true
|
||||
insert_final_newline = true
|
||||
indent_style = tab
|
||||
|
||||
[*.{yaml,yml,tf,tftpl,tfvars,nix}]
|
||||
indent_style = space
|
||||
indent_size = 2
|
||||
|
||||
[*.proto]
|
||||
[*.{yaml,yml,tf,tfvars,nix}]
|
||||
indent_style = space
|
||||
indent_size = 2
|
||||
|
||||
[coderd/database/dump.sql]
|
||||
indent_style = space
|
||||
indent_size = 4
|
||||
|
||||
[coderd/database/queries/*.sql]
|
||||
indent_style = tab
|
||||
indent_size = 4
|
||||
|
||||
[coderd/database/migrations/*.sql]
|
||||
indent_style = tab
|
||||
indent_size = 4
|
||||
|
||||
+1
-3
@@ -15,8 +15,6 @@ provisionersdk/proto/*.go linguist-generated=true
|
||||
*.tfstate.json linguist-generated=true
|
||||
*.tfstate.dot linguist-generated=true
|
||||
*.tfplan.dot linguist-generated=true
|
||||
site/e2e/google/protobuf/timestampGenerated.ts
|
||||
site/e2e/provisionerGenerated.ts linguist-generated=true
|
||||
site/src/api/countriesGenerated.tsx linguist-generated=true
|
||||
site/src/api/rbacresourcesGenerated.tsx linguist-generated=true
|
||||
site/src/api/typesGenerated.ts linguist-generated=true
|
||||
site/src/pages/SetupPage/countries.tsx linguist-generated=true
|
||||
|
||||
@@ -24,10 +24,5 @@ ignorePatterns:
|
||||
- pattern: "mutagen.io"
|
||||
- pattern: "docs.github.com"
|
||||
- pattern: "claude.ai"
|
||||
- pattern: "splunk.com"
|
||||
- pattern: "stackoverflow.com/questions"
|
||||
- pattern: "developer.hashicorp.com/terraform/language"
|
||||
- pattern: "platform.openai.com"
|
||||
- pattern: "api.openai.com"
|
||||
aliveStatusCodes:
|
||||
- 200
|
||||
|
||||
@@ -2,7 +2,6 @@ name: "🐞 Bug"
|
||||
description: "File a bug report."
|
||||
title: "bug: "
|
||||
labels: ["needs-triage"]
|
||||
type: "Bug"
|
||||
body:
|
||||
- type: checkboxes
|
||||
id: existing_issues
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
name: "Download Embedded Postgres Cache"
|
||||
description: |
|
||||
Downloads the embedded postgres cache and outputs today's cache key.
|
||||
A PR job can use a cache if it was created by its base branch, its current
|
||||
branch, or the default branch.
|
||||
https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache
|
||||
outputs:
|
||||
cache-key:
|
||||
description: "Today's cache key"
|
||||
value: ${{ steps.vars.outputs.cache-key }}
|
||||
inputs:
|
||||
key-prefix:
|
||||
description: "Prefix for the cache key"
|
||||
required: true
|
||||
cache-path:
|
||||
description: "Path to the cache directory"
|
||||
required: true
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Get date values and cache key
|
||||
id: vars
|
||||
shell: bash
|
||||
run: |
|
||||
export YEAR_MONTH=$(date +'%Y-%m')
|
||||
export PREV_YEAR_MONTH=$(date -d 'last month' +'%Y-%m')
|
||||
export DAY=$(date +'%d')
|
||||
echo "year-month=$YEAR_MONTH" >> "$GITHUB_OUTPUT"
|
||||
echo "prev-year-month=$PREV_YEAR_MONTH" >> "$GITHUB_OUTPUT"
|
||||
echo "cache-key=${INPUTS_KEY_PREFIX}-${YEAR_MONTH}-${DAY}" >> "$GITHUB_OUTPUT"
|
||||
env:
|
||||
INPUTS_KEY_PREFIX: ${{ inputs.key-prefix }}
|
||||
|
||||
# By default, depot keeps caches for 14 days. This is plenty for embedded
|
||||
# postgres, which changes infrequently.
|
||||
# https://depot.dev/docs/github-actions/overview#cache-retention-policy
|
||||
- name: Download embedded Postgres cache
|
||||
uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
with:
|
||||
path: ${{ inputs.cache-path }}
|
||||
key: ${{ steps.vars.outputs.cache-key }}
|
||||
# > If there are multiple partial matches for a restore key, the action returns the most recently created cache.
|
||||
# https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/caching-dependencies-to-speed-up-workflows#matching-a-cache-key
|
||||
# The second restore key allows non-main branches to use the cache from the previous month.
|
||||
# This prevents PRs from rebuilding the cache on the first day of the month.
|
||||
# It also makes sure that once a month, the cache is fully reset.
|
||||
restore-keys: |
|
||||
${{ inputs.key-prefix }}-${{ steps.vars.outputs.year-month }}-
|
||||
${{ github.ref != 'refs/heads/main' && format('{0}-{1}-', inputs.key-prefix, steps.vars.outputs.prev-year-month) || '' }}
|
||||
@@ -1,18 +0,0 @@
|
||||
name: "Upload Embedded Postgres Cache"
|
||||
description: Uploads the embedded Postgres cache. This only runs on the main branch.
|
||||
inputs:
|
||||
cache-key:
|
||||
description: "Cache key"
|
||||
required: true
|
||||
cache-path:
|
||||
description: "Path to the cache directory"
|
||||
required: true
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Upload Embedded Postgres cache
|
||||
if: ${{ github.ref == 'refs/heads/main' }}
|
||||
uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
with:
|
||||
path: ${{ inputs.cache-path }}
|
||||
key: ${{ inputs.cache-key }}
|
||||
@@ -1,33 +0,0 @@
|
||||
name: "Setup Embedded Postgres Cache Paths"
|
||||
description: Sets up a path for cached embedded postgres binaries.
|
||||
outputs:
|
||||
embedded-pg-cache:
|
||||
description: "Value of EMBEDDED_PG_CACHE_DIR"
|
||||
value: ${{ steps.paths.outputs.embedded-pg-cache }}
|
||||
cached-dirs:
|
||||
description: "directories that should be cached between CI runs"
|
||||
value: ${{ steps.paths.outputs.cached-dirs }}
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Override Go paths
|
||||
id: paths
|
||||
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
|
||||
with:
|
||||
script: |
|
||||
const path = require('path');
|
||||
|
||||
// RUNNER_TEMP should be backed by a RAM disk on Windows if
|
||||
// coder/setup-ramdisk-action was used
|
||||
const runnerTemp = process.env.RUNNER_TEMP;
|
||||
const embeddedPgCacheDir = path.join(runnerTemp, 'embedded-pg-cache');
|
||||
core.exportVariable('EMBEDDED_PG_CACHE_DIR', embeddedPgCacheDir);
|
||||
core.setOutput('embedded-pg-cache', embeddedPgCacheDir);
|
||||
const cachedDirs = `${embeddedPgCacheDir}`;
|
||||
core.setOutput('cached-dirs', cachedDirs);
|
||||
|
||||
- name: Create directories
|
||||
shell: bash
|
||||
run: |
|
||||
set -e
|
||||
mkdir -p "$EMBEDDED_PG_CACHE_DIR"
|
||||
@@ -1,57 +0,0 @@
|
||||
name: "Setup Go Paths"
|
||||
description: Overrides Go paths like GOCACHE and GOMODCACHE to use temporary directories.
|
||||
outputs:
|
||||
gocache:
|
||||
description: "Value of GOCACHE"
|
||||
value: ${{ steps.paths.outputs.gocache }}
|
||||
gomodcache:
|
||||
description: "Value of GOMODCACHE"
|
||||
value: ${{ steps.paths.outputs.gomodcache }}
|
||||
gopath:
|
||||
description: "Value of GOPATH"
|
||||
value: ${{ steps.paths.outputs.gopath }}
|
||||
gotmp:
|
||||
description: "Value of GOTMPDIR"
|
||||
value: ${{ steps.paths.outputs.gotmp }}
|
||||
cached-dirs:
|
||||
description: "Go directories that should be cached between CI runs"
|
||||
value: ${{ steps.paths.outputs.cached-dirs }}
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Override Go paths
|
||||
id: paths
|
||||
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
|
||||
with:
|
||||
script: |
|
||||
const path = require('path');
|
||||
|
||||
// RUNNER_TEMP should be backed by a RAM disk on Windows if
|
||||
// coder/setup-ramdisk-action was used
|
||||
const runnerTemp = process.env.RUNNER_TEMP;
|
||||
const gocacheDir = path.join(runnerTemp, 'go-cache');
|
||||
const gomodcacheDir = path.join(runnerTemp, 'go-mod-cache');
|
||||
const gopathDir = path.join(runnerTemp, 'go-path');
|
||||
const gotmpDir = path.join(runnerTemp, 'go-tmp');
|
||||
|
||||
core.exportVariable('GOCACHE', gocacheDir);
|
||||
core.exportVariable('GOMODCACHE', gomodcacheDir);
|
||||
core.exportVariable('GOPATH', gopathDir);
|
||||
core.exportVariable('GOTMPDIR', gotmpDir);
|
||||
|
||||
core.setOutput('gocache', gocacheDir);
|
||||
core.setOutput('gomodcache', gomodcacheDir);
|
||||
core.setOutput('gopath', gopathDir);
|
||||
core.setOutput('gotmp', gotmpDir);
|
||||
|
||||
const cachedDirs = `${gocacheDir}\n${gomodcacheDir}`;
|
||||
core.setOutput('cached-dirs', cachedDirs);
|
||||
|
||||
- name: Create directories
|
||||
shell: bash
|
||||
run: |
|
||||
set -e
|
||||
mkdir -p "$GOCACHE"
|
||||
mkdir -p "$GOMODCACHE"
|
||||
mkdir -p "$GOPATH"
|
||||
mkdir -p "$GOTMPDIR"
|
||||
@@ -4,29 +4,18 @@ description: |
|
||||
inputs:
|
||||
version:
|
||||
description: "The Go version to use."
|
||||
default: "1.24.10"
|
||||
use-preinstalled-go:
|
||||
description: "Whether to use preinstalled Go."
|
||||
default: "false"
|
||||
use-cache:
|
||||
description: "Whether to use the cache."
|
||||
default: "true"
|
||||
default: "1.24.2"
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
|
||||
with:
|
||||
go-version: ${{ inputs.use-preinstalled-go == 'false' && inputs.version || '' }}
|
||||
cache: ${{ inputs.use-cache }}
|
||||
go-version: ${{ inputs.version }}
|
||||
|
||||
- name: Install gotestsum
|
||||
shell: bash
|
||||
run: go install gotest.tools/gotestsum@0d9599e513d70e5792bb9334869f82f6e8b53d4d # main as of 2025-05-15
|
||||
|
||||
- name: Install mtimehash
|
||||
shell: bash
|
||||
run: go install github.com/slsyy/mtimehash/cmd/mtimehash@a6b5da4ed2c4a40e7b805534b004e9fde7b53ce0 # v1.0.0
|
||||
run: go install gotest.tools/gotestsum@latest
|
||||
|
||||
# It isn't necessary that we ever do this, but it helps
|
||||
# separate the "setup" from the "run" times.
|
||||
|
||||
@@ -0,0 +1,27 @@
|
||||
name: "Setup ImDisk"
|
||||
if: runner.os == 'Windows'
|
||||
description: |
|
||||
Sets up the ImDisk toolkit for Windows and creates a RAM disk on drive R:.
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Download ImDisk
|
||||
if: runner.os == 'Windows'
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir imdisk
|
||||
cd imdisk
|
||||
curl -L -o files.cab https://github.com/coder/imdisk-artifacts/raw/92a17839ebc0ee3e69be019f66b3e9b5d2de4482/files.cab
|
||||
curl -L -o install.bat https://github.com/coder/imdisk-artifacts/raw/92a17839ebc0ee3e69be019f66b3e9b5d2de4482/install.bat
|
||||
cd ..
|
||||
|
||||
- name: Install ImDisk
|
||||
shell: cmd
|
||||
run: |
|
||||
cd imdisk
|
||||
install.bat /silent
|
||||
|
||||
- name: Create RAM Disk
|
||||
shell: cmd
|
||||
run: |
|
||||
imdisk -a -s 4096M -m R: -p "/fs:ntfs /q /y"
|
||||
@@ -16,7 +16,7 @@ runs:
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4
|
||||
with:
|
||||
node-version: 22.19.0
|
||||
node-version: 20.16.0
|
||||
# See https://github.com/actions/setup-node#caching-global-packages-data
|
||||
cache: "pnpm"
|
||||
cache-dependency-path: ${{ inputs.directory }}/pnpm-lock.yaml
|
||||
|
||||
@@ -5,13 +5,6 @@ runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Setup sqlc
|
||||
# uses: sqlc-dev/setup-sqlc@c0209b9199cd1cce6a14fc27cabcec491b651761 # v4.0.0
|
||||
# with:
|
||||
# sqlc-version: "1.30.0"
|
||||
|
||||
# Switched to coder/sqlc fork to fix ambiguous column bug, see:
|
||||
# - https://github.com/coder/sqlc/pull/1
|
||||
# - https://github.com/sqlc-dev/sqlc/pull/4159
|
||||
shell: bash
|
||||
run: |
|
||||
CGO_ENABLED=1 go install github.com/coder/sqlc/cmd/sqlc@aab4e865a51df0c43e1839f81a9d349b41d14f05
|
||||
uses: sqlc-dev/setup-sqlc@c0209b9199cd1cce6a14fc27cabcec491b651761 # v4.0.0
|
||||
with:
|
||||
sqlc-version: "1.27.0"
|
||||
|
||||
@@ -7,5 +7,5 @@ runs:
|
||||
- name: Install Terraform
|
||||
uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3.1.2
|
||||
with:
|
||||
terraform_version: 1.13.4
|
||||
terraform_version: 1.11.4
|
||||
terraform_wrapper: false
|
||||
|
||||
@@ -27,11 +27,9 @@ runs:
|
||||
export YEAR_MONTH=$(date +'%Y-%m')
|
||||
export PREV_YEAR_MONTH=$(date -d 'last month' +'%Y-%m')
|
||||
export DAY=$(date +'%d')
|
||||
echo "year-month=$YEAR_MONTH" >> "$GITHUB_OUTPUT"
|
||||
echo "prev-year-month=$PREV_YEAR_MONTH" >> "$GITHUB_OUTPUT"
|
||||
echo "cache-key=${INPUTS_KEY_PREFIX}-${YEAR_MONTH}-${DAY}" >> "$GITHUB_OUTPUT"
|
||||
env:
|
||||
INPUTS_KEY_PREFIX: ${{ inputs.key-prefix }}
|
||||
echo "year-month=$YEAR_MONTH" >> $GITHUB_OUTPUT
|
||||
echo "prev-year-month=$PREV_YEAR_MONTH" >> $GITHUB_OUTPUT
|
||||
echo "cache-key=${{ inputs.key-prefix }}-${YEAR_MONTH}-${DAY}" >> $GITHUB_OUTPUT
|
||||
|
||||
# TODO: As a cost optimization, we could remove caches that are older than
|
||||
# a day or two. By default, depot keeps caches for 14 days, which isn't
|
||||
|
||||
@@ -10,58 +10,19 @@ runs:
|
||||
steps:
|
||||
- shell: bash
|
||||
run: |
|
||||
set -e
|
||||
|
||||
echo "owner: $REPO_OWNER"
|
||||
if [[ "$REPO_OWNER" != "coder" ]]; then
|
||||
owner=${{ github.repository_owner }}
|
||||
echo "owner: $owner"
|
||||
if [[ $owner != "coder" ]]; then
|
||||
echo "Not a pull request from the main repo, skipping..."
|
||||
exit 0
|
||||
fi
|
||||
if [[ -z "${DATADOG_API_KEY}" ]]; then
|
||||
if [[ -z "${{ inputs.api-key }}" ]]; then
|
||||
# This can happen for dependabot.
|
||||
echo "No API key provided, skipping..."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
BINARY_VERSION="v2.48.0"
|
||||
BINARY_HASH_WINDOWS="b7bebb8212403fddb1563bae84ce5e69a70dac11e35eb07a00c9ef7ac9ed65ea"
|
||||
BINARY_HASH_MACOS="e87c808638fddb21a87a5c4584b68ba802965eb0a593d43959c81f67246bd9eb"
|
||||
BINARY_HASH_LINUX="5e700c465728fff8313e77c2d5ba1ce19a736168735137e1ddc7c6346ed48208"
|
||||
|
||||
TMP_DIR=$(mktemp -d)
|
||||
|
||||
if [[ "${RUNNER_OS}" == "Windows" ]]; then
|
||||
BINARY_PATH="${TMP_DIR}/datadog-ci.exe"
|
||||
BINARY_URL="https://github.com/DataDog/datadog-ci/releases/download/${BINARY_VERSION}/datadog-ci_win-x64"
|
||||
elif [[ "${RUNNER_OS}" == "macOS" ]]; then
|
||||
BINARY_PATH="${TMP_DIR}/datadog-ci"
|
||||
BINARY_URL="https://github.com/DataDog/datadog-ci/releases/download/${BINARY_VERSION}/datadog-ci_darwin-arm64"
|
||||
elif [[ "${RUNNER_OS}" == "Linux" ]]; then
|
||||
BINARY_PATH="${TMP_DIR}/datadog-ci"
|
||||
BINARY_URL="https://github.com/DataDog/datadog-ci/releases/download/${BINARY_VERSION}/datadog-ci_linux-x64"
|
||||
else
|
||||
echo "Unsupported OS: $RUNNER_OS"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Downloading DataDog CI binary version ${BINARY_VERSION} for $RUNNER_OS..."
|
||||
curl -sSL "$BINARY_URL" -o "$BINARY_PATH"
|
||||
|
||||
if [[ "${RUNNER_OS}" == "Windows" ]]; then
|
||||
echo "$BINARY_HASH_WINDOWS $BINARY_PATH" | sha256sum --check
|
||||
elif [[ "${RUNNER_OS}" == "macOS" ]]; then
|
||||
echo "$BINARY_HASH_MACOS $BINARY_PATH" | shasum -a 256 --check
|
||||
elif [[ "${RUNNER_OS}" == "Linux" ]]; then
|
||||
echo "$BINARY_HASH_LINUX $BINARY_PATH" | sha256sum --check
|
||||
fi
|
||||
|
||||
# Make binary executable (not needed for Windows)
|
||||
if [[ "${RUNNER_OS}" != "Windows" ]]; then
|
||||
chmod +x "$BINARY_PATH"
|
||||
fi
|
||||
|
||||
"$BINARY_PATH" junit upload --service coder ./gotests.xml \
|
||||
--tags "os:${RUNNER_OS}" --tags "runner_name:${RUNNER_NAME}"
|
||||
npm install -g @datadog/datadog-ci@2.21.0
|
||||
datadog-ci junit upload --service coder ./gotests.xml \
|
||||
--tags os:${{runner.os}} --tags runner_name:${{runner.name}}
|
||||
env:
|
||||
REPO_OWNER: ${{ github.repository_owner }}
|
||||
DATADOG_API_KEY: ${{ inputs.api-key }}
|
||||
|
||||
@@ -33,7 +33,6 @@ updates:
|
||||
- dependency-name: "*"
|
||||
update-types:
|
||||
- version-update:semver-patch
|
||||
- dependency-name: "github.com/mark3labs/mcp-go"
|
||||
|
||||
# Update our Dockerfile.
|
||||
- package-ecosystem: "docker"
|
||||
@@ -80,9 +79,6 @@ updates:
|
||||
mui:
|
||||
patterns:
|
||||
- "@mui*"
|
||||
radix:
|
||||
patterns:
|
||||
- "@radix-ui/*"
|
||||
react:
|
||||
patterns:
|
||||
- "react"
|
||||
@@ -107,23 +103,4 @@ updates:
|
||||
- dependency-name: "*"
|
||||
update-types:
|
||||
- version-update:semver-major
|
||||
- dependency-name: "@playwright/test"
|
||||
open-pull-requests-limit: 15
|
||||
|
||||
- package-ecosystem: "terraform"
|
||||
directories:
|
||||
- "dogfood/*/"
|
||||
- "examples/templates/*/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
commit-message:
|
||||
prefix: "chore"
|
||||
groups:
|
||||
coder:
|
||||
patterns:
|
||||
- "registry.coder.com/coder/*/coder"
|
||||
labels: []
|
||||
ignore:
|
||||
- dependency-name: "*"
|
||||
update-types:
|
||||
- version-update:semver-major
|
||||
|
||||
@@ -0,0 +1,34 @@
|
||||
app = "sao-paulo-coder"
|
||||
primary_region = "gru"
|
||||
|
||||
[experimental]
|
||||
entrypoint = ["/bin/sh", "-c", "CODER_DERP_SERVER_RELAY_URL=\"http://[${FLY_PRIVATE_IP}]:3000\" /opt/coder wsproxy server"]
|
||||
auto_rollback = true
|
||||
|
||||
[build]
|
||||
image = "ghcr.io/coder/coder-preview:main"
|
||||
|
||||
[env]
|
||||
CODER_ACCESS_URL = "https://sao-paulo.fly.dev.coder.com"
|
||||
CODER_HTTP_ADDRESS = "0.0.0.0:3000"
|
||||
CODER_PRIMARY_ACCESS_URL = "https://dev.coder.com"
|
||||
CODER_WILDCARD_ACCESS_URL = "*--apps.sao-paulo.fly.dev.coder.com"
|
||||
CODER_VERBOSE = "true"
|
||||
|
||||
[http_service]
|
||||
internal_port = 3000
|
||||
force_https = true
|
||||
auto_stop_machines = true
|
||||
auto_start_machines = true
|
||||
min_machines_running = 0
|
||||
|
||||
# Ref: https://fly.io/docs/reference/configuration/#http_service-concurrency
|
||||
[http_service.concurrency]
|
||||
type = "requests"
|
||||
soft_limit = 50
|
||||
hard_limit = 100
|
||||
|
||||
[[vm]]
|
||||
cpu_kind = "shared"
|
||||
cpus = 2
|
||||
memory_mb = 512
|
||||
@@ -1,5 +0,0 @@
|
||||
<!--
|
||||
|
||||
If you have used AI to produce some or all of this PR, please ensure you have read our [AI Contribution guidelines](https://coder.com/docs/about/contributing/AI_CONTRIBUTING) before submitting.
|
||||
|
||||
-->
|
||||
+410
-385
File diff suppressed because it is too large
Load Diff
@@ -3,7 +3,6 @@ name: contrib
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created, edited]
|
||||
# zizmor: ignore[dangerous-triggers] We explicitly want to run on pull_request_target.
|
||||
pull_request_target:
|
||||
types:
|
||||
- opened
|
||||
@@ -43,7 +42,7 @@ jobs:
|
||||
# branch should not be protected
|
||||
branch: "main"
|
||||
# Some users have signed a corporate CLA with Coder so are exempt from signing our community one.
|
||||
allowlist: "coryb,aaronlehmann,dependabot*,blink-so*"
|
||||
allowlist: "coryb,aaronlehmann,dependabot*"
|
||||
|
||||
release-labels:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -53,7 +52,7 @@ jobs:
|
||||
if: ${{ github.event_name == 'pull_request_target' && !github.event.pull_request.draft }}
|
||||
steps:
|
||||
- name: release-labels
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
|
||||
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
|
||||
with:
|
||||
# This script ensures PR title and labels are in sync:
|
||||
#
|
||||
|
||||
@@ -15,7 +15,7 @@ jobs:
|
||||
github.event_name == 'pull_request' &&
|
||||
github.event.action == 'opened' &&
|
||||
github.event.pull_request.user.login == 'dependabot[bot]' &&
|
||||
github.event.pull_request.user.id == 49699333 &&
|
||||
github.actor_id == 49699333 &&
|
||||
github.repository == 'coder/coder'
|
||||
permissions:
|
||||
pull-requests: write
|
||||
@@ -23,7 +23,7 @@ jobs:
|
||||
steps:
|
||||
- name: Dependabot metadata
|
||||
id: metadata
|
||||
uses: dependabot/fetch-metadata@08eff52bf64351f401fb50d4972fa95b9f2c2d1b # v2.4.0
|
||||
uses: dependabot/fetch-metadata@d7267f607e9d3fb96fc2fbe83e0af444713e90b7 # v2.3.0
|
||||
with:
|
||||
github-token: "${{ secrets.GITHUB_TOKEN }}"
|
||||
|
||||
@@ -44,6 +44,10 @@ jobs:
|
||||
GH_TOKEN: ${{secrets.GITHUB_TOKEN}}
|
||||
|
||||
- name: Send Slack notification
|
||||
env:
|
||||
PR_URL: ${{github.event.pull_request.html_url}}
|
||||
PR_TITLE: ${{github.event.pull_request.title}}
|
||||
PR_NUMBER: ${{github.event.pull_request.number}}
|
||||
run: |
|
||||
curl -X POST -H 'Content-type: application/json' \
|
||||
--data '{
|
||||
@@ -54,7 +58,7 @@ jobs:
|
||||
"type": "header",
|
||||
"text": {
|
||||
"type": "plain_text",
|
||||
"text": ":pr-merged: Auto merge enabled for Dependabot PR #'"${PR_NUMBER}"'",
|
||||
"text": ":pr-merged: Auto merge enabled for Dependabot PR #${{ env.PR_NUMBER }}",
|
||||
"emoji": true
|
||||
}
|
||||
},
|
||||
@@ -63,7 +67,7 @@ jobs:
|
||||
"fields": [
|
||||
{
|
||||
"type": "mrkdwn",
|
||||
"text": "'"${PR_TITLE}"'"
|
||||
"text": "${{ env.PR_TITLE }}"
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -76,14 +80,9 @@ jobs:
|
||||
"type": "plain_text",
|
||||
"text": "View PR"
|
||||
},
|
||||
"url": "'"${PR_URL}"'"
|
||||
"url": "${{ env.PR_URL }}"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}' "${{ secrets.DEPENDABOT_PRS_SLACK_WEBHOOK }}"
|
||||
env:
|
||||
SLACK_WEBHOOK: ${{ secrets.DEPENDABOT_PRS_SLACK_WEBHOOK }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
PR_TITLE: ${{ github.event.pull_request.title }}
|
||||
PR_URL: ${{ github.event.pull_request.html_url }}
|
||||
}' ${{ secrets.DEPENDABOT_PRS_SLACK_WEBHOOK }}
|
||||
|
||||
@@ -1,172 +0,0 @@
|
||||
name: deploy
|
||||
|
||||
on:
|
||||
# Via workflow_call, called from ci.yaml
|
||||
workflow_call:
|
||||
inputs:
|
||||
image:
|
||||
description: "Image and tag to potentially deploy. Current branch will be validated against should-deploy check."
|
||||
required: true
|
||||
type: string
|
||||
secrets:
|
||||
FLY_API_TOKEN:
|
||||
required: true
|
||||
FLY_PARIS_CODER_PROXY_SESSION_TOKEN:
|
||||
required: true
|
||||
FLY_SYDNEY_CODER_PROXY_SESSION_TOKEN:
|
||||
required: true
|
||||
FLY_SAO_PAULO_CODER_PROXY_SESSION_TOKEN:
|
||||
required: true
|
||||
FLY_JNB_CODER_PROXY_SESSION_TOKEN:
|
||||
required: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }} # no per-branch concurrency
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
# Determines if the given branch should be deployed to dogfood.
|
||||
should-deploy:
|
||||
name: should-deploy
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
verdict: ${{ steps.check.outputs.verdict }} # DEPLOY or NOOP
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Check if deploy is enabled
|
||||
id: check
|
||||
run: |
|
||||
set -euo pipefail
|
||||
verdict="$(./scripts/should_deploy.sh)"
|
||||
echo "verdict=$verdict" >> "$GITHUB_OUTPUT"
|
||||
|
||||
deploy:
|
||||
name: "deploy"
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
needs: should-deploy
|
||||
if: needs.should-deploy.outputs.verdict == 'DEPLOY'
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
packages: write # to retag image as dogfood
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: GHCR Login
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Authenticate to Google Cloud
|
||||
uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 # v3.0.0
|
||||
with:
|
||||
workload_identity_provider: ${{ vars.GCP_WORKLOAD_ID_PROVIDER }}
|
||||
service_account: ${{ vars.GCP_SERVICE_ACCOUNT }}
|
||||
|
||||
- name: Set up Google Cloud SDK
|
||||
uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # v3.0.1
|
||||
|
||||
- name: Set up Flux CLI
|
||||
uses: fluxcd/flux2/action@b6e76ca2534f76dcb8dd94fb057cdfa923c3b641 # v2.7.3
|
||||
with:
|
||||
# Keep this and the github action up to date with the version of flux installed in dogfood cluster
|
||||
version: "2.7.0"
|
||||
|
||||
- name: Get Cluster Credentials
|
||||
uses: google-github-actions/get-gke-credentials@3da1e46a907576cefaa90c484278bb5b259dd395 # v3.0.0
|
||||
with:
|
||||
cluster_name: dogfood-v2
|
||||
location: us-central1-a
|
||||
project_id: coder-dogfood-v2
|
||||
|
||||
# Retag image as dogfood while maintaining the multi-arch manifest
|
||||
- name: Tag image as dogfood
|
||||
run: docker buildx imagetools create --tag "ghcr.io/coder/coder-preview:dogfood" "$IMAGE"
|
||||
env:
|
||||
IMAGE: ${{ inputs.image }}
|
||||
|
||||
- name: Reconcile Flux
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
flux --namespace flux-system reconcile source git flux-system
|
||||
flux --namespace flux-system reconcile source git coder-main
|
||||
flux --namespace flux-system reconcile kustomization flux-system
|
||||
flux --namespace flux-system reconcile kustomization coder
|
||||
flux --namespace flux-system reconcile source chart coder-coder
|
||||
flux --namespace flux-system reconcile source chart coder-coder-provisioner
|
||||
flux --namespace coder reconcile helmrelease coder
|
||||
flux --namespace coder reconcile helmrelease coder-provisioner
|
||||
flux --namespace coder reconcile helmrelease coder-provisioner-tagged
|
||||
flux --namespace coder reconcile helmrelease coder-provisioner-tagged-prebuilds
|
||||
|
||||
# Just updating Flux is usually not enough. The Helm release may get
|
||||
# redeployed, but unless something causes the Deployment to update the
|
||||
# pods won't be recreated. It's important that the pods get recreated,
|
||||
# since we use `imagePullPolicy: Always` to ensure we're running the
|
||||
# latest image.
|
||||
- name: Rollout Deployment
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
kubectl --namespace coder rollout restart deployment/coder
|
||||
kubectl --namespace coder rollout status deployment/coder
|
||||
kubectl --namespace coder rollout restart deployment/coder-provisioner
|
||||
kubectl --namespace coder rollout status deployment/coder-provisioner
|
||||
kubectl --namespace coder rollout restart deployment/coder-provisioner-tagged
|
||||
kubectl --namespace coder rollout status deployment/coder-provisioner-tagged
|
||||
kubectl --namespace coder rollout restart deployment/coder-provisioner-tagged-prebuilds
|
||||
kubectl --namespace coder rollout status deployment/coder-provisioner-tagged-prebuilds
|
||||
|
||||
deploy-wsproxies:
|
||||
runs-on: ubuntu-latest
|
||||
needs: deploy
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Setup flyctl
|
||||
uses: superfly/flyctl-actions/setup-flyctl@fc53c09e1bc3be6f54706524e3b82c4f462f77be # v1.5
|
||||
|
||||
- name: Deploy workspace proxies
|
||||
run: |
|
||||
flyctl deploy --image "$IMAGE" --app paris-coder --config ./.github/fly-wsproxies/paris-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_PARIS" --yes
|
||||
flyctl deploy --image "$IMAGE" --app sydney-coder --config ./.github/fly-wsproxies/sydney-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_SYDNEY" --yes
|
||||
flyctl deploy --image "$IMAGE" --app jnb-coder --config ./.github/fly-wsproxies/jnb-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_JNB" --yes
|
||||
env:
|
||||
FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }}
|
||||
IMAGE: ${{ inputs.image }}
|
||||
TOKEN_PARIS: ${{ secrets.FLY_PARIS_CODER_PROXY_SESSION_TOKEN }}
|
||||
TOKEN_SYDNEY: ${{ secrets.FLY_SYDNEY_CODER_PROXY_SESSION_TOKEN }}
|
||||
TOKEN_JNB: ${{ secrets.FLY_JNB_CODER_PROXY_SESSION_TOKEN }}
|
||||
@@ -1,205 +0,0 @@
|
||||
# This workflow checks if a PR requires documentation updates.
|
||||
# It creates a Coder Task that uses AI to analyze the PR changes,
|
||||
# search existing docs, and comment with recommendations.
|
||||
#
|
||||
# Triggered by: Adding the "doc-check" label to a PR, or manual dispatch.
|
||||
|
||||
name: AI Documentation Check
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
- labeled
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
pr_url:
|
||||
description: "Pull Request URL to check"
|
||||
required: true
|
||||
type: string
|
||||
template_preset:
|
||||
description: "Template preset to use"
|
||||
required: false
|
||||
default: ""
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
doc-check:
|
||||
name: Analyze PR for Documentation Updates Needed
|
||||
runs-on: ubuntu-latest
|
||||
if: |
|
||||
(github.event.label.name == 'doc-check' || github.event_name == 'workflow_dispatch') &&
|
||||
(github.event.pull_request.draft == false || github.event_name == 'workflow_dispatch')
|
||||
timeout-minutes: 30
|
||||
env:
|
||||
CODER_URL: ${{ secrets.DOC_CHECK_CODER_URL }}
|
||||
CODER_SESSION_TOKEN: ${{ secrets.DOC_CHECK_CODER_SESSION_TOKEN }}
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
actions: write
|
||||
|
||||
steps:
|
||||
- name: Determine PR Context
|
||||
id: determine-context
|
||||
env:
|
||||
GITHUB_ACTOR: ${{ github.actor }}
|
||||
GITHUB_EVENT_NAME: ${{ github.event_name }}
|
||||
GITHUB_EVENT_PR_HTML_URL: ${{ github.event.pull_request.html_url }}
|
||||
GITHUB_EVENT_PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
GITHUB_EVENT_SENDER_ID: ${{ github.event.sender.id }}
|
||||
GITHUB_EVENT_SENDER_LOGIN: ${{ github.event.sender.login }}
|
||||
INPUTS_PR_URL: ${{ inputs.pr_url }}
|
||||
INPUTS_TEMPLATE_PRESET: ${{ inputs.template_preset || '' }}
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
echo "Using template preset: ${INPUTS_TEMPLATE_PRESET}"
|
||||
echo "template_preset=${INPUTS_TEMPLATE_PRESET}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
# For workflow_dispatch, use the provided PR URL
|
||||
if [[ "${GITHUB_EVENT_NAME}" == "workflow_dispatch" ]]; then
|
||||
if ! GITHUB_USER_ID=$(gh api "users/${GITHUB_ACTOR}" --jq '.id'); then
|
||||
echo "::error::Failed to get GitHub user ID for actor ${GITHUB_ACTOR}"
|
||||
exit 1
|
||||
fi
|
||||
echo "Using workflow_dispatch actor: ${GITHUB_ACTOR} (ID: ${GITHUB_USER_ID})"
|
||||
echo "github_user_id=${GITHUB_USER_ID}" >> "${GITHUB_OUTPUT}"
|
||||
echo "github_username=${GITHUB_ACTOR}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
echo "Using PR URL: ${INPUTS_PR_URL}"
|
||||
# Convert /pull/ to /issues/ for create-task-action compatibility
|
||||
ISSUE_URL="${INPUTS_PR_URL/\/pull\//\/issues\/}"
|
||||
echo "pr_url=${ISSUE_URL}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
# Extract PR number from URL for later use
|
||||
PR_NUMBER=$(echo "${INPUTS_PR_URL}" | grep -oP '(?<=pull/)\d+')
|
||||
echo "pr_number=${PR_NUMBER}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
elif [[ "${GITHUB_EVENT_NAME}" == "pull_request" ]]; then
|
||||
GITHUB_USER_ID=${GITHUB_EVENT_SENDER_ID}
|
||||
echo "Using label adder: ${GITHUB_EVENT_SENDER_LOGIN} (ID: ${GITHUB_USER_ID})"
|
||||
echo "github_user_id=${GITHUB_USER_ID}" >> "${GITHUB_OUTPUT}"
|
||||
echo "github_username=${GITHUB_EVENT_SENDER_LOGIN}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
echo "Using PR URL: ${GITHUB_EVENT_PR_HTML_URL}"
|
||||
# Convert /pull/ to /issues/ for create-task-action compatibility
|
||||
ISSUE_URL="${GITHUB_EVENT_PR_HTML_URL/\/pull\//\/issues\/}"
|
||||
echo "pr_url=${ISSUE_URL}" >> "${GITHUB_OUTPUT}"
|
||||
echo "pr_number=${GITHUB_EVENT_PR_NUMBER}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
else
|
||||
echo "::error::Unsupported event type: ${GITHUB_EVENT_NAME}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Extract changed files and build prompt
|
||||
id: extract-context
|
||||
env:
|
||||
PR_URL: ${{ steps.determine-context.outputs.pr_url }}
|
||||
PR_NUMBER: ${{ steps.determine-context.outputs.pr_number }}
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
echo "Analyzing PR #${PR_NUMBER}"
|
||||
|
||||
# Build task prompt - using unquoted heredoc so variables expand
|
||||
TASK_PROMPT=$(cat <<EOF
|
||||
Review PR #${PR_NUMBER} and determine if documentation needs updating or creating.
|
||||
|
||||
PR URL: ${PR_URL}
|
||||
|
||||
WORKFLOW:
|
||||
1. Setup (repo is pre-cloned at ~/coder)
|
||||
cd ~/coder
|
||||
git fetch origin pull/${PR_NUMBER}/head:pr-${PR_NUMBER}
|
||||
git checkout pr-${PR_NUMBER}
|
||||
|
||||
2. Get PR info
|
||||
Use GitHub MCP tools to get PR title, body, and diff
|
||||
Or use: git diff main...pr-${PR_NUMBER}
|
||||
|
||||
3. Understand Changes
|
||||
Read the diff and identify what changed
|
||||
Ask: Is this user-facing? Does it change behavior? Is it a new feature?
|
||||
|
||||
4. Search for Related Docs
|
||||
cat ~/coder/docs/manifest.json | jq '.routes[] | {title, path}' | head -50
|
||||
grep -ri "relevant_term" ~/coder/docs/ --include="*.md"
|
||||
|
||||
5. Decide
|
||||
NEEDS DOCS if: New feature, API change, CLI change, behavior change, user-visible
|
||||
NO DOCS if: Internal refactor, test-only, already documented, non-user-facing, dependency updates
|
||||
FIRST check: Did this PR already update docs? If yes and complete, say "No Changes Needed"
|
||||
|
||||
6. Comment on the PR using this format
|
||||
|
||||
COMMENT FORMAT:
|
||||
## 📚 Documentation Check
|
||||
|
||||
### ✅ Updates Needed
|
||||
- **[docs/path/file.md](github_link)** - Brief what needs changing
|
||||
|
||||
### 📝 New Docs Needed
|
||||
- **docs/suggested/location.md** - What should be documented
|
||||
|
||||
### ✨ No Changes Needed
|
||||
[Reason: Documents already updated in PR | Internal changes only | Test-only | No user-facing impact]
|
||||
|
||||
---
|
||||
*This comment was generated by an AI Agent through [Coder Tasks](https://coder.com/docs/ai-coder/tasks)*
|
||||
|
||||
DOCS STRUCTURE:
|
||||
Read ~/coder/docs/manifest.json for the complete documentation structure.
|
||||
Common areas include: reference/, admin/, user-guides/, ai-coder/, install/, tutorials/
|
||||
But check manifest.json - it has everything.
|
||||
|
||||
EOF
|
||||
)
|
||||
|
||||
# Output the prompt
|
||||
{
|
||||
echo "task_prompt<<EOFOUTPUT"
|
||||
echo "${TASK_PROMPT}"
|
||||
echo "EOFOUTPUT"
|
||||
} >> "${GITHUB_OUTPUT}"
|
||||
|
||||
- name: Checkout create-task-action
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
fetch-depth: 1
|
||||
path: ./.github/actions/create-task-action
|
||||
persist-credentials: false
|
||||
ref: main
|
||||
repository: coder/create-task-action
|
||||
|
||||
- name: Create Coder Task for Documentation Check
|
||||
id: create_task
|
||||
uses: ./.github/actions/create-task-action
|
||||
with:
|
||||
coder-url: ${{ secrets.DOC_CHECK_CODER_URL }}
|
||||
coder-token: ${{ secrets.DOC_CHECK_CODER_SESSION_TOKEN }}
|
||||
coder-organization: "default"
|
||||
coder-template-name: coder
|
||||
coder-template-preset: ${{ steps.determine-context.outputs.template_preset }}
|
||||
coder-task-name-prefix: doc-check
|
||||
coder-task-prompt: ${{ steps.extract-context.outputs.task_prompt }}
|
||||
github-user-id: ${{ steps.determine-context.outputs.github_user_id }}
|
||||
github-token: ${{ github.token }}
|
||||
github-issue-url: ${{ steps.determine-context.outputs.pr_url }}
|
||||
comment-on-issue: true
|
||||
|
||||
- name: Write outputs
|
||||
env:
|
||||
TASK_CREATED: ${{ steps.create_task.outputs.task-created }}
|
||||
TASK_NAME: ${{ steps.create_task.outputs.task-name }}
|
||||
TASK_URL: ${{ steps.create_task.outputs.task-url }}
|
||||
PR_URL: ${{ steps.determine-context.outputs.pr_url }}
|
||||
run: |
|
||||
{
|
||||
echo "## Documentation Check Task"
|
||||
echo ""
|
||||
echo "**PR:** ${PR_URL}"
|
||||
echo "**Task created:** ${TASK_CREATED}"
|
||||
echo "**Task name:** ${TASK_NAME}"
|
||||
echo "**Task URL:** ${TASK_URL}"
|
||||
echo ""
|
||||
echo "The Coder task is analyzing the PR changes and will comment with documentation recommendations."
|
||||
} >> "${GITHUB_STEP_SUMMARY}"
|
||||
@@ -38,17 +38,15 @@ jobs:
|
||||
if: github.repository_owner == 'coder'
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Docker login
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
@@ -62,7 +60,7 @@ jobs:
|
||||
|
||||
# This uses OIDC authentication, so no auth variables are required.
|
||||
- name: Build base Docker image via depot.dev
|
||||
uses: depot/build-push-action@9785b135c3c76c33db102e45be96a25ab55cd507 # v1.16.2
|
||||
uses: depot/build-push-action@636daae76684e38c301daa0c5eca1c095b24e780 # v1.14.0
|
||||
with:
|
||||
project: wl5hnrrkns
|
||||
context: base-build-context
|
||||
|
||||
@@ -23,14 +23,12 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- uses: tj-actions/changed-files@70069877f29101175ed2b055d210fe8b1d54d7d7 # v45.0.7
|
||||
- uses: tj-actions/changed-files@5426ecc3f5c2b10effaefbd374f0abdc6a571b2f # v45.0.7
|
||||
id: changed-files
|
||||
with:
|
||||
files: |
|
||||
@@ -41,16 +39,10 @@ jobs:
|
||||
- name: lint
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
run: |
|
||||
# shellcheck disable=SC2086
|
||||
pnpm exec markdownlint-cli2 $ALL_CHANGED_FILES
|
||||
env:
|
||||
ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }}
|
||||
pnpm exec markdownlint-cli2 ${{ steps.changed-files.outputs.all_changed_files }}
|
||||
|
||||
- name: fmt
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
run: |
|
||||
# markdown-table-formatter requires a space separated list of files
|
||||
# shellcheck disable=SC2086
|
||||
echo $ALL_CHANGED_FILES | tr ',' '\n' | pnpm exec markdown-table-formatter --check
|
||||
env:
|
||||
ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }}
|
||||
echo ${{ steps.changed-files.outputs.all_changed_files }} | tr ',' '\n' | pnpm exec markdown-table-formatter --check
|
||||
|
||||
@@ -18,7 +18,8 @@ on:
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
# Necessary for GCP authentication (https://github.com/google-github-actions/setup-gcloud#usage)
|
||||
id-token: write
|
||||
|
||||
jobs:
|
||||
build_image:
|
||||
@@ -26,21 +27,15 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-4' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Setup Nix
|
||||
uses: nixbuild/nix-quick-install-action@2c9db80fb984ceb1bcaa77cdda3fdf8cfba92035 # v34
|
||||
with:
|
||||
# Pinning to 2.28 here, as Nix gets a "error: [json.exception.type_error.302] type must be array, but is string"
|
||||
# on version 2.29 and above.
|
||||
nix_version: "2.28.5"
|
||||
uses: nixbuild/nix-quick-install-action@5bb6a3b3abe66fd09bbf250dce8ada94f856a703 # v30
|
||||
|
||||
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
|
||||
with:
|
||||
@@ -63,32 +58,31 @@ jobs:
|
||||
|
||||
- name: Get branch name
|
||||
id: branch-name
|
||||
uses: tj-actions/branch-names@5250492686b253f06fa55861556d1027b067aeb5 # v9.0.2
|
||||
uses: tj-actions/branch-names@dde14ac574a8b9b1cedc59a1cf312788af43d8d8 # v8.2.1
|
||||
|
||||
- name: "Branch name to Docker tag name"
|
||||
id: docker-tag-name
|
||||
run: |
|
||||
tag=${{ steps.branch-name.outputs.current_branch }}
|
||||
# Replace / with --, e.g. user/feature => user--feature.
|
||||
tag=${BRANCH_NAME//\//--}
|
||||
echo "tag=${tag}" >> "$GITHUB_OUTPUT"
|
||||
env:
|
||||
BRANCH_NAME: ${{ steps.branch-name.outputs.current_branch }}
|
||||
tag=${tag//\//--}
|
||||
echo "tag=${tag}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up Depot CLI
|
||||
uses: depot/setup-action@b0b1ea4f69e92ebf5dea3f8713a1b0c37b2126a5 # v1.6.0
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
|
||||
|
||||
- name: Login to DockerHub
|
||||
if: github.ref == 'refs/heads/main'
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
|
||||
- name: Build and push Non-Nix image
|
||||
uses: depot/build-push-action@9785b135c3c76c33db102e45be96a25ab55cd507 # v1.16.2
|
||||
uses: depot/build-push-action@636daae76684e38c301daa0c5eca1c095b24e780 # v1.14.0
|
||||
with:
|
||||
project: b4q6ltmpzh
|
||||
token: ${{ secrets.DEPOT_TOKEN }}
|
||||
@@ -109,39 +103,32 @@ jobs:
|
||||
|
||||
CURRENT_SYSTEM=$(nix eval --impure --raw --expr 'builtins.currentSystem')
|
||||
|
||||
docker image tag "codercom/oss-dogfood-nix:latest-$CURRENT_SYSTEM" "codercom/oss-dogfood-nix:${DOCKER_TAG}"
|
||||
docker image push "codercom/oss-dogfood-nix:${DOCKER_TAG}"
|
||||
docker image tag codercom/oss-dogfood-nix:latest-$CURRENT_SYSTEM codercom/oss-dogfood-nix:${{ steps.docker-tag-name.outputs.tag }}
|
||||
docker image push codercom/oss-dogfood-nix:${{ steps.docker-tag-name.outputs.tag }}
|
||||
|
||||
docker image tag "codercom/oss-dogfood-nix:latest-$CURRENT_SYSTEM" "codercom/oss-dogfood-nix:latest"
|
||||
docker image push "codercom/oss-dogfood-nix:latest"
|
||||
env:
|
||||
DOCKER_TAG: ${{ steps.docker-tag-name.outputs.tag }}
|
||||
docker image tag codercom/oss-dogfood-nix:latest-$CURRENT_SYSTEM codercom/oss-dogfood-nix:latest
|
||||
docker image push codercom/oss-dogfood-nix:latest
|
||||
|
||||
deploy_template:
|
||||
needs: build_image
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
# Necessary for GCP authentication (https://github.com/google-github-actions/setup-gcloud#usage)
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Setup Terraform
|
||||
uses: ./.github/actions/setup-tf
|
||||
|
||||
- name: Authenticate to Google Cloud
|
||||
uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 # v3.0.0
|
||||
uses: google-github-actions/auth@ba79af03959ebeac9769e648f473a284504d9193 # v2.1.10
|
||||
with:
|
||||
workload_identity_provider: ${{ vars.GCP_WORKLOAD_ID_PROVIDER }}
|
||||
service_account: ${{ vars.GCP_SERVICE_ACCOUNT }}
|
||||
workload_identity_provider: projects/573722524737/locations/global/workloadIdentityPools/github/providers/github
|
||||
service_account: coder-ci@coder-dogfood.iam.gserviceaccount.com
|
||||
|
||||
- name: Terraform init and validate
|
||||
run: |
|
||||
@@ -161,12 +148,12 @@ jobs:
|
||||
- name: Get short commit SHA
|
||||
if: github.ref == 'refs/heads/main'
|
||||
id: vars
|
||||
run: echo "sha_short=$(git rev-parse --short HEAD)" >> "$GITHUB_OUTPUT"
|
||||
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Get latest commit title
|
||||
if: github.ref == 'refs/heads/main'
|
||||
id: message
|
||||
run: echo "pr_title=$(git log --format=%s -n 1 ${{ github.sha }})" >> "$GITHUB_OUTPUT"
|
||||
run: echo "pr_title=$(git log --format=%s -n 1 ${{ github.sha }})" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: "Push template"
|
||||
if: github.ref == 'refs/heads/main'
|
||||
@@ -178,7 +165,6 @@ jobs:
|
||||
CODER_URL: https://dev.coder.com
|
||||
CODER_SESSION_TOKEN: ${{ secrets.CODER_SESSION_TOKEN }}
|
||||
# Template source & details
|
||||
TF_VAR_CODER_DOGFOOD_ANTHROPIC_API_KEY: ${{ secrets.CODER_DOGFOOD_ANTHROPIC_API_KEY }}
|
||||
TF_VAR_CODER_TEMPLATE_NAME: ${{ secrets.CODER_TEMPLATE_NAME }}
|
||||
TF_VAR_CODER_TEMPLATE_VERSION: ${{ steps.vars.outputs.sha_short }}
|
||||
TF_VAR_CODER_TEMPLATE_DIR: ./coder
|
||||
|
||||
@@ -12,73 +12,40 @@ permissions:
|
||||
|
||||
jobs:
|
||||
test-go-pg:
|
||||
# make sure to adjust NUM_PARALLEL_PACKAGES and NUM_PARALLEL_TESTS below
|
||||
# when changing runner sizes
|
||||
runs-on: ${{ matrix.os == 'macos-latest' && github.repository_owner == 'coder' && 'depot-macos-latest' || matrix.os == 'windows-2022' && github.repository_owner == 'coder' && 'depot-windows-2022-16' || matrix.os }}
|
||||
runs-on: ${{ matrix.os == 'macos-latest' && github.repository_owner == 'coder' && 'depot-macos-latest' || matrix.os == 'windows-2022' && github.repository_owner == 'coder' && 'windows-latest-16-cores' || matrix.os }}
|
||||
if: github.ref == 'refs/heads/main'
|
||||
# This timeout must be greater than the timeout set by `go test` in
|
||||
# `make test-postgres` to ensure we receive a trace of running
|
||||
# goroutines. Setting this to the timeout +5m should work quite well
|
||||
# even if some of the preceding steps are slow.
|
||||
timeout-minutes: 25
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os:
|
||||
- macos-latest
|
||||
- windows-2022
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
# macOS indexes all new files in the background. Our Postgres tests
|
||||
# create and destroy thousands of databases on disk, and Spotlight
|
||||
# tries to index all of them, seriously slowing down the tests.
|
||||
- name: Disable Spotlight Indexing
|
||||
if: runner.os == 'macOS'
|
||||
run: |
|
||||
enabled=$(sudo mdutil -a -s | { grep -Fc "Indexing enabled" || true; })
|
||||
if [ "$enabled" -eq 0 ]; then
|
||||
echo "Spotlight indexing is already disabled"
|
||||
exit 0
|
||||
fi
|
||||
sudo mdutil -a -i off
|
||||
sudo mdutil -X /
|
||||
sudo launchctl bootout system /System/Library/LaunchDaemons/com.apple.metadata.mds.plist
|
||||
|
||||
# Set up RAM disks to speed up the rest of the job. This action is in
|
||||
# a separate repository to allow its use before actions/checkout.
|
||||
- name: Setup RAM Disks
|
||||
if: runner.os == 'Windows'
|
||||
uses: coder/setup-ramdisk-action@e1100847ab2d7bcd9d14bcda8f2d1b0f07b36f1b # v0.1.0
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
persist-credentials: false
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
with:
|
||||
# Runners have Go baked-in and Go will automatically
|
||||
# download the toolchain configured in go.mod, so we don't
|
||||
# need to reinstall it. It's faster on Windows runners.
|
||||
use-preinstalled-go: ${{ runner.os == 'Windows' }}
|
||||
|
||||
- name: Setup Terraform
|
||||
uses: ./.github/actions/setup-tf
|
||||
|
||||
- name: Setup Embedded Postgres Cache Paths
|
||||
id: embedded-pg-cache
|
||||
uses: ./.github/actions/setup-embedded-pg-cache-paths
|
||||
|
||||
- name: Download Embedded Postgres Cache
|
||||
id: download-embedded-pg-cache
|
||||
uses: ./.github/actions/embedded-pg-cache/download
|
||||
with:
|
||||
key-prefix: embedded-pg-${{ runner.os }}-${{ runner.arch }}
|
||||
cache-path: ${{ steps.embedded-pg-cache.outputs.cached-dirs }}
|
||||
# Sets up the ImDisk toolkit for Windows and creates a RAM disk on drive R:.
|
||||
- name: Setup ImDisk
|
||||
if: runner.os == 'Windows'
|
||||
uses: ./.github/actions/setup-imdisk
|
||||
|
||||
- name: Test with PostgreSQL Database
|
||||
env:
|
||||
@@ -88,23 +55,6 @@ jobs:
|
||||
LC_ALL: "en_US.UTF-8"
|
||||
shell: bash
|
||||
run: |
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
if [ "${{ runner.os }}" == "Windows" ]; then
|
||||
# Create a temp dir on the R: ramdisk drive for Windows. The default
|
||||
# C: drive is extremely slow: https://github.com/actions/runner-images/issues/8755
|
||||
mkdir -p "R:/temp/embedded-pg"
|
||||
go run scripts/embedded-pg/main.go -path "R:/temp/embedded-pg" -cache "${EMBEDDED_PG_CACHE_DIR}"
|
||||
elif [ "${{ runner.os }}" == "macOS" ]; then
|
||||
# Postgres runs faster on a ramdisk on macOS too
|
||||
mkdir -p /tmp/tmpfs
|
||||
sudo mount_tmpfs -o noowners -s 8g /tmp/tmpfs
|
||||
go run scripts/embedded-pg/main.go -path /tmp/tmpfs/embedded-pg -cache "${EMBEDDED_PG_CACHE_DIR}"
|
||||
elif [ "${{ runner.os }}" == "Linux" ]; then
|
||||
make test-postgres-docker
|
||||
fi
|
||||
|
||||
# if macOS, install google-chrome for scaletests
|
||||
# As another concern, should we really have this kind of external dependency
|
||||
# requirement on standard CI?
|
||||
@@ -112,6 +62,10 @@ jobs:
|
||||
brew install google-chrome
|
||||
fi
|
||||
|
||||
# By default Go will use the number of logical CPUs, which
|
||||
# is a fine default.
|
||||
PARALLEL_FLAG=""
|
||||
|
||||
# macOS will output "The default interactive shell is now zsh"
|
||||
# intermittently in CI...
|
||||
if [ "${{ matrix.os }}" == "macos-latest" ]; then
|
||||
@@ -119,39 +73,18 @@ jobs:
|
||||
fi
|
||||
|
||||
if [ "${{ runner.os }}" == "Windows" ]; then
|
||||
# Our Windows runners have 16 cores.
|
||||
# On Windows Postgres chokes up when we have 16x16=256 tests
|
||||
# running in parallel, and dbtestutil.NewDB starts to take more than
|
||||
# 10s to complete sometimes causing test timeouts. With 16x8=128 tests
|
||||
# Postgres tends not to choke.
|
||||
NUM_PARALLEL_PACKAGES=8
|
||||
NUM_PARALLEL_TESTS=16
|
||||
elif [ "${{ runner.os }}" == "macOS" ]; then
|
||||
# Our macOS runners have 8 cores. We set NUM_PARALLEL_TESTS to 16
|
||||
# because the tests complete faster and Postgres doesn't choke. It seems
|
||||
# that macOS's tmpfs is faster than the one on Windows.
|
||||
NUM_PARALLEL_PACKAGES=8
|
||||
NUM_PARALLEL_TESTS=16
|
||||
elif [ "${{ runner.os }}" == "Linux" ]; then
|
||||
# Our Linux runners have 8 cores.
|
||||
NUM_PARALLEL_PACKAGES=8
|
||||
NUM_PARALLEL_TESTS=8
|
||||
# Create a temp dir on the R: ramdisk drive for Windows. The default
|
||||
# C: drive is extremely slow: https://github.com/actions/runner-images/issues/8755
|
||||
mkdir -p "R:/temp/embedded-pg"
|
||||
go run scripts/embedded-pg/main.go -path "R:/temp/embedded-pg"
|
||||
else
|
||||
go run scripts/embedded-pg/main.go
|
||||
fi
|
||||
|
||||
# run tests without cache
|
||||
TESTCOUNT="-count=1"
|
||||
|
||||
DB=ci gotestsum \
|
||||
--format standard-quiet --packages "./..." \
|
||||
-- -timeout=20m -v -p "$NUM_PARALLEL_PACKAGES" -parallel="$NUM_PARALLEL_TESTS" "$TESTCOUNT"
|
||||
|
||||
- name: Upload Embedded Postgres Cache
|
||||
uses: ./.github/actions/embedded-pg-cache/upload
|
||||
# We only use the embedded Postgres cache on macOS and Windows runners.
|
||||
if: runner.OS == 'macOS' || runner.OS == 'Windows'
|
||||
with:
|
||||
cache-key: ${{ steps.download-embedded-pg-cache.outputs.cache-key }}
|
||||
cache-path: "${{ steps.embedded-pg-cache.outputs.embedded-pg-cache }}"
|
||||
# Reduce test parallelism, mirroring what we do for race tests.
|
||||
# We'd been encountering issues with timing related flakes, and
|
||||
# this seems to help.
|
||||
DB=ci gotestsum --format standard-quiet -- -v -short -count=1 -parallel 4 -p 4 ./...
|
||||
|
||||
- name: Upload test stats to Datadog
|
||||
timeout-minutes: 1
|
||||
@@ -170,7 +103,6 @@ jobs:
|
||||
steps:
|
||||
- name: Send Slack notification
|
||||
run: |
|
||||
ESCAPED_PROMPT=$(printf "%s" "<@U09LQ75AHKR> $BLINK_CI_FAILURE_PROMPT" | jq -Rsa .)
|
||||
curl -X POST -H 'Content-type: application/json' \
|
||||
--data '{
|
||||
"blocks": [
|
||||
@@ -184,21 +116,27 @@ jobs:
|
||||
},
|
||||
{
|
||||
"type": "section",
|
||||
"text": {
|
||||
"type": "mrkdwn",
|
||||
"text": "*View failure:* <'"${RUN_URL}"'|Click here>"
|
||||
}
|
||||
"fields": [
|
||||
{
|
||||
"type": "mrkdwn",
|
||||
"text": "*Workflow:*\n${{ github.workflow }}"
|
||||
},
|
||||
{
|
||||
"type": "mrkdwn",
|
||||
"text": "*Committer:*\n${{ github.actor }}"
|
||||
},
|
||||
{
|
||||
"type": "mrkdwn",
|
||||
"text": "*Commit:*\n${{ github.sha }}"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "section",
|
||||
"text": {
|
||||
"type": "mrkdwn",
|
||||
"text": '"$ESCAPED_PROMPT"'
|
||||
"text": "*View failure:* <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|Click here>"
|
||||
}
|
||||
}
|
||||
]
|
||||
}' "${SLACK_WEBHOOK}"
|
||||
env:
|
||||
SLACK_WEBHOOK: ${{ secrets.CI_FAILURE_SLACK_WEBHOOK }}
|
||||
RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
|
||||
BLINK_CI_FAILURE_PROMPT: ${{ vars.BLINK_CI_FAILURE_PROMPT }}
|
||||
}' ${{ secrets.CI_FAILURE_SLACK_WEBHOOK }}
|
||||
|
||||
@@ -3,7 +3,6 @@
|
||||
name: PR Auto Assign
|
||||
|
||||
on:
|
||||
# zizmor: ignore[dangerous-triggers] We explicitly want to run on pull_request_target.
|
||||
pull_request_target:
|
||||
types: [opened]
|
||||
|
||||
@@ -15,7 +14,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ jobs:
|
||||
packages: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -27,12 +27,10 @@ jobs:
|
||||
id: pr_number
|
||||
run: |
|
||||
if [ -n "${{ github.event.pull_request.number }}" ]; then
|
||||
echo "PR_NUMBER=${{ github.event.pull_request.number }}" >> "$GITHUB_OUTPUT"
|
||||
echo "PR_NUMBER=${{ github.event.pull_request.number }}" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "PR_NUMBER=${PR_NUMBER}" >> "$GITHUB_OUTPUT"
|
||||
echo "PR_NUMBER=${{ github.event.inputs.pr_number }}" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
env:
|
||||
PR_NUMBER: ${{ github.event.inputs.pr_number }}
|
||||
|
||||
- name: Delete image
|
||||
continue-on-error: true
|
||||
@@ -53,21 +51,17 @@ jobs:
|
||||
- name: Delete helm release
|
||||
run: |
|
||||
set -euo pipefail
|
||||
helm delete --namespace "pr${PR_NUMBER}" "pr${PR_NUMBER}" || echo "helm release not found"
|
||||
env:
|
||||
PR_NUMBER: ${{ steps.pr_number.outputs.PR_NUMBER }}
|
||||
helm delete --namespace "pr${{ steps.pr_number.outputs.PR_NUMBER }}" "pr${{ steps.pr_number.outputs.PR_NUMBER }}" || echo "helm release not found"
|
||||
|
||||
- name: "Remove PR namespace"
|
||||
run: |
|
||||
kubectl delete namespace "pr${PR_NUMBER}" || echo "namespace not found"
|
||||
env:
|
||||
PR_NUMBER: ${{ steps.pr_number.outputs.PR_NUMBER }}
|
||||
kubectl delete namespace "pr${{ steps.pr_number.outputs.PR_NUMBER }}" || echo "namespace not found"
|
||||
|
||||
- name: "Remove DNS records"
|
||||
run: |
|
||||
set -euo pipefail
|
||||
# Get identifier for the record
|
||||
record_id=$(curl -X GET "https://api.cloudflare.com/client/v4/zones/${{ secrets.PR_DEPLOYMENTS_ZONE_ID }}/dns_records?name=%2A.pr${PR_NUMBER}.${{ secrets.PR_DEPLOYMENTS_DOMAIN }}" \
|
||||
record_id=$(curl -X GET "https://api.cloudflare.com/client/v4/zones/${{ secrets.PR_DEPLOYMENTS_ZONE_ID }}/dns_records?name=%2A.pr${{ steps.pr_number.outputs.PR_NUMBER }}.${{ secrets.PR_DEPLOYMENTS_DOMAIN }}" \
|
||||
-H "Authorization: Bearer ${{ secrets.PR_DEPLOYMENTS_CLOUDFLARE_API_TOKEN }}" \
|
||||
-H "Content-Type:application/json" | jq -r '.result[0].id') || echo "DNS record not found"
|
||||
|
||||
@@ -79,13 +73,9 @@ jobs:
|
||||
-H "Authorization: Bearer ${{ secrets.PR_DEPLOYMENTS_CLOUDFLARE_API_TOKEN }}" \
|
||||
-H "Content-Type:application/json" | jq -r '.success'
|
||||
) || echo "DNS record not found"
|
||||
env:
|
||||
PR_NUMBER: ${{ steps.pr_number.outputs.PR_NUMBER }}
|
||||
|
||||
- name: "Delete certificate"
|
||||
if: ${{ github.event.pull_request.merged == true }}
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
kubectl delete certificate "pr${PR_NUMBER}-tls" -n pr-deployment-certs || echo "certificate not found"
|
||||
env:
|
||||
PR_NUMBER: ${{ steps.pr_number.outputs.PR_NUMBER }}
|
||||
kubectl delete certificate "pr${{ steps.pr_number.outputs.PR_NUMBER }}-tls" -n pr-deployment-certs || echo "certificate not found"
|
||||
|
||||
@@ -39,14 +39,12 @@ jobs:
|
||||
PR_OPEN: ${{ steps.check_pr.outputs.pr_open }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Check if PR is open
|
||||
id: check_pr
|
||||
@@ -57,7 +55,7 @@ jobs:
|
||||
echo "PR doesn't exist or is closed."
|
||||
pr_open=false
|
||||
fi
|
||||
echo "pr_open=$pr_open" >> "$GITHUB_OUTPUT"
|
||||
echo "pr_open=$pr_open" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
@@ -76,15 +74,14 @@ jobs:
|
||||
runs-on: "ubuntu-latest"
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Get PR number, title, and branch name
|
||||
id: pr_info
|
||||
@@ -93,11 +90,9 @@ jobs:
|
||||
PR_NUMBER=$(gh pr view --json number | jq -r '.number')
|
||||
PR_TITLE=$(gh pr view --json title | jq -r '.title')
|
||||
PR_URL=$(gh pr view --json url | jq -r '.url')
|
||||
{
|
||||
echo "PR_URL=$PR_URL"
|
||||
echo "PR_NUMBER=$PR_NUMBER"
|
||||
echo "PR_TITLE=$PR_TITLE"
|
||||
} >> "$GITHUB_OUTPUT"
|
||||
echo "PR_URL=$PR_URL" >> $GITHUB_OUTPUT
|
||||
echo "PR_NUMBER=$PR_NUMBER" >> $GITHUB_OUTPUT
|
||||
echo "PR_TITLE=$PR_TITLE" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
@@ -105,8 +100,8 @@ jobs:
|
||||
id: set_tags
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "CODER_BASE_IMAGE_TAG=$CODER_BASE_IMAGE_TAG" >> "$GITHUB_OUTPUT"
|
||||
echo "CODER_IMAGE_TAG=$CODER_IMAGE_TAG" >> "$GITHUB_OUTPUT"
|
||||
echo "CODER_BASE_IMAGE_TAG=$CODER_BASE_IMAGE_TAG" >> $GITHUB_OUTPUT
|
||||
echo "CODER_IMAGE_TAG=$CODER_IMAGE_TAG" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
CODER_BASE_IMAGE_TAG: ghcr.io/coder/coder-preview-base:pr${{ steps.pr_info.outputs.PR_NUMBER }}
|
||||
CODER_IMAGE_TAG: ghcr.io/coder/coder-preview:pr${{ steps.pr_info.outputs.PR_NUMBER }}
|
||||
@@ -123,16 +118,14 @@ jobs:
|
||||
id: check_deployment
|
||||
run: |
|
||||
set -euo pipefail
|
||||
if helm status "pr${PR_NUMBER}" --namespace "pr${PR_NUMBER}" > /dev/null 2>&1; then
|
||||
if helm status "pr${{ steps.pr_info.outputs.PR_NUMBER }}" --namespace "pr${{ steps.pr_info.outputs.PR_NUMBER }}" > /dev/null 2>&1; then
|
||||
echo "Deployment already exists. Skipping deployment."
|
||||
NEW=false
|
||||
else
|
||||
echo "Deployment doesn't exist."
|
||||
NEW=true
|
||||
fi
|
||||
echo "NEW=$NEW" >> "$GITHUB_OUTPUT"
|
||||
env:
|
||||
PR_NUMBER: ${{ steps.pr_info.outputs.PR_NUMBER }}
|
||||
echo "NEW=$NEW" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Check changed files
|
||||
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
|
||||
@@ -161,20 +154,17 @@ jobs:
|
||||
- name: Print number of changed files
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "Total number of changed files: ${ALL_COUNT}"
|
||||
echo "Number of ignored files: ${IGNORED_COUNT}"
|
||||
env:
|
||||
ALL_COUNT: ${{ steps.filter.outputs.all_count }}
|
||||
IGNORED_COUNT: ${{ steps.filter.outputs.ignored_count }}
|
||||
echo "Total number of changed files: ${{ steps.filter.outputs.all_count }}"
|
||||
echo "Number of ignored files: ${{ steps.filter.outputs.ignored_count }}"
|
||||
|
||||
- name: Build conditionals
|
||||
id: build_conditionals
|
||||
run: |
|
||||
set -euo pipefail
|
||||
# build if the workflow is manually triggered and the deployment doesn't exist (first build or force rebuild)
|
||||
echo "first_or_force_build=${{ (github.event_name == 'workflow_dispatch' && steps.check_deployment.outputs.NEW == 'true') || github.event.inputs.build == 'true' }}" >> "$GITHUB_OUTPUT"
|
||||
echo "first_or_force_build=${{ (github.event_name == 'workflow_dispatch' && steps.check_deployment.outputs.NEW == 'true') || github.event.inputs.build == 'true' }}" >> $GITHUB_OUTPUT
|
||||
# build if the deployment already exist and there are changes in the files that we care about (automatic updates)
|
||||
echo "automatic_rebuild=${{ steps.check_deployment.outputs.NEW == 'false' && steps.filter.outputs.all_count > steps.filter.outputs.ignored_count }}" >> "$GITHUB_OUTPUT"
|
||||
echo "automatic_rebuild=${{ steps.check_deployment.outputs.NEW == 'false' && steps.filter.outputs.all_count > steps.filter.outputs.ignored_count }}" >> $GITHUB_OUTPUT
|
||||
|
||||
comment-pr:
|
||||
needs: get_info
|
||||
@@ -184,12 +174,12 @@ jobs:
|
||||
pull-requests: write # needed for commenting on PRs
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Find Comment
|
||||
uses: peter-evans/find-comment@b30e6a3c0ed37e7c023ccd3f1db5c6c0b0c23aad # v4.0.0
|
||||
uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.1.0
|
||||
id: fc
|
||||
with:
|
||||
issue-number: ${{ needs.get_info.outputs.PR_NUMBER }}
|
||||
@@ -199,7 +189,7 @@ jobs:
|
||||
|
||||
- name: Comment on PR
|
||||
id: comment_id
|
||||
uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5.0.0
|
||||
uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4.0.0
|
||||
with:
|
||||
comment-id: ${{ steps.fc.outputs.comment-id }}
|
||||
issue-number: ${{ needs.get_info.outputs.PR_NUMBER }}
|
||||
@@ -228,15 +218,14 @@ jobs:
|
||||
CODER_IMAGE_TAG: ${{ needs.get_info.outputs.CODER_IMAGE_TAG }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
@@ -248,7 +237,7 @@ jobs:
|
||||
uses: ./.github/actions/setup-sqlc
|
||||
|
||||
- name: GHCR Login
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
@@ -261,13 +250,12 @@ jobs:
|
||||
make gen/mark-fresh
|
||||
export DOCKER_IMAGE_NO_PREREQUISITES=true
|
||||
version="$(./scripts/version.sh)"
|
||||
CODER_IMAGE_BUILD_BASE_TAG="$(CODER_IMAGE_BASE=coder-base ./scripts/image_tag.sh --version "$version")"
|
||||
export CODER_IMAGE_BUILD_BASE_TAG
|
||||
export CODER_IMAGE_BUILD_BASE_TAG="$(CODER_IMAGE_BASE=coder-base ./scripts/image_tag.sh --version "$version")"
|
||||
make -j build/coder_linux_amd64
|
||||
./scripts/build_docker.sh \
|
||||
--arch amd64 \
|
||||
--target "${CODER_IMAGE_TAG}" \
|
||||
--version "$version" \
|
||||
--target ${{ env.CODER_IMAGE_TAG }} \
|
||||
--version $version \
|
||||
--push \
|
||||
build/coder_linux_amd64
|
||||
|
||||
@@ -288,7 +276,7 @@ jobs:
|
||||
PR_HOSTNAME: "pr${{ needs.get_info.outputs.PR_NUMBER }}.${{ secrets.PR_DEPLOYMENTS_DOMAIN }}"
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -305,13 +293,13 @@ jobs:
|
||||
set -euo pipefail
|
||||
foundTag=$(
|
||||
gh api /orgs/coder/packages/container/coder-preview/versions |
|
||||
jq -r --arg tag "pr${PR_NUMBER}" '.[] |
|
||||
jq -r --arg tag "pr${{ env.PR_NUMBER }}" '.[] |
|
||||
select(.metadata.container.tags == [$tag]) |
|
||||
.metadata.container.tags[0]'
|
||||
)
|
||||
if [ -z "$foundTag" ]; then
|
||||
echo "Image not found"
|
||||
echo "${CODER_IMAGE_TAG} not found in ghcr.io/coder/coder-preview"
|
||||
echo "${{ env.CODER_IMAGE_TAG }} not found in ghcr.io/coder/coder-preview"
|
||||
exit 1
|
||||
else
|
||||
echo "Image found"
|
||||
@@ -326,42 +314,40 @@ jobs:
|
||||
curl -X POST "https://api.cloudflare.com/client/v4/zones/${{ secrets.PR_DEPLOYMENTS_ZONE_ID }}/dns_records" \
|
||||
-H "Authorization: Bearer ${{ secrets.PR_DEPLOYMENTS_CLOUDFLARE_API_TOKEN }}" \
|
||||
-H "Content-Type:application/json" \
|
||||
--data '{"type":"CNAME","name":"*.'"${PR_HOSTNAME}"'","content":"'"${PR_HOSTNAME}"'","ttl":1,"proxied":false}'
|
||||
--data '{"type":"CNAME","name":"*.${{ env.PR_HOSTNAME }}","content":"${{ env.PR_HOSTNAME }}","ttl":1,"proxied":false}'
|
||||
|
||||
- name: Create PR namespace
|
||||
if: needs.get_info.outputs.NEW == 'true' || github.event.inputs.deploy == 'true'
|
||||
run: |
|
||||
set -euo pipefail
|
||||
# try to delete the namespace, but don't fail if it doesn't exist
|
||||
kubectl delete namespace "pr${PR_NUMBER}" || true
|
||||
kubectl create namespace "pr${PR_NUMBER}"
|
||||
kubectl delete namespace "pr${{ env.PR_NUMBER }}" || true
|
||||
kubectl create namespace "pr${{ env.PR_NUMBER }}"
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Check and Create Certificate
|
||||
if: needs.get_info.outputs.NEW == 'true' || github.event.inputs.deploy == 'true'
|
||||
run: |
|
||||
# Using kubectl to check if a Certificate resource already exists
|
||||
# we are doing this to avoid letsenrypt rate limits
|
||||
if ! kubectl get certificate "pr${PR_NUMBER}-tls" -n pr-deployment-certs > /dev/null 2>&1; then
|
||||
if ! kubectl get certificate pr${{ env.PR_NUMBER }}-tls -n pr-deployment-certs > /dev/null 2>&1; then
|
||||
echo "Certificate doesn't exist. Creating a new one."
|
||||
envsubst < ./.github/pr-deployments/certificate.yaml | kubectl apply -f -
|
||||
else
|
||||
echo "Certificate exists. Skipping certificate creation."
|
||||
fi
|
||||
echo "Copy certificate from pr-deployment-certs to pr${PR_NUMBER} namespace"
|
||||
until kubectl get secret "pr${PR_NUMBER}-tls" -n pr-deployment-certs &> /dev/null
|
||||
echo "Copy certificate from pr-deployment-certs to pr${{ env.PR_NUMBER }} namespace"
|
||||
until kubectl get secret pr${{ env.PR_NUMBER }}-tls -n pr-deployment-certs &> /dev/null
|
||||
do
|
||||
echo "Waiting for secret pr${PR_NUMBER}-tls to be created..."
|
||||
echo "Waiting for secret pr${{ env.PR_NUMBER }}-tls to be created..."
|
||||
sleep 5
|
||||
done
|
||||
(
|
||||
kubectl get secret "pr${PR_NUMBER}-tls" -n pr-deployment-certs -o json |
|
||||
kubectl get secret pr${{ env.PR_NUMBER }}-tls -n pr-deployment-certs -o json |
|
||||
jq 'del(.metadata.namespace,.metadata.creationTimestamp,.metadata.resourceVersion,.metadata.selfLink,.metadata.uid,.metadata.managedFields)' |
|
||||
kubectl -n "pr${PR_NUMBER}" apply -f -
|
||||
kubectl -n pr${{ env.PR_NUMBER }} apply -f -
|
||||
)
|
||||
|
||||
- name: Set up PostgreSQL database
|
||||
@@ -369,14 +355,13 @@ jobs:
|
||||
run: |
|
||||
helm repo add bitnami https://charts.bitnami.com/bitnami
|
||||
helm install coder-db bitnami/postgresql \
|
||||
--namespace "pr${PR_NUMBER}" \
|
||||
--set image.repository=bitnamilegacy/postgresql \
|
||||
--namespace pr${{ env.PR_NUMBER }} \
|
||||
--set auth.username=coder \
|
||||
--set auth.password=coder \
|
||||
--set auth.database=coder \
|
||||
--set persistence.size=10Gi
|
||||
kubectl create secret generic coder-db-url -n "pr${PR_NUMBER}" \
|
||||
--from-literal=url="postgres://coder:coder@coder-db-postgresql.pr${PR_NUMBER}.svc.cluster.local:5432/coder?sslmode=disable"
|
||||
kubectl create secret generic coder-db-url -n pr${{ env.PR_NUMBER }} \
|
||||
--from-literal=url="postgres://coder:coder@coder-db-postgresql.pr${{ env.PR_NUMBER }}.svc.cluster.local:5432/coder?sslmode=disable"
|
||||
|
||||
- name: Create a service account, role, and rolebinding for the PR namespace
|
||||
if: needs.get_info.outputs.NEW == 'true' || github.event.inputs.deploy == 'true'
|
||||
@@ -398,8 +383,8 @@ jobs:
|
||||
run: |
|
||||
set -euo pipefail
|
||||
helm dependency update --skip-refresh ./helm/coder
|
||||
helm upgrade --install "pr${PR_NUMBER}" ./helm/coder \
|
||||
--namespace "pr${PR_NUMBER}" \
|
||||
helm upgrade --install "pr${{ env.PR_NUMBER }}" ./helm/coder \
|
||||
--namespace "pr${{ env.PR_NUMBER }}" \
|
||||
--values ./pr-deploy-values.yaml \
|
||||
--force
|
||||
|
||||
@@ -408,8 +393,8 @@ jobs:
|
||||
run: |
|
||||
helm repo add coder-logstream-kube https://helm.coder.com/logstream-kube
|
||||
helm upgrade --install coder-logstream-kube coder-logstream-kube/coder-logstream-kube \
|
||||
--namespace "pr${PR_NUMBER}" \
|
||||
--set url="https://${PR_HOSTNAME}"
|
||||
--namespace "pr${{ env.PR_NUMBER }}" \
|
||||
--set url="https://${{ env.PR_HOSTNAME }}"
|
||||
|
||||
- name: Get Coder binary
|
||||
if: needs.get_info.outputs.NEW == 'true' || github.event.inputs.deploy == 'true'
|
||||
@@ -417,16 +402,16 @@ jobs:
|
||||
set -euo pipefail
|
||||
|
||||
DEST="${HOME}/coder"
|
||||
URL="https://${PR_HOSTNAME}/bin/coder-linux-amd64"
|
||||
URL="https://${{ env.PR_HOSTNAME }}/bin/coder-linux-amd64"
|
||||
|
||||
mkdir -p "$(dirname "$DEST")"
|
||||
mkdir -p "$(dirname ${DEST})"
|
||||
|
||||
COUNT=0
|
||||
until curl --output /dev/null --silent --head --fail "$URL"; do
|
||||
until $(curl --output /dev/null --silent --head --fail "$URL"); do
|
||||
printf '.'
|
||||
sleep 5
|
||||
COUNT=$((COUNT+1))
|
||||
if [ "$COUNT" -ge 60 ]; then
|
||||
if [ $COUNT -ge 60 ]; then
|
||||
echo "Timed out waiting for URL to be available"
|
||||
exit 1
|
||||
fi
|
||||
@@ -435,7 +420,7 @@ jobs:
|
||||
curl -fsSL "$URL" -o "${DEST}"
|
||||
chmod +x "${DEST}"
|
||||
"${DEST}" version
|
||||
sudo mv "${DEST}" /usr/local/bin/coder
|
||||
mv "${DEST}" /usr/local/bin/coder
|
||||
|
||||
- name: Create first user
|
||||
if: needs.get_info.outputs.NEW == 'true' || github.event.inputs.deploy == 'true'
|
||||
@@ -450,24 +435,24 @@ jobs:
|
||||
|
||||
# add mask so that the password is not printed to the logs
|
||||
echo "::add-mask::$password"
|
||||
echo "password=$password" >> "$GITHUB_OUTPUT"
|
||||
echo "password=$password" >> $GITHUB_OUTPUT
|
||||
|
||||
coder login \
|
||||
--first-user-username "pr${PR_NUMBER}-admin" \
|
||||
--first-user-email "pr${PR_NUMBER}@coder.com" \
|
||||
--first-user-password "$password" \
|
||||
--first-user-username pr${{ env.PR_NUMBER }}-admin \
|
||||
--first-user-email pr${{ env.PR_NUMBER }}@coder.com \
|
||||
--first-user-password $password \
|
||||
--first-user-trial=false \
|
||||
--use-token-as-session \
|
||||
"https://${PR_HOSTNAME}"
|
||||
https://${{ env.PR_HOSTNAME }}
|
||||
|
||||
# Create a user for the github.actor
|
||||
# TODO: update once https://github.com/coder/coder/issues/15466 is resolved
|
||||
# coder users create \
|
||||
# --username ${GITHUB_ACTOR} \
|
||||
# --username ${{ github.actor }} \
|
||||
# --login-type github
|
||||
|
||||
# promote the user to admin role
|
||||
# coder org members edit-role ${GITHUB_ACTOR} organization-admin
|
||||
# coder org members edit-role ${{ github.actor }} organization-admin
|
||||
# TODO: update once https://github.com/coder/internal/issues/207 is resolved
|
||||
|
||||
- name: Send Slack notification
|
||||
@@ -476,22 +461,20 @@ jobs:
|
||||
curl -s -o /dev/null -X POST -H 'Content-type: application/json' \
|
||||
-d \
|
||||
'{
|
||||
"pr_number": "'"${PR_NUMBER}"'",
|
||||
"pr_url": "'"${PR_URL}"'",
|
||||
"pr_title": "'"${PR_TITLE}"'",
|
||||
"pr_access_url": "'"https://${PR_HOSTNAME}"'",
|
||||
"pr_username": "'"pr${PR_NUMBER}-admin"'",
|
||||
"pr_email": "'"pr${PR_NUMBER}@coder.com"'",
|
||||
"pr_password": "'"${PASSWORD}"'",
|
||||
"pr_actor": "'"${GITHUB_ACTOR}"'"
|
||||
"pr_number": "'"${{ env.PR_NUMBER }}"'",
|
||||
"pr_url": "'"${{ env.PR_URL }}"'",
|
||||
"pr_title": "'"${{ env.PR_TITLE }}"'",
|
||||
"pr_access_url": "'"https://${{ env.PR_HOSTNAME }}"'",
|
||||
"pr_username": "'"pr${{ env.PR_NUMBER }}-admin"'",
|
||||
"pr_email": "'"pr${{ env.PR_NUMBER }}@coder.com"'",
|
||||
"pr_password": "'"${{ steps.setup_deployment.outputs.password }}"'",
|
||||
"pr_actor": "'"${{ github.actor }}"'"
|
||||
}' \
|
||||
${{ secrets.PR_DEPLOYMENTS_SLACK_WEBHOOK }}
|
||||
echo "Slack notification sent"
|
||||
env:
|
||||
PASSWORD: ${{ steps.setup_deployment.outputs.password }}
|
||||
|
||||
- name: Find Comment
|
||||
uses: peter-evans/find-comment@b30e6a3c0ed37e7c023ccd3f1db5c6c0b0c23aad # v4.0.0
|
||||
uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.1.0
|
||||
id: fc
|
||||
with:
|
||||
issue-number: ${{ env.PR_NUMBER }}
|
||||
@@ -500,7 +483,7 @@ jobs:
|
||||
direction: last
|
||||
|
||||
- name: Comment on PR
|
||||
uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5.0.0
|
||||
uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4.0.0
|
||||
env:
|
||||
STATUS: ${{ needs.get_info.outputs.NEW == 'true' && 'Created' || 'Updated' }}
|
||||
with:
|
||||
@@ -521,7 +504,7 @@ jobs:
|
||||
run: |
|
||||
set -euo pipefail
|
||||
cd .github/pr-deployments/template
|
||||
coder templates push -y --variable "namespace=pr${PR_NUMBER}" kubernetes
|
||||
coder templates push -y --variable namespace=pr${{ env.PR_NUMBER }} kubernetes
|
||||
|
||||
# Create workspace
|
||||
coder create --template="kubernetes" kube --parameter cpu=2 --parameter memory=4 --parameter home_disk_size=2 -y
|
||||
|
||||
@@ -14,7 +14,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
+121
-138
@@ -32,43 +32,15 @@ env:
|
||||
CODER_RELEASE_NOTES: ${{ inputs.release_notes }}
|
||||
|
||||
jobs:
|
||||
# Only allow maintainers/admins to release.
|
||||
check-perms:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Allow only maintainers/admins
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
script: |
|
||||
const {data} = await github.rest.repos.getCollaboratorPermissionLevel({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
username: context.actor
|
||||
});
|
||||
const role = data.role_name || data.user?.role_name || data.permission;
|
||||
const perms = data.user?.permissions || {};
|
||||
core.info(`Actor ${context.actor} permission=${data.permission}, role_name=${role}`);
|
||||
|
||||
const allowed =
|
||||
role === 'admin' ||
|
||||
role === 'maintain' ||
|
||||
perms.admin === true ||
|
||||
perms.maintain === true;
|
||||
|
||||
if (!allowed) core.setFailed('Denied: requires maintain or admin');
|
||||
|
||||
# build-dylib is a separate job to build the dylib on macOS.
|
||||
build-dylib:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-macos-latest' || 'macos-latest' }}
|
||||
needs: check-perms
|
||||
steps:
|
||||
# Harden Runner doesn't work on macOS.
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
# If the event that triggered the build was an annotated tag (which our
|
||||
# tags are supposed to be), actions/checkout has a bug where the tag in
|
||||
@@ -81,16 +53,14 @@ jobs:
|
||||
- name: Setup build tools
|
||||
run: |
|
||||
brew install bash gnu-getopt make
|
||||
{
|
||||
echo "$(brew --prefix bash)/bin"
|
||||
echo "$(brew --prefix gnu-getopt)/bin"
|
||||
echo "$(brew --prefix make)/libexec/gnubin"
|
||||
} >> "$GITHUB_PATH"
|
||||
echo "$(brew --prefix bash)/bin" >> $GITHUB_PATH
|
||||
echo "$(brew --prefix gnu-getopt)/bin" >> $GITHUB_PATH
|
||||
echo "$(brew --prefix make)/libexec/gnubin" >> $GITHUB_PATH
|
||||
|
||||
- name: Switch XCode Version
|
||||
uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0
|
||||
with:
|
||||
xcode-version: "16.1.0"
|
||||
xcode-version: "16.0.0"
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
@@ -131,7 +101,7 @@ jobs:
|
||||
AC_CERTIFICATE_PASSWORD_FILE: /tmp/apple_cert_password.txt
|
||||
|
||||
- name: Upload build artifacts
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: dylibs
|
||||
path: |
|
||||
@@ -144,7 +114,7 @@ jobs:
|
||||
|
||||
release:
|
||||
name: Build and publish
|
||||
needs: [build-dylib, check-perms]
|
||||
needs: build-dylib
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
permissions:
|
||||
# Required to publish a release
|
||||
@@ -164,15 +134,14 @@ jobs:
|
||||
version: ${{ steps.version.outputs.version }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
# If the event that triggered the build was an annotated tag (which our
|
||||
# tags are supposed to be), actions/checkout has a bug where the tag in
|
||||
@@ -187,9 +156,9 @@ jobs:
|
||||
run: |
|
||||
set -euo pipefail
|
||||
version="$(./scripts/version.sh)"
|
||||
echo "version=$version" >> "$GITHUB_OUTPUT"
|
||||
echo "version=$version" >> $GITHUB_OUTPUT
|
||||
# Speed up future version.sh calls.
|
||||
echo "CODER_FORCE_VERSION=$version" >> "$GITHUB_ENV"
|
||||
echo "CODER_FORCE_VERSION=$version" >> $GITHUB_ENV
|
||||
echo "$version"
|
||||
|
||||
# Verify that all expectations for a release are met.
|
||||
@@ -231,7 +200,7 @@ jobs:
|
||||
|
||||
release_notes_file="$(mktemp -t release_notes.XXXXXX)"
|
||||
echo "$CODER_RELEASE_NOTES" > "$release_notes_file"
|
||||
echo CODER_RELEASE_NOTES_FILE="$release_notes_file" >> "$GITHUB_ENV"
|
||||
echo CODER_RELEASE_NOTES_FILE="$release_notes_file" >> $GITHUB_ENV
|
||||
|
||||
- name: Show release notes
|
||||
run: |
|
||||
@@ -239,7 +208,7 @@ jobs:
|
||||
cat "$CODER_RELEASE_NOTES_FILE"
|
||||
|
||||
- name: Docker Login
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
@@ -253,7 +222,7 @@ jobs:
|
||||
|
||||
# Necessary for signing Windows binaries.
|
||||
- name: Setup Java
|
||||
uses: actions/setup-java@dded0888837ed1f317902acf8a20df0ad188d165 # v5.0.0
|
||||
uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4.7.1
|
||||
with:
|
||||
distribution: "zulu"
|
||||
java-version: "11.0"
|
||||
@@ -317,17 +286,17 @@ jobs:
|
||||
# Setup GCloud for signing Windows binaries.
|
||||
- name: Authenticate to Google Cloud
|
||||
id: gcloud_auth
|
||||
uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 # v3.0.0
|
||||
uses: google-github-actions/auth@ba79af03959ebeac9769e648f473a284504d9193 # v2.1.10
|
||||
with:
|
||||
workload_identity_provider: ${{ vars.GCP_CODE_SIGNING_WORKLOAD_ID_PROVIDER }}
|
||||
service_account: ${{ vars.GCP_CODE_SIGNING_SERVICE_ACCOUNT }}
|
||||
workload_identity_provider: ${{ secrets.GCP_CODE_SIGNING_WORKLOAD_ID_PROVIDER }}
|
||||
service_account: ${{ secrets.GCP_CODE_SIGNING_SERVICE_ACCOUNT }}
|
||||
token_format: "access_token"
|
||||
|
||||
- name: Setup GCloud SDK
|
||||
uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # v3.0.1
|
||||
uses: google-github-actions/setup-gcloud@77e7a554d41e2ee56fc945c52dfd3f33d12def9a # v2.1.4
|
||||
|
||||
- name: Download dylibs
|
||||
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
name: dylibs
|
||||
path: ./build
|
||||
@@ -354,8 +323,6 @@ jobs:
|
||||
env:
|
||||
CODER_SIGN_WINDOWS: "1"
|
||||
CODER_SIGN_DARWIN: "1"
|
||||
CODER_SIGN_GPG: "1"
|
||||
CODER_GPG_RELEASE_KEY_BASE64: ${{ secrets.GPG_RELEASE_KEY_BASE64 }}
|
||||
CODER_WINDOWS_RESOURCES: "1"
|
||||
AC_CERTIFICATE_FILE: /tmp/apple_cert.p12
|
||||
AC_CERTIFICATE_PASSWORD_FILE: /tmp/apple_cert_password.txt
|
||||
@@ -381,9 +348,9 @@ jobs:
|
||||
set -euo pipefail
|
||||
if [[ "${CODER_RELEASE:-}" != *t* ]] || [[ "${CODER_DRY_RUN:-}" == *t* ]]; then
|
||||
# Empty value means use the default and avoid building a fresh one.
|
||||
echo "tag=" >> "$GITHUB_OUTPUT"
|
||||
echo "tag=" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "tag=$(CODER_IMAGE_BASE=ghcr.io/coder/coder-base ./scripts/image_tag.sh)" >> "$GITHUB_OUTPUT"
|
||||
echo "tag=$(CODER_IMAGE_BASE=ghcr.io/coder/coder-base ./scripts/image_tag.sh)" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Create empty base-build-context directory
|
||||
@@ -397,7 +364,7 @@ jobs:
|
||||
# This uses OIDC authentication, so no auth variables are required.
|
||||
- name: Build base Docker image via depot.dev
|
||||
if: steps.image-base-tag.outputs.tag != ''
|
||||
uses: depot/build-push-action@9785b135c3c76c33db102e45be96a25ab55cd507 # v1.16.2
|
||||
uses: depot/build-push-action@636daae76684e38c301daa0c5eca1c095b24e780 # v1.14.0
|
||||
with:
|
||||
project: wl5hnrrkns
|
||||
context: base-build-context
|
||||
@@ -418,7 +385,7 @@ jobs:
|
||||
# available immediately
|
||||
for i in {1..10}; do
|
||||
rc=0
|
||||
raw_manifests=$(docker buildx imagetools inspect --raw "${IMAGE_TAG}") || rc=$?
|
||||
raw_manifests=$(docker buildx imagetools inspect --raw "${{ steps.image-base-tag.outputs.tag }}") || rc=$?
|
||||
if [[ "$rc" -eq 0 ]]; then
|
||||
break
|
||||
fi
|
||||
@@ -440,8 +407,6 @@ jobs:
|
||||
echo "$manifests" | grep -q linux/amd64
|
||||
echo "$manifests" | grep -q linux/arm64
|
||||
echo "$manifests" | grep -q linux/arm/v7
|
||||
env:
|
||||
IMAGE_TAG: ${{ steps.image-base-tag.outputs.tag }}
|
||||
|
||||
# GitHub attestation provides SLSA provenance for Docker images, establishing a verifiable
|
||||
# record that these images were built in GitHub Actions with specific inputs and environment.
|
||||
@@ -454,7 +419,7 @@ jobs:
|
||||
id: attest_base
|
||||
if: ${{ !inputs.dry_run && steps.image-base-tag.outputs.tag != '' }}
|
||||
continue-on-error: true
|
||||
uses: actions/attest@daf44fb950173508f38bd2406030372c1d1162b1 # v3.0.0
|
||||
uses: actions/attest@afd638254319277bb3d7f0a234478733e2e46a73 # v2.3.0
|
||||
with:
|
||||
subject-name: ${{ steps.image-base-tag.outputs.tag }}
|
||||
predicate-type: "https://slsa.dev/provenance/v1"
|
||||
@@ -509,7 +474,7 @@ jobs:
|
||||
|
||||
# Save multiarch image tag for attestation
|
||||
multiarch_image="$(./scripts/image_tag.sh)"
|
||||
echo "multiarch_image=${multiarch_image}" >> "$GITHUB_OUTPUT"
|
||||
echo "multiarch_image=${multiarch_image}" >> $GITHUB_OUTPUT
|
||||
|
||||
# For debugging, print all docker image tags
|
||||
docker images
|
||||
@@ -517,15 +482,16 @@ jobs:
|
||||
# if the current version is equal to the highest (according to semver)
|
||||
# version in the repo, also create a multi-arch image as ":latest" and
|
||||
# push it
|
||||
created_latest_tag=false
|
||||
if [[ "$(git tag | grep '^v' | grep -vE '(rc|dev|-|\+|\/)' | sort -r --version-sort | head -n1)" == "v$(./scripts/version.sh)" ]]; then
|
||||
# shellcheck disable=SC2046
|
||||
./scripts/build_docker_multiarch.sh \
|
||||
--push \
|
||||
--target "$(./scripts/image_tag.sh --version latest)" \
|
||||
$(cat build/coder_"$version"_linux_{amd64,arm64,armv7}.tag)
|
||||
echo "created_latest_tag=true" >> "$GITHUB_OUTPUT"
|
||||
created_latest_tag=true
|
||||
echo "created_latest_tag=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "created_latest_tag=false" >> "$GITHUB_OUTPUT"
|
||||
echo "created_latest_tag=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
env:
|
||||
CODER_BASE_IMAGE_TAG: ${{ steps.image-base-tag.outputs.tag }}
|
||||
@@ -533,27 +499,24 @@ jobs:
|
||||
- name: SBOM Generation and Attestation
|
||||
if: ${{ !inputs.dry_run }}
|
||||
env:
|
||||
COSIGN_EXPERIMENTAL: '1'
|
||||
MULTIARCH_IMAGE: ${{ steps.build_docker.outputs.multiarch_image }}
|
||||
VERSION: ${{ steps.version.outputs.version }}
|
||||
CREATED_LATEST_TAG: ${{ steps.build_docker.outputs.created_latest_tag }}
|
||||
COSIGN_EXPERIMENTAL: "1"
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
|
||||
# Generate SBOM for multi-arch image with version in filename
|
||||
echo "Generating SBOM for multi-arch image: ${MULTIARCH_IMAGE}"
|
||||
syft "${MULTIARCH_IMAGE}" -o spdx-json > "coder_${VERSION}_sbom.spdx.json"
|
||||
echo "Generating SBOM for multi-arch image: ${{ steps.build_docker.outputs.multiarch_image }}"
|
||||
syft "${{ steps.build_docker.outputs.multiarch_image }}" -o spdx-json > coder_${{ steps.version.outputs.version }}_sbom.spdx.json
|
||||
|
||||
# Attest SBOM to multi-arch image
|
||||
echo "Attesting SBOM to multi-arch image: ${MULTIARCH_IMAGE}"
|
||||
cosign clean --force=true "${MULTIARCH_IMAGE}"
|
||||
echo "Attesting SBOM to multi-arch image: ${{ steps.build_docker.outputs.multiarch_image }}"
|
||||
cosign clean --force=true "${{ steps.build_docker.outputs.multiarch_image }}"
|
||||
cosign attest --type spdxjson \
|
||||
--predicate "coder_${VERSION}_sbom.spdx.json" \
|
||||
--predicate coder_${{ steps.version.outputs.version }}_sbom.spdx.json \
|
||||
--yes \
|
||||
"${MULTIARCH_IMAGE}"
|
||||
"${{ steps.build_docker.outputs.multiarch_image }}"
|
||||
|
||||
# If latest tag was created, also attest it
|
||||
if [[ "${CREATED_LATEST_TAG}" == "true" ]]; then
|
||||
if [[ "${{ steps.build_docker.outputs.created_latest_tag }}" == "true" ]]; then
|
||||
latest_tag="$(./scripts/image_tag.sh --version latest)"
|
||||
echo "Generating SBOM for latest image: ${latest_tag}"
|
||||
syft "${latest_tag}" -o spdx-json > coder_latest_sbom.spdx.json
|
||||
@@ -570,7 +533,7 @@ jobs:
|
||||
id: attest_main
|
||||
if: ${{ !inputs.dry_run }}
|
||||
continue-on-error: true
|
||||
uses: actions/attest@daf44fb950173508f38bd2406030372c1d1162b1 # v3.0.0
|
||||
uses: actions/attest@afd638254319277bb3d7f0a234478733e2e46a73 # v2.3.0
|
||||
with:
|
||||
subject-name: ${{ steps.build_docker.outputs.multiarch_image }}
|
||||
predicate-type: "https://slsa.dev/provenance/v1"
|
||||
@@ -607,14 +570,14 @@ jobs:
|
||||
- name: Get latest tag name
|
||||
id: latest_tag
|
||||
if: ${{ !inputs.dry_run && steps.build_docker.outputs.created_latest_tag == 'true' }}
|
||||
run: echo "tag=$(./scripts/image_tag.sh --version latest)" >> "$GITHUB_OUTPUT"
|
||||
run: echo "tag=$(./scripts/image_tag.sh --version latest)" >> $GITHUB_OUTPUT
|
||||
|
||||
# If this is the highest version according to semver, also attest the "latest" tag
|
||||
- name: GitHub Attestation for "latest" Docker image
|
||||
id: attest_latest
|
||||
if: ${{ !inputs.dry_run && steps.build_docker.outputs.created_latest_tag == 'true' }}
|
||||
continue-on-error: true
|
||||
uses: actions/attest@daf44fb950173508f38bd2406030372c1d1162b1 # v3.0.0
|
||||
uses: actions/attest@afd638254319277bb3d7f0a234478733e2e46a73 # v2.3.0
|
||||
with:
|
||||
subject-name: ${{ steps.latest_tag.outputs.tag }}
|
||||
predicate-type: "https://slsa.dev/provenance/v1"
|
||||
@@ -650,7 +613,7 @@ jobs:
|
||||
# Report attestation failures but don't fail the workflow
|
||||
- name: Check attestation status
|
||||
if: ${{ !inputs.dry_run }}
|
||||
run: | # zizmor: ignore[template-injection] We're just reading steps.attest_x.outcome here, no risk of injection
|
||||
run: |
|
||||
if [[ "${{ steps.attest_base.outcome }}" == "failure" && "${{ steps.attest_base.conclusion }}" != "skipped" ]]; then
|
||||
echo "::warning::GitHub attestation for base image failed"
|
||||
fi
|
||||
@@ -669,30 +632,6 @@ jobs:
|
||||
- name: ls build
|
||||
run: ls -lh build
|
||||
|
||||
- name: Publish Coder CLI binaries and detached signatures to GCS
|
||||
if: ${{ !inputs.dry_run }}
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
|
||||
version="$(./scripts/version.sh)"
|
||||
|
||||
# Source array of slim binaries
|
||||
declare -A binaries
|
||||
binaries["coder-darwin-amd64"]="coder-slim_${version}_darwin_amd64"
|
||||
binaries["coder-darwin-arm64"]="coder-slim_${version}_darwin_arm64"
|
||||
binaries["coder-linux-amd64"]="coder-slim_${version}_linux_amd64"
|
||||
binaries["coder-linux-arm64"]="coder-slim_${version}_linux_arm64"
|
||||
binaries["coder-linux-armv7"]="coder-slim_${version}_linux_armv7"
|
||||
binaries["coder-windows-amd64.exe"]="coder-slim_${version}_windows_amd64.exe"
|
||||
binaries["coder-windows-arm64.exe"]="coder-slim_${version}_windows_arm64.exe"
|
||||
|
||||
for cli_name in "${!binaries[@]}"; do
|
||||
slim_binary="${binaries[$cli_name]}"
|
||||
detached_signature="${slim_binary}.asc"
|
||||
gcloud storage cp "./build/${slim_binary}" "gs://releases.coder.com/coder-cli/${version}/${cli_name}"
|
||||
gcloud storage cp "./build/${detached_signature}" "gs://releases.coder.com/coder-cli/${version}/${cli_name}.asc"
|
||||
done
|
||||
|
||||
- name: Publish release
|
||||
run: |
|
||||
set -euo pipefail
|
||||
@@ -715,11 +654,11 @@ jobs:
|
||||
./build/*.apk
|
||||
./build/*.deb
|
||||
./build/*.rpm
|
||||
"./coder_${VERSION}_sbom.spdx.json"
|
||||
./coder_${{ steps.version.outputs.version }}_sbom.spdx.json
|
||||
)
|
||||
|
||||
# Only include the latest SBOM file if it was created
|
||||
if [[ "${CREATED_LATEST_TAG}" == "true" ]]; then
|
||||
if [[ "${{ steps.build_docker.outputs.created_latest_tag }}" == "true" ]]; then
|
||||
files+=(./coder_latest_sbom.spdx.json)
|
||||
fi
|
||||
|
||||
@@ -730,17 +669,15 @@ jobs:
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
CODER_GPG_RELEASE_KEY_BASE64: ${{ secrets.GPG_RELEASE_KEY_BASE64 }}
|
||||
VERSION: ${{ steps.version.outputs.version }}
|
||||
CREATED_LATEST_TAG: ${{ steps.build_docker.outputs.created_latest_tag }}
|
||||
|
||||
- name: Authenticate to Google Cloud
|
||||
uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 # v3.0.0
|
||||
uses: google-github-actions/auth@ba79af03959ebeac9769e648f473a284504d9193 # v2.1.10
|
||||
with:
|
||||
workload_identity_provider: ${{ vars.GCP_WORKLOAD_ID_PROVIDER }}
|
||||
service_account: ${{ vars.GCP_SERVICE_ACCOUNT }}
|
||||
workload_identity_provider: ${{ secrets.GCP_WORKLOAD_ID_PROVIDER }}
|
||||
service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
|
||||
|
||||
- name: Setup GCloud SDK
|
||||
uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # 3.0.1
|
||||
uses: google-github-actions/setup-gcloud@77e7a554d41e2ee56fc945c52dfd3f33d12def9a # 2.1.4
|
||||
|
||||
- name: Publish Helm Chart
|
||||
if: ${{ !inputs.dry_run }}
|
||||
@@ -752,16 +689,14 @@ jobs:
|
||||
cp "build/provisioner_helm_${version}.tgz" build/helm
|
||||
gsutil cp gs://helm.coder.com/v2/index.yaml build/helm/index.yaml
|
||||
helm repo index build/helm --url https://helm.coder.com/v2 --merge build/helm/index.yaml
|
||||
gsutil -h "Cache-Control:no-cache,max-age=0" cp "build/helm/coder_helm_${version}.tgz" gs://helm.coder.com/v2
|
||||
gsutil -h "Cache-Control:no-cache,max-age=0" cp "build/helm/provisioner_helm_${version}.tgz" gs://helm.coder.com/v2
|
||||
gsutil -h "Cache-Control:no-cache,max-age=0" cp "build/helm/index.yaml" gs://helm.coder.com/v2
|
||||
gsutil -h "Cache-Control:no-cache,max-age=0" cp "helm/artifacthub-repo.yml" gs://helm.coder.com/v2
|
||||
helm push "build/coder_helm_${version}.tgz" oci://ghcr.io/coder/chart
|
||||
helm push "build/provisioner_helm_${version}.tgz" oci://ghcr.io/coder/chart
|
||||
gsutil -h "Cache-Control:no-cache,max-age=0" cp build/helm/coder_helm_${version}.tgz gs://helm.coder.com/v2
|
||||
gsutil -h "Cache-Control:no-cache,max-age=0" cp build/helm/provisioner_helm_${version}.tgz gs://helm.coder.com/v2
|
||||
gsutil -h "Cache-Control:no-cache,max-age=0" cp build/helm/index.yaml gs://helm.coder.com/v2
|
||||
gsutil -h "Cache-Control:no-cache,max-age=0" cp helm/artifacthub-repo.yml gs://helm.coder.com/v2
|
||||
|
||||
- name: Upload artifacts to actions (if dry-run)
|
||||
if: ${{ inputs.dry_run }}
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: release-artifacts
|
||||
path: |
|
||||
@@ -777,7 +712,7 @@ jobs:
|
||||
|
||||
- name: Upload latest sbom artifact to actions (if dry-run)
|
||||
if: inputs.dry_run && steps.build_docker.outputs.created_latest_tag == 'true'
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: latest-sbom-artifact
|
||||
path: ./coder_latest_sbom.spdx.json
|
||||
@@ -785,7 +720,7 @@ jobs:
|
||||
|
||||
- name: Send repository-dispatch event
|
||||
if: ${{ !inputs.dry_run }}
|
||||
uses: peter-evans/repository-dispatch@28959ce8df70de7be546dd1250a005dd32156697 # v4.0.1
|
||||
uses: peter-evans/repository-dispatch@ff45666b9427631e3450c54a1bcbee4d9ff4d7c0 # v3.0.0
|
||||
with:
|
||||
token: ${{ secrets.CDRCI_GITHUB_TOKEN }}
|
||||
repository: coder/packages
|
||||
@@ -802,18 +737,18 @@ jobs:
|
||||
# TODO: skip this if it's not a new release (i.e. a backport). This is
|
||||
# fine right now because it just makes a PR that we can close.
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Update homebrew
|
||||
env:
|
||||
# Variables used by the `gh` command
|
||||
GH_REPO: coder/homebrew-coder
|
||||
GH_TOKEN: ${{ secrets.CDRCI_GITHUB_TOKEN }}
|
||||
VERSION: ${{ needs.release.outputs.version }}
|
||||
run: |
|
||||
# Keep version number around for reference, removing any potential leading v
|
||||
coder_version="$(echo "${VERSION}" | tr -d v)"
|
||||
coder_version="$(echo "${{ needs.release.outputs.version }}" | tr -d v)"
|
||||
|
||||
set -euxo pipefail
|
||||
|
||||
@@ -832,9 +767,9 @@ jobs:
|
||||
wget "$checksums_url" -O checksums.txt
|
||||
|
||||
# Get the SHAs
|
||||
darwin_arm_sha="$(grep "darwin_arm64.zip" checksums.txt | awk '{ print $1 }')"
|
||||
darwin_intel_sha="$(grep "darwin_amd64.zip" checksums.txt | awk '{ print $1 }')"
|
||||
linux_sha="$(grep "linux_amd64.tar.gz" checksums.txt | awk '{ print $1 }')"
|
||||
darwin_arm_sha="$(cat checksums.txt | grep "darwin_arm64.zip" | awk '{ print $1 }')"
|
||||
darwin_intel_sha="$(cat checksums.txt | grep "darwin_amd64.zip" | awk '{ print $1 }')"
|
||||
linux_sha="$(cat checksums.txt | grep "linux_amd64.tar.gz" | awk '{ print $1 }')"
|
||||
|
||||
echo "macOS arm64: $darwin_arm_sha"
|
||||
echo "macOS amd64: $darwin_intel_sha"
|
||||
@@ -847,7 +782,7 @@ jobs:
|
||||
|
||||
# Check if a PR already exists.
|
||||
pr_count="$(gh pr list --search "head:$brew_branch" --json id,closed | jq -r ".[] | select(.closed == false) | .id" | wc -l)"
|
||||
if [ "$pr_count" -gt 0 ]; then
|
||||
if [[ "$pr_count" > 0 ]]; then
|
||||
echo "Bailing out as PR already exists" 2>&1
|
||||
exit 0
|
||||
fi
|
||||
@@ -866,8 +801,8 @@ jobs:
|
||||
-B master -H "$brew_branch" \
|
||||
-t "coder $coder_version" \
|
||||
-b "" \
|
||||
-r "${GITHUB_ACTOR}" \
|
||||
-a "${GITHUB_ACTOR}" \
|
||||
-r "${{ github.actor }}" \
|
||||
-a "${{ github.actor }}" \
|
||||
-b "This automatic PR was triggered by the release of Coder v$coder_version"
|
||||
|
||||
publish-winget:
|
||||
@@ -878,7 +813,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
@@ -888,10 +823,9 @@ jobs:
|
||||
GH_TOKEN: ${{ secrets.CDRCI_GITHUB_TOKEN }}
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
# If the event that triggered the build was an annotated tag (which our
|
||||
# tags are supposed to be), actions/checkout has a bug where the tag in
|
||||
@@ -910,7 +844,7 @@ jobs:
|
||||
# The package version is the same as the tag minus the leading "v".
|
||||
# The version in this output already has the leading "v" removed but
|
||||
# we do it again to be safe.
|
||||
$version = $env:VERSION.Trim('v')
|
||||
$version = "${{ needs.release.outputs.version }}".Trim('v')
|
||||
|
||||
$release_assets = gh release view --repo coder/coder "v${version}" --json assets | `
|
||||
ConvertFrom-Json
|
||||
@@ -942,14 +876,13 @@ jobs:
|
||||
# For wingetcreate. We need a real token since we're pushing a commit
|
||||
# to GitHub and then making a PR in a different repo.
|
||||
WINGET_GH_TOKEN: ${{ secrets.CDRCI_GITHUB_TOKEN }}
|
||||
VERSION: ${{ needs.release.outputs.version }}
|
||||
|
||||
- name: Comment on PR
|
||||
run: |
|
||||
# wait 30 seconds
|
||||
Start-Sleep -Seconds 30.0
|
||||
# Find the PR that wingetcreate just made.
|
||||
$version = $env:VERSION.Trim('v')
|
||||
$version = "${{ needs.release.outputs.version }}".Trim('v')
|
||||
$pr_list = gh pr list --repo microsoft/winget-pkgs --search "author:cdrci Coder.Coder version ${version}" --limit 1 --json number | `
|
||||
ConvertFrom-Json
|
||||
$pr_number = $pr_list[0].number
|
||||
@@ -960,7 +893,6 @@ jobs:
|
||||
# For gh CLI. We need a real token since we're commenting on a PR in a
|
||||
# different repo.
|
||||
GH_TOKEN: ${{ secrets.CDRCI_GITHUB_TOKEN }}
|
||||
VERSION: ${{ needs.release.outputs.version }}
|
||||
|
||||
# publish-sqlc pushes the latest schema to sqlc cloud.
|
||||
# At present these pushes cannot be tagged, so the last push is always the latest.
|
||||
@@ -971,15 +903,14 @@ jobs:
|
||||
if: ${{ !inputs.dry_run }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
persist-credentials: false
|
||||
|
||||
# We need golang to run the migration main.go
|
||||
- name: Setup Go
|
||||
@@ -993,3 +924,55 @@ jobs:
|
||||
continue-on-error: true
|
||||
run: |
|
||||
make sqlc-push
|
||||
|
||||
update-calendar:
|
||||
name: "Update release calendar in docs"
|
||||
runs-on: "ubuntu-latest"
|
||||
needs: [release, publish-homebrew, publish-winget, publish-sqlc]
|
||||
if: ${{ !inputs.dry_run }}
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
fetch-depth: 0 # Needed to get all tags for version calculation
|
||||
|
||||
- name: Set up Git
|
||||
run: |
|
||||
git config user.name "Coder CI"
|
||||
git config user.email "cdrci@coder.com"
|
||||
|
||||
- name: Run update script
|
||||
run: |
|
||||
./scripts/update-release-calendar.sh
|
||||
make fmt/markdown
|
||||
|
||||
- name: Check for changes
|
||||
id: check_changes
|
||||
run: |
|
||||
if git diff --quiet docs/install/releases/index.md; then
|
||||
echo "No changes detected in release calendar."
|
||||
echo "changes=false" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "Changes detected in release calendar."
|
||||
echo "changes=true" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Create Pull Request
|
||||
if: steps.check_changes.outputs.changes == 'true'
|
||||
uses: peter-evans/create-pull-request@ff45666b9427631e3450c54a1bcbee4d9ff4d7c0 # v3.0.0
|
||||
with:
|
||||
commit-message: "docs: update release calendar"
|
||||
title: "docs: update release calendar"
|
||||
body: |
|
||||
This PR automatically updates the release calendar in the docs.
|
||||
branch: bot/update-release-calendar
|
||||
delete-branch: true
|
||||
labels: docs
|
||||
|
||||
@@ -20,17 +20,17 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: "Checkout code"
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: "Run analysis"
|
||||
uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3
|
||||
uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1
|
||||
with:
|
||||
results_file: results.sarif
|
||||
results_format: sarif
|
||||
@@ -39,7 +39,7 @@ jobs:
|
||||
|
||||
# Upload the results as artifacts.
|
||||
- name: "Upload artifact"
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: SARIF file
|
||||
path: results.sarif
|
||||
@@ -47,6 +47,6 @@ jobs:
|
||||
|
||||
# Upload the results to GitHub's code scanning dashboard.
|
||||
- name: "Upload to code-scanning"
|
||||
uses: github/codeql-action/upload-sarif@014f16e7ab1402f30e7c3329d33797e7948572db # v3.29.5
|
||||
uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
|
||||
with:
|
||||
sarif_file: results.sarif
|
||||
|
||||
@@ -27,20 +27,18 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@014f16e7ab1402f30e7c3329d33797e7948572db # v3.29.5
|
||||
uses: github/codeql-action/init@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
|
||||
with:
|
||||
languages: go, javascript
|
||||
|
||||
@@ -50,7 +48,7 @@ jobs:
|
||||
rm Makefile
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@014f16e7ab1402f30e7c3329d33797e7948572db # v3.29.5
|
||||
uses: github/codeql-action/analyze@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
|
||||
|
||||
- name: Send Slack notification on failure
|
||||
if: ${{ failure() }}
|
||||
@@ -69,15 +67,14 @@ jobs:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'depot-ubuntu-22.04-8' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
@@ -137,16 +134,15 @@ jobs:
|
||||
# This environment variables forces scripts/build_docker.sh to build
|
||||
# the base image tag locally instead of using the cached version from
|
||||
# the registry.
|
||||
CODER_IMAGE_BUILD_BASE_TAG="$(CODER_IMAGE_BASE=coder-base ./scripts/image_tag.sh --version "$version")"
|
||||
export CODER_IMAGE_BUILD_BASE_TAG
|
||||
export CODER_IMAGE_BUILD_BASE_TAG="$(CODER_IMAGE_BASE=coder-base ./scripts/image_tag.sh --version "$version")"
|
||||
|
||||
# We would like to use make -j here, but it doesn't work with the some recent additions
|
||||
# to our code generation.
|
||||
make "$image_job"
|
||||
echo "image=$(cat "$image_job")" >> "$GITHUB_OUTPUT"
|
||||
echo "image=$(cat "$image_job")" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Run Trivy vulnerability scanner
|
||||
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8
|
||||
uses: aquasecurity/trivy-action@6c175e9c4083a92bbca2f9724c8a5e33bc2d97a5
|
||||
with:
|
||||
image-ref: ${{ steps.build.outputs.image }}
|
||||
format: sarif
|
||||
@@ -154,13 +150,13 @@ jobs:
|
||||
severity: "CRITICAL,HIGH"
|
||||
|
||||
- name: Upload Trivy scan results to GitHub Security tab
|
||||
uses: github/codeql-action/upload-sarif@014f16e7ab1402f30e7c3329d33797e7948572db # v3.29.5
|
||||
uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
|
||||
with:
|
||||
sarif_file: trivy-results.sarif
|
||||
category: "Trivy"
|
||||
|
||||
- name: Upload Trivy scan results as an artifact
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: trivy
|
||||
path: trivy-results.sarif
|
||||
|
||||
@@ -18,12 +18,12 @@ jobs:
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: stale
|
||||
uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
|
||||
uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
|
||||
with:
|
||||
stale-issue-label: "stale"
|
||||
stale-pr-label: "stale"
|
||||
@@ -44,7 +44,7 @@ jobs:
|
||||
# Start with the oldest issues, always.
|
||||
ascending: true
|
||||
- name: "Close old issues labeled likely-no"
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
|
||||
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
script: |
|
||||
@@ -96,14 +96,12 @@ jobs:
|
||||
contents: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- name: Run delete-old-branches-action
|
||||
uses: beatlabs/delete-old-branches-action@4eeeb8740ff8b3cb310296ddd6b43c3387734588 # v0.0.11
|
||||
with:
|
||||
@@ -120,12 +118,12 @@ jobs:
|
||||
actions: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Delete PR Cleanup workflow runs
|
||||
uses: Mattraks/delete-workflow-runs@5bf9a1dac5c4d041c029f0a8370ddf0c5cb5aeb7 # v2.1.0
|
||||
uses: Mattraks/delete-workflow-runs@39f0bbed25d76b34de5594dceab824811479e5de # v2.0.6
|
||||
with:
|
||||
token: ${{ github.token }}
|
||||
repository: ${{ github.repository }}
|
||||
@@ -134,7 +132,7 @@ jobs:
|
||||
delete_workflow_pattern: pr-cleanup.yaml
|
||||
|
||||
- name: Delete PR Deploy workflow skipped runs
|
||||
uses: Mattraks/delete-workflow-runs@5bf9a1dac5c4d041c029f0a8370ddf0c5cb5aeb7 # v2.1.0
|
||||
uses: Mattraks/delete-workflow-runs@39f0bbed25d76b34de5594dceab824811479e5de # v2.0.6
|
||||
with:
|
||||
token: ${{ github.token }}
|
||||
repository: ${{ github.repository }}
|
||||
|
||||
@@ -19,7 +19,7 @@ jobs:
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- name: Start Coder workspace
|
||||
uses: coder/start-workspace-action@f97a681b4cc7985c9eef9963750c7cc6ebc93a19
|
||||
uses: coder/start-workspace-action@35a4608cefc7e8cc56573cae7c3b85304575cb72
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
github-username: >-
|
||||
|
||||
@@ -1,190 +0,0 @@
|
||||
name: AI Triage Automation
|
||||
|
||||
on:
|
||||
issues:
|
||||
types:
|
||||
- labeled
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
issue_url:
|
||||
description: "GitHub Issue URL to process"
|
||||
required: true
|
||||
type: string
|
||||
template_name:
|
||||
description: "Coder template to use for workspace"
|
||||
required: true
|
||||
default: "coder"
|
||||
type: string
|
||||
template_preset:
|
||||
description: "Template preset to use"
|
||||
required: false
|
||||
default: ""
|
||||
type: string
|
||||
prefix:
|
||||
description: "Prefix for workspace name"
|
||||
required: false
|
||||
default: "traiage"
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
traiage:
|
||||
name: Triage GitHub Issue with Claude Code
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event.label.name == 'traiage' || github.event_name == 'workflow_dispatch'
|
||||
timeout-minutes: 30
|
||||
env:
|
||||
CODER_URL: ${{ secrets.TRAIAGE_CODER_URL }}
|
||||
CODER_SESSION_TOKEN: ${{ secrets.TRAIAGE_CODER_SESSION_TOKEN }}
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write
|
||||
actions: write
|
||||
|
||||
steps:
|
||||
# This is only required for testing locally using nektos/act, so leaving commented out.
|
||||
# An alternative is to use a larger or custom image.
|
||||
# - name: Install Github CLI
|
||||
# id: install-gh
|
||||
# run: |
|
||||
# (type -p wget >/dev/null || (sudo apt update && sudo apt install wget -y)) \
|
||||
# && sudo mkdir -p -m 755 /etc/apt/keyrings \
|
||||
# && out=$(mktemp) && wget -nv -O$out https://cli.github.com/packages/githubcli-archive-keyring.gpg \
|
||||
# && cat $out | sudo tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null \
|
||||
# && sudo chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg \
|
||||
# && sudo mkdir -p -m 755 /etc/apt/sources.list.d \
|
||||
# && echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
|
||||
# && sudo apt update \
|
||||
# && sudo apt install gh -y
|
||||
|
||||
- name: Determine Inputs
|
||||
id: determine-inputs
|
||||
if: always()
|
||||
env:
|
||||
GITHUB_ACTOR: ${{ github.actor }}
|
||||
GITHUB_EVENT_ISSUE_HTML_URL: ${{ github.event.issue.html_url }}
|
||||
GITHUB_EVENT_NAME: ${{ github.event_name }}
|
||||
GITHUB_EVENT_USER_ID: ${{ github.event.sender.id }}
|
||||
GITHUB_EVENT_USER_LOGIN: ${{ github.event.sender.login }}
|
||||
INPUTS_ISSUE_URL: ${{ inputs.issue_url }}
|
||||
INPUTS_TEMPLATE_NAME: ${{ inputs.template_name || 'coder' }}
|
||||
INPUTS_TEMPLATE_PRESET: ${{ inputs.template_preset || ''}}
|
||||
INPUTS_PREFIX: ${{ inputs.prefix || 'traiage' }}
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
echo "Using template name: ${INPUTS_TEMPLATE_NAME}"
|
||||
echo "template_name=${INPUTS_TEMPLATE_NAME}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
echo "Using template preset: ${INPUTS_TEMPLATE_PRESET}"
|
||||
echo "template_preset=${INPUTS_TEMPLATE_PRESET}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
echo "Using prefix: ${INPUTS_PREFIX}"
|
||||
echo "prefix=${INPUTS_PREFIX}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
# For workflow_dispatch, use the actor who triggered it
|
||||
# For issues events, use the issue author.
|
||||
if [[ "${GITHUB_EVENT_NAME}" == "workflow_dispatch" ]]; then
|
||||
if ! GITHUB_USER_ID=$(gh api "users/${GITHUB_ACTOR}" --jq '.id'); then
|
||||
echo "::error::Failed to get GitHub user ID for actor ${GITHUB_ACTOR}"
|
||||
exit 1
|
||||
fi
|
||||
echo "Using workflow_dispatch actor: ${GITHUB_ACTOR} (ID: ${GITHUB_USER_ID})"
|
||||
echo "github_user_id=${GITHUB_USER_ID}" >> "${GITHUB_OUTPUT}"
|
||||
echo "github_username=${GITHUB_ACTOR}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
echo "Using issue URL: ${INPUTS_ISSUE_URL}"
|
||||
echo "issue_url=${INPUTS_ISSUE_URL}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
exit 0
|
||||
elif [[ "${GITHUB_EVENT_NAME}" == "issues" ]]; then
|
||||
GITHUB_USER_ID=${GITHUB_EVENT_USER_ID}
|
||||
echo "Using issue author: ${GITHUB_EVENT_USER_LOGIN} (ID: ${GITHUB_USER_ID})"
|
||||
echo "github_user_id=${GITHUB_USER_ID}" >> "${GITHUB_OUTPUT}"
|
||||
echo "github_username=${GITHUB_EVENT_USER_LOGIN}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
echo "Using issue URL: ${GITHUB_EVENT_ISSUE_HTML_URL}"
|
||||
echo "issue_url=${GITHUB_EVENT_ISSUE_HTML_URL}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
exit 0
|
||||
else
|
||||
echo "::error::Unsupported event type: ${GITHUB_EVENT_NAME}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Verify push access
|
||||
env:
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
GITHUB_USERNAME: ${{ steps.determine-inputs.outputs.github_username }}
|
||||
GITHUB_USER_ID: ${{ steps.determine-inputs.outputs.github_user_id }}
|
||||
run: |
|
||||
# Query the actor’s permission on this repo
|
||||
can_push="$(gh api "/repos/${GITHUB_REPOSITORY}/collaborators/${GITHUB_USERNAME}/permission" --jq '.user.permissions.push')"
|
||||
if [[ "${can_push}" != "true" ]]; then
|
||||
echo "::error title=Access Denied::${GITHUB_USERNAME} does not have push access to ${GITHUB_REPOSITORY}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Extract context key and description from issue
|
||||
id: extract-context
|
||||
env:
|
||||
ISSUE_URL: ${{ steps.determine-inputs.outputs.issue_url }}
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
issue_number="$(gh issue view "${ISSUE_URL}" --json number --jq '.number')"
|
||||
context_key="gh-${issue_number}"
|
||||
|
||||
TASK_PROMPT=$(cat <<EOF
|
||||
Fix ${ISSUE_URL}
|
||||
|
||||
1. Use the gh CLI to read the issue description and comments.
|
||||
2. Think carefully and try to understand the root cause. If the issue is unclear or not well defined, ask me to clarify and provide more information.
|
||||
3. Write a proposed implementation plan to PLAN.md for me to review before starting implementation. Your plan should use TDD and only make the minimal changes necessary to fix the root cause.
|
||||
4. When I approve your plan, start working on it. If you encounter issues with the plan, ask me for clarification and update the plan as required.
|
||||
5. When you have finished implementation according to the plan, commit and push your changes, and create a PR using the gh CLI for me to review.
|
||||
|
||||
EOF
|
||||
)
|
||||
|
||||
echo "context_key=${context_key}" >> "${GITHUB_OUTPUT}"
|
||||
{
|
||||
echo "TASK_PROMPT<<EOF"
|
||||
echo "${TASK_PROMPT}"
|
||||
echo "EOF"
|
||||
} >> "${GITHUB_OUTPUT}"
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
fetch-depth: 1
|
||||
path: ./.github/actions/create-task-action
|
||||
persist-credentials: false
|
||||
ref: main
|
||||
repository: coder/create-task-action
|
||||
|
||||
- name: Create Coder Task
|
||||
id: create_task
|
||||
uses: ./.github/actions/create-task-action
|
||||
with:
|
||||
coder-url: ${{ secrets.TRAIAGE_CODER_URL }}
|
||||
coder-token: ${{ secrets.TRAIAGE_CODER_SESSION_TOKEN }}
|
||||
coder-organization: "default"
|
||||
coder-template-name: coder
|
||||
coder-template-preset: ${{ steps.determine-inputs.outputs.template_preset }}
|
||||
coder-task-name-prefix: gh-coder
|
||||
coder-task-prompt: ${{ steps.extract-context.outputs.task_prompt }}
|
||||
github-user-id: ${{ steps.determine-inputs.outputs.github_user_id }}
|
||||
github-token: ${{ github.token }}
|
||||
github-issue-url: ${{ steps.determine-inputs.outputs.issue_url }}
|
||||
comment-on-issue: ${{ startsWith(steps.determine-inputs.outputs.issue_url, format('{0}/{1}', github.server_url, github.repository)) }}
|
||||
|
||||
- name: Write outputs
|
||||
env:
|
||||
TASK_CREATED: ${{ steps.create_task.outputs.task-created }}
|
||||
TASK_NAME: ${{ steps.create_task.outputs.task-name }}
|
||||
TASK_URL: ${{ steps.create_task.outputs.task-url }}
|
||||
run: |
|
||||
{
|
||||
echo "**Task created:** ${TASK_CREATED}"
|
||||
echo "**Task name:** ${TASK_NAME}"
|
||||
echo "**Task URL**: ${TASK_URL}"
|
||||
} >> "${GITHUB_STEP_SUMMARY}"
|
||||
@@ -1,6 +1,5 @@
|
||||
[default]
|
||||
extend-ignore-identifiers-re = ["gho_.*"]
|
||||
extend-ignore-re = ["(#|//)\\s*spellchecker:ignore-next-line\\n.*"]
|
||||
|
||||
[default.extend-identifiers]
|
||||
alog = "alog"
|
||||
@@ -9,7 +8,6 @@ IST = "IST"
|
||||
MacOS = "macOS"
|
||||
AKS = "AKS"
|
||||
O_WRONLY = "O_WRONLY"
|
||||
AIBridge = "AI Bridge"
|
||||
|
||||
[default.extend-words]
|
||||
AKS = "AKS"
|
||||
@@ -30,7 +28,6 @@ HELO = "HELO"
|
||||
LKE = "LKE"
|
||||
byt = "byt"
|
||||
typ = "typ"
|
||||
Inferrable = "Inferrable"
|
||||
|
||||
[files]
|
||||
extend-exclude = [
|
||||
@@ -50,5 +47,5 @@ extend-exclude = [
|
||||
"provisioner/terraform/testdata/**",
|
||||
# notifications' golden files confuse the detector because of quoted-printable encoding
|
||||
"coderd/notifications/testdata/**",
|
||||
"agent/agentcontainers/testdata/devcontainercli/**",
|
||||
"agent/agentcontainers/testdata/devcontainercli/**"
|
||||
]
|
||||
|
||||
@@ -21,32 +21,27 @@ jobs:
|
||||
pull-requests: write # required to post PR review comments by the action
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Check Markdown links
|
||||
uses: umbrelladocs/action-linkspector@652f85bc57bb1e7d4327260decc10aa68f7694c3 # v1.4.0
|
||||
uses: umbrelladocs/action-linkspector@a0567ce1c7c13de4a2358587492ed43cab5d0102 # v1.3.4
|
||||
id: markdown-link-check
|
||||
# checks all markdown files from /docs including all subfolders
|
||||
with:
|
||||
reporter: github-pr-review
|
||||
config_file: ".github/.linkspector.yml"
|
||||
fail_on_error: "true"
|
||||
filter_mode: "file"
|
||||
filter_mode: "nofilter"
|
||||
|
||||
- name: Send Slack notification
|
||||
if: failure() && github.event_name == 'schedule'
|
||||
run: |
|
||||
curl \
|
||||
-X POST \
|
||||
-H 'Content-type: application/json' \
|
||||
-d '{"msg":"Broken links found in the documentation. Please check the logs at '"${LOGS_URL}"'"}' "${{ secrets.DOCS_LINK_SLACK_WEBHOOK }}"
|
||||
curl -X POST -H 'Content-type: application/json' -d '{"msg":"Broken links found in the documentation. Please check the logs at ${{ env.LOGS_URL }}"}' ${{ secrets.DOCS_LINK_SLACK_WEBHOOK }}
|
||||
echo "Sent Slack notification"
|
||||
env:
|
||||
LOGS_URL: https://github.com/coder/coder/actions/runs/${{ github.run_id }}
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
rules:
|
||||
cache-poisoning:
|
||||
ignore:
|
||||
- "ci.yaml:184"
|
||||
-12
@@ -12,9 +12,6 @@ node_modules/
|
||||
vendor/
|
||||
yarn-error.log
|
||||
|
||||
# Test output files
|
||||
test-output/
|
||||
|
||||
# VSCode settings.
|
||||
**/.vscode/*
|
||||
# Allow VSCode recommendations and default settings in project root.
|
||||
@@ -53,8 +50,6 @@ site/stats/
|
||||
*.tfplan
|
||||
*.lock.hcl
|
||||
.terraform/
|
||||
!coderd/testdata/parameters/modules/.terraform/
|
||||
!provisioner/terraform/testdata/modules-source-caching/.terraform/
|
||||
|
||||
**/.coderv2/*
|
||||
**/__debug_bin
|
||||
@@ -87,10 +82,3 @@ result
|
||||
|
||||
# dlv debug binaries for go tests
|
||||
__debug_bin*
|
||||
|
||||
**/.claude/settings.local.json
|
||||
|
||||
/.env
|
||||
|
||||
# Ignore plans written by AI agents.
|
||||
PLAN.md
|
||||
|
||||
+2
-11
@@ -169,16 +169,6 @@ linters-settings:
|
||||
- name: var-declaration
|
||||
- name: var-naming
|
||||
- name: waitgroup-by-value
|
||||
usetesting:
|
||||
# Only os-setenv is enabled because we migrated to usetesting from another linter that
|
||||
# only covered os-setenv.
|
||||
os-setenv: true
|
||||
os-create-temp: false
|
||||
os-mkdir-temp: false
|
||||
os-temp-dir: false
|
||||
os-chdir: false
|
||||
context-background: false
|
||||
context-todo: false
|
||||
|
||||
# irrelevant as of Go v1.22: https://go.dev/blog/loopvar-preview
|
||||
govet:
|
||||
@@ -191,6 +181,7 @@ linters-settings:
|
||||
|
||||
issues:
|
||||
exclude-dirs:
|
||||
- coderd/database/dbmem
|
||||
- node_modules
|
||||
- .git
|
||||
|
||||
@@ -262,6 +253,7 @@ linters:
|
||||
# - wastedassign
|
||||
|
||||
- staticcheck
|
||||
- tenv
|
||||
# In Go, it's possible for a package to test it's internal functionality
|
||||
# without testing any exported functions. This is enabled to promote
|
||||
# decomposing a package before testing it's internals. A function caller
|
||||
@@ -274,5 +266,4 @@ linters:
|
||||
- typecheck
|
||||
- unconvert
|
||||
- unused
|
||||
- usetesting
|
||||
- dupl
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
{
|
||||
"mcpServers": {
|
||||
"go-language-server": {
|
||||
"type": "stdio",
|
||||
"command": "go",
|
||||
"args": [
|
||||
"run",
|
||||
"github.com/isaacphi/mcp-language-server@latest",
|
||||
"-workspace",
|
||||
"./",
|
||||
"-lsp",
|
||||
"go",
|
||||
"--",
|
||||
"run",
|
||||
"golang.org/x/tools/gopls@latest"
|
||||
],
|
||||
"env": {}
|
||||
},
|
||||
"typescript-language-server": {
|
||||
"type": "stdio",
|
||||
"command": "go",
|
||||
"args": [
|
||||
"run",
|
||||
"github.com/isaacphi/mcp-language-server@latest",
|
||||
"-workspace",
|
||||
"./site/",
|
||||
"-lsp",
|
||||
"pnpx",
|
||||
"--",
|
||||
"typescript-language-server",
|
||||
"--stdio"
|
||||
],
|
||||
"env": {}
|
||||
}
|
||||
}
|
||||
}
|
||||
Vendored
+2
-4
@@ -49,18 +49,16 @@
|
||||
"[javascript][javascriptreact][json][jsonc][typescript][typescriptreact]": {
|
||||
"editor.defaultFormatter": "biomejs.biome",
|
||||
"editor.codeActionsOnSave": {
|
||||
"source.fixAll.biome": "explicit"
|
||||
"quickfix.biome": "explicit"
|
||||
// "source.organizeImports.biome": "explicit"
|
||||
}
|
||||
},
|
||||
|
||||
"tailwindCSS.classFunctions": ["cva", "cn"],
|
||||
"[css][html][markdown][yaml]": {
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode"
|
||||
},
|
||||
"typos.config": ".github/workflows/typos.toml",
|
||||
"[markdown]": {
|
||||
"editor.defaultFormatter": "DavidAnson.vscode-markdownlint"
|
||||
},
|
||||
"biome.lsp.bin": "site/node_modules/.bin/biome"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,159 +0,0 @@
|
||||
# Coder Development Guidelines
|
||||
|
||||
You are an experienced, pragmatic software engineer. You don't over-engineer a solution when a simple one is possible.
|
||||
Rule #1: If you want exception to ANY rule, YOU MUST STOP and get explicit permission first. BREAKING THE LETTER OR SPIRIT OF THE RULES IS FAILURE.
|
||||
|
||||
## Foundational rules
|
||||
|
||||
- Doing it right is better than doing it fast. You are not in a rush. NEVER skip steps or take shortcuts.
|
||||
- Tedious, systematic work is often the correct solution. Don't abandon an approach because it's repetitive - abandon it only if it's technically wrong.
|
||||
- Honesty is a core value.
|
||||
|
||||
## Our relationship
|
||||
|
||||
- Act as a critical peer reviewer. Your job is to disagree with me when I'm wrong, not to please me. Prioritize accuracy and reasoning over agreement.
|
||||
- YOU MUST speak up immediately when you don't know something or we're in over our heads
|
||||
- YOU MUST call out bad ideas, unreasonable expectations, and mistakes - I depend on this
|
||||
- NEVER be agreeable just to be nice - I NEED your HONEST technical judgment
|
||||
- NEVER write the phrase "You're absolutely right!" You are not a sycophant. We're working together because I value your opinion. Do not agree with me unless you can justify it with evidence or reasoning.
|
||||
- YOU MUST ALWAYS STOP and ask for clarification rather than making assumptions.
|
||||
- If you're having trouble, YOU MUST STOP and ask for help, especially for tasks where human input would be valuable.
|
||||
- When you disagree with my approach, YOU MUST push back. Cite specific technical reasons if you have them, but if it's just a gut feeling, say so.
|
||||
- If you're uncomfortable pushing back out loud, just say "Houston, we have a problem". I'll know what you mean
|
||||
- We discuss architectutral decisions (framework changes, major refactoring, system design) together before implementation. Routine fixes and clear implementations don't need discussion.
|
||||
|
||||
## Proactiveness
|
||||
|
||||
When asked to do something, just do it - including obvious follow-up actions needed to complete the task properly.
|
||||
Only pause to ask for confirmation when:
|
||||
|
||||
- Multiple valid approaches exist and the choice matters
|
||||
- The action would delete or significantly restructure existing code
|
||||
- You genuinely don't understand what's being asked
|
||||
- Your partner asked a question (answer the question, don't jump to implementation)
|
||||
|
||||
@.claude/docs/WORKFLOWS.md
|
||||
@package.json
|
||||
|
||||
## Essential Commands
|
||||
|
||||
| Task | Command | Notes |
|
||||
|-------------------|--------------------------|----------------------------------|
|
||||
| **Development** | `./scripts/develop.sh` | ⚠️ Don't use manual build |
|
||||
| **Build** | `make build` | Fat binaries (includes server) |
|
||||
| **Build Slim** | `make build-slim` | Slim binaries |
|
||||
| **Test** | `make test` | Full test suite |
|
||||
| **Test Single** | `make test RUN=TestName` | Faster than full suite |
|
||||
| **Test Postgres** | `make test-postgres` | Run tests with Postgres database |
|
||||
| **Test Race** | `make test-race` | Run tests with Go race detector |
|
||||
| **Lint** | `make lint` | Always run after changes |
|
||||
| **Generate** | `make gen` | After database changes |
|
||||
| **Format** | `make fmt` | Auto-format code |
|
||||
| **Clean** | `make clean` | Clean build artifacts |
|
||||
|
||||
### Documentation Commands
|
||||
|
||||
- `pnpm run format-docs` - Format markdown tables in docs
|
||||
- `pnpm run lint-docs` - Lint and fix markdown files
|
||||
- `pnpm run storybook` - Run Storybook (from site directory)
|
||||
|
||||
## Critical Patterns
|
||||
|
||||
### Database Changes (ALWAYS FOLLOW)
|
||||
|
||||
1. Modify `coderd/database/queries/*.sql` files
|
||||
2. Run `make gen`
|
||||
3. If audit errors: update `enterprise/audit/table.go`
|
||||
4. Run `make gen` again
|
||||
|
||||
### LSP Navigation (USE FIRST)
|
||||
|
||||
#### Go LSP (for backend code)
|
||||
|
||||
- **Find definitions**: `mcp__go-language-server__definition symbolName`
|
||||
- **Find references**: `mcp__go-language-server__references symbolName`
|
||||
- **Get type info**: `mcp__go-language-server__hover filePath line column`
|
||||
- **Rename symbol**: `mcp__go-language-server__rename_symbol filePath line column newName`
|
||||
|
||||
#### TypeScript LSP (for frontend code in site/)
|
||||
|
||||
- **Find definitions**: `mcp__typescript-language-server__definition symbolName`
|
||||
- **Find references**: `mcp__typescript-language-server__references symbolName`
|
||||
- **Get type info**: `mcp__typescript-language-server__hover filePath line column`
|
||||
- **Rename symbol**: `mcp__typescript-language-server__rename_symbol filePath line column newName`
|
||||
|
||||
### OAuth2 Error Handling
|
||||
|
||||
```go
|
||||
// OAuth2-compliant error responses
|
||||
writeOAuth2Error(ctx, rw, http.StatusBadRequest, "invalid_grant", "description")
|
||||
```
|
||||
|
||||
### Authorization Context
|
||||
|
||||
```go
|
||||
// Public endpoints needing system access
|
||||
app, err := api.Database.GetOAuth2ProviderAppByClientID(dbauthz.AsSystemRestricted(ctx), clientID)
|
||||
|
||||
// Authenticated endpoints with user context
|
||||
app, err := api.Database.GetOAuth2ProviderAppByClientID(ctx, clientID)
|
||||
```
|
||||
|
||||
## Quick Reference
|
||||
|
||||
### Full workflows available in imported WORKFLOWS.md
|
||||
|
||||
### New Feature Checklist
|
||||
|
||||
- [ ] Run `git pull` to ensure latest code
|
||||
- [ ] Check if feature touches database - you'll need migrations
|
||||
- [ ] Check if feature touches audit logs - update `enterprise/audit/table.go`
|
||||
|
||||
## Architecture
|
||||
|
||||
- **coderd**: Main API service
|
||||
- **provisionerd**: Infrastructure provisioning
|
||||
- **Agents**: Workspace services (SSH, port forwarding)
|
||||
- **Database**: PostgreSQL with `dbauthz` authorization
|
||||
|
||||
## Testing
|
||||
|
||||
### Race Condition Prevention
|
||||
|
||||
- Use unique identifiers: `fmt.Sprintf("test-client-%s-%d", t.Name(), time.Now().UnixNano())`
|
||||
- Never use hardcoded names in concurrent tests
|
||||
|
||||
### OAuth2 Testing
|
||||
|
||||
- Full suite: `./scripts/oauth2/test-mcp-oauth2.sh`
|
||||
- Manual testing: `./scripts/oauth2/test-manual-flow.sh`
|
||||
|
||||
### Timing Issues
|
||||
|
||||
NEVER use `time.Sleep` to mitigate timing issues. If an issue
|
||||
seems like it should use `time.Sleep`, read through https://github.com/coder/quartz and specifically the [README](https://github.com/coder/quartz/blob/main/README.md) to better understand how to handle timing issues.
|
||||
|
||||
## Code Style
|
||||
|
||||
### Detailed guidelines in imported WORKFLOWS.md
|
||||
|
||||
- Follow [Uber Go Style Guide](https://github.com/uber-go/guide/blob/master/style.md)
|
||||
- Commit format: `type(scope): message`
|
||||
|
||||
## Detailed Development Guides
|
||||
|
||||
@.claude/docs/OAUTH2.md
|
||||
@.claude/docs/TESTING.md
|
||||
@.claude/docs/TROUBLESHOOTING.md
|
||||
@.claude/docs/DATABASE.md
|
||||
|
||||
## Common Pitfalls
|
||||
|
||||
1. **Audit table errors** → Update `enterprise/audit/table.go`
|
||||
2. **OAuth2 errors** → Return RFC-compliant format
|
||||
3. **Race conditions** → Use unique test identifiers
|
||||
4. **Missing newlines** → Ensure files end with newline
|
||||
|
||||
---
|
||||
|
||||
*This file stays lean and actionable. Detailed workflows and explanations are imported automatically.*
|
||||
+2
-27
@@ -1,31 +1,6 @@
|
||||
# These APIs are versioned, so any changes need to be carefully reviewed for
|
||||
# whether to bump API major or minor versions.
|
||||
# These APIs are versioned, so any changes need to be carefully reviewed for whether
|
||||
# to bump API major or minor versions.
|
||||
agent/proto/ @spikecurtis @johnstcn
|
||||
provisionerd/proto/ @spikecurtis @johnstcn
|
||||
provisionersdk/proto/ @spikecurtis @johnstcn
|
||||
tailnet/proto/ @spikecurtis @johnstcn
|
||||
vpn/vpn.proto @spikecurtis @johnstcn
|
||||
vpn/version.go @spikecurtis @johnstcn
|
||||
|
||||
# This caching code is particularly tricky, and one must be very careful when
|
||||
# altering it.
|
||||
coderd/files/ @aslilac
|
||||
|
||||
coderd/dynamicparameters/ @Emyrk
|
||||
coderd/rbac/ @Emyrk
|
||||
|
||||
# Mainly dependent on coder/guts, which is maintained by @Emyrk
|
||||
scripts/apitypings/ @Emyrk
|
||||
scripts/gensite/ @aslilac
|
||||
|
||||
# The blood and guts of the autostop algorithm, which is quite complex and
|
||||
# requires elite ball knowledge of most of the scheduling code to make changes
|
||||
# without inadvertently affecting other parts of the codebase.
|
||||
coderd/schedule/autostop.go @deansheather @DanielleMaywood
|
||||
|
||||
# Usage tracking code requires intimate knowledge of Tallyman and Metronome, as
|
||||
# well as guidance from revenue.
|
||||
coderd/usage/ @deansheather @spikecurtis
|
||||
enterprise/coderd/usage/ @deansheather @spikecurtis
|
||||
|
||||
.github/ @jdomeracki-coder
|
||||
|
||||
+1
-1
@@ -1,2 +1,2 @@
|
||||
<!-- markdownlint-disable MD041 -->
|
||||
[https://coder.com/docs/about/contributing/CODE_OF_CONDUCT](https://coder.com/docs/about/contributing/CODE_OF_CONDUCT)
|
||||
[https://coder.com/docs/contributing/CODE_OF_CONDUCT](https://coder.com/docs/contributing/CODE_OF_CONDUCT)
|
||||
|
||||
@@ -36,9 +36,7 @@ GOOS := $(shell go env GOOS)
|
||||
GOARCH := $(shell go env GOARCH)
|
||||
GOOS_BIN_EXT := $(if $(filter windows, $(GOOS)),.exe,)
|
||||
VERSION := $(shell ./scripts/version.sh)
|
||||
|
||||
POSTGRES_VERSION ?= 17
|
||||
POSTGRES_IMAGE ?= us-docker.pkg.dev/coder-v2-images-public/public/postgres:$(POSTGRES_VERSION)
|
||||
POSTGRES_VERSION ?= 16
|
||||
|
||||
# Use the highest ZSTD compression level in CI.
|
||||
ifdef CI
|
||||
@@ -252,10 +250,6 @@ $(CODER_ALL_BINARIES): go.mod go.sum \
|
||||
fi
|
||||
|
||||
cp "$@" "./site/out/bin/coder-$$os-$$arch$$dot_ext"
|
||||
|
||||
if [[ "$${CODER_SIGN_GPG:-0}" == "1" ]]; then
|
||||
cp "$@.asc" "./site/out/bin/coder-$$os-$$arch$$dot_ext.asc"
|
||||
fi
|
||||
fi
|
||||
|
||||
# This task builds Coder Desktop dylibs
|
||||
@@ -460,31 +454,16 @@ fmt: fmt/ts fmt/go fmt/terraform fmt/shfmt fmt/biome fmt/markdown
|
||||
.PHONY: fmt
|
||||
|
||||
fmt/go:
|
||||
ifdef FILE
|
||||
# Format single file
|
||||
if [[ -f "$(FILE)" ]] && [[ "$(FILE)" == *.go ]] && ! grep -q "DO NOT EDIT" "$(FILE)"; then \
|
||||
echo "$(GREEN)==>$(RESET) $(BOLD)fmt/go$(RESET) $(FILE)"; \
|
||||
go run mvdan.cc/gofumpt@v0.8.0 -w -l "$(FILE)"; \
|
||||
fi
|
||||
else
|
||||
go mod tidy
|
||||
echo "$(GREEN)==>$(RESET) $(BOLD)fmt/go$(RESET)"
|
||||
# VS Code users should check out
|
||||
# https://github.com/mvdan/gofumpt#visual-studio-code
|
||||
find . $(FIND_EXCLUSIONS) -type f -name '*.go' -print0 | \
|
||||
xargs -0 grep -E --null -L '^// Code generated .* DO NOT EDIT\.$$' | \
|
||||
xargs -0 go run mvdan.cc/gofumpt@v0.8.0 -w -l
|
||||
endif
|
||||
xargs -0 grep --null -L "DO NOT EDIT" | \
|
||||
xargs -0 go run mvdan.cc/gofumpt@v0.4.0 -w -l
|
||||
.PHONY: fmt/go
|
||||
|
||||
fmt/ts: site/node_modules/.installed
|
||||
ifdef FILE
|
||||
# Format single TypeScript/JavaScript file
|
||||
if [[ -f "$(FILE)" ]] && [[ "$(FILE)" == *.ts ]] || [[ "$(FILE)" == *.tsx ]] || [[ "$(FILE)" == *.js ]] || [[ "$(FILE)" == *.jsx ]]; then \
|
||||
echo "$(GREEN)==>$(RESET) $(BOLD)fmt/ts$(RESET) $(FILE)"; \
|
||||
(cd site/ && pnpm exec biome format --write "../$(FILE)"); \
|
||||
fi
|
||||
else
|
||||
echo "$(GREEN)==>$(RESET) $(BOLD)fmt/ts$(RESET)"
|
||||
cd site
|
||||
# Avoid writing files in CI to reduce file write activity
|
||||
@@ -493,17 +472,9 @@ ifdef CI
|
||||
else
|
||||
pnpm run check:fix
|
||||
endif
|
||||
endif
|
||||
.PHONY: fmt/ts
|
||||
|
||||
fmt/biome: site/node_modules/.installed
|
||||
ifdef FILE
|
||||
# Format single file with biome
|
||||
if [[ -f "$(FILE)" ]] && [[ "$(FILE)" == *.ts ]] || [[ "$(FILE)" == *.tsx ]] || [[ "$(FILE)" == *.js ]] || [[ "$(FILE)" == *.jsx ]]; then \
|
||||
echo "$(GREEN)==>$(RESET) $(BOLD)fmt/biome$(RESET) $(FILE)"; \
|
||||
(cd site/ && pnpm exec biome format --write "../$(FILE)"); \
|
||||
fi
|
||||
else
|
||||
echo "$(GREEN)==>$(RESET) $(BOLD)fmt/biome$(RESET)"
|
||||
cd site/
|
||||
# Avoid writing files in CI to reduce file write activity
|
||||
@@ -512,30 +483,14 @@ ifdef CI
|
||||
else
|
||||
pnpm run format
|
||||
endif
|
||||
endif
|
||||
.PHONY: fmt/biome
|
||||
|
||||
fmt/terraform: $(wildcard *.tf)
|
||||
ifdef FILE
|
||||
# Format single Terraform file
|
||||
if [[ -f "$(FILE)" ]] && [[ "$(FILE)" == *.tf ]] || [[ "$(FILE)" == *.tfvars ]]; then \
|
||||
echo "$(GREEN)==>$(RESET) $(BOLD)fmt/terraform$(RESET) $(FILE)"; \
|
||||
terraform fmt "$(FILE)"; \
|
||||
fi
|
||||
else
|
||||
echo "$(GREEN)==>$(RESET) $(BOLD)fmt/terraform$(RESET)"
|
||||
terraform fmt -recursive
|
||||
endif
|
||||
.PHONY: fmt/terraform
|
||||
|
||||
fmt/shfmt: $(SHELL_SRC_FILES)
|
||||
ifdef FILE
|
||||
# Format single shell script
|
||||
if [[ -f "$(FILE)" ]] && [[ "$(FILE)" == *.sh ]]; then \
|
||||
echo "$(GREEN)==>$(RESET) $(BOLD)fmt/shfmt$(RESET) $(FILE)"; \
|
||||
shfmt -w "$(FILE)"; \
|
||||
fi
|
||||
else
|
||||
echo "$(GREEN)==>$(RESET) $(BOLD)fmt/shfmt$(RESET)"
|
||||
# Only do diff check in CI, errors on diff.
|
||||
ifdef CI
|
||||
@@ -543,25 +498,14 @@ ifdef CI
|
||||
else
|
||||
shfmt -w $(SHELL_SRC_FILES)
|
||||
endif
|
||||
endif
|
||||
.PHONY: fmt/shfmt
|
||||
|
||||
fmt/markdown: node_modules/.installed
|
||||
ifdef FILE
|
||||
# Format single markdown file
|
||||
if [[ -f "$(FILE)" ]] && [[ "$(FILE)" == *.md ]]; then \
|
||||
echo "$(GREEN)==>$(RESET) $(BOLD)fmt/markdown$(RESET) $(FILE)"; \
|
||||
pnpm exec markdown-table-formatter "$(FILE)"; \
|
||||
fi
|
||||
else
|
||||
echo "$(GREEN)==>$(RESET) $(BOLD)fmt/markdown$(RESET)"
|
||||
pnpm format-docs
|
||||
endif
|
||||
.PHONY: fmt/markdown
|
||||
|
||||
# Note: we don't run zizmor in the lint target because it takes a while. CI
|
||||
# runs it explicitly.
|
||||
lint: lint/shellcheck lint/go lint/ts lint/examples lint/helm lint/site-icons lint/markdown lint/actions/actionlint lint/check-scopes
|
||||
lint: lint/shellcheck lint/go lint/ts lint/examples lint/helm lint/site-icons lint/markdown
|
||||
.PHONY: lint
|
||||
|
||||
lint/site-icons:
|
||||
@@ -578,7 +522,6 @@ lint/go:
|
||||
./scripts/check_codersdk_imports.sh
|
||||
linter_ver=$(shell egrep -o 'GOLANGCI_LINT_VERSION=\S+' dogfood/coder/Dockerfile | cut -d '=' -f 2)
|
||||
go run github.com/golangci/golangci-lint/cmd/golangci-lint@v$$linter_ver run
|
||||
go run github.com/coder/paralleltestctx/cmd/paralleltestctx@v0.0.1 -custom-funcs="testutil.Context" ./...
|
||||
.PHONY: lint/go
|
||||
|
||||
lint/examples:
|
||||
@@ -600,31 +543,13 @@ lint/markdown: node_modules/.installed
|
||||
pnpm lint-docs
|
||||
.PHONY: lint/markdown
|
||||
|
||||
lint/actions: lint/actions/actionlint lint/actions/zizmor
|
||||
.PHONY: lint/actions
|
||||
|
||||
lint/actions/actionlint:
|
||||
go run github.com/rhysd/actionlint/cmd/actionlint@v1.7.7
|
||||
.PHONY: lint/actions/actionlint
|
||||
|
||||
lint/actions/zizmor:
|
||||
./scripts/zizmor.sh \
|
||||
--strict-collection \
|
||||
--persona=regular \
|
||||
.
|
||||
.PHONY: lint/actions/zizmor
|
||||
|
||||
# Verify api_key_scope enum contains all RBAC <resource>:<action> values.
|
||||
lint/check-scopes: coderd/database/dump.sql
|
||||
go run ./scripts/check-scopes
|
||||
.PHONY: lint/check-scopes
|
||||
|
||||
# All files generated by the database should be added here, and this can be used
|
||||
# as a target for jobs that need to run after the database is generated.
|
||||
DB_GEN_FILES := \
|
||||
coderd/database/dump.sql \
|
||||
coderd/database/querier.go \
|
||||
coderd/database/unique_constraint.go \
|
||||
coderd/database/dbmem/dbmem.go \
|
||||
coderd/database/dbmetrics/dbmetrics.go \
|
||||
coderd/database/dbauthz/dbauthz.go \
|
||||
coderd/database/dbmock/dbmock.go
|
||||
@@ -635,24 +560,16 @@ TAILNETTEST_MOCKS := \
|
||||
tailnet/tailnettest/workspaceupdatesprovidermock.go \
|
||||
tailnet/tailnettest/subscriptionmock.go
|
||||
|
||||
AIBRIDGED_MOCKS := \
|
||||
enterprise/aibridged/aibridgedmock/clientmock.go \
|
||||
enterprise/aibridged/aibridgedmock/poolmock.go
|
||||
|
||||
GEN_FILES := \
|
||||
tailnet/proto/tailnet.pb.go \
|
||||
agent/proto/agent.pb.go \
|
||||
agent/agentsocket/proto/agentsocket.pb.go \
|
||||
provisionersdk/proto/provisioner.pb.go \
|
||||
provisionerd/proto/provisionerd.pb.go \
|
||||
vpn/vpn.pb.go \
|
||||
enterprise/aibridged/proto/aibridged.pb.go \
|
||||
$(DB_GEN_FILES) \
|
||||
$(SITE_GEN_FILES) \
|
||||
coderd/rbac/object_gen.go \
|
||||
codersdk/rbacresources_gen.go \
|
||||
coderd/rbac/scopes_constants_gen.go \
|
||||
codersdk/apikey_scopes_gen.go \
|
||||
docs/admin/integrations/prometheus.md \
|
||||
docs/reference/cli/index.md \
|
||||
docs/admin/security/audit-logs.md \
|
||||
@@ -665,9 +582,7 @@ GEN_FILES := \
|
||||
coderd/database/pubsub/psmock/psmock.go \
|
||||
agent/agentcontainers/acmock/acmock.go \
|
||||
agent/agentcontainers/dcspec/dcspec_gen.go \
|
||||
coderd/httpmw/loggermw/loggermock/loggermock.go \
|
||||
codersdk/workspacesdk/agentconnmock/agentconnmock.go \
|
||||
$(AIBRIDGED_MOCKS)
|
||||
coderd/httpmw/loggermw/loggermock/loggermock.go
|
||||
|
||||
# all gen targets should be added here and to gen/mark-fresh
|
||||
gen: gen/db gen/golden-files $(GEN_FILES)
|
||||
@@ -677,7 +592,6 @@ gen/db: $(DB_GEN_FILES)
|
||||
.PHONY: gen/db
|
||||
|
||||
gen/golden-files: \
|
||||
agent/unit/testdata/.gen-golden \
|
||||
cli/testdata/.gen-golden \
|
||||
coderd/.gen-golden \
|
||||
coderd/notifications/.gen-golden \
|
||||
@@ -697,15 +611,12 @@ gen/mark-fresh:
|
||||
agent/proto/agent.pb.go \
|
||||
provisionersdk/proto/provisioner.pb.go \
|
||||
provisionerd/proto/provisionerd.pb.go \
|
||||
agent/agentsocket/proto/agentsocket.pb.go \
|
||||
vpn/vpn.pb.go \
|
||||
enterprise/aibridged/proto/aibridged.pb.go \
|
||||
coderd/database/dump.sql \
|
||||
$(DB_GEN_FILES) \
|
||||
site/src/api/typesGenerated.ts \
|
||||
coderd/rbac/object_gen.go \
|
||||
codersdk/rbacresources_gen.go \
|
||||
coderd/rbac/scopes_constants_gen.go \
|
||||
site/src/api/rbacresourcesGenerated.ts \
|
||||
site/src/api/countriesGenerated.ts \
|
||||
docs/admin/integrations/prometheus.md \
|
||||
@@ -721,8 +632,6 @@ gen/mark-fresh:
|
||||
agent/agentcontainers/acmock/acmock.go \
|
||||
agent/agentcontainers/dcspec/dcspec_gen.go \
|
||||
coderd/httpmw/loggermw/loggermock/loggermock.go \
|
||||
codersdk/workspacesdk/agentconnmock/agentconnmock.go \
|
||||
$(AIBRIDGED_MOCKS) \
|
||||
"
|
||||
|
||||
for file in $$files; do
|
||||
@@ -766,14 +675,6 @@ coderd/httpmw/loggermw/loggermock/loggermock.go: coderd/httpmw/loggermw/logger.g
|
||||
go generate ./coderd/httpmw/loggermw/loggermock/
|
||||
touch "$@"
|
||||
|
||||
codersdk/workspacesdk/agentconnmock/agentconnmock.go: codersdk/workspacesdk/agentconn.go
|
||||
go generate ./codersdk/workspacesdk/agentconnmock/
|
||||
touch "$@"
|
||||
|
||||
$(AIBRIDGED_MOCKS): enterprise/aibridged/client.go enterprise/aibridged/pool.go
|
||||
go generate ./enterprise/aibridged/aibridgedmock/
|
||||
touch "$@"
|
||||
|
||||
agent/agentcontainers/dcspec/dcspec_gen.go: \
|
||||
node_modules/.installed \
|
||||
agent/agentcontainers/dcspec/devContainer.base.schema.json \
|
||||
@@ -802,14 +703,6 @@ agent/proto/agent.pb.go: agent/proto/agent.proto
|
||||
--go-drpc_opt=paths=source_relative \
|
||||
./agent/proto/agent.proto
|
||||
|
||||
agent/agentsocket/proto/agentsocket.pb.go: agent/agentsocket/proto/agentsocket.proto
|
||||
protoc \
|
||||
--go_out=. \
|
||||
--go_opt=paths=source_relative \
|
||||
--go-drpc_out=. \
|
||||
--go-drpc_opt=paths=source_relative \
|
||||
./agent/agentsocket/proto/agentsocket.proto
|
||||
|
||||
provisionersdk/proto/provisioner.pb.go: provisionersdk/proto/provisioner.proto
|
||||
protoc \
|
||||
--go_out=. \
|
||||
@@ -832,14 +725,6 @@ vpn/vpn.pb.go: vpn/vpn.proto
|
||||
--go_opt=paths=source_relative \
|
||||
./vpn/vpn.proto
|
||||
|
||||
enterprise/aibridged/proto/aibridged.pb.go: enterprise/aibridged/proto/aibridged.proto
|
||||
protoc \
|
||||
--go_out=. \
|
||||
--go_opt=paths=source_relative \
|
||||
--go-drpc_out=. \
|
||||
--go-drpc_opt=paths=source_relative \
|
||||
./enterprise/aibridged/proto/aibridged.proto
|
||||
|
||||
site/src/api/typesGenerated.ts: site/node_modules/.installed $(wildcard scripts/apitypings/*) $(shell find ./codersdk $(FIND_EXCLUSIONS) -type f -name '*.go')
|
||||
# -C sets the directory for the go run command
|
||||
go run -C ./scripts/apitypings main.go > $@
|
||||
@@ -866,15 +751,6 @@ coderd/rbac/object_gen.go: scripts/typegen/rbacobject.gotmpl scripts/typegen/mai
|
||||
rmdir -v "$$tempdir"
|
||||
touch "$@"
|
||||
|
||||
coderd/rbac/scopes_constants_gen.go: scripts/typegen/scopenames.gotmpl scripts/typegen/main.go coderd/rbac/policy/policy.go
|
||||
# Generate typed low-level ScopeName constants from RBACPermissions
|
||||
# Write to a temp file first to avoid truncating the package during build
|
||||
# since the generator imports the rbac package.
|
||||
tempfile=$(shell mktemp /tmp/scopes_constants_gen.XXXXXX)
|
||||
go run ./scripts/typegen/main.go rbac scopenames > "$$tempfile"
|
||||
mv -v "$$tempfile" coderd/rbac/scopes_constants_gen.go
|
||||
touch "$@"
|
||||
|
||||
codersdk/rbacresources_gen.go: scripts/typegen/codersdk.gotmpl scripts/typegen/main.go coderd/rbac/object.go coderd/rbac/policy/policy.go
|
||||
# Do no overwrite codersdk/rbacresources_gen.go directly, as it would make the file empty, breaking
|
||||
# the `codersdk` package and any parallel build targets.
|
||||
@@ -882,12 +758,6 @@ codersdk/rbacresources_gen.go: scripts/typegen/codersdk.gotmpl scripts/typegen/m
|
||||
mv /tmp/rbacresources_gen.go codersdk/rbacresources_gen.go
|
||||
touch "$@"
|
||||
|
||||
codersdk/apikey_scopes_gen.go: scripts/apikeyscopesgen/main.go coderd/rbac/scopes_catalog.go coderd/rbac/scopes.go
|
||||
# Generate SDK constants for external API key scopes.
|
||||
go run ./scripts/apikeyscopesgen > /tmp/apikey_scopes_gen.go
|
||||
mv /tmp/apikey_scopes_gen.go codersdk/apikey_scopes_gen.go
|
||||
touch "$@"
|
||||
|
||||
site/src/api/rbacresourcesGenerated.ts: site/node_modules/.installed scripts/typegen/codersdk.gotmpl scripts/typegen/main.go coderd/rbac/object.go coderd/rbac/policy/policy.go
|
||||
go run scripts/typegen/main.go rbac typescript > "$@"
|
||||
(cd site/ && pnpm exec biome format --write src/api/rbacresourcesGenerated.ts)
|
||||
@@ -963,10 +833,6 @@ clean/golden-files:
|
||||
-type f -name '*.golden' -delete
|
||||
.PHONY: clean/golden-files
|
||||
|
||||
agent/unit/testdata/.gen-golden: $(wildcard agent/unit/testdata/*.golden) $(GO_SRC_FILES) $(wildcard agent/unit/*_test.go)
|
||||
TZ=UTC go test ./agent/unit -run="TestGraph" -update
|
||||
touch "$@"
|
||||
|
||||
cli/testdata/.gen-golden: $(wildcard cli/testdata/*.golden) $(wildcard cli/*.tpl) $(GO_SRC_FILES) $(wildcard cli/*_test.go)
|
||||
TZ=UTC go test ./cli -run="Test(CommandHelp|ServerYAML|ErrorExamples|.*Golden)" -update
|
||||
touch "$@"
|
||||
@@ -1009,38 +875,12 @@ provisioner/terraform/testdata/version:
|
||||
fi
|
||||
.PHONY: provisioner/terraform/testdata/version
|
||||
|
||||
# Set the retry flags if TEST_RETRIES is set
|
||||
ifdef TEST_RETRIES
|
||||
GOTESTSUM_RETRY_FLAGS := --rerun-fails=$(TEST_RETRIES)
|
||||
else
|
||||
GOTESTSUM_RETRY_FLAGS :=
|
||||
endif
|
||||
|
||||
# default to 8x8 parallelism to avoid overwhelming our workspaces. Hopefully we can remove these defaults
|
||||
# when we get our test suite's resource utilization under control.
|
||||
GOTEST_FLAGS := -v -p $(or $(TEST_NUM_PARALLEL_PACKAGES),"8") -parallel=$(or $(TEST_NUM_PARALLEL_TESTS),"8")
|
||||
|
||||
# The most common use is to set TEST_COUNT=1 to avoid Go's test cache.
|
||||
ifdef TEST_COUNT
|
||||
GOTEST_FLAGS += -count=$(TEST_COUNT)
|
||||
endif
|
||||
|
||||
ifdef TEST_SHORT
|
||||
GOTEST_FLAGS += -short
|
||||
endif
|
||||
|
||||
ifdef RUN
|
||||
GOTEST_FLAGS += -run $(RUN)
|
||||
endif
|
||||
|
||||
TEST_PACKAGES ?= ./...
|
||||
|
||||
test:
|
||||
$(GIT_FLAGS) gotestsum --format standard-quiet $(GOTESTSUM_RETRY_FLAGS) --packages="$(TEST_PACKAGES)" -- $(GOTEST_FLAGS)
|
||||
$(GIT_FLAGS) gotestsum --format standard-quiet -- -v -short -count=1 ./... $(if $(RUN),-run $(RUN))
|
||||
.PHONY: test
|
||||
|
||||
test-cli:
|
||||
$(MAKE) test TEST_PACKAGES="./cli..."
|
||||
$(GIT_FLAGS) gotestsum --format standard-quiet -- -v -short -count=1 ./cli/...
|
||||
.PHONY: test-cli
|
||||
|
||||
# sqlc-cloud-is-setup will fail if no SQLc auth token is set. Use this as a
|
||||
@@ -1076,12 +916,12 @@ sqlc-vet: test-postgres-docker
|
||||
test-postgres: test-postgres-docker
|
||||
# The postgres test is prone to failure, so we limit parallelism for
|
||||
# more consistent execution.
|
||||
$(GIT_FLAGS) gotestsum \
|
||||
$(GIT_FLAGS) DB=ci gotestsum \
|
||||
--junitfile="gotests.xml" \
|
||||
--jsonfile="gotests.json" \
|
||||
$(GOTESTSUM_RETRY_FLAGS) \
|
||||
--packages="./..." -- \
|
||||
-timeout=20m \
|
||||
-failfast \
|
||||
-count=1
|
||||
.PHONY: test-postgres
|
||||
|
||||
@@ -1102,12 +942,12 @@ test-postgres-docker:
|
||||
docker rm -f test-postgres-docker-${POSTGRES_VERSION} || true
|
||||
|
||||
# Try pulling up to three times to avoid CI flakes.
|
||||
docker pull ${POSTGRES_IMAGE} || {
|
||||
docker pull gcr.io/coder-dev-1/postgres:${POSTGRES_VERSION} || {
|
||||
retries=2
|
||||
for try in $(seq 1 ${retries}); do
|
||||
echo "Failed to pull image, retrying (${try}/${retries})..."
|
||||
sleep 1
|
||||
if docker pull ${POSTGRES_IMAGE}; then
|
||||
if docker pull gcr.io/coder-dev-1/postgres:${POSTGRES_VERSION}; then
|
||||
break
|
||||
fi
|
||||
done
|
||||
@@ -1135,7 +975,7 @@ test-postgres-docker:
|
||||
--restart no \
|
||||
--detach \
|
||||
--memory 16GB \
|
||||
${POSTGRES_IMAGE} \
|
||||
gcr.io/coder-dev-1/postgres:${POSTGRES_VERSION} \
|
||||
-c shared_buffers=2GB \
|
||||
-c effective_cache_size=1GB \
|
||||
-c work_mem=8MB \
|
||||
@@ -1192,8 +1032,3 @@ endif
|
||||
|
||||
dogfood/coder/nix.hash: flake.nix flake.lock
|
||||
sha256sum flake.nix flake.lock >./dogfood/coder/nix.hash
|
||||
|
||||
# Count the number of test databases created per test package.
|
||||
count-test-databases:
|
||||
PGPASSWORD=postgres psql -h localhost -U postgres -d coder_testing -P pager=off -c 'SELECT test_package, count(*) as count from test_databases GROUP BY test_package ORDER BY count DESC'
|
||||
.PHONY: count-test-databases
|
||||
|
||||
@@ -109,10 +109,9 @@ We are always working on new integrations. Please feel free to open an issue and
|
||||
### Official
|
||||
|
||||
- [**VS Code Extension**](https://marketplace.visualstudio.com/items?itemName=coder.coder-remote): Open any Coder workspace in VS Code with a single click
|
||||
- [**JetBrains Toolbox Plugin**](https://plugins.jetbrains.com/plugin/26968-coder): Open any Coder workspace from JetBrains Toolbox with a single click
|
||||
- [**JetBrains Gateway Plugin**](https://plugins.jetbrains.com/plugin/19620-coder): Open any Coder workspace in JetBrains Gateway with a single click
|
||||
- [**JetBrains Gateway Extension**](https://plugins.jetbrains.com/plugin/19620-coder): Open any Coder workspace in JetBrains Gateway with a single click
|
||||
- [**Dev Container Builder**](https://github.com/coder/envbuilder): Build development environments using `devcontainer.json` on Docker, Kubernetes, and OpenShift
|
||||
- [**Coder Registry**](https://registry.coder.com): Build and extend development environments with common use-cases
|
||||
- [**Module Registry**](https://registry.coder.com): Extend development environments with common use-cases
|
||||
- [**Kubernetes Log Stream**](https://github.com/coder/coder-logstream-kube): Stream Kubernetes Pod events to the Coder startup logs
|
||||
- [**Self-Hosted VS Code Extension Marketplace**](https://github.com/coder/code-marketplace): A private extension marketplace that works in restricted or airgapped networks integrating with [code-server](https://github.com/coder/code-server).
|
||||
- [**Setup Coder**](https://github.com/marketplace/actions/setup-coder): An action to setup coder CLI in GitHub workflows.
|
||||
|
||||
+115
-246
@@ -8,7 +8,6 @@ import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"io"
|
||||
"maps"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
@@ -41,7 +40,6 @@ import (
|
||||
"github.com/coder/coder/v2/agent/agentcontainers"
|
||||
"github.com/coder/coder/v2/agent/agentexec"
|
||||
"github.com/coder/coder/v2/agent/agentscripts"
|
||||
"github.com/coder/coder/v2/agent/agentsocket"
|
||||
"github.com/coder/coder/v2/agent/agentssh"
|
||||
"github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/agent/proto/resourcesmonitor"
|
||||
@@ -72,21 +70,17 @@ const (
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
Filesystem afero.Fs
|
||||
LogDir string
|
||||
TempDir string
|
||||
ScriptDataDir string
|
||||
Client Client
|
||||
ReconnectingPTYTimeout time.Duration
|
||||
EnvironmentVariables map[string]string
|
||||
Logger slog.Logger
|
||||
// IgnorePorts tells the api handler which ports to ignore when
|
||||
// listing all listening ports. This is helpful to hide ports that
|
||||
// are used by the agent, that the user does not care about.
|
||||
IgnorePorts map[int]string
|
||||
// ListeningPortsGetter is used to get the list of listening ports. Only
|
||||
// tests should set this. If unset, a default that queries the OS will be used.
|
||||
ListeningPortsGetter ListeningPortsGetter
|
||||
Filesystem afero.Fs
|
||||
LogDir string
|
||||
TempDir string
|
||||
ScriptDataDir string
|
||||
ExchangeToken func(ctx context.Context) (string, error)
|
||||
Client Client
|
||||
ReconnectingPTYTimeout time.Duration
|
||||
EnvironmentVariables map[string]string
|
||||
Logger slog.Logger
|
||||
IgnorePorts map[int]string
|
||||
PortCacheDuration time.Duration
|
||||
SSHMaxTimeout time.Duration
|
||||
TailnetListenPort uint16
|
||||
Subsystems []codersdk.AgentSubsystem
|
||||
@@ -95,19 +89,16 @@ type Options struct {
|
||||
ServiceBannerRefreshInterval time.Duration
|
||||
BlockFileTransfer bool
|
||||
Execer agentexec.Execer
|
||||
Devcontainers bool
|
||||
DevcontainerAPIOptions []agentcontainers.Option // Enable Devcontainers for these to be effective.
|
||||
Clock quartz.Clock
|
||||
SocketServerEnabled bool
|
||||
SocketPath string // Path for the agent socket server socket
|
||||
|
||||
ExperimentalDevcontainersEnabled bool
|
||||
ContainerAPIOptions []agentcontainers.Option // Enable ExperimentalDevcontainersEnabled for these to be effective.
|
||||
}
|
||||
|
||||
type Client interface {
|
||||
ConnectRPC26(ctx context.Context) (
|
||||
proto.DRPCAgentClient26, tailnetproto.DRPCTailnetClient26, error,
|
||||
ConnectRPC24(ctx context.Context) (
|
||||
proto.DRPCAgentClient24, tailnetproto.DRPCTailnetClient24, error,
|
||||
)
|
||||
tailnet.DERPMapRewriter
|
||||
agentsdk.RefreshableSessionTokenProvider
|
||||
RewriteDERPMap(derpMap *tailcfg.DERPMap)
|
||||
}
|
||||
|
||||
type Agent interface {
|
||||
@@ -140,15 +131,19 @@ func New(options Options) Agent {
|
||||
}
|
||||
options.ScriptDataDir = options.TempDir
|
||||
}
|
||||
if options.ExchangeToken == nil {
|
||||
options.ExchangeToken = func(_ context.Context) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
if options.ReportMetadataInterval == 0 {
|
||||
options.ReportMetadataInterval = time.Second
|
||||
}
|
||||
if options.ServiceBannerRefreshInterval == 0 {
|
||||
options.ServiceBannerRefreshInterval = 2 * time.Minute
|
||||
}
|
||||
|
||||
if options.Clock == nil {
|
||||
options.Clock = quartz.NewReal()
|
||||
if options.PortCacheDuration == 0 {
|
||||
options.PortCacheDuration = 1 * time.Second
|
||||
}
|
||||
|
||||
prometheusRegistry := options.PrometheusRegistry
|
||||
@@ -160,38 +155,30 @@ func New(options Options) Agent {
|
||||
options.Execer = agentexec.DefaultExecer
|
||||
}
|
||||
|
||||
if options.ListeningPortsGetter == nil {
|
||||
options.ListeningPortsGetter = &osListeningPortsGetter{
|
||||
cacheDuration: 1 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
hardCtx, hardCancel := context.WithCancel(context.Background())
|
||||
gracefulCtx, gracefulCancel := context.WithCancel(hardCtx)
|
||||
a := &agent{
|
||||
clock: options.Clock,
|
||||
tailnetListenPort: options.TailnetListenPort,
|
||||
reconnectingPTYTimeout: options.ReconnectingPTYTimeout,
|
||||
logger: options.Logger,
|
||||
gracefulCtx: gracefulCtx,
|
||||
gracefulCancel: gracefulCancel,
|
||||
hardCtx: hardCtx,
|
||||
hardCancel: hardCancel,
|
||||
coordDisconnected: make(chan struct{}),
|
||||
environmentVariables: options.EnvironmentVariables,
|
||||
client: options.Client,
|
||||
filesystem: options.Filesystem,
|
||||
logDir: options.LogDir,
|
||||
tempDir: options.TempDir,
|
||||
scriptDataDir: options.ScriptDataDir,
|
||||
lifecycleUpdate: make(chan struct{}, 1),
|
||||
lifecycleReported: make(chan codersdk.WorkspaceAgentLifecycle, 1),
|
||||
lifecycleStates: []agentsdk.PostLifecycleRequest{{State: codersdk.WorkspaceAgentLifecycleCreated}},
|
||||
reportConnectionsUpdate: make(chan struct{}, 1),
|
||||
listeningPortsHandler: listeningPortsHandler{
|
||||
getter: options.ListeningPortsGetter,
|
||||
ignorePorts: maps.Clone(options.IgnorePorts),
|
||||
},
|
||||
tailnetListenPort: options.TailnetListenPort,
|
||||
reconnectingPTYTimeout: options.ReconnectingPTYTimeout,
|
||||
logger: options.Logger,
|
||||
gracefulCtx: gracefulCtx,
|
||||
gracefulCancel: gracefulCancel,
|
||||
hardCtx: hardCtx,
|
||||
hardCancel: hardCancel,
|
||||
coordDisconnected: make(chan struct{}),
|
||||
environmentVariables: options.EnvironmentVariables,
|
||||
client: options.Client,
|
||||
exchangeToken: options.ExchangeToken,
|
||||
filesystem: options.Filesystem,
|
||||
logDir: options.LogDir,
|
||||
tempDir: options.TempDir,
|
||||
scriptDataDir: options.ScriptDataDir,
|
||||
lifecycleUpdate: make(chan struct{}, 1),
|
||||
lifecycleReported: make(chan codersdk.WorkspaceAgentLifecycle, 1),
|
||||
lifecycleStates: []agentsdk.PostLifecycleRequest{{State: codersdk.WorkspaceAgentLifecycleCreated}},
|
||||
reportConnectionsUpdate: make(chan struct{}, 1),
|
||||
ignorePorts: options.IgnorePorts,
|
||||
portCacheDuration: options.PortCacheDuration,
|
||||
reportMetadataInterval: options.ReportMetadataInterval,
|
||||
announcementBannersRefreshInterval: options.ServiceBannerRefreshInterval,
|
||||
sshMaxTimeout: options.SSHMaxTimeout,
|
||||
@@ -203,10 +190,8 @@ func New(options Options) Agent {
|
||||
metrics: newAgentMetrics(prometheusRegistry),
|
||||
execer: options.Execer,
|
||||
|
||||
devcontainers: options.Devcontainers,
|
||||
containerAPIOptions: options.DevcontainerAPIOptions,
|
||||
socketPath: options.SocketPath,
|
||||
socketServerEnabled: options.SocketServerEnabled,
|
||||
experimentalDevcontainersEnabled: options.ExperimentalDevcontainersEnabled,
|
||||
containerAPIOptions: options.ContainerAPIOptions,
|
||||
}
|
||||
// Initially, we have a closed channel, reflecting the fact that we are not initially connected.
|
||||
// Each time we connect we replace the channel (while holding the closeMutex) with a new one
|
||||
@@ -214,21 +199,26 @@ func New(options Options) Agent {
|
||||
// coordinator during shut down.
|
||||
close(a.coordDisconnected)
|
||||
a.announcementBanners.Store(new([]codersdk.BannerConfig))
|
||||
a.sessionToken.Store(new(string))
|
||||
a.init()
|
||||
return a
|
||||
}
|
||||
|
||||
type agent struct {
|
||||
clock quartz.Clock
|
||||
logger slog.Logger
|
||||
client Client
|
||||
tailnetListenPort uint16
|
||||
filesystem afero.Fs
|
||||
logDir string
|
||||
tempDir string
|
||||
scriptDataDir string
|
||||
listeningPortsHandler listeningPortsHandler
|
||||
subsystems []codersdk.AgentSubsystem
|
||||
logger slog.Logger
|
||||
client Client
|
||||
exchangeToken func(ctx context.Context) (string, error)
|
||||
tailnetListenPort uint16
|
||||
filesystem afero.Fs
|
||||
logDir string
|
||||
tempDir string
|
||||
scriptDataDir string
|
||||
// ignorePorts tells the api handler which ports to ignore when
|
||||
// listing all listening ports. This is helpful to hide ports that
|
||||
// are used by the agent, that the user does not care about.
|
||||
ignorePorts map[int]string
|
||||
portCacheDuration time.Duration
|
||||
subsystems []codersdk.AgentSubsystem
|
||||
|
||||
reconnectingPTYTimeout time.Duration
|
||||
reconnectingPTYServer *reconnectingpty.Server
|
||||
@@ -259,6 +249,7 @@ type agent struct {
|
||||
scriptRunner *agentscripts.Runner
|
||||
announcementBanners atomic.Pointer[[]codersdk.BannerConfig] // announcementBanners is atomic because it is periodically updated.
|
||||
announcementBannersRefreshInterval time.Duration
|
||||
sessionToken atomic.Pointer[string]
|
||||
sshServer *agentssh.Server
|
||||
sshMaxTimeout time.Duration
|
||||
blockFileTransfer bool
|
||||
@@ -281,13 +272,9 @@ type agent struct {
|
||||
metrics *agentMetrics
|
||||
execer agentexec.Execer
|
||||
|
||||
devcontainers bool
|
||||
containerAPIOptions []agentcontainers.Option
|
||||
containerAPI *agentcontainers.API
|
||||
|
||||
socketServerEnabled bool
|
||||
socketPath string
|
||||
socketServer *agentsocket.Server
|
||||
experimentalDevcontainersEnabled bool
|
||||
containerAPIOptions []agentcontainers.Option
|
||||
containerAPI atomic.Pointer[agentcontainers.API] // Set by apiHandler.
|
||||
}
|
||||
|
||||
func (a *agent) TailnetConn() *tailnet.Conn {
|
||||
@@ -324,7 +311,7 @@ func (a *agent) init() {
|
||||
return a.reportConnection(id, connectionType, ip)
|
||||
},
|
||||
|
||||
ExperimentalContainers: a.devcontainers,
|
||||
ExperimentalDevContainersEnabled: a.experimentalDevcontainersEnabled,
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -344,17 +331,6 @@ func (a *agent) init() {
|
||||
// will not report anywhere.
|
||||
a.scriptRunner.RegisterMetrics(a.prometheusRegistry)
|
||||
|
||||
containerAPIOpts := []agentcontainers.Option{
|
||||
agentcontainers.WithExecer(a.execer),
|
||||
agentcontainers.WithCommandEnv(a.sshServer.CommandEnv),
|
||||
agentcontainers.WithScriptLogger(func(logSourceID uuid.UUID) agentcontainers.ScriptLogger {
|
||||
return a.logSender.GetScriptLogger(logSourceID)
|
||||
}),
|
||||
}
|
||||
containerAPIOpts = append(containerAPIOpts, a.containerAPIOptions...)
|
||||
|
||||
a.containerAPI = agentcontainers.NewAPI(a.logger.Named("containers"), containerAPIOpts...)
|
||||
|
||||
a.reconnectingPTYServer = reconnectingpty.NewServer(
|
||||
a.logger.Named("reconnecting-pty"),
|
||||
a.sshServer,
|
||||
@@ -364,35 +340,12 @@ func (a *agent) init() {
|
||||
a.metrics.connectionsTotal, a.metrics.reconnectingPTYErrors,
|
||||
a.reconnectingPTYTimeout,
|
||||
func(s *reconnectingpty.Server) {
|
||||
s.ExperimentalContainers = a.devcontainers
|
||||
s.ExperimentalDevcontainersEnabled = a.experimentalDevcontainersEnabled
|
||||
},
|
||||
)
|
||||
|
||||
a.initSocketServer()
|
||||
|
||||
go a.runLoop()
|
||||
}
|
||||
|
||||
// initSocketServer initializes server that allows direct communication with a workspace agent using IPC.
|
||||
func (a *agent) initSocketServer() {
|
||||
if !a.socketServerEnabled {
|
||||
a.logger.Info(a.hardCtx, "socket server is disabled")
|
||||
return
|
||||
}
|
||||
|
||||
server, err := agentsocket.NewServer(
|
||||
a.logger.Named("socket"),
|
||||
agentsocket.WithPath(a.socketPath),
|
||||
)
|
||||
if err != nil {
|
||||
a.logger.Warn(a.hardCtx, "failed to create socket server", slog.Error(err), slog.F("path", a.socketPath))
|
||||
return
|
||||
}
|
||||
|
||||
a.socketServer = server
|
||||
a.logger.Debug(a.hardCtx, "socket server started", slog.F("path", a.socketPath))
|
||||
}
|
||||
|
||||
// runLoop attempts to start the agent in a retry loop.
|
||||
// Coder may be offline temporarily, a connection issue
|
||||
// may be happening, but regardless after the intermittent
|
||||
@@ -503,7 +456,7 @@ func (t *trySingleflight) Do(key string, fn func()) {
|
||||
fn()
|
||||
}
|
||||
|
||||
func (a *agent) reportMetadata(ctx context.Context, aAPI proto.DRPCAgentClient26) error {
|
||||
func (a *agent) reportMetadata(ctx context.Context, aAPI proto.DRPCAgentClient24) error {
|
||||
tickerDone := make(chan struct{})
|
||||
collectDone := make(chan struct{})
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
@@ -594,6 +547,7 @@ func (a *agent) reportMetadata(ctx context.Context, aAPI proto.DRPCAgentClient26
|
||||
// channel to synchronize the results and avoid both messy
|
||||
// mutex logic and overloading the API.
|
||||
for _, md := range manifest.Metadata {
|
||||
md := md
|
||||
// We send the result to the channel in the goroutine to avoid
|
||||
// sending the same result multiple times. So, we don't care about
|
||||
// the return values.
|
||||
@@ -718,7 +672,7 @@ func (a *agent) reportMetadata(ctx context.Context, aAPI proto.DRPCAgentClient26
|
||||
|
||||
// reportLifecycle reports the current lifecycle state once. All state
|
||||
// changes are reported in order.
|
||||
func (a *agent) reportLifecycle(ctx context.Context, aAPI proto.DRPCAgentClient26) error {
|
||||
func (a *agent) reportLifecycle(ctx context.Context, aAPI proto.DRPCAgentClient24) error {
|
||||
for {
|
||||
select {
|
||||
case <-a.lifecycleUpdate:
|
||||
@@ -798,7 +752,7 @@ func (a *agent) setLifecycle(state codersdk.WorkspaceAgentLifecycle) {
|
||||
}
|
||||
|
||||
// reportConnectionsLoop reports connections to the agent for auditing.
|
||||
func (a *agent) reportConnectionsLoop(ctx context.Context, aAPI proto.DRPCAgentClient26) error {
|
||||
func (a *agent) reportConnectionsLoop(ctx context.Context, aAPI proto.DRPCAgentClient24) error {
|
||||
for {
|
||||
select {
|
||||
case <-a.reportConnectionsUpdate:
|
||||
@@ -821,15 +775,11 @@ func (a *agent) reportConnectionsLoop(ctx context.Context, aAPI proto.DRPCAgentC
|
||||
logger.Debug(ctx, "reporting connection")
|
||||
_, err := aAPI.ReportConnection(ctx, payload)
|
||||
if err != nil {
|
||||
// Do not fail the loop if we fail to report a connection, just
|
||||
// log a warning.
|
||||
// Related to https://github.com/coder/coder/issues/20194
|
||||
logger.Warn(ctx, "failed to report connection to server", slog.Error(err))
|
||||
// keep going, we still need to remove it from the slice
|
||||
} else {
|
||||
logger.Debug(ctx, "successfully reported connection")
|
||||
return xerrors.Errorf("failed to report connection: %w", err)
|
||||
}
|
||||
|
||||
logger.Debug(ctx, "successfully reported connection")
|
||||
|
||||
// Remove the payload we sent.
|
||||
a.reportConnectionsMu.Lock()
|
||||
a.reportConnections[0] = nil // Release the pointer from the underlying array.
|
||||
@@ -860,13 +810,6 @@ func (a *agent) reportConnection(id uuid.UUID, connectionType proto.Connection_T
|
||||
ip = host
|
||||
}
|
||||
|
||||
// If the IP is "localhost" (which it can be in some cases), set it to
|
||||
// 127.0.0.1 instead.
|
||||
// Related to https://github.com/coder/coder/issues/20194
|
||||
if ip == "localhost" {
|
||||
ip = "127.0.0.1"
|
||||
}
|
||||
|
||||
a.reportConnectionsMu.Lock()
|
||||
defer a.reportConnectionsMu.Unlock()
|
||||
|
||||
@@ -929,7 +872,7 @@ func (a *agent) reportConnection(id uuid.UUID, connectionType proto.Connection_T
|
||||
// fetchServiceBannerLoop fetches the service banner on an interval. It will
|
||||
// not be fetched immediately; the expectation is that it is primed elsewhere
|
||||
// (and must be done before the session actually starts).
|
||||
func (a *agent) fetchServiceBannerLoop(ctx context.Context, aAPI proto.DRPCAgentClient26) error {
|
||||
func (a *agent) fetchServiceBannerLoop(ctx context.Context, aAPI proto.DRPCAgentClient24) error {
|
||||
ticker := time.NewTicker(a.announcementBannersRefreshInterval)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
@@ -958,13 +901,14 @@ func (a *agent) run() (retErr error) {
|
||||
// This allows the agent to refresh its token if necessary.
|
||||
// For instance identity this is required, since the instance
|
||||
// may not have re-provisioned, but a new agent ID was created.
|
||||
err := a.client.RefreshToken(a.hardCtx)
|
||||
sessionToken, err := a.exchangeToken(a.hardCtx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("refresh token: %w", err)
|
||||
return xerrors.Errorf("exchange token: %w", err)
|
||||
}
|
||||
a.sessionToken.Store(&sessionToken)
|
||||
|
||||
// ConnectRPC returns the dRPC connection we use for the Agent and Tailnet v2+ APIs
|
||||
aAPI, tAPI, err := a.client.ConnectRPC26(a.hardCtx)
|
||||
aAPI, tAPI, err := a.client.ConnectRPC24(a.hardCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -981,7 +925,7 @@ func (a *agent) run() (retErr error) {
|
||||
connMan := newAPIConnRoutineManager(a.gracefulCtx, a.hardCtx, a.logger, aAPI, tAPI)
|
||||
|
||||
connMan.startAgentAPI("init notification banners", gracefulShutdownBehaviorStop,
|
||||
func(ctx context.Context, aAPI proto.DRPCAgentClient26) error {
|
||||
func(ctx context.Context, aAPI proto.DRPCAgentClient24) error {
|
||||
bannersProto, err := aAPI.GetAnnouncementBanners(ctx, &proto.GetAnnouncementBannersRequest{})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fetch service banner: %w", err)
|
||||
@@ -998,7 +942,7 @@ func (a *agent) run() (retErr error) {
|
||||
// sending logs gets gracefulShutdownBehaviorRemain because we want to send logs generated by
|
||||
// shutdown scripts.
|
||||
connMan.startAgentAPI("send logs", gracefulShutdownBehaviorRemain,
|
||||
func(ctx context.Context, aAPI proto.DRPCAgentClient26) error {
|
||||
func(ctx context.Context, aAPI proto.DRPCAgentClient24) error {
|
||||
err := a.logSender.SendLoop(ctx, aAPI)
|
||||
if xerrors.Is(err, agentsdk.ErrLogLimitExceeded) {
|
||||
// we don't want this error to tear down the API connection and propagate to the
|
||||
@@ -1017,7 +961,7 @@ func (a *agent) run() (retErr error) {
|
||||
connMan.startAgentAPI("report metadata", gracefulShutdownBehaviorStop, a.reportMetadata)
|
||||
|
||||
// resources monitor can cease as soon as we start gracefully shutting down.
|
||||
connMan.startAgentAPI("resources monitor", gracefulShutdownBehaviorStop, func(ctx context.Context, aAPI proto.DRPCAgentClient26) error {
|
||||
connMan.startAgentAPI("resources monitor", gracefulShutdownBehaviorStop, func(ctx context.Context, aAPI proto.DRPCAgentClient24) error {
|
||||
logger := a.logger.Named("resources_monitor")
|
||||
clk := quartz.NewReal()
|
||||
config, err := aAPI.GetResourcesMonitoringConfiguration(ctx, &proto.GetResourcesMonitoringConfigurationRequest{})
|
||||
@@ -1064,7 +1008,7 @@ func (a *agent) run() (retErr error) {
|
||||
connMan.startAgentAPI("handle manifest", gracefulShutdownBehaviorStop, a.handleManifest(manifestOK))
|
||||
|
||||
connMan.startAgentAPI("app health reporter", gracefulShutdownBehaviorStop,
|
||||
func(ctx context.Context, aAPI proto.DRPCAgentClient26) error {
|
||||
func(ctx context.Context, aAPI proto.DRPCAgentClient24) error {
|
||||
if err := manifestOK.wait(ctx); err != nil {
|
||||
return xerrors.Errorf("no manifest: %w", err)
|
||||
}
|
||||
@@ -1097,7 +1041,7 @@ func (a *agent) run() (retErr error) {
|
||||
|
||||
connMan.startAgentAPI("fetch service banner loop", gracefulShutdownBehaviorStop, a.fetchServiceBannerLoop)
|
||||
|
||||
connMan.startAgentAPI("stats report loop", gracefulShutdownBehaviorStop, func(ctx context.Context, aAPI proto.DRPCAgentClient26) error {
|
||||
connMan.startAgentAPI("stats report loop", gracefulShutdownBehaviorStop, func(ctx context.Context, aAPI proto.DRPCAgentClient24) error {
|
||||
if err := networkOK.wait(ctx); err != nil {
|
||||
return xerrors.Errorf("no network: %w", err)
|
||||
}
|
||||
@@ -1112,8 +1056,8 @@ func (a *agent) run() (retErr error) {
|
||||
}
|
||||
|
||||
// handleManifest returns a function that fetches and processes the manifest
|
||||
func (a *agent) handleManifest(manifestOK *checkpoint) func(ctx context.Context, aAPI proto.DRPCAgentClient26) error {
|
||||
return func(ctx context.Context, aAPI proto.DRPCAgentClient26) error {
|
||||
func (a *agent) handleManifest(manifestOK *checkpoint) func(ctx context.Context, aAPI proto.DRPCAgentClient24) error {
|
||||
return func(ctx context.Context, aAPI proto.DRPCAgentClient24) error {
|
||||
var (
|
||||
sentResult = false
|
||||
err error
|
||||
@@ -1127,7 +1071,7 @@ func (a *agent) handleManifest(manifestOK *checkpoint) func(ctx context.Context,
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fetch metadata: %w", err)
|
||||
}
|
||||
a.logger.Info(ctx, "fetched manifest")
|
||||
a.logger.Info(ctx, "fetched manifest", slog.F("manifest", mp))
|
||||
manifest, err := agentsdk.ManifestFromProto(mp)
|
||||
if err != nil {
|
||||
a.logger.Critical(ctx, "failed to convert manifest", slog.F("manifest", mp), slog.Error(err))
|
||||
@@ -1136,18 +1080,6 @@ func (a *agent) handleManifest(manifestOK *checkpoint) func(ctx context.Context,
|
||||
if manifest.AgentID == uuid.Nil {
|
||||
return xerrors.New("nil agentID returned by manifest")
|
||||
}
|
||||
if manifest.ParentID != uuid.Nil {
|
||||
// This is a sub agent, disable all the features that should not
|
||||
// be used by sub agents.
|
||||
a.logger.Debug(ctx, "sub agent detected, disabling features",
|
||||
slog.F("parent_id", manifest.ParentID),
|
||||
slog.F("agent_id", manifest.AgentID),
|
||||
)
|
||||
if a.devcontainers {
|
||||
a.logger.Info(ctx, "devcontainers are not supported on sub agents, disabling feature")
|
||||
a.devcontainers = false
|
||||
}
|
||||
}
|
||||
a.client.RewriteDERPMap(manifest.DERPMap)
|
||||
|
||||
// Expand the directory and send it back to coderd so external
|
||||
@@ -1159,8 +1091,6 @@ func (a *agent) handleManifest(manifestOK *checkpoint) func(ctx context.Context,
|
||||
if err != nil {
|
||||
return xerrors.Errorf("expand directory: %w", err)
|
||||
}
|
||||
// Normalize all devcontainer paths by making them absolute.
|
||||
manifest.Devcontainers = agentcontainers.ExpandAllDevcontainerPaths(a.logger, expandPathToAbs, manifest.Devcontainers)
|
||||
subsys, err := agentsdk.ProtoFromSubsystems(a.subsystems)
|
||||
if err != nil {
|
||||
a.logger.Critical(ctx, "failed to convert subsystems", slog.Error(err))
|
||||
@@ -1198,27 +1128,17 @@ func (a *agent) handleManifest(manifestOK *checkpoint) func(ctx context.Context,
|
||||
}
|
||||
|
||||
var (
|
||||
scripts = manifest.Scripts
|
||||
devcontainerScripts map[uuid.UUID]codersdk.WorkspaceAgentScript
|
||||
scripts = manifest.Scripts
|
||||
scriptRunnerOpts []agentscripts.InitOption
|
||||
)
|
||||
if a.devcontainers {
|
||||
// Init the container API with the manifest and client so that
|
||||
// we can start accepting requests. The final start of the API
|
||||
// happens after the startup scripts have been executed to
|
||||
// ensure the presence of required tools. This means we can
|
||||
// return existing devcontainers but actual container detection
|
||||
// and creation will be deferred.
|
||||
a.containerAPI.Init(
|
||||
agentcontainers.WithManifestInfo(manifest.OwnerName, manifest.WorkspaceName, manifest.AgentName, manifest.Directory),
|
||||
agentcontainers.WithDevcontainers(manifest.Devcontainers, manifest.Scripts),
|
||||
agentcontainers.WithSubAgentClient(agentcontainers.NewSubAgentClientFromAPI(a.logger, aAPI)),
|
||||
)
|
||||
|
||||
// Since devcontainer are enabled, remove devcontainer scripts
|
||||
// from the main scripts list to avoid showing an error.
|
||||
scripts, devcontainerScripts = agentcontainers.ExtractDevcontainerScripts(manifest.Devcontainers, scripts)
|
||||
if a.experimentalDevcontainersEnabled {
|
||||
var dcScripts []codersdk.WorkspaceAgentScript
|
||||
scripts, dcScripts = agentcontainers.ExtractAndInitializeDevcontainerScripts(a.logger, expandPathToAbs, manifest.Devcontainers, scripts)
|
||||
// See ExtractAndInitializeDevcontainerScripts for motivation
|
||||
// behind running dcScripts as post start scripts.
|
||||
scriptRunnerOpts = append(scriptRunnerOpts, agentscripts.WithPostStartScripts(dcScripts...))
|
||||
}
|
||||
err = a.scriptRunner.Init(scripts, aAPI.ScriptCompleted)
|
||||
err = a.scriptRunner.Init(scripts, aAPI.ScriptCompleted, scriptRunnerOpts...)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("init script runner: %w", err)
|
||||
}
|
||||
@@ -1235,18 +1155,7 @@ func (a *agent) handleManifest(manifestOK *checkpoint) func(ctx context.Context,
|
||||
// finished (both start and post start). For instance, an
|
||||
// autostarted devcontainer will be included in this time.
|
||||
err := a.scriptRunner.Execute(a.gracefulCtx, agentscripts.ExecuteStartScripts)
|
||||
|
||||
if a.devcontainers {
|
||||
// Start the container API after the startup scripts have
|
||||
// been executed to ensure that the required tools can be
|
||||
// installed.
|
||||
a.containerAPI.Start()
|
||||
for _, dc := range manifest.Devcontainers {
|
||||
cErr := a.createDevcontainer(ctx, aAPI, dc, devcontainerScripts[dc.ID])
|
||||
err = errors.Join(err, cErr)
|
||||
}
|
||||
}
|
||||
|
||||
err = errors.Join(err, a.scriptRunner.Execute(a.gracefulCtx, agentscripts.ExecutePostStartScripts))
|
||||
dur := time.Since(start).Seconds()
|
||||
if err != nil {
|
||||
a.logger.Warn(ctx, "startup script(s) failed", slog.Error(err))
|
||||
@@ -1265,6 +1174,12 @@ func (a *agent) handleManifest(manifestOK *checkpoint) func(ctx context.Context,
|
||||
}
|
||||
a.metrics.startupScriptSeconds.WithLabelValues(label).Set(dur)
|
||||
a.scriptRunner.StartCron()
|
||||
if containerAPI := a.containerAPI.Load(); containerAPI != nil {
|
||||
// Inform the container API that the agent is ready.
|
||||
// This allows us to start watching for changes to
|
||||
// the devcontainer configuration files.
|
||||
containerAPI.SignalReady()
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("track conn goroutine: %w", err)
|
||||
@@ -1274,42 +1189,10 @@ func (a *agent) handleManifest(manifestOK *checkpoint) func(ctx context.Context,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *agent) createDevcontainer(
|
||||
ctx context.Context,
|
||||
aAPI proto.DRPCAgentClient26,
|
||||
dc codersdk.WorkspaceAgentDevcontainer,
|
||||
script codersdk.WorkspaceAgentScript,
|
||||
) (err error) {
|
||||
var (
|
||||
exitCode = int32(0)
|
||||
startTime = a.clock.Now()
|
||||
status = proto.Timing_OK
|
||||
)
|
||||
if err = a.containerAPI.CreateDevcontainer(dc.WorkspaceFolder, dc.ConfigPath); err != nil {
|
||||
exitCode = 1
|
||||
status = proto.Timing_EXIT_FAILURE
|
||||
}
|
||||
endTime := a.clock.Now()
|
||||
|
||||
if _, scriptErr := aAPI.ScriptCompleted(ctx, &proto.WorkspaceAgentScriptCompletedRequest{
|
||||
Timing: &proto.Timing{
|
||||
ScriptId: script.ID[:],
|
||||
Start: timestamppb.New(startTime),
|
||||
End: timestamppb.New(endTime),
|
||||
ExitCode: exitCode,
|
||||
Stage: proto.Timing_START,
|
||||
Status: status,
|
||||
},
|
||||
}); scriptErr != nil {
|
||||
a.logger.Warn(ctx, "reporting script completed failed", slog.Error(scriptErr))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// createOrUpdateNetwork waits for the manifest to be set using manifestOK, then creates or updates
|
||||
// the tailnet using the information in the manifest
|
||||
func (a *agent) createOrUpdateNetwork(manifestOK, networkOK *checkpoint) func(context.Context, proto.DRPCAgentClient26) error {
|
||||
return func(ctx context.Context, aAPI proto.DRPCAgentClient26) (retErr error) {
|
||||
func (a *agent) createOrUpdateNetwork(manifestOK, networkOK *checkpoint) func(context.Context, proto.DRPCAgentClient24) error {
|
||||
return func(ctx context.Context, _ proto.DRPCAgentClient24) (retErr error) {
|
||||
if err := manifestOK.wait(ctx); err != nil {
|
||||
return xerrors.Errorf("no manifest: %w", err)
|
||||
}
|
||||
@@ -1361,12 +1244,6 @@ func (a *agent) createOrUpdateNetwork(manifestOK, networkOK *checkpoint) func(co
|
||||
network.SetDERPMap(manifest.DERPMap)
|
||||
network.SetDERPForceWebSockets(manifest.DERPForceWebSockets)
|
||||
network.SetBlockEndpoints(manifest.DisableDirectConnections)
|
||||
|
||||
// Update the subagent client if the container API is available.
|
||||
if a.containerAPI != nil {
|
||||
client := agentcontainers.NewSubAgentClientFromAPI(a.logger, aAPI)
|
||||
a.containerAPI.UpdateSubAgentClient(client)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1397,10 +1274,9 @@ func (a *agent) updateCommandEnv(current []string) (updated []string, err error)
|
||||
"CODER": "true",
|
||||
"CODER_WORKSPACE_NAME": manifest.WorkspaceName,
|
||||
"CODER_WORKSPACE_AGENT_NAME": manifest.AgentName,
|
||||
"CODER_WORKSPACE_OWNER_NAME": manifest.OwnerName,
|
||||
|
||||
// Specific Coder subcommands require the agent token exposed!
|
||||
"CODER_AGENT_TOKEN": a.client.GetSessionToken(),
|
||||
"CODER_AGENT_TOKEN": *a.sessionToken.Load(),
|
||||
|
||||
// Git on Windows resolves with UNIX-style paths.
|
||||
// If using backslashes, it's unable to find the executable.
|
||||
@@ -1615,7 +1491,10 @@ func (a *agent) createTailnet(
|
||||
}()
|
||||
if err = a.trackGoroutine(func() {
|
||||
defer apiListener.Close()
|
||||
apiHandler := a.apiHandler()
|
||||
apiHandler, closeAPIHAndler := a.apiHandler()
|
||||
defer func() {
|
||||
_ = closeAPIHAndler()
|
||||
}()
|
||||
server := &http.Server{
|
||||
BaseContext: func(net.Listener) context.Context { return ctx },
|
||||
Handler: apiHandler,
|
||||
@@ -1629,6 +1508,7 @@ func (a *agent) createTailnet(
|
||||
case <-ctx.Done():
|
||||
case <-a.hardCtx.Done():
|
||||
}
|
||||
_ = closeAPIHAndler()
|
||||
_ = server.Close()
|
||||
}()
|
||||
|
||||
@@ -1960,7 +1840,6 @@ func (a *agent) Close() error {
|
||||
lifecycleState = codersdk.WorkspaceAgentLifecycleShutdownError
|
||||
}
|
||||
}
|
||||
|
||||
a.setLifecycle(lifecycleState)
|
||||
|
||||
err = a.scriptRunner.Close()
|
||||
@@ -1968,16 +1847,6 @@ func (a *agent) Close() error {
|
||||
a.logger.Error(a.hardCtx, "script runner close", slog.Error(err))
|
||||
}
|
||||
|
||||
if a.socketServer != nil {
|
||||
if err := a.socketServer.Close(); err != nil {
|
||||
a.logger.Error(a.hardCtx, "socket server close", slog.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
if err := a.containerAPI.Close(); err != nil {
|
||||
a.logger.Error(a.hardCtx, "container API close", slog.Error(err))
|
||||
}
|
||||
|
||||
// Wait for the graceful shutdown to complete, but don't wait forever so
|
||||
// that we don't break user expectations.
|
||||
go func() {
|
||||
@@ -2095,7 +1964,7 @@ const (
|
||||
|
||||
type apiConnRoutineManager struct {
|
||||
logger slog.Logger
|
||||
aAPI proto.DRPCAgentClient26
|
||||
aAPI proto.DRPCAgentClient24
|
||||
tAPI tailnetproto.DRPCTailnetClient24
|
||||
eg *errgroup.Group
|
||||
stopCtx context.Context
|
||||
@@ -2104,7 +1973,7 @@ type apiConnRoutineManager struct {
|
||||
|
||||
func newAPIConnRoutineManager(
|
||||
gracefulCtx, hardCtx context.Context, logger slog.Logger,
|
||||
aAPI proto.DRPCAgentClient26, tAPI tailnetproto.DRPCTailnetClient24,
|
||||
aAPI proto.DRPCAgentClient24, tAPI tailnetproto.DRPCTailnetClient24,
|
||||
) *apiConnRoutineManager {
|
||||
// routines that remain in operation during graceful shutdown use the remainCtx. They'll still
|
||||
// exit if the errgroup hits an error, which usually means a problem with the conn.
|
||||
@@ -2137,7 +2006,7 @@ func newAPIConnRoutineManager(
|
||||
// but for Tailnet.
|
||||
func (a *apiConnRoutineManager) startAgentAPI(
|
||||
name string, behavior gracefulShutdownBehavior,
|
||||
f func(context.Context, proto.DRPCAgentClient26) error,
|
||||
f func(context.Context, proto.DRPCAgentClient24) error,
|
||||
) {
|
||||
logger := a.logger.With(slog.F("name", name))
|
||||
var ctx context.Context
|
||||
|
||||
+173
-770
File diff suppressed because it is too large
Load Diff
Generated
+16
-149
@@ -1,9 +1,9 @@
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: .. (interfaces: ContainerCLI,DevcontainerCLI)
|
||||
// Source: .. (interfaces: Lister)
|
||||
//
|
||||
// Generated by this command:
|
||||
//
|
||||
// mockgen -destination ./acmock.go -package acmock .. ContainerCLI,DevcontainerCLI
|
||||
// mockgen -destination ./acmock.go -package acmock .. Lister
|
||||
//
|
||||
|
||||
// Package acmock is a generated GoMock package.
|
||||
@@ -13,86 +13,36 @@ import (
|
||||
context "context"
|
||||
reflect "reflect"
|
||||
|
||||
agentcontainers "github.com/coder/coder/v2/agent/agentcontainers"
|
||||
codersdk "github.com/coder/coder/v2/codersdk"
|
||||
gomock "go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
// MockContainerCLI is a mock of ContainerCLI interface.
|
||||
type MockContainerCLI struct {
|
||||
// MockLister is a mock of Lister interface.
|
||||
type MockLister struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockContainerCLIMockRecorder
|
||||
recorder *MockListerMockRecorder
|
||||
isgomock struct{}
|
||||
}
|
||||
|
||||
// MockContainerCLIMockRecorder is the mock recorder for MockContainerCLI.
|
||||
type MockContainerCLIMockRecorder struct {
|
||||
mock *MockContainerCLI
|
||||
// MockListerMockRecorder is the mock recorder for MockLister.
|
||||
type MockListerMockRecorder struct {
|
||||
mock *MockLister
|
||||
}
|
||||
|
||||
// NewMockContainerCLI creates a new mock instance.
|
||||
func NewMockContainerCLI(ctrl *gomock.Controller) *MockContainerCLI {
|
||||
mock := &MockContainerCLI{ctrl: ctrl}
|
||||
mock.recorder = &MockContainerCLIMockRecorder{mock}
|
||||
// NewMockLister creates a new mock instance.
|
||||
func NewMockLister(ctrl *gomock.Controller) *MockLister {
|
||||
mock := &MockLister{ctrl: ctrl}
|
||||
mock.recorder = &MockListerMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockContainerCLI) EXPECT() *MockContainerCLIMockRecorder {
|
||||
func (m *MockLister) EXPECT() *MockListerMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// Copy mocks base method.
|
||||
func (m *MockContainerCLI) Copy(ctx context.Context, containerName, src, dst string) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Copy", ctx, containerName, src, dst)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Copy indicates an expected call of Copy.
|
||||
func (mr *MockContainerCLIMockRecorder) Copy(ctx, containerName, src, dst any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Copy", reflect.TypeOf((*MockContainerCLI)(nil).Copy), ctx, containerName, src, dst)
|
||||
}
|
||||
|
||||
// DetectArchitecture mocks base method.
|
||||
func (m *MockContainerCLI) DetectArchitecture(ctx context.Context, containerName string) (string, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DetectArchitecture", ctx, containerName)
|
||||
ret0, _ := ret[0].(string)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// DetectArchitecture indicates an expected call of DetectArchitecture.
|
||||
func (mr *MockContainerCLIMockRecorder) DetectArchitecture(ctx, containerName any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetectArchitecture", reflect.TypeOf((*MockContainerCLI)(nil).DetectArchitecture), ctx, containerName)
|
||||
}
|
||||
|
||||
// ExecAs mocks base method.
|
||||
func (m *MockContainerCLI) ExecAs(ctx context.Context, containerName, user string, args ...string) ([]byte, error) {
|
||||
m.ctrl.T.Helper()
|
||||
varargs := []any{ctx, containerName, user}
|
||||
for _, a := range args {
|
||||
varargs = append(varargs, a)
|
||||
}
|
||||
ret := m.ctrl.Call(m, "ExecAs", varargs...)
|
||||
ret0, _ := ret[0].([]byte)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// ExecAs indicates an expected call of ExecAs.
|
||||
func (mr *MockContainerCLIMockRecorder) ExecAs(ctx, containerName, user any, args ...any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
varargs := append([]any{ctx, containerName, user}, args...)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecAs", reflect.TypeOf((*MockContainerCLI)(nil).ExecAs), varargs...)
|
||||
}
|
||||
|
||||
// List mocks base method.
|
||||
func (m *MockContainerCLI) List(ctx context.Context) (codersdk.WorkspaceAgentListContainersResponse, error) {
|
||||
func (m *MockLister) List(ctx context.Context) (codersdk.WorkspaceAgentListContainersResponse, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "List", ctx)
|
||||
ret0, _ := ret[0].(codersdk.WorkspaceAgentListContainersResponse)
|
||||
@@ -101,90 +51,7 @@ func (m *MockContainerCLI) List(ctx context.Context) (codersdk.WorkspaceAgentLis
|
||||
}
|
||||
|
||||
// List indicates an expected call of List.
|
||||
func (mr *MockContainerCLIMockRecorder) List(ctx any) *gomock.Call {
|
||||
func (mr *MockListerMockRecorder) List(ctx any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockContainerCLI)(nil).List), ctx)
|
||||
}
|
||||
|
||||
// MockDevcontainerCLI is a mock of DevcontainerCLI interface.
|
||||
type MockDevcontainerCLI struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockDevcontainerCLIMockRecorder
|
||||
isgomock struct{}
|
||||
}
|
||||
|
||||
// MockDevcontainerCLIMockRecorder is the mock recorder for MockDevcontainerCLI.
|
||||
type MockDevcontainerCLIMockRecorder struct {
|
||||
mock *MockDevcontainerCLI
|
||||
}
|
||||
|
||||
// NewMockDevcontainerCLI creates a new mock instance.
|
||||
func NewMockDevcontainerCLI(ctrl *gomock.Controller) *MockDevcontainerCLI {
|
||||
mock := &MockDevcontainerCLI{ctrl: ctrl}
|
||||
mock.recorder = &MockDevcontainerCLIMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockDevcontainerCLI) EXPECT() *MockDevcontainerCLIMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// Exec mocks base method.
|
||||
func (m *MockDevcontainerCLI) Exec(ctx context.Context, workspaceFolder, configPath, cmd string, cmdArgs []string, opts ...agentcontainers.DevcontainerCLIExecOptions) error {
|
||||
m.ctrl.T.Helper()
|
||||
varargs := []any{ctx, workspaceFolder, configPath, cmd, cmdArgs}
|
||||
for _, a := range opts {
|
||||
varargs = append(varargs, a)
|
||||
}
|
||||
ret := m.ctrl.Call(m, "Exec", varargs...)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Exec indicates an expected call of Exec.
|
||||
func (mr *MockDevcontainerCLIMockRecorder) Exec(ctx, workspaceFolder, configPath, cmd, cmdArgs any, opts ...any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
varargs := append([]any{ctx, workspaceFolder, configPath, cmd, cmdArgs}, opts...)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exec", reflect.TypeOf((*MockDevcontainerCLI)(nil).Exec), varargs...)
|
||||
}
|
||||
|
||||
// ReadConfig mocks base method.
|
||||
func (m *MockDevcontainerCLI) ReadConfig(ctx context.Context, workspaceFolder, configPath string, env []string, opts ...agentcontainers.DevcontainerCLIReadConfigOptions) (agentcontainers.DevcontainerConfig, error) {
|
||||
m.ctrl.T.Helper()
|
||||
varargs := []any{ctx, workspaceFolder, configPath, env}
|
||||
for _, a := range opts {
|
||||
varargs = append(varargs, a)
|
||||
}
|
||||
ret := m.ctrl.Call(m, "ReadConfig", varargs...)
|
||||
ret0, _ := ret[0].(agentcontainers.DevcontainerConfig)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// ReadConfig indicates an expected call of ReadConfig.
|
||||
func (mr *MockDevcontainerCLIMockRecorder) ReadConfig(ctx, workspaceFolder, configPath, env any, opts ...any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
varargs := append([]any{ctx, workspaceFolder, configPath, env}, opts...)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadConfig", reflect.TypeOf((*MockDevcontainerCLI)(nil).ReadConfig), varargs...)
|
||||
}
|
||||
|
||||
// Up mocks base method.
|
||||
func (m *MockDevcontainerCLI) Up(ctx context.Context, workspaceFolder, configPath string, opts ...agentcontainers.DevcontainerCLIUpOptions) (string, error) {
|
||||
m.ctrl.T.Helper()
|
||||
varargs := []any{ctx, workspaceFolder, configPath}
|
||||
for _, a := range opts {
|
||||
varargs = append(varargs, a)
|
||||
}
|
||||
ret := m.ctrl.Call(m, "Up", varargs...)
|
||||
ret0, _ := ret[0].(string)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// Up indicates an expected call of Up.
|
||||
func (mr *MockDevcontainerCLIMockRecorder) Up(ctx, workspaceFolder, configPath any, opts ...any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
varargs := append([]any{ctx, workspaceFolder, configPath}, opts...)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Up", reflect.TypeOf((*MockDevcontainerCLI)(nil).Up), varargs...)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockLister)(nil).List), ctx)
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Package acmock contains a mock implementation of agentcontainers.Lister for use in tests.
|
||||
package acmock
|
||||
|
||||
//go:generate mockgen -destination ./acmock.go -package acmock .. ContainerCLI,DevcontainerCLI
|
||||
//go:generate mockgen -destination ./acmock.go -package acmock .. Lister
|
||||
|
||||
+296
-1784
File diff suppressed because it is too large
Load Diff
@@ -1,358 +1,163 @@
|
||||
package agentcontainers
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/mock/gomock"
|
||||
|
||||
"github.com/coder/coder/v2/provisioner"
|
||||
"cdr.dev/slog"
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
"github.com/coder/coder/v2/agent/agentcontainers/acmock"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
"github.com/coder/quartz"
|
||||
)
|
||||
|
||||
func TestSafeAgentName(t *testing.T) {
|
||||
func TestAPI(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
folderName string
|
||||
expected string
|
||||
fallback bool
|
||||
}{
|
||||
// Basic valid names
|
||||
{
|
||||
folderName: "simple",
|
||||
expected: "simple",
|
||||
},
|
||||
{
|
||||
folderName: "with-hyphens",
|
||||
expected: "with-hyphens",
|
||||
},
|
||||
{
|
||||
folderName: "123numbers",
|
||||
expected: "123numbers",
|
||||
},
|
||||
{
|
||||
folderName: "mixed123",
|
||||
expected: "mixed123",
|
||||
},
|
||||
// List tests the API.getContainers method using a mock
|
||||
// implementation. It specifically tests caching behavior.
|
||||
t.Run("List", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Names that need transformation
|
||||
{
|
||||
folderName: "With_Underscores",
|
||||
expected: "with-underscores",
|
||||
},
|
||||
{
|
||||
folderName: "With Spaces",
|
||||
expected: "with-spaces",
|
||||
},
|
||||
{
|
||||
folderName: "UPPERCASE",
|
||||
expected: "uppercase",
|
||||
},
|
||||
{
|
||||
folderName: "Mixed_Case-Name",
|
||||
expected: "mixed-case-name",
|
||||
},
|
||||
fakeCt := fakeContainer(t)
|
||||
fakeCt2 := fakeContainer(t)
|
||||
makeResponse := func(cts ...codersdk.WorkspaceAgentContainer) codersdk.WorkspaceAgentListContainersResponse {
|
||||
return codersdk.WorkspaceAgentListContainersResponse{Containers: cts}
|
||||
}
|
||||
|
||||
// Names with special characters that get replaced
|
||||
{
|
||||
folderName: "special@#$chars",
|
||||
expected: "special-chars",
|
||||
},
|
||||
{
|
||||
folderName: "dots.and.more",
|
||||
expected: "dots-and-more",
|
||||
},
|
||||
{
|
||||
folderName: "multiple___underscores",
|
||||
expected: "multiple-underscores",
|
||||
},
|
||||
{
|
||||
folderName: "multiple---hyphens",
|
||||
expected: "multiple-hyphens",
|
||||
},
|
||||
// Each test case is called multiple times to ensure idempotency
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
// data to be stored in the handler
|
||||
cacheData codersdk.WorkspaceAgentListContainersResponse
|
||||
// duration of cache
|
||||
cacheDur time.Duration
|
||||
// relative age of the cached data
|
||||
cacheAge time.Duration
|
||||
// function to set up expectations for the mock
|
||||
setupMock func(*acmock.MockLister)
|
||||
// expected result
|
||||
expected codersdk.WorkspaceAgentListContainersResponse
|
||||
// expected error
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "no cache",
|
||||
setupMock: func(mcl *acmock.MockLister) {
|
||||
mcl.EXPECT().List(gomock.Any()).Return(makeResponse(fakeCt), nil).AnyTimes()
|
||||
},
|
||||
expected: makeResponse(fakeCt),
|
||||
},
|
||||
{
|
||||
name: "no data",
|
||||
cacheData: makeResponse(),
|
||||
cacheAge: 2 * time.Second,
|
||||
cacheDur: time.Second,
|
||||
setupMock: func(mcl *acmock.MockLister) {
|
||||
mcl.EXPECT().List(gomock.Any()).Return(makeResponse(fakeCt), nil).AnyTimes()
|
||||
},
|
||||
expected: makeResponse(fakeCt),
|
||||
},
|
||||
{
|
||||
name: "cached data",
|
||||
cacheAge: time.Second,
|
||||
cacheData: makeResponse(fakeCt),
|
||||
cacheDur: 2 * time.Second,
|
||||
expected: makeResponse(fakeCt),
|
||||
},
|
||||
{
|
||||
name: "lister error",
|
||||
setupMock: func(mcl *acmock.MockLister) {
|
||||
mcl.EXPECT().List(gomock.Any()).Return(makeResponse(), assert.AnError).AnyTimes()
|
||||
},
|
||||
expectedErr: assert.AnError.Error(),
|
||||
},
|
||||
{
|
||||
name: "stale cache",
|
||||
cacheAge: 2 * time.Second,
|
||||
cacheData: makeResponse(fakeCt),
|
||||
cacheDur: time.Second,
|
||||
setupMock: func(mcl *acmock.MockLister) {
|
||||
mcl.EXPECT().List(gomock.Any()).Return(makeResponse(fakeCt2), nil).AnyTimes()
|
||||
},
|
||||
expected: makeResponse(fakeCt2),
|
||||
},
|
||||
} {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
var (
|
||||
ctx = testutil.Context(t, testutil.WaitShort)
|
||||
clk = quartz.NewMock(t)
|
||||
ctrl = gomock.NewController(t)
|
||||
mockLister = acmock.NewMockLister(ctrl)
|
||||
now = time.Now().UTC()
|
||||
logger = slogtest.Make(t, nil).Leveled(slog.LevelDebug)
|
||||
api = NewAPI(logger, WithLister(mockLister))
|
||||
)
|
||||
defer api.Close()
|
||||
|
||||
// Edge cases with leading/trailing special chars
|
||||
{
|
||||
folderName: "-leading-hyphen",
|
||||
expected: "leading-hyphen",
|
||||
},
|
||||
{
|
||||
folderName: "trailing-hyphen-",
|
||||
expected: "trailing-hyphen",
|
||||
},
|
||||
{
|
||||
folderName: "_leading_underscore",
|
||||
expected: "leading-underscore",
|
||||
},
|
||||
{
|
||||
folderName: "trailing_underscore_",
|
||||
expected: "trailing-underscore",
|
||||
},
|
||||
{
|
||||
folderName: "---multiple-leading",
|
||||
expected: "multiple-leading",
|
||||
},
|
||||
{
|
||||
folderName: "trailing-multiple---",
|
||||
expected: "trailing-multiple",
|
||||
},
|
||||
api.cacheDuration = tc.cacheDur
|
||||
api.clock = clk
|
||||
api.containers = tc.cacheData
|
||||
if tc.cacheAge != 0 {
|
||||
api.mtime = now.Add(-tc.cacheAge)
|
||||
}
|
||||
if tc.setupMock != nil {
|
||||
tc.setupMock(mockLister)
|
||||
}
|
||||
|
||||
// Complex transformation cases
|
||||
{
|
||||
folderName: "___very---complex@@@name___",
|
||||
expected: "very-complex-name",
|
||||
},
|
||||
{
|
||||
folderName: "my.project-folder_v2",
|
||||
expected: "my-project-folder-v2",
|
||||
},
|
||||
clk.Set(now).MustWait(ctx)
|
||||
|
||||
// Empty and fallback cases - now correctly uses friendlyName fallback
|
||||
{
|
||||
folderName: "",
|
||||
expected: "friendly-fallback",
|
||||
fallback: true,
|
||||
},
|
||||
{
|
||||
folderName: "---",
|
||||
expected: "friendly-fallback",
|
||||
fallback: true,
|
||||
},
|
||||
{
|
||||
folderName: "___",
|
||||
expected: "friendly-fallback",
|
||||
fallback: true,
|
||||
},
|
||||
{
|
||||
folderName: "@#$",
|
||||
expected: "friendly-fallback",
|
||||
fallback: true,
|
||||
},
|
||||
|
||||
// Additional edge cases
|
||||
{
|
||||
folderName: "a",
|
||||
expected: "a",
|
||||
},
|
||||
{
|
||||
folderName: "1",
|
||||
expected: "1",
|
||||
},
|
||||
{
|
||||
folderName: "a1b2c3",
|
||||
expected: "a1b2c3",
|
||||
},
|
||||
{
|
||||
folderName: "CamelCase",
|
||||
expected: "camelcase",
|
||||
},
|
||||
{
|
||||
folderName: "snake_case_name",
|
||||
expected: "snake-case-name",
|
||||
},
|
||||
{
|
||||
folderName: "kebab-case-name",
|
||||
expected: "kebab-case-name",
|
||||
},
|
||||
{
|
||||
folderName: "mix3d_C4s3-N4m3",
|
||||
expected: "mix3d-c4s3-n4m3",
|
||||
},
|
||||
{
|
||||
folderName: "123-456-789",
|
||||
expected: "123-456-789",
|
||||
},
|
||||
{
|
||||
folderName: "abc123def456",
|
||||
expected: "abc123def456",
|
||||
},
|
||||
{
|
||||
folderName: " spaces everywhere ",
|
||||
expected: "spaces-everywhere",
|
||||
},
|
||||
{
|
||||
folderName: "unicode-café-naïve",
|
||||
expected: "unicode-caf-na-ve",
|
||||
},
|
||||
{
|
||||
folderName: "path/with/slashes",
|
||||
expected: "path-with-slashes",
|
||||
},
|
||||
{
|
||||
folderName: "file.tar.gz",
|
||||
expected: "file-tar-gz",
|
||||
},
|
||||
{
|
||||
folderName: "version-1.2.3-alpha",
|
||||
expected: "version-1-2-3-alpha",
|
||||
},
|
||||
|
||||
// Truncation test for names exceeding 64 characters
|
||||
{
|
||||
folderName: "this-is-a-very-long-folder-name-that-exceeds-sixty-four-characters-limit-and-should-be-truncated",
|
||||
expected: "this-is-a-very-long-folder-name-that-exceeds-sixty-four-characte",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.folderName, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
name, usingWorkspaceFolder := safeAgentName(tt.folderName, "friendly-fallback")
|
||||
|
||||
assert.Equal(t, tt.expected, name)
|
||||
assert.True(t, provisioner.AgentNameRegex.Match([]byte(name)))
|
||||
assert.Equal(t, tt.fallback, !usingWorkspaceFolder)
|
||||
})
|
||||
}
|
||||
// Repeat the test to ensure idempotency
|
||||
for i := 0; i < 2; i++ {
|
||||
actual, err := api.getContainers(ctx)
|
||||
if tc.expectedErr != "" {
|
||||
require.Empty(t, actual, "expected no data (attempt %d)", i)
|
||||
require.ErrorContains(t, err, tc.expectedErr, "expected error (attempt %d)", i)
|
||||
} else {
|
||||
require.NoError(t, err, "expected no error (attempt %d)", i)
|
||||
require.Equal(t, tc.expected, actual, "expected containers to be equal (attempt %d)", i)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestExpandedAgentName(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
workspaceFolder string
|
||||
friendlyName string
|
||||
depth int
|
||||
expected string
|
||||
fallback bool
|
||||
}{
|
||||
{
|
||||
name: "simple path depth 1",
|
||||
workspaceFolder: "/home/coder/project",
|
||||
friendlyName: "friendly-fallback",
|
||||
depth: 0,
|
||||
expected: "project",
|
||||
func fakeContainer(t *testing.T, mut ...func(*codersdk.WorkspaceAgentContainer)) codersdk.WorkspaceAgentContainer {
|
||||
t.Helper()
|
||||
ct := codersdk.WorkspaceAgentContainer{
|
||||
CreatedAt: time.Now().UTC(),
|
||||
ID: uuid.New().String(),
|
||||
FriendlyName: testutil.GetRandomName(t),
|
||||
Image: testutil.GetRandomName(t) + ":" + strings.Split(uuid.New().String(), "-")[0],
|
||||
Labels: map[string]string{
|
||||
testutil.GetRandomName(t): testutil.GetRandomName(t),
|
||||
},
|
||||
{
|
||||
name: "simple path depth 2",
|
||||
workspaceFolder: "/home/coder/project",
|
||||
friendlyName: "friendly-fallback",
|
||||
depth: 1,
|
||||
expected: "coder-project",
|
||||
},
|
||||
{
|
||||
name: "simple path depth 3",
|
||||
workspaceFolder: "/home/coder/project",
|
||||
friendlyName: "friendly-fallback",
|
||||
depth: 2,
|
||||
expected: "home-coder-project",
|
||||
},
|
||||
{
|
||||
name: "simple path depth exceeds available",
|
||||
workspaceFolder: "/home/coder/project",
|
||||
friendlyName: "friendly-fallback",
|
||||
depth: 9,
|
||||
expected: "home-coder-project",
|
||||
},
|
||||
// Cases with special characters that need sanitization
|
||||
{
|
||||
name: "path with spaces and special chars",
|
||||
workspaceFolder: "/home/coder/My Project_v2",
|
||||
friendlyName: "friendly-fallback",
|
||||
depth: 1,
|
||||
expected: "coder-my-project-v2",
|
||||
},
|
||||
{
|
||||
name: "path with dots and underscores",
|
||||
workspaceFolder: "/home/user.name/project_folder.git",
|
||||
friendlyName: "friendly-fallback",
|
||||
depth: 1,
|
||||
expected: "user-name-project-folder-git",
|
||||
},
|
||||
// Edge cases
|
||||
{
|
||||
name: "empty path",
|
||||
workspaceFolder: "",
|
||||
friendlyName: "friendly-fallback",
|
||||
depth: 0,
|
||||
expected: "friendly-fallback",
|
||||
fallback: true,
|
||||
},
|
||||
{
|
||||
name: "root path",
|
||||
workspaceFolder: "/",
|
||||
friendlyName: "friendly-fallback",
|
||||
depth: 0,
|
||||
expected: "friendly-fallback",
|
||||
fallback: true,
|
||||
},
|
||||
{
|
||||
name: "single component",
|
||||
workspaceFolder: "project",
|
||||
friendlyName: "friendly-fallback",
|
||||
depth: 0,
|
||||
expected: "project",
|
||||
},
|
||||
{
|
||||
name: "single component with depth 2",
|
||||
workspaceFolder: "project",
|
||||
friendlyName: "friendly-fallback",
|
||||
depth: 1,
|
||||
expected: "project",
|
||||
},
|
||||
// Collision simulation cases
|
||||
{
|
||||
name: "foo/project depth 1",
|
||||
workspaceFolder: "/home/coder/foo/project",
|
||||
friendlyName: "friendly-fallback",
|
||||
depth: 0,
|
||||
expected: "project",
|
||||
},
|
||||
{
|
||||
name: "foo/project depth 2",
|
||||
workspaceFolder: "/home/coder/foo/project",
|
||||
friendlyName: "friendly-fallback",
|
||||
depth: 1,
|
||||
expected: "foo-project",
|
||||
},
|
||||
{
|
||||
name: "bar/project depth 1",
|
||||
workspaceFolder: "/home/coder/bar/project",
|
||||
friendlyName: "friendly-fallback",
|
||||
depth: 0,
|
||||
expected: "project",
|
||||
},
|
||||
{
|
||||
name: "bar/project depth 2",
|
||||
workspaceFolder: "/home/coder/bar/project",
|
||||
friendlyName: "friendly-fallback",
|
||||
depth: 1,
|
||||
expected: "bar-project",
|
||||
},
|
||||
// Path with trailing slashes
|
||||
{
|
||||
name: "path with trailing slash",
|
||||
workspaceFolder: "/home/coder/project/",
|
||||
friendlyName: "friendly-fallback",
|
||||
depth: 1,
|
||||
expected: "coder-project",
|
||||
},
|
||||
{
|
||||
name: "path with multiple trailing slashes",
|
||||
workspaceFolder: "/home/coder/project///",
|
||||
friendlyName: "friendly-fallback",
|
||||
depth: 1,
|
||||
expected: "coder-project",
|
||||
},
|
||||
// Path with leading slashes
|
||||
{
|
||||
name: "path with multiple leading slashes",
|
||||
workspaceFolder: "///home/coder/project",
|
||||
friendlyName: "friendly-fallback",
|
||||
depth: 1,
|
||||
expected: "coder-project",
|
||||
Running: true,
|
||||
Ports: []codersdk.WorkspaceAgentContainerPort{
|
||||
{
|
||||
Network: "tcp",
|
||||
Port: testutil.RandomPortNoListen(t),
|
||||
HostPort: testutil.RandomPortNoListen(t),
|
||||
//nolint:gosec // this is a test
|
||||
HostIP: []string{"127.0.0.1", "[::1]", "localhost", "0.0.0.0", "[::]", testutil.GetRandomName(t)}[rand.Intn(6)],
|
||||
},
|
||||
},
|
||||
Status: testutil.MustRandString(t, 10),
|
||||
Volumes: map[string]string{testutil.GetRandomName(t): testutil.GetRandomName(t)},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
name, usingWorkspaceFolder := expandedAgentName(tt.workspaceFolder, tt.friendlyName, tt.depth)
|
||||
|
||||
assert.Equal(t, tt.expected, name)
|
||||
assert.True(t, provisioner.AgentNameRegex.Match([]byte(name)))
|
||||
assert.Equal(t, tt.fallback, !usingWorkspaceFolder)
|
||||
})
|
||||
for _, m := range mut {
|
||||
m(&ct)
|
||||
}
|
||||
return ct
|
||||
}
|
||||
|
||||
+134
-3450
File diff suppressed because it is too large
Load Diff
@@ -6,32 +6,19 @@ import (
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
// ContainerCLI is an interface for interacting with containers in a workspace.
|
||||
type ContainerCLI interface {
|
||||
// Lister is an interface for listing containers visible to the
|
||||
// workspace agent.
|
||||
type Lister interface {
|
||||
// List returns a list of containers visible to the workspace agent.
|
||||
// This should include running and stopped containers.
|
||||
List(ctx context.Context) (codersdk.WorkspaceAgentListContainersResponse, error)
|
||||
// DetectArchitecture detects the architecture of a container.
|
||||
DetectArchitecture(ctx context.Context, containerName string) (string, error)
|
||||
// Copy copies a file from the host to a container.
|
||||
Copy(ctx context.Context, containerName, src, dst string) error
|
||||
// ExecAs executes a command in a container as a specific user.
|
||||
ExecAs(ctx context.Context, containerName, user string, args ...string) ([]byte, error)
|
||||
}
|
||||
|
||||
// noopContainerCLI is a ContainerCLI that does nothing.
|
||||
type noopContainerCLI struct{}
|
||||
// NoopLister is a Lister interface that never returns any containers.
|
||||
type NoopLister struct{}
|
||||
|
||||
var _ ContainerCLI = noopContainerCLI{}
|
||||
var _ Lister = NoopLister{}
|
||||
|
||||
func (noopContainerCLI) List(_ context.Context) (codersdk.WorkspaceAgentListContainersResponse, error) {
|
||||
func (NoopLister) List(_ context.Context) (codersdk.WorkspaceAgentListContainersResponse, error) {
|
||||
return codersdk.WorkspaceAgentListContainersResponse{}, nil
|
||||
}
|
||||
|
||||
func (noopContainerCLI) DetectArchitecture(_ context.Context, _ string) (string, error) {
|
||||
return "<none>", nil
|
||||
}
|
||||
func (noopContainerCLI) Copy(_ context.Context, _ string, _ string, _ string) error { return nil }
|
||||
func (noopContainerCLI) ExecAs(_ context.Context, _ string, _ string, _ ...string) ([]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -228,23 +228,23 @@ func run(ctx context.Context, execer agentexec.Execer, cmd string, args ...strin
|
||||
return stdout, stderr, err
|
||||
}
|
||||
|
||||
// dockerCLI is an implementation for Docker CLI that lists containers.
|
||||
type dockerCLI struct {
|
||||
// DockerCLILister is a ContainerLister that lists containers using the docker CLI
|
||||
type DockerCLILister struct {
|
||||
execer agentexec.Execer
|
||||
}
|
||||
|
||||
var _ ContainerCLI = (*dockerCLI)(nil)
|
||||
var _ Lister = &DockerCLILister{}
|
||||
|
||||
func NewDockerCLI(execer agentexec.Execer) ContainerCLI {
|
||||
return &dockerCLI{
|
||||
execer: execer,
|
||||
func NewDocker(execer agentexec.Execer) Lister {
|
||||
return &DockerCLILister{
|
||||
execer: agentexec.DefaultExecer,
|
||||
}
|
||||
}
|
||||
|
||||
func (dcli *dockerCLI) List(ctx context.Context) (codersdk.WorkspaceAgentListContainersResponse, error) {
|
||||
func (dcl *DockerCLILister) List(ctx context.Context) (codersdk.WorkspaceAgentListContainersResponse, error) {
|
||||
var stdoutBuf, stderrBuf bytes.Buffer
|
||||
// List all container IDs, one per line, with no truncation
|
||||
cmd := dcli.execer.CommandContext(ctx, "docker", "ps", "--all", "--quiet", "--no-trunc")
|
||||
cmd := dcl.execer.CommandContext(ctx, "docker", "ps", "--all", "--quiet", "--no-trunc")
|
||||
cmd.Stdout = &stdoutBuf
|
||||
cmd.Stderr = &stderrBuf
|
||||
if err := cmd.Run(); err != nil {
|
||||
@@ -288,7 +288,7 @@ func (dcli *dockerCLI) List(ctx context.Context) (codersdk.WorkspaceAgentListCon
|
||||
// will still contain valid JSON. We will just end up missing
|
||||
// information about the removed container. We could potentially
|
||||
// log this error, but I'm not sure it's worth it.
|
||||
dockerInspectStdout, dockerInspectStderr, err := runDockerInspect(ctx, dcli.execer, ids...)
|
||||
dockerInspectStdout, dockerInspectStderr, err := runDockerInspect(ctx, dcl.execer, ids...)
|
||||
if err != nil {
|
||||
return codersdk.WorkspaceAgentListContainersResponse{}, xerrors.Errorf("run docker inspect: %w: %s", err, dockerInspectStderr)
|
||||
}
|
||||
@@ -311,10 +311,6 @@ func (dcli *dockerCLI) List(ctx context.Context) (codersdk.WorkspaceAgentListCon
|
||||
// container IDs and returns the parsed output.
|
||||
// The stderr output is also returned for logging purposes.
|
||||
func runDockerInspect(ctx context.Context, execer agentexec.Execer, ids ...string) (stdout, stderr []byte, err error) {
|
||||
if ctx.Err() != nil {
|
||||
// If the context is done, we don't want to run the command.
|
||||
return []byte{}, []byte{}, ctx.Err()
|
||||
}
|
||||
var stdoutBuf, stderrBuf bytes.Buffer
|
||||
cmd := execer.CommandContext(ctx, "docker", append([]string{"inspect"}, ids...)...)
|
||||
cmd.Stdout = &stdoutBuf
|
||||
@@ -323,12 +319,6 @@ func runDockerInspect(ctx context.Context, execer agentexec.Execer, ids ...strin
|
||||
stdout = bytes.TrimSpace(stdoutBuf.Bytes())
|
||||
stderr = bytes.TrimSpace(stderrBuf.Bytes())
|
||||
if err != nil {
|
||||
if ctx.Err() != nil {
|
||||
// If the context was canceled while running the command,
|
||||
// return the context error instead of the command error,
|
||||
// which is likely to be "signal: killed".
|
||||
return stdout, stderr, ctx.Err()
|
||||
}
|
||||
if bytes.Contains(stderr, []byte("No such object:")) {
|
||||
// This can happen if a container is deleted between the time we check for its existence and the time we inspect it.
|
||||
return stdout, stderr, nil
|
||||
@@ -527,71 +517,3 @@ func isLoopbackOrUnspecified(ips string) bool {
|
||||
}
|
||||
return nip.IsLoopback() || nip.IsUnspecified()
|
||||
}
|
||||
|
||||
// DetectArchitecture detects the architecture of a container by inspecting its
|
||||
// image.
|
||||
func (dcli *dockerCLI) DetectArchitecture(ctx context.Context, containerName string) (string, error) {
|
||||
// Inspect the container to get the image name, which contains the architecture.
|
||||
stdout, stderr, err := runCmd(ctx, dcli.execer, "docker", "inspect", "--format", "{{.Config.Image}}", containerName)
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("inspect container %s: %w: %s", containerName, err, stderr)
|
||||
}
|
||||
imageName := string(stdout)
|
||||
if imageName == "" {
|
||||
return "", xerrors.Errorf("no image found for container %s", containerName)
|
||||
}
|
||||
|
||||
stdout, stderr, err = runCmd(ctx, dcli.execer, "docker", "inspect", "--format", "{{.Architecture}}", imageName)
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("inspect image %s: %w: %s", imageName, err, stderr)
|
||||
}
|
||||
arch := string(stdout)
|
||||
if arch == "" {
|
||||
return "", xerrors.Errorf("no architecture found for image %s", imageName)
|
||||
}
|
||||
return arch, nil
|
||||
}
|
||||
|
||||
// Copy copies a file from the host to a container.
|
||||
func (dcli *dockerCLI) Copy(ctx context.Context, containerName, src, dst string) error {
|
||||
_, stderr, err := runCmd(ctx, dcli.execer, "docker", "cp", src, containerName+":"+dst)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("copy %s to %s:%s: %w: %s", src, containerName, dst, err, stderr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExecAs executes a command in a container as a specific user.
|
||||
func (dcli *dockerCLI) ExecAs(ctx context.Context, containerName, uid string, args ...string) ([]byte, error) {
|
||||
execArgs := []string{"exec"}
|
||||
if uid != "" {
|
||||
altUID := uid
|
||||
if uid == "root" {
|
||||
// UID 0 is more portable than the name root, so we use that
|
||||
// because some containers may not have a user named "root".
|
||||
altUID = "0"
|
||||
}
|
||||
execArgs = append(execArgs, "--user", altUID)
|
||||
}
|
||||
execArgs = append(execArgs, containerName)
|
||||
execArgs = append(execArgs, args...)
|
||||
|
||||
stdout, stderr, err := runCmd(ctx, dcli.execer, "docker", execArgs...)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("exec in container %s as user %s: %w: %s", containerName, uid, err, stderr)
|
||||
}
|
||||
return stdout, nil
|
||||
}
|
||||
|
||||
// runCmd is a helper function that runs a command with the given
|
||||
// arguments and returns the stdout and stderr output.
|
||||
func runCmd(ctx context.Context, execer agentexec.Execer, cmd string, args ...string) (stdout, stderr []byte, err error) {
|
||||
var stdoutBuf, stderrBuf bytes.Buffer
|
||||
c := execer.CommandContext(ctx, cmd, args...)
|
||||
c.Stdout = &stdoutBuf
|
||||
c.Stderr = &stderrBuf
|
||||
err = c.Run()
|
||||
stdout = bytes.TrimSpace(stdoutBuf.Bytes())
|
||||
stderr = bytes.TrimSpace(stderrBuf.Bytes())
|
||||
return stdout, stderr, err
|
||||
}
|
||||
|
||||
@@ -1,128 +0,0 @@
|
||||
package agentcontainers_test
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/agent/agentcontainers"
|
||||
"github.com/coder/coder/v2/agent/agentexec"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
// TestIntegrationDockerCLI tests the DetectArchitecture, Copy, and
|
||||
// ExecAs methods using a real Docker container. All tests share a
|
||||
// single container to avoid setup overhead.
|
||||
//
|
||||
// Run manually with: CODER_TEST_USE_DOCKER=1 go test ./agent/agentcontainers -run TestIntegrationDockerCLI
|
||||
//
|
||||
//nolint:tparallel,paralleltest // Docker integration tests don't run in parallel to avoid flakiness.
|
||||
func TestIntegrationDockerCLI(t *testing.T) {
	// Opt-in gate: this test needs a working local Docker daemon.
	if ctud, ok := os.LookupEnv("CODER_TEST_USE_DOCKER"); !ok || ctud != "1" {
		t.Skip("Set CODER_TEST_USE_DOCKER=1 to run this test")
	}

	pool, err := dockertest.NewPool("")
	require.NoError(t, err, "Could not connect to docker")

	// Start a simple busybox container for all subtests to share.
	ct, err := pool.RunWithOptions(&dockertest.RunOptions{
		Repository: "busybox",
		Tag:        "latest",
		Cmd:        []string{"sleep", "infinity"},
	}, func(config *docker.HostConfig) {
		config.AutoRemove = true
		config.RestartPolicy = docker.RestartPolicy{Name: "no"}
	})
	require.NoError(t, err, "Could not start test docker container")
	t.Logf("Created container %q", ct.Container.Name)
	t.Cleanup(func() {
		assert.NoError(t, pool.Purge(ct), "Could not purge resource %q", ct.Container.Name)
		t.Logf("Purged container %q", ct.Container.Name)
	})

	// Wait for container to start.
	require.Eventually(t, func() bool {
		ct, ok := pool.ContainerByName(ct.Container.Name)
		return ok && ct.Container.State.Running
	}, testutil.WaitShort, testutil.IntervalSlow, "Container did not start in time")

	dcli := agentcontainers.NewDockerCLI(agentexec.DefaultExecer)
	// Strip the leading "/" from the container name before handing it
	// to the docker CLI.
	containerName := strings.TrimPrefix(ct.Container.Name, "/")

	t.Run("DetectArchitecture", func(t *testing.T) {
		t.Parallel()
		ctx := testutil.Context(t, testutil.WaitShort)

		arch, err := dcli.DetectArchitecture(ctx, containerName)
		require.NoError(t, err, "DetectArchitecture failed")
		require.NotEmpty(t, arch, "arch has no content")
		require.Equal(t, runtime.GOARCH, arch, "architecture does not match runtime, did you run this test with a remote Docker socket?")

		t.Logf("Detected architecture: %s", arch)
	})

	t.Run("Copy", func(t *testing.T) {
		t.Parallel()
		ctx := testutil.Context(t, testutil.WaitShort)

		want := "Help, I'm trapped!"
		tempFile := filepath.Join(t.TempDir(), "test-file.txt")
		err := os.WriteFile(tempFile, []byte(want), 0o600)
		require.NoError(t, err, "create test file failed")

		destPath := "/tmp/copied-file.txt"
		err = dcli.Copy(ctx, containerName, tempFile, destPath)
		require.NoError(t, err, "Copy failed")

		// Read the file back via ExecAs to verify the copy round-trip.
		got, err := dcli.ExecAs(ctx, containerName, "", "cat", destPath)
		require.NoError(t, err, "ExecAs failed after Copy")
		require.Equal(t, want, string(got), "copied file content did not match original")

		t.Logf("Successfully copied file from %s to container %s:%s", tempFile, containerName, destPath)
	})

	t.Run("ExecAs", func(t *testing.T) {
		t.Parallel()
		ctx := testutil.Context(t, testutil.WaitShort)

		// Test ExecAs without specifying user (should use container's default).
		want := "root"
		got, err := dcli.ExecAs(ctx, containerName, "", "whoami")
		require.NoError(t, err, "ExecAs without user should succeed")
		require.Equal(t, want, string(got), "ExecAs without user should output expected string")

		// Test ExecAs with numeric UID (non root).
		want = "1000"
		_, err = dcli.ExecAs(ctx, containerName, want, "whoami")
		require.Error(t, err, "ExecAs with UID 1000 should fail as user does not exist in busybox")
		require.Contains(t, err.Error(), "whoami: unknown uid 1000", "ExecAs with UID 1000 should return 'unknown uid' error")

		// Test ExecAs with root user (should convert "root" to "0", which still outputs root due to passwd).
		want = "root"
		got, err = dcli.ExecAs(ctx, containerName, "root", "whoami")
		require.NoError(t, err, "ExecAs with root user should succeed")
		require.Equal(t, want, string(got), "ExecAs with root user should output expected string")

		// Test ExecAs with numeric UID.
		want = "root"
		got, err = dcli.ExecAs(ctx, containerName, "0", "whoami")
		require.NoError(t, err, "ExecAs with UID 0 should succeed")
		require.Equal(t, want, string(got), "ExecAs with UID 0 should output expected string")

		// Test ExecAs with multiple arguments.
		want = "multiple args test"
		got, err = dcli.ExecAs(ctx, containerName, "", "sh", "-c", "echo '"+want+"'")
		require.NoError(t, err, "ExecAs with multiple arguments should succeed")
		require.Equal(t, want, string(got), "ExecAs with multiple arguments should output expected string")

		t.Logf("Successfully executed commands in container %s", containerName)
	})
}
|
||||
@@ -41,6 +41,7 @@ func TestWrapDockerExec(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt // appease the linter even though this isn't needed anymore
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
actualCmd, actualArgs := wrapDockerExec("my-container", tt.containerUser, tt.cmdArgs[0], tt.cmdArgs[1:]...)
|
||||
@@ -53,6 +54,7 @@ func TestWrapDockerExec(t *testing.T) {
|
||||
func TestConvertDockerPort(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
//nolint:paralleltest // variable recapture no longer required
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
in string
|
||||
@@ -99,6 +101,7 @@ func TestConvertDockerPort(t *testing.T) {
|
||||
expectError: "invalid port",
|
||||
},
|
||||
} {
|
||||
//nolint: paralleltest // variable recapture no longer required
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
actualPort, actualNetwork, actualErr := convertDockerPort(tc.in)
|
||||
@@ -148,6 +151,7 @@ func TestConvertDockerVolume(t *testing.T) {
|
||||
expectError: "invalid volume",
|
||||
},
|
||||
} {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
})
|
||||
|
||||
@@ -78,7 +78,7 @@ func TestIntegrationDocker(t *testing.T) {
|
||||
return ok && ct.Container.State.Running
|
||||
}, testutil.WaitShort, testutil.IntervalSlow, "Container did not start in time")
|
||||
|
||||
dcl := agentcontainers.NewDockerCLI(agentexec.DefaultExecer)
|
||||
dcl := agentcontainers.NewDocker(agentexec.DefaultExecer)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
actual, err := dcl.List(ctx)
|
||||
require.NoError(t, err, "Could not list containers")
|
||||
|
||||
@@ -61,7 +61,7 @@ fi
|
||||
exec 3>&-
|
||||
|
||||
# Format the generated code.
|
||||
go run mvdan.cc/gofumpt@v0.8.0 -w -l "${TMPDIR}/${DEST_FILENAME}"
|
||||
go run mvdan.cc/gofumpt@v0.4.0 -w -l "${TMPDIR}/${DEST_FILENAME}"
|
||||
|
||||
# Add a header so that Go recognizes this as a generated file.
|
||||
if grep -q -- "\[-i extension\]" < <(sed -h 2>&1); then
|
||||
|
||||
@@ -2,10 +2,10 @@ package agentcontainers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"strings"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
@@ -18,25 +18,37 @@ const (
|
||||
// DevcontainerConfigFileLabel is the label that contains the path to
|
||||
// the devcontainer.json configuration file.
|
||||
DevcontainerConfigFileLabel = "devcontainer.config_file"
|
||||
// DevcontainerIsTestRunLabel is set if the devcontainer is part of a test
|
||||
// and should be excluded.
|
||||
DevcontainerIsTestRunLabel = "devcontainer.is_test_run"
|
||||
// The default workspace folder inside the devcontainer.
|
||||
DevcontainerDefaultContainerWorkspaceFolder = "/workspaces"
|
||||
)
|
||||
|
||||
func ExtractDevcontainerScripts(
|
||||
const devcontainerUpScriptTemplate = `
|
||||
if ! which devcontainer > /dev/null 2>&1; then
|
||||
echo "ERROR: Unable to start devcontainer, @devcontainers/cli is not installed."
|
||||
exit 1
|
||||
fi
|
||||
devcontainer up %s
|
||||
`
|
||||
|
||||
// ExtractAndInitializeDevcontainerScripts extracts devcontainer scripts from
|
||||
// the given scripts and devcontainers. The devcontainer scripts are removed
|
||||
// from the returned scripts so that they can be run separately.
|
||||
//
|
||||
// Dev Containers have an inherent dependency on start scripts, since they
|
||||
// initialize the workspace (e.g. git clone, npm install, etc). This is
|
||||
// important if e.g. a Coder module to install @devcontainer/cli is used.
|
||||
func ExtractAndInitializeDevcontainerScripts(
|
||||
logger slog.Logger,
|
||||
expandPath func(string) (string, error),
|
||||
devcontainers []codersdk.WorkspaceAgentDevcontainer,
|
||||
scripts []codersdk.WorkspaceAgentScript,
|
||||
) (filteredScripts []codersdk.WorkspaceAgentScript, devcontainerScripts map[uuid.UUID]codersdk.WorkspaceAgentScript) {
|
||||
devcontainerScripts = make(map[uuid.UUID]codersdk.WorkspaceAgentScript)
|
||||
) (filteredScripts []codersdk.WorkspaceAgentScript, devcontainerScripts []codersdk.WorkspaceAgentScript) {
|
||||
ScriptLoop:
|
||||
for _, script := range scripts {
|
||||
for _, dc := range devcontainers {
|
||||
// The devcontainer scripts match the devcontainer ID for
|
||||
// identification.
|
||||
if script.ID == dc.ID {
|
||||
devcontainerScripts[dc.ID] = script
|
||||
dc = expandDevcontainerPaths(logger, expandPath, dc)
|
||||
devcontainerScripts = append(devcontainerScripts, devcontainerStartupScript(dc, script))
|
||||
continue ScriptLoop
|
||||
}
|
||||
}
|
||||
@@ -47,15 +59,20 @@ ScriptLoop:
|
||||
return filteredScripts, devcontainerScripts
|
||||
}
|
||||
|
||||
// ExpandAllDevcontainerPaths expands all devcontainer paths in the given
|
||||
// devcontainers. This is required by the devcontainer CLI, which requires
|
||||
// absolute paths for the workspace folder and config path.
|
||||
func ExpandAllDevcontainerPaths(logger slog.Logger, expandPath func(string) (string, error), devcontainers []codersdk.WorkspaceAgentDevcontainer) []codersdk.WorkspaceAgentDevcontainer {
|
||||
expanded := make([]codersdk.WorkspaceAgentDevcontainer, 0, len(devcontainers))
|
||||
for _, dc := range devcontainers {
|
||||
expanded = append(expanded, expandDevcontainerPaths(logger, expandPath, dc))
|
||||
func devcontainerStartupScript(dc codersdk.WorkspaceAgentDevcontainer, script codersdk.WorkspaceAgentScript) codersdk.WorkspaceAgentScript {
|
||||
args := []string{
|
||||
"--log-format json",
|
||||
fmt.Sprintf("--workspace-folder %q", dc.WorkspaceFolder),
|
||||
}
|
||||
return expanded
|
||||
if dc.ConfigPath != "" {
|
||||
args = append(args, fmt.Sprintf("--config %q", dc.ConfigPath))
|
||||
}
|
||||
cmd := fmt.Sprintf(devcontainerUpScriptTemplate, strings.Join(args, " "))
|
||||
script.Script = cmd
|
||||
// Disable RunOnStart, scripts have this set so that when devcontainers
|
||||
// have not been enabled, a warning will be surfaced in the agent logs.
|
||||
script.RunOnStart = false
|
||||
return script
|
||||
}
|
||||
|
||||
func expandDevcontainerPaths(logger slog.Logger, expandPath func(string) (string, error), dc codersdk.WorkspaceAgentDevcontainer) codersdk.WorkspaceAgentDevcontainer {
|
||||
|
||||
@@ -0,0 +1,276 @@
|
||||
package agentcontainers_test
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
"github.com/coder/coder/v2/agent/agentcontainers"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
// TestExtractAndInitializeDevcontainerScripts is a table test covering
// script filtering, devcontainer-up script generation, RunOnStart
// disabling, and workspace/config path expansion.
func TestExtractAndInitializeDevcontainerScripts(t *testing.T) {
	t.Parallel()

	scriptIDs := []uuid.UUID{uuid.New(), uuid.New()}
	devcontainerIDs := []uuid.UUID{uuid.New(), uuid.New()}

	type args struct {
		expandPath    func(string) (string, error)
		devcontainers []codersdk.WorkspaceAgentDevcontainer
		scripts       []codersdk.WorkspaceAgentScript
	}
	tests := []struct {
		name                    string
		args                    args
		wantFilteredScripts     []codersdk.WorkspaceAgentScript
		wantDevcontainerScripts []codersdk.WorkspaceAgentScript

		// Expected scripts embed "/"-joined paths, so these cases cannot
		// pass on Windows.
		skipOnWindowsDueToPathSeparator bool
	}{
		{
			name: "no scripts",
			args: args{
				expandPath:    nil,
				devcontainers: nil,
				scripts:       nil,
			},
			wantFilteredScripts:     nil,
			wantDevcontainerScripts: nil,
		},
		{
			// Without devcontainers, every script passes through unchanged.
			name: "no devcontainers",
			args: args{
				expandPath:    nil,
				devcontainers: nil,
				scripts: []codersdk.WorkspaceAgentScript{
					{ID: scriptIDs[0]},
					{ID: scriptIDs[1]},
				},
			},
			wantFilteredScripts: []codersdk.WorkspaceAgentScript{
				{ID: scriptIDs[0]},
				{ID: scriptIDs[1]},
			},
			wantDevcontainerScripts: nil,
		},
		{
			// Scripts whose IDs match no devcontainer are kept as-is.
			name: "no scripts match devcontainers",
			args: args{
				expandPath: nil,
				devcontainers: []codersdk.WorkspaceAgentDevcontainer{
					{ID: devcontainerIDs[0]},
					{ID: devcontainerIDs[1]},
				},
				scripts: []codersdk.WorkspaceAgentScript{
					{ID: scriptIDs[0]},
					{ID: scriptIDs[1]},
				},
			},
			wantFilteredScripts: []codersdk.WorkspaceAgentScript{
				{ID: scriptIDs[0]},
				{ID: scriptIDs[1]},
			},
			wantDevcontainerScripts: nil,
		},
		{
			// Matching scripts are extracted, rewritten into
			// `devcontainer up` invocations, and RunOnStart is forced off.
			name: "scripts match devcontainers and sets RunOnStart=false",
			args: args{
				expandPath: nil,
				devcontainers: []codersdk.WorkspaceAgentDevcontainer{
					{ID: devcontainerIDs[0], WorkspaceFolder: "workspace1"},
					{ID: devcontainerIDs[1], WorkspaceFolder: "workspace2"},
				},
				scripts: []codersdk.WorkspaceAgentScript{
					{ID: scriptIDs[0], RunOnStart: true},
					{ID: scriptIDs[1], RunOnStart: true},
					{ID: devcontainerIDs[0], RunOnStart: true},
					{ID: devcontainerIDs[1], RunOnStart: true},
				},
			},
			wantFilteredScripts: []codersdk.WorkspaceAgentScript{
				{ID: scriptIDs[0], RunOnStart: true},
				{ID: scriptIDs[1], RunOnStart: true},
			},
			wantDevcontainerScripts: []codersdk.WorkspaceAgentScript{
				{
					ID:         devcontainerIDs[0],
					Script:     "devcontainer up --log-format json --workspace-folder \"workspace1\"",
					RunOnStart: false,
				},
				{
					ID:         devcontainerIDs[1],
					Script:     "devcontainer up --log-format json --workspace-folder \"workspace2\"",
					RunOnStart: false,
				},
			},
		},
		{
			// A relative config path is joined onto the workspace folder.
			name: "scripts match devcontainers with config path",
			args: args{
				expandPath: nil,
				devcontainers: []codersdk.WorkspaceAgentDevcontainer{
					{
						ID:              devcontainerIDs[0],
						WorkspaceFolder: "workspace1",
						ConfigPath:      "config1",
					},
					{
						ID:              devcontainerIDs[1],
						WorkspaceFolder: "workspace2",
						ConfigPath:      "config2",
					},
				},
				scripts: []codersdk.WorkspaceAgentScript{
					{ID: devcontainerIDs[0]},
					{ID: devcontainerIDs[1]},
				},
			},
			wantFilteredScripts: []codersdk.WorkspaceAgentScript{},
			wantDevcontainerScripts: []codersdk.WorkspaceAgentScript{
				{
					ID:         devcontainerIDs[0],
					Script:     "devcontainer up --log-format json --workspace-folder \"workspace1\" --config \"workspace1/config1\"",
					RunOnStart: false,
				},
				{
					ID:         devcontainerIDs[1],
					Script:     "devcontainer up --log-format json --workspace-folder \"workspace2\" --config \"workspace2/config2\"",
					RunOnStart: false,
				},
			},
			skipOnWindowsDueToPathSeparator: true,
		},
		{
			// expandPath is applied to both the workspace folder and the
			// config path before they are placed into the script.
			name: "scripts match devcontainers with expand path",
			args: args{
				expandPath: func(s string) (string, error) {
					return "/home/" + s, nil
				},
				devcontainers: []codersdk.WorkspaceAgentDevcontainer{
					{
						ID:              devcontainerIDs[0],
						WorkspaceFolder: "workspace1",
						ConfigPath:      "config1",
					},
					{
						ID:              devcontainerIDs[1],
						WorkspaceFolder: "workspace2",
						ConfigPath:      "config2",
					},
				},
				scripts: []codersdk.WorkspaceAgentScript{
					{ID: devcontainerIDs[0], RunOnStart: true},
					{ID: devcontainerIDs[1], RunOnStart: true},
				},
			},
			wantFilteredScripts: []codersdk.WorkspaceAgentScript{},
			wantDevcontainerScripts: []codersdk.WorkspaceAgentScript{
				{
					ID:         devcontainerIDs[0],
					Script:     "devcontainer up --log-format json --workspace-folder \"/home/workspace1\" --config \"/home/workspace1/config1\"",
					RunOnStart: false,
				},
				{
					ID:         devcontainerIDs[1],
					Script:     "devcontainer up --log-format json --workspace-folder \"/home/workspace2\" --config \"/home/workspace2/config2\"",
					RunOnStart: false,
				},
			},
			skipOnWindowsDueToPathSeparator: true,
		},
		{
			// "~/" config paths are expanded independently of the
			// workspace folder; absolute paths are left alone.
			name: "expand config path when ~",
			args: args{
				expandPath: func(s string) (string, error) {
					s = strings.Replace(s, "~/", "", 1)
					if filepath.IsAbs(s) {
						return s, nil
					}
					return "/home/" + s, nil
				},
				devcontainers: []codersdk.WorkspaceAgentDevcontainer{
					{
						ID:              devcontainerIDs[0],
						WorkspaceFolder: "workspace1",
						ConfigPath:      "~/config1",
					},
					{
						ID:              devcontainerIDs[1],
						WorkspaceFolder: "workspace2",
						ConfigPath:      "/config2",
					},
				},
				scripts: []codersdk.WorkspaceAgentScript{
					{ID: devcontainerIDs[0], RunOnStart: true},
					{ID: devcontainerIDs[1], RunOnStart: true},
				},
			},
			wantFilteredScripts: []codersdk.WorkspaceAgentScript{},
			wantDevcontainerScripts: []codersdk.WorkspaceAgentScript{
				{
					ID:         devcontainerIDs[0],
					Script:     "devcontainer up --log-format json --workspace-folder \"/home/workspace1\" --config \"/home/config1\"",
					RunOnStart: false,
				},
				{
					ID:         devcontainerIDs[1],
					Script:     "devcontainer up --log-format json --workspace-folder \"/home/workspace2\" --config \"/config2\"",
					RunOnStart: false,
				},
			},
			skipOnWindowsDueToPathSeparator: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			if tt.skipOnWindowsDueToPathSeparator && filepath.Separator == '\\' {
				t.Skip("Skipping test on Windows due to path separator difference.")
			}

			logger := slogtest.Make(t, nil)
			// Default to the identity expansion when the case supplies none.
			if tt.args.expandPath == nil {
				tt.args.expandPath = func(s string) (string, error) {
					return s, nil
				}
			}
			gotFilteredScripts, gotDevcontainerScripts := agentcontainers.ExtractAndInitializeDevcontainerScripts(
				logger,
				tt.args.expandPath,
				tt.args.devcontainers,
				tt.args.scripts,
			)

			if diff := cmp.Diff(tt.wantFilteredScripts, gotFilteredScripts, cmpopts.EquateEmpty()); diff != "" {
				t.Errorf("ExtractAndInitializeDevcontainerScripts() gotFilteredScripts mismatch (-want +got):\n%s", diff)
			}

			// Preprocess the devcontainer scripts to remove scripting part;
			// only the `devcontainer up` line is compared against the want.
			for i := range gotDevcontainerScripts {
				gotDevcontainerScripts[i].Script = textGrep("devcontainer up", gotDevcontainerScripts[i].Script)
				require.NotEmpty(t, gotDevcontainerScripts[i].Script, "devcontainer up script not found")
			}
			if diff := cmp.Diff(tt.wantDevcontainerScripts, gotDevcontainerScripts); diff != "" {
				t.Errorf("ExtractAndInitializeDevcontainerScripts() gotDevcontainerScripts mismatch (-want +got):\n%s", diff)
			}
		})
	}
}
|
||||
|
||||
// textGrep returns matching lines from multiline string. Lines that
// contain want are kept in order and re-joined with "\n"; when nothing
// matches, the empty string is returned.
func textGrep(want, got string) (filtered string) {
	var matched []string
	for _, candidate := range strings.Split(got, "\n") {
		if !strings.Contains(candidate, want) {
			continue
		}
		matched = append(matched, candidate)
	}
	return strings.Join(matched, "\n")
}
|
||||
@@ -6,208 +6,39 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"github.com/coder/coder/v2/agent/agentexec"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
// DevcontainerConfig is a wrapper around the output from `read-configuration`.
// Unfortunately we cannot make use of `dcspec` as the output doesn't appear to
// match.
type DevcontainerConfig struct {
	// MergedConfiguration is the "mergedConfiguration" section of the CLI
	// output (presumably the base config combined with features — confirm
	// against the devcontainer CLI docs).
	MergedConfiguration DevcontainerMergedConfiguration `json:"mergedConfiguration"`
	// Configuration is the "configuration" section of the CLI output.
	Configuration DevcontainerConfiguration `json:"configuration"`
	// Workspace is the "workspace" section of the CLI output.
	Workspace DevcontainerWorkspace `json:"workspace"`
}

// DevcontainerMergedConfiguration models the merged-configuration section
// of the `read-configuration` output.
type DevcontainerMergedConfiguration struct {
	Customizations DevcontainerMergedCustomizations `json:"customizations,omitempty"`
	Features       DevcontainerFeatures             `json:"features,omitempty"`
}

// DevcontainerMergedCustomizations holds the merged customizations; note
// that the merged form carries a slice of Coder customizations, unlike
// the single-value unmerged DevcontainerCustomizations.
type DevcontainerMergedCustomizations struct {
	Coder []CoderCustomization `json:"coder,omitempty"`
}
|
||||
|
||||
// DevcontainerFeatures maps a feature identifier (e.g.
// "ghcr.io/coder/devcontainer-features/code-server:1") to its raw
// configuration value as decoded from the CLI's JSON output.
type DevcontainerFeatures map[string]any

// OptionsAsEnvs converts the DevcontainerFeatures into a list of
// environment variables that can be used to set feature options.
// The format is FEATURE_<FEATURE_NAME>_OPTION_<OPTION_NAME>=<value>.
// For example, if the feature is:
//
//	"ghcr.io/coder/devcontainer-features/code-server:1": {
//	    "port": 9090,
//	}
//
// It will produce:
//
//	FEATURE_CODE_SERVER_OPTION_PORT=9090
//
// Note that the feature name is derived from the last part of the key,
// so "ghcr.io/coder/devcontainer-features/code-server:1" becomes
// "CODE_SERVER". The version part (e.g. ":1") is removed, and dashes in
// the feature and option names are replaced with underscores.
func (f DevcontainerFeatures) OptionsAsEnvs() []string {
	var env []string
	for name, v := range f {
		opts, ok := v.(map[string]any)
		if !ok {
			// Features without an options map contribute no variables.
			continue
		}
		// Take the last part of the key as the feature name/path.
		name = name[strings.LastIndex(name, "/")+1:]
		// Remove ":" and anything following it (the version suffix).
		if idx := strings.Index(name, ":"); idx != -1 {
			name = name[:idx]
		}
		feature := strings.ToUpper(strings.ReplaceAll(name, "-", "_"))
		for opt, val := range opts {
			opt = strings.ToUpper(strings.ReplaceAll(opt, "-", "_"))
			// Format the value directly with %v; the previous nested
			// fmt.Sprintf("%v", val) inside an outer %s was redundant.
			env = append(env, fmt.Sprintf("FEATURE_%s_OPTION_%s=%v", feature, opt, val))
		}
	}
	// Sort for deterministic output; map iteration order is random.
	slices.Sort(env)
	return env
}
|
||||
|
||||
// DevcontainerConfiguration models the unmerged "configuration" section
// of the `read-configuration` output.
type DevcontainerConfiguration struct {
	Customizations DevcontainerCustomizations `json:"customizations,omitempty"`
}

// DevcontainerCustomizations holds the Coder customization from a single
// devcontainer configuration (single value, unlike the merged form).
type DevcontainerCustomizations struct {
	Coder CoderCustomization `json:"coder,omitempty"`
}

// CoderCustomization is the `customizations.coder` payload of a
// devcontainer configuration. Field semantics are consumed elsewhere in
// the package; the JSON tags below are the contract with devcontainer.json
// authors.
type CoderCustomization struct {
	DisplayApps map[codersdk.DisplayApp]bool `json:"displayApps,omitempty"`
	Apps        []SubAgentApp                `json:"apps,omitempty"`
	Name        string                       `json:"name,omitempty"`
	Ignore      bool                         `json:"ignore,omitempty"`
	AutoStart   bool                         `json:"autoStart,omitempty"`
}

// DevcontainerWorkspace models the "workspace" section of the
// `read-configuration` output.
type DevcontainerWorkspace struct {
	// WorkspaceFolder is the workspace folder path reported by the CLI.
	WorkspaceFolder string `json:"workspaceFolder"`
}
|
||||
|
||||
// DevcontainerCLI is an interface for the devcontainer CLI.
type DevcontainerCLI interface {
	// Up runs `devcontainer up` for the workspace folder and returns the
	// ID of the resulting container.
	Up(ctx context.Context, workspaceFolder, configPath string, opts ...DevcontainerCLIUpOptions) (id string, err error)
	// Exec runs cmd with cmdArgs inside the devcontainer via
	// `devcontainer exec`.
	Exec(ctx context.Context, workspaceFolder, configPath string, cmd string, cmdArgs []string, opts ...DevcontainerCLIExecOptions) error
	// ReadConfig runs `devcontainer read-configuration` with the extra
	// env applied to the CLI process and returns the parsed config.
	ReadConfig(ctx context.Context, workspaceFolder, configPath string, env []string, opts ...DevcontainerCLIReadConfigOptions) (DevcontainerConfig, error)
}
|
||||
|
||||
// DevcontainerCLIUpOptions are options for the devcontainer CLI Up
|
||||
// DevcontainerCLIUpOptions are options for the devcontainer CLI up
|
||||
// command.
|
||||
type DevcontainerCLIUpOptions func(*DevcontainerCLIUpConfig)
|
||||
|
||||
type DevcontainerCLIUpConfig struct {
|
||||
Args []string // Additional arguments for the Up command.
|
||||
Stdout io.Writer
|
||||
Stderr io.Writer
|
||||
}
|
||||
type DevcontainerCLIUpOptions func(*devcontainerCLIUpConfig)
|
||||
|
||||
// WithRemoveExistingContainer is an option to remove the existing
|
||||
// container.
|
||||
func WithRemoveExistingContainer() DevcontainerCLIUpOptions {
|
||||
return func(o *DevcontainerCLIUpConfig) {
|
||||
o.Args = append(o.Args, "--remove-existing-container")
|
||||
return func(o *devcontainerCLIUpConfig) {
|
||||
o.removeExistingContainer = true
|
||||
}
|
||||
}
|
||||
|
||||
// WithUpOutput sets additional stdout and stderr writers for logs
|
||||
// during Up operations.
|
||||
func WithUpOutput(stdout, stderr io.Writer) DevcontainerCLIUpOptions {
|
||||
return func(o *DevcontainerCLIUpConfig) {
|
||||
o.Stdout = stdout
|
||||
o.Stderr = stderr
|
||||
type devcontainerCLIUpConfig struct {
|
||||
removeExistingContainer bool
|
||||
}
|
||||
|
||||
func applyDevcontainerCLIUpOptions(opts []DevcontainerCLIUpOptions) devcontainerCLIUpConfig {
|
||||
conf := devcontainerCLIUpConfig{
|
||||
removeExistingContainer: false,
|
||||
}
|
||||
}
|
||||
|
||||
// DevcontainerCLIExecOptions are options for the devcontainer CLI Exec
// command.
type DevcontainerCLIExecOptions func(*DevcontainerCLIExecConfig)

// DevcontainerCLIExecConfig is the resolved configuration produced by
// applying DevcontainerCLIExecOptions; see applyDevcontainerCLIExecOptions
// for the defaults.
type DevcontainerCLIExecConfig struct {
	Args   []string  // Additional arguments for the Exec command.
	Stdout io.Writer // Extra writer receiving the command's stdout.
	Stderr io.Writer // Extra writer receiving the command's stderr.
}

// WithExecOutput sets additional stdout and stderr writers for logs
// during Exec operations.
func WithExecOutput(stdout, stderr io.Writer) DevcontainerCLIExecOptions {
	return func(o *DevcontainerCLIExecConfig) {
		o.Stdout = stdout
		o.Stderr = stderr
	}
}

// WithExecContainerID sets the container ID to target a specific
// container via the CLI's --container-id flag.
func WithExecContainerID(id string) DevcontainerCLIExecOptions {
	return func(o *DevcontainerCLIExecConfig) {
		o.Args = append(o.Args, "--container-id", id)
	}
}

// WithRemoteEnv sets environment variables for the Exec command; each
// entry is passed to the CLI as a separate --remote-env flag.
func WithRemoteEnv(env ...string) DevcontainerCLIExecOptions {
	return func(o *DevcontainerCLIExecConfig) {
		for _, e := range env {
			o.Args = append(o.Args, "--remote-env", e)
		}
	}
}
|
||||
|
||||
// DevcontainerCLIReadConfigOptions are options for the devcontainer CLI
// ReadConfig command.
type DevcontainerCLIReadConfigOptions func(*devcontainerCLIReadConfigConfig)

// devcontainerCLIReadConfigConfig is the resolved configuration produced
// by applying DevcontainerCLIReadConfigOptions.
type devcontainerCLIReadConfigConfig struct {
	stdout io.Writer // Extra writer receiving the command's stdout.
	stderr io.Writer // Extra writer receiving the command's stderr.
}

// WithReadConfigOutput sets additional stdout and stderr writers for logs
// during ReadConfig operations.
func WithReadConfigOutput(stdout, stderr io.Writer) DevcontainerCLIReadConfigOptions {
	return func(o *devcontainerCLIReadConfigConfig) {
		o.stdout = stdout
		o.stderr = stderr
	}
}
|
||||
|
||||
func applyDevcontainerCLIUpOptions(opts []DevcontainerCLIUpOptions) DevcontainerCLIUpConfig {
|
||||
conf := DevcontainerCLIUpConfig{Stdout: io.Discard, Stderr: io.Discard}
|
||||
for _, opt := range opts {
|
||||
if opt != nil {
|
||||
opt(&conf)
|
||||
}
|
||||
}
|
||||
return conf
|
||||
}
|
||||
|
||||
func applyDevcontainerCLIExecOptions(opts []DevcontainerCLIExecOptions) DevcontainerCLIExecConfig {
|
||||
conf := DevcontainerCLIExecConfig{Stdout: io.Discard, Stderr: io.Discard}
|
||||
for _, opt := range opts {
|
||||
if opt != nil {
|
||||
opt(&conf)
|
||||
}
|
||||
}
|
||||
return conf
|
||||
}
|
||||
|
||||
func applyDevcontainerCLIReadConfigOptions(opts []DevcontainerCLIReadConfigOptions) devcontainerCLIReadConfigConfig {
|
||||
conf := devcontainerCLIReadConfigConfig{stdout: io.Discard, stderr: io.Discard}
|
||||
for _, opt := range opts {
|
||||
if opt != nil {
|
||||
opt(&conf)
|
||||
@@ -232,7 +63,7 @@ func NewDevcontainerCLI(logger slog.Logger, execer agentexec.Execer) Devcontaine
|
||||
|
||||
func (d *devcontainerCLI) Up(ctx context.Context, workspaceFolder, configPath string, opts ...DevcontainerCLIUpOptions) (string, error) {
|
||||
conf := applyDevcontainerCLIUpOptions(opts)
|
||||
logger := d.logger.With(slog.F("workspace_folder", workspaceFolder), slog.F("config_path", configPath))
|
||||
logger := d.logger.With(slog.F("workspace_folder", workspaceFolder), slog.F("config_path", configPath), slog.F("recreate", conf.removeExistingContainer))
|
||||
|
||||
args := []string{
|
||||
"up",
|
||||
@@ -242,35 +73,23 @@ func (d *devcontainerCLI) Up(ctx context.Context, workspaceFolder, configPath st
|
||||
if configPath != "" {
|
||||
args = append(args, "--config", configPath)
|
||||
}
|
||||
args = append(args, conf.Args...)
|
||||
if conf.removeExistingContainer {
|
||||
args = append(args, "--remove-existing-container")
|
||||
}
|
||||
cmd := d.execer.CommandContext(ctx, "devcontainer", args...)
|
||||
|
||||
// Capture stdout for parsing and stream logs for both default and provided writers.
|
||||
var stdoutBuf bytes.Buffer
|
||||
cmd.Stdout = io.MultiWriter(
|
||||
&stdoutBuf,
|
||||
&devcontainerCLILogWriter{
|
||||
ctx: ctx,
|
||||
logger: logger.With(slog.F("stdout", true)),
|
||||
writer: conf.Stdout,
|
||||
},
|
||||
)
|
||||
// Stream stderr logs and provided writer if any.
|
||||
cmd.Stderr = &devcontainerCLILogWriter{
|
||||
ctx: ctx,
|
||||
logger: logger.With(slog.F("stderr", true)),
|
||||
writer: conf.Stderr,
|
||||
}
|
||||
var stdout bytes.Buffer
|
||||
cmd.Stdout = io.MultiWriter(&stdout, &devcontainerCLILogWriter{ctx: ctx, logger: logger.With(slog.F("stdout", true))})
|
||||
cmd.Stderr = &devcontainerCLILogWriter{ctx: ctx, logger: logger.With(slog.F("stderr", true))}
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
_, err2 := parseDevcontainerCLILastLine[devcontainerCLIResult](ctx, logger, stdoutBuf.Bytes())
|
||||
if err2 != nil {
|
||||
if _, err2 := parseDevcontainerCLILastLine(ctx, logger, stdout.Bytes()); err2 != nil {
|
||||
err = errors.Join(err, err2)
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
result, err := parseDevcontainerCLILastLine[devcontainerCLIResult](ctx, logger, stdoutBuf.Bytes())
|
||||
result, err := parseDevcontainerCLILastLine(ctx, logger, stdout.Bytes())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -278,92 +97,9 @@ func (d *devcontainerCLI) Up(ctx context.Context, workspaceFolder, configPath st
|
||||
return result.ContainerID, nil
|
||||
}
|
||||
|
||||
func (d *devcontainerCLI) Exec(ctx context.Context, workspaceFolder, configPath string, cmd string, cmdArgs []string, opts ...DevcontainerCLIExecOptions) error {
|
||||
conf := applyDevcontainerCLIExecOptions(opts)
|
||||
logger := d.logger.With(slog.F("workspace_folder", workspaceFolder), slog.F("config_path", configPath))
|
||||
|
||||
args := []string{"exec"}
|
||||
// For now, always set workspace folder even if --container-id is provided.
|
||||
// Otherwise the environment of exec will be incomplete, like `pwd` will be
|
||||
// /home/coder instead of /workspaces/coder. The downside is that the local
|
||||
// `devcontainer.json` config will overwrite settings serialized in the
|
||||
// container label.
|
||||
if workspaceFolder != "" {
|
||||
args = append(args, "--workspace-folder", workspaceFolder)
|
||||
}
|
||||
if configPath != "" {
|
||||
args = append(args, "--config", configPath)
|
||||
}
|
||||
args = append(args, conf.Args...)
|
||||
args = append(args, cmd)
|
||||
args = append(args, cmdArgs...)
|
||||
c := d.execer.CommandContext(ctx, "devcontainer", args...)
|
||||
|
||||
c.Stdout = io.MultiWriter(conf.Stdout, &devcontainerCLILogWriter{
|
||||
ctx: ctx,
|
||||
logger: logger.With(slog.F("stdout", true)),
|
||||
writer: io.Discard,
|
||||
})
|
||||
c.Stderr = io.MultiWriter(conf.Stderr, &devcontainerCLILogWriter{
|
||||
ctx: ctx,
|
||||
logger: logger.With(slog.F("stderr", true)),
|
||||
writer: io.Discard,
|
||||
})
|
||||
|
||||
if err := c.Run(); err != nil {
|
||||
return xerrors.Errorf("devcontainer exec failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *devcontainerCLI) ReadConfig(ctx context.Context, workspaceFolder, configPath string, env []string, opts ...DevcontainerCLIReadConfigOptions) (DevcontainerConfig, error) {
|
||||
conf := applyDevcontainerCLIReadConfigOptions(opts)
|
||||
logger := d.logger.With(slog.F("workspace_folder", workspaceFolder), slog.F("config_path", configPath))
|
||||
|
||||
args := []string{"read-configuration", "--include-merged-configuration"}
|
||||
if workspaceFolder != "" {
|
||||
args = append(args, "--workspace-folder", workspaceFolder)
|
||||
}
|
||||
if configPath != "" {
|
||||
args = append(args, "--config", configPath)
|
||||
}
|
||||
|
||||
c := d.execer.CommandContext(ctx, "devcontainer", args...)
|
||||
c.Env = append(c.Env, env...)
|
||||
|
||||
var stdoutBuf bytes.Buffer
|
||||
c.Stdout = io.MultiWriter(
|
||||
&stdoutBuf,
|
||||
&devcontainerCLILogWriter{
|
||||
ctx: ctx,
|
||||
logger: logger.With(slog.F("stdout", true)),
|
||||
writer: conf.stdout,
|
||||
},
|
||||
)
|
||||
c.Stderr = &devcontainerCLILogWriter{
|
||||
ctx: ctx,
|
||||
logger: logger.With(slog.F("stderr", true)),
|
||||
writer: conf.stderr,
|
||||
}
|
||||
|
||||
if err := c.Run(); err != nil {
|
||||
return DevcontainerConfig{}, xerrors.Errorf("devcontainer read-configuration failed: %w", err)
|
||||
}
|
||||
|
||||
config, err := parseDevcontainerCLILastLine[DevcontainerConfig](ctx, logger, stdoutBuf.Bytes())
|
||||
if err != nil {
|
||||
return DevcontainerConfig{}, err
|
||||
}
|
||||
|
||||
return config, nil
|
||||
}
|
||||
|
||||
// parseDevcontainerCLILastLine parses the last line of the devcontainer CLI output
|
||||
// which is a JSON object.
|
||||
func parseDevcontainerCLILastLine[T any](ctx context.Context, logger slog.Logger, p []byte) (T, error) {
|
||||
var result T
|
||||
|
||||
func parseDevcontainerCLILastLine(ctx context.Context, logger slog.Logger, p []byte) (result devcontainerCLIResult, err error) {
|
||||
s := bufio.NewScanner(bytes.NewReader(p))
|
||||
var lastLine []byte
|
||||
for s.Scan() {
|
||||
@@ -373,19 +109,19 @@ func parseDevcontainerCLILastLine[T any](ctx context.Context, logger slog.Logger
|
||||
}
|
||||
lastLine = b
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
if err = s.Err(); err != nil {
|
||||
return result, err
|
||||
}
|
||||
if len(lastLine) == 0 || lastLine[0] != '{' {
|
||||
logger.Error(ctx, "devcontainer result is not json", slog.F("result", string(lastLine)))
|
||||
return result, xerrors.Errorf("devcontainer result is not json: %q", string(lastLine))
|
||||
}
|
||||
if err := json.Unmarshal(lastLine, &result); err != nil {
|
||||
if err = json.Unmarshal(lastLine, &result); err != nil {
|
||||
logger.Error(ctx, "parse devcontainer result failed", slog.Error(err), slog.F("result", string(lastLine)))
|
||||
return result, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
return result, result.Err()
|
||||
}
|
||||
|
||||
// devcontainerCLIResult is the result of the devcontainer CLI command.
|
||||
@@ -404,18 +140,6 @@ type devcontainerCLIResult struct {
|
||||
Description string `json:"description"`
|
||||
}
|
||||
|
||||
func (r *devcontainerCLIResult) UnmarshalJSON(data []byte) error {
|
||||
type wrapperResult devcontainerCLIResult
|
||||
|
||||
var wrappedResult wrapperResult
|
||||
if err := json.Unmarshal(data, &wrappedResult); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*r = devcontainerCLIResult(wrappedResult)
|
||||
return r.Err()
|
||||
}
|
||||
|
||||
func (r devcontainerCLIResult) Err() error {
|
||||
if r.Outcome == "success" {
|
||||
return nil
|
||||
@@ -438,7 +162,6 @@ type devcontainerCLIJSONLogLine struct {
|
||||
type devcontainerCLILogWriter struct {
|
||||
ctx context.Context
|
||||
logger slog.Logger
|
||||
writer io.Writer
|
||||
}
|
||||
|
||||
func (l *devcontainerCLILogWriter) Write(p []byte) (n int, err error) {
|
||||
@@ -459,20 +182,8 @@ func (l *devcontainerCLILogWriter) Write(p []byte) (n int, err error) {
|
||||
}
|
||||
if logLine.Level >= 3 {
|
||||
l.logger.Info(l.ctx, "@devcontainer/cli", slog.F("line", string(line)))
|
||||
_, _ = l.writer.Write([]byte(strings.TrimSpace(logLine.Text) + "\n"))
|
||||
continue
|
||||
}
|
||||
// If we've successfully parsed the final log line, it will successfully parse
|
||||
// but will not fill out any of the fields for `logLine`. In this scenario we
|
||||
// assume it is the final log line, unmarshal it as that, and check if the
|
||||
// outcome is a non-empty string.
|
||||
if logLine.Level == 0 {
|
||||
var lastLine devcontainerCLIResult
|
||||
if err := json.Unmarshal(line, &lastLine); err == nil && lastLine.Outcome != "" {
|
||||
_, _ = l.writer.Write(line)
|
||||
_, _ = l.writer.Write([]byte{'\n'})
|
||||
}
|
||||
}
|
||||
l.logger.Debug(l.ctx, "@devcontainer/cli", slog.F("line", string(line)))
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
|
||||
@@ -3,7 +3,6 @@ package agentcontainers_test
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
@@ -11,11 +10,9 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -25,7 +22,6 @@ import (
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
"github.com/coder/coder/v2/agent/agentcontainers"
|
||||
"github.com/coder/coder/v2/agent/agentexec"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/pty"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
@@ -130,291 +126,6 @@ func TestDevcontainerCLI_ArgsAndParsing(t *testing.T) {
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Exec", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
workspaceFolder string
|
||||
configPath string
|
||||
cmd string
|
||||
cmdArgs []string
|
||||
opts []agentcontainers.DevcontainerCLIExecOptions
|
||||
wantArgs string
|
||||
wantError bool
|
||||
}{
|
||||
{
|
||||
name: "simple command",
|
||||
workspaceFolder: "/test/workspace",
|
||||
configPath: "",
|
||||
cmd: "echo",
|
||||
cmdArgs: []string{"hello"},
|
||||
wantArgs: "exec --workspace-folder /test/workspace echo hello",
|
||||
wantError: false,
|
||||
},
|
||||
{
|
||||
name: "command with multiple args",
|
||||
workspaceFolder: "/test/workspace",
|
||||
configPath: "/test/config.json",
|
||||
cmd: "ls",
|
||||
cmdArgs: []string{"-la", "/workspace"},
|
||||
wantArgs: "exec --workspace-folder /test/workspace --config /test/config.json ls -la /workspace",
|
||||
wantError: false,
|
||||
},
|
||||
{
|
||||
name: "empty command args",
|
||||
workspaceFolder: "/test/workspace",
|
||||
configPath: "",
|
||||
cmd: "bash",
|
||||
cmdArgs: nil,
|
||||
wantArgs: "exec --workspace-folder /test/workspace bash",
|
||||
wantError: false,
|
||||
},
|
||||
{
|
||||
name: "workspace not found",
|
||||
workspaceFolder: "/nonexistent/workspace",
|
||||
configPath: "",
|
||||
cmd: "echo",
|
||||
cmdArgs: []string{"test"},
|
||||
wantArgs: "exec --workspace-folder /nonexistent/workspace echo test",
|
||||
wantError: true,
|
||||
},
|
||||
{
|
||||
name: "with container ID",
|
||||
workspaceFolder: "/test/workspace",
|
||||
configPath: "",
|
||||
cmd: "echo",
|
||||
cmdArgs: []string{"hello"},
|
||||
opts: []agentcontainers.DevcontainerCLIExecOptions{agentcontainers.WithExecContainerID("test-container-123")},
|
||||
wantArgs: "exec --workspace-folder /test/workspace --container-id test-container-123 echo hello",
|
||||
wantError: false,
|
||||
},
|
||||
{
|
||||
name: "with container ID and config",
|
||||
workspaceFolder: "/test/workspace",
|
||||
configPath: "/test/config.json",
|
||||
cmd: "bash",
|
||||
cmdArgs: []string{"-c", "ls -la"},
|
||||
opts: []agentcontainers.DevcontainerCLIExecOptions{agentcontainers.WithExecContainerID("my-container")},
|
||||
wantArgs: "exec --workspace-folder /test/workspace --config /test/config.json --container-id my-container bash -c ls -la",
|
||||
wantError: false,
|
||||
},
|
||||
{
|
||||
name: "with container ID and output capture",
|
||||
workspaceFolder: "/test/workspace",
|
||||
configPath: "",
|
||||
cmd: "cat",
|
||||
cmdArgs: []string{"/etc/hostname"},
|
||||
opts: []agentcontainers.DevcontainerCLIExecOptions{
|
||||
agentcontainers.WithExecContainerID("test-container-789"),
|
||||
},
|
||||
wantArgs: "exec --workspace-folder /test/workspace --container-id test-container-789 cat /etc/hostname",
|
||||
wantError: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
|
||||
testExecer := &testDevcontainerExecer{
|
||||
testExePath: testExePath,
|
||||
wantArgs: tt.wantArgs,
|
||||
wantError: tt.wantError,
|
||||
logFile: "", // Exec doesn't need log file parsing
|
||||
}
|
||||
|
||||
dccli := agentcontainers.NewDevcontainerCLI(logger, testExecer)
|
||||
err := dccli.Exec(ctx, tt.workspaceFolder, tt.configPath, tt.cmd, tt.cmdArgs, tt.opts...)
|
||||
if tt.wantError {
|
||||
assert.Error(t, err, "want error")
|
||||
} else {
|
||||
assert.NoError(t, err, "want no error")
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("ReadConfig", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
logFile string
|
||||
workspaceFolder string
|
||||
configPath string
|
||||
opts []agentcontainers.DevcontainerCLIReadConfigOptions
|
||||
wantArgs string
|
||||
wantError bool
|
||||
wantConfig agentcontainers.DevcontainerConfig
|
||||
}{
|
||||
{
|
||||
name: "WithCoderCustomization",
|
||||
logFile: "read-config-with-coder-customization.log",
|
||||
workspaceFolder: "/test/workspace",
|
||||
configPath: "",
|
||||
wantArgs: "read-configuration --include-merged-configuration --workspace-folder /test/workspace",
|
||||
wantError: false,
|
||||
wantConfig: agentcontainers.DevcontainerConfig{
|
||||
MergedConfiguration: agentcontainers.DevcontainerMergedConfiguration{
|
||||
Customizations: agentcontainers.DevcontainerMergedCustomizations{
|
||||
Coder: []agentcontainers.CoderCustomization{
|
||||
{
|
||||
DisplayApps: map[codersdk.DisplayApp]bool{
|
||||
codersdk.DisplayAppVSCodeDesktop: true,
|
||||
codersdk.DisplayAppWebTerminal: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
DisplayApps: map[codersdk.DisplayApp]bool{
|
||||
codersdk.DisplayAppVSCodeInsiders: true,
|
||||
codersdk.DisplayAppWebTerminal: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "WithoutCoderCustomization",
|
||||
logFile: "read-config-without-coder-customization.log",
|
||||
workspaceFolder: "/test/workspace",
|
||||
configPath: "/test/config.json",
|
||||
wantArgs: "read-configuration --include-merged-configuration --workspace-folder /test/workspace --config /test/config.json",
|
||||
wantError: false,
|
||||
wantConfig: agentcontainers.DevcontainerConfig{
|
||||
MergedConfiguration: agentcontainers.DevcontainerMergedConfiguration{
|
||||
Customizations: agentcontainers.DevcontainerMergedCustomizations{
|
||||
Coder: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "FileNotFound",
|
||||
logFile: "read-config-error-not-found.log",
|
||||
workspaceFolder: "/nonexistent/workspace",
|
||||
configPath: "",
|
||||
wantArgs: "read-configuration --include-merged-configuration --workspace-folder /nonexistent/workspace",
|
||||
wantError: true,
|
||||
wantConfig: agentcontainers.DevcontainerConfig{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
|
||||
testExecer := &testDevcontainerExecer{
|
||||
testExePath: testExePath,
|
||||
wantArgs: tt.wantArgs,
|
||||
wantError: tt.wantError,
|
||||
logFile: filepath.Join("testdata", "devcontainercli", "readconfig", tt.logFile),
|
||||
}
|
||||
|
||||
dccli := agentcontainers.NewDevcontainerCLI(logger, testExecer)
|
||||
config, err := dccli.ReadConfig(ctx, tt.workspaceFolder, tt.configPath, []string{}, tt.opts...)
|
||||
if tt.wantError {
|
||||
assert.Error(t, err, "want error")
|
||||
assert.Equal(t, agentcontainers.DevcontainerConfig{}, config, "expected empty config on error")
|
||||
} else {
|
||||
assert.NoError(t, err, "want no error")
|
||||
assert.Equal(t, tt.wantConfig, config, "expected config to match")
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestDevcontainerCLI_WithOutput tests that WithUpOutput and WithExecOutput capture CLI
|
||||
// logs to provided writers.
|
||||
func TestDevcontainerCLI_WithOutput(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Prepare test executable and logger.
|
||||
testExePath, err := os.Executable()
|
||||
require.NoError(t, err, "get test executable path")
|
||||
|
||||
t.Run("Up", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Windows uses CRLF line endings, golden file is LF")
|
||||
}
|
||||
|
||||
// Buffers to capture stdout and stderr.
|
||||
outBuf := &bytes.Buffer{}
|
||||
errBuf := &bytes.Buffer{}
|
||||
|
||||
// Simulate CLI execution with a standard up.log file.
|
||||
wantArgs := "up --log-format json --workspace-folder /test/workspace"
|
||||
testExecer := &testDevcontainerExecer{
|
||||
testExePath: testExePath,
|
||||
wantArgs: wantArgs,
|
||||
wantError: false,
|
||||
logFile: filepath.Join("testdata", "devcontainercli", "parse", "up.log"),
|
||||
}
|
||||
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
|
||||
dccli := agentcontainers.NewDevcontainerCLI(logger, testExecer)
|
||||
|
||||
// Call Up with WithUpOutput to capture CLI logs.
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
containerID, err := dccli.Up(ctx, "/test/workspace", "", agentcontainers.WithUpOutput(outBuf, errBuf))
|
||||
require.NoError(t, err, "Up should succeed")
|
||||
require.NotEmpty(t, containerID, "expected non-empty container ID")
|
||||
|
||||
// Read expected log content.
|
||||
expLog, err := os.ReadFile(filepath.Join("testdata", "devcontainercli", "parse", "up.golden"))
|
||||
require.NoError(t, err, "reading expected log file")
|
||||
|
||||
// Verify stdout buffer contains the CLI logs and stderr is empty.
|
||||
assert.Equal(t, string(expLog), outBuf.String(), "stdout buffer should match CLI logs")
|
||||
assert.Empty(t, errBuf.String(), "stderr buffer should be empty on success")
|
||||
})
|
||||
|
||||
t.Run("Exec", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
logFile := filepath.Join(t.TempDir(), "exec.log")
|
||||
f, err := os.Create(logFile)
|
||||
require.NoError(t, err, "create exec log file")
|
||||
_, err = f.WriteString("exec command log\n")
|
||||
require.NoError(t, err, "write to exec log file")
|
||||
err = f.Close()
|
||||
require.NoError(t, err, "close exec log file")
|
||||
|
||||
// Buffers to capture stdout and stderr.
|
||||
outBuf := &bytes.Buffer{}
|
||||
errBuf := &bytes.Buffer{}
|
||||
|
||||
// Simulate CLI execution for exec command with container ID.
|
||||
wantArgs := "exec --workspace-folder /test/workspace --container-id test-container-456 echo hello"
|
||||
testExecer := &testDevcontainerExecer{
|
||||
testExePath: testExePath,
|
||||
wantArgs: wantArgs,
|
||||
wantError: false,
|
||||
logFile: logFile,
|
||||
}
|
||||
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
|
||||
dccli := agentcontainers.NewDevcontainerCLI(logger, testExecer)
|
||||
|
||||
// Call Exec with WithExecOutput and WithContainerID to capture any command output.
|
||||
ctx := testutil.Context(t, testutil.WaitMedium)
|
||||
err = dccli.Exec(ctx, "/test/workspace", "", "echo", []string{"hello"},
|
||||
agentcontainers.WithExecContainerID("test-container-456"),
|
||||
agentcontainers.WithExecOutput(outBuf, errBuf),
|
||||
)
|
||||
require.NoError(t, err, "Exec should succeed")
|
||||
|
||||
assert.NotEmpty(t, outBuf.String(), "stdout buffer should not be empty for exec with log file")
|
||||
assert.Empty(t, errBuf.String(), "stderr buffer should be empty")
|
||||
})
|
||||
}
|
||||
|
||||
// testDevcontainerExecer implements the agentexec.Execer interface for testing.
|
||||
@@ -493,16 +204,13 @@ func TestDevcontainerHelperProcess(t *testing.T) {
|
||||
}
|
||||
|
||||
logFilePath := os.Getenv("TEST_DEVCONTAINER_LOG_FILE")
|
||||
if logFilePath != "" {
|
||||
// Read and output log file for commands that need it (like "up")
|
||||
output, err := os.ReadFile(logFilePath)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Reading log file %s failed: %v\n", logFilePath, err)
|
||||
os.Exit(2)
|
||||
}
|
||||
_, _ = io.Copy(os.Stdout, bytes.NewReader(output))
|
||||
output, err := os.ReadFile(logFilePath)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Reading log file %s failed: %v\n", logFilePath, err)
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
_, _ = io.Copy(os.Stdout, bytes.NewReader(output))
|
||||
if os.Getenv("TEST_DEVCONTAINER_WANT_ERROR") == "true" {
|
||||
os.Exit(1)
|
||||
}
|
||||
@@ -593,7 +301,7 @@ func setupDevcontainerWorkspace(t *testing.T, workspaceFolder string) string {
|
||||
"containerEnv": {
|
||||
"TEST_CONTAINER": "true"
|
||||
},
|
||||
"runArgs": ["--label=com.coder.test=devcontainercli", "--label=` + agentcontainers.DevcontainerIsTestRunLabel + `=true"]
|
||||
"runArgs": ["--label", "com.coder.test=devcontainercli"]
|
||||
}`
|
||||
err = os.WriteFile(configPath, []byte(content), 0o600)
|
||||
require.NoError(t, err, "create devcontainer.json file")
|
||||
@@ -644,107 +352,3 @@ func removeDevcontainerByID(t *testing.T, pool *dockertest.Pool, id string) {
|
||||
assert.NoError(t, err, "remove container failed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDevcontainerFeatures_OptionsAsEnvs(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
realConfigJSON := `{
|
||||
"mergedConfiguration": {
|
||||
"features": {
|
||||
"./code-server": {
|
||||
"port": 9090
|
||||
},
|
||||
"ghcr.io/devcontainers/features/docker-in-docker:2": {
|
||||
"moby": "false"
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
var realConfig agentcontainers.DevcontainerConfig
|
||||
err := json.Unmarshal([]byte(realConfigJSON), &realConfig)
|
||||
require.NoError(t, err, "unmarshal JSON payload")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
features agentcontainers.DevcontainerFeatures
|
||||
want []string
|
||||
}{
|
||||
{
|
||||
name: "code-server feature",
|
||||
features: agentcontainers.DevcontainerFeatures{
|
||||
"./code-server": map[string]any{
|
||||
"port": 9090,
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
"FEATURE_CODE_SERVER_OPTION_PORT=9090",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "docker-in-docker feature",
|
||||
features: agentcontainers.DevcontainerFeatures{
|
||||
"ghcr.io/devcontainers/features/docker-in-docker:2": map[string]any{
|
||||
"moby": "false",
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
"FEATURE_DOCKER_IN_DOCKER_OPTION_MOBY=false",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple features with multiple options",
|
||||
features: agentcontainers.DevcontainerFeatures{
|
||||
"./code-server": map[string]any{
|
||||
"port": 9090,
|
||||
"password": "secret",
|
||||
},
|
||||
"ghcr.io/devcontainers/features/docker-in-docker:2": map[string]any{
|
||||
"moby": "false",
|
||||
"docker-dash-compose-version": "v2",
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
"FEATURE_CODE_SERVER_OPTION_PASSWORD=secret",
|
||||
"FEATURE_CODE_SERVER_OPTION_PORT=9090",
|
||||
"FEATURE_DOCKER_IN_DOCKER_OPTION_DOCKER_DASH_COMPOSE_VERSION=v2",
|
||||
"FEATURE_DOCKER_IN_DOCKER_OPTION_MOBY=false",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "feature with non-map value (should be ignored)",
|
||||
features: agentcontainers.DevcontainerFeatures{
|
||||
"./code-server": map[string]any{
|
||||
"port": 9090,
|
||||
},
|
||||
"./invalid-feature": "not-a-map",
|
||||
},
|
||||
want: []string{
|
||||
"FEATURE_CODE_SERVER_OPTION_PORT=9090",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "real config example",
|
||||
features: realConfig.MergedConfiguration.Features,
|
||||
want: []string{
|
||||
"FEATURE_CODE_SERVER_OPTION_PORT=9090",
|
||||
"FEATURE_DOCKER_IN_DOCKER_OPTION_MOBY=false",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty features",
|
||||
features: agentcontainers.DevcontainerFeatures{},
|
||||
want: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
got := tt.features.OptionsAsEnvs()
|
||||
if diff := cmp.Diff(tt.want, got); diff != "" {
|
||||
require.Failf(t, "OptionsAsEnvs() mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,80 +0,0 @@
|
||||
package agentcontainers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"github.com/coder/coder/v2/agent/agentexec"
|
||||
"github.com/coder/coder/v2/agent/usershell"
|
||||
"github.com/coder/coder/v2/pty"
|
||||
)
|
||||
|
||||
// CommandEnv is a function that returns the shell, working directory,
|
||||
// and environment variables to use when executing a command. It takes
|
||||
// an EnvInfoer and a pre-existing environment slice as arguments.
|
||||
// This signature matches agentssh.Server.CommandEnv.
|
||||
type CommandEnv func(ei usershell.EnvInfoer, addEnv []string) (shell, dir string, env []string, err error)
|
||||
|
||||
// commandEnvExecer is an agentexec.Execer that uses a CommandEnv to
|
||||
// determine the shell, working directory, and environment variables
|
||||
// for commands. It wraps another agentexec.Execer to provide the
|
||||
// necessary context.
|
||||
type commandEnvExecer struct {
|
||||
logger slog.Logger
|
||||
commandEnv CommandEnv
|
||||
execer agentexec.Execer
|
||||
}
|
||||
|
||||
func newCommandEnvExecer(
|
||||
logger slog.Logger,
|
||||
commandEnv CommandEnv,
|
||||
execer agentexec.Execer,
|
||||
) *commandEnvExecer {
|
||||
return &commandEnvExecer{
|
||||
logger: logger,
|
||||
commandEnv: commandEnv,
|
||||
execer: execer,
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure commandEnvExecer implements agentexec.Execer.
|
||||
var _ agentexec.Execer = (*commandEnvExecer)(nil)
|
||||
|
||||
func (e *commandEnvExecer) prepare(ctx context.Context, inName string, inArgs ...string) (name string, args []string, dir string, env []string) {
|
||||
shell, dir, env, err := e.commandEnv(nil, nil)
|
||||
if err != nil {
|
||||
e.logger.Error(ctx, "get command environment failed", slog.Error(err))
|
||||
return inName, inArgs, "", nil
|
||||
}
|
||||
|
||||
caller := "-c"
|
||||
if runtime.GOOS == "windows" {
|
||||
caller = "/c"
|
||||
}
|
||||
name = shell
|
||||
for _, arg := range append([]string{inName}, inArgs...) {
|
||||
args = append(args, fmt.Sprintf("%q", arg))
|
||||
}
|
||||
args = []string{caller, strings.Join(args, " ")}
|
||||
return name, args, dir, env
|
||||
}
|
||||
|
||||
func (e *commandEnvExecer) CommandContext(ctx context.Context, cmd string, args ...string) *exec.Cmd {
|
||||
name, args, dir, env := e.prepare(ctx, cmd, args...)
|
||||
c := e.execer.CommandContext(ctx, name, args...)
|
||||
c.Dir = dir
|
||||
c.Env = env
|
||||
return c
|
||||
}
|
||||
|
||||
func (e *commandEnvExecer) PTYCommandContext(ctx context.Context, cmd string, args ...string) *pty.Cmd {
|
||||
name, args, dir, env := e.prepare(ctx, cmd, args...)
|
||||
c := e.execer.PTYCommandContext(ctx, name, args...)
|
||||
c.Dir = dir
|
||||
c.Env = env
|
||||
return c
|
||||
}
|
||||
@@ -1,124 +0,0 @@
|
||||
package ignore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/go-git/go-git/v5/plumbing/format/config"
|
||||
"github.com/go-git/go-git/v5/plumbing/format/gitignore"
|
||||
"github.com/spf13/afero"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
)
|
||||
|
||||
const (
|
||||
gitconfigFile = ".gitconfig"
|
||||
gitignoreFile = ".gitignore"
|
||||
gitInfoExcludeFile = ".git/info/exclude"
|
||||
)
|
||||
|
||||
func FilePathToParts(path string) []string {
|
||||
components := []string{}
|
||||
|
||||
if path == "" {
|
||||
return components
|
||||
}
|
||||
|
||||
for segment := range strings.SplitSeq(filepath.Clean(path), string(filepath.Separator)) {
|
||||
if segment != "" {
|
||||
components = append(components, segment)
|
||||
}
|
||||
}
|
||||
|
||||
return components
|
||||
}
|
||||
|
||||
func readIgnoreFile(fileSystem afero.Fs, path, ignore string) ([]gitignore.Pattern, error) {
|
||||
var ps []gitignore.Pattern
|
||||
|
||||
data, err := afero.ReadFile(fileSystem, filepath.Join(path, ignore))
|
||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for s := range strings.SplitSeq(string(data), "\n") {
|
||||
if !strings.HasPrefix(s, "#") && len(strings.TrimSpace(s)) > 0 {
|
||||
ps = append(ps, gitignore.ParsePattern(s, FilePathToParts(path)))
|
||||
}
|
||||
}
|
||||
|
||||
return ps, nil
|
||||
}
|
||||
|
||||
func ReadPatterns(ctx context.Context, logger slog.Logger, fileSystem afero.Fs, path string) ([]gitignore.Pattern, error) {
|
||||
var ps []gitignore.Pattern
|
||||
|
||||
subPs, err := readIgnoreFile(fileSystem, path, gitInfoExcludeFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ps = append(ps, subPs...)
|
||||
|
||||
if err := afero.Walk(fileSystem, path, func(path string, info fs.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
logger.Error(ctx, "encountered error while walking for git ignore files",
|
||||
slog.F("path", path),
|
||||
slog.Error(err))
|
||||
return nil
|
||||
}
|
||||
|
||||
if !info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
subPs, err := readIgnoreFile(fileSystem, path, gitignoreFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ps = append(ps, subPs...)
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ps, nil
|
||||
}
|
||||
|
||||
func loadPatterns(fileSystem afero.Fs, path string) ([]gitignore.Pattern, error) {
|
||||
data, err := afero.ReadFile(fileSystem, path)
|
||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
decoder := config.NewDecoder(bytes.NewBuffer(data))
|
||||
|
||||
conf := config.New()
|
||||
if err := decoder.Decode(conf); err != nil {
|
||||
return nil, xerrors.Errorf("decode config: %w", err)
|
||||
}
|
||||
|
||||
excludes := conf.Section("core").Options.Get("excludesfile")
|
||||
if excludes == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return readIgnoreFile(fileSystem, "", excludes)
|
||||
}
|
||||
|
||||
func LoadGlobalPatterns(fileSystem afero.Fs) ([]gitignore.Pattern, error) {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return loadPatterns(fileSystem, filepath.Join(home, gitconfigFile))
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
package ignore_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/agent/agentcontainers/ignore"
|
||||
)
|
||||
|
||||
func TestFilePathToParts(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
path string
|
||||
expected []string
|
||||
}{
|
||||
{"", []string{}},
|
||||
{"/", []string{}},
|
||||
{"foo", []string{"foo"}},
|
||||
{"/foo", []string{"foo"}},
|
||||
{"./foo/bar", []string{"foo", "bar"}},
|
||||
{"../foo/bar", []string{"..", "foo", "bar"}},
|
||||
{"foo/bar/baz", []string{"foo", "bar", "baz"}},
|
||||
{"/foo/bar/baz", []string{"foo", "bar", "baz"}},
|
||||
{"foo/../bar", []string{"bar"}},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(fmt.Sprintf("`%s`", tt.path), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
parts := ignore.FilePathToParts(tt.path)
|
||||
require.Equal(t, tt.expected, parts)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,294 +0,0 @@
|
||||
package agentcontainers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"slices"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
|
||||
agentproto "github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
// SubAgent represents an agent running in a dev container.
|
||||
type SubAgent struct {
|
||||
ID uuid.UUID
|
||||
Name string
|
||||
AuthToken uuid.UUID
|
||||
Directory string
|
||||
Architecture string
|
||||
OperatingSystem string
|
||||
Apps []SubAgentApp
|
||||
DisplayApps []codersdk.DisplayApp
|
||||
}
|
||||
|
||||
// CloneConfig makes a copy of SubAgent without ID and AuthToken. The
|
||||
// name is inherited from the devcontainer.
|
||||
func (s SubAgent) CloneConfig(dc codersdk.WorkspaceAgentDevcontainer) SubAgent {
|
||||
return SubAgent{
|
||||
Name: dc.Name,
|
||||
Directory: s.Directory,
|
||||
Architecture: s.Architecture,
|
||||
OperatingSystem: s.OperatingSystem,
|
||||
DisplayApps: slices.Clone(s.DisplayApps),
|
||||
Apps: slices.Clone(s.Apps),
|
||||
}
|
||||
}
|
||||
|
||||
func (s SubAgent) EqualConfig(other SubAgent) bool {
|
||||
return s.Name == other.Name &&
|
||||
s.Directory == other.Directory &&
|
||||
s.Architecture == other.Architecture &&
|
||||
s.OperatingSystem == other.OperatingSystem &&
|
||||
slices.Equal(s.DisplayApps, other.DisplayApps) &&
|
||||
slices.Equal(s.Apps, other.Apps)
|
||||
}
|
||||
|
||||
type SubAgentApp struct {
|
||||
Slug string `json:"slug"`
|
||||
Command string `json:"command"`
|
||||
DisplayName string `json:"displayName"`
|
||||
External bool `json:"external"`
|
||||
Group string `json:"group"`
|
||||
HealthCheck SubAgentHealthCheck `json:"healthCheck"`
|
||||
Hidden bool `json:"hidden"`
|
||||
Icon string `json:"icon"`
|
||||
OpenIn codersdk.WorkspaceAppOpenIn `json:"openIn"`
|
||||
Order int32 `json:"order"`
|
||||
Share codersdk.WorkspaceAppSharingLevel `json:"share"`
|
||||
Subdomain bool `json:"subdomain"`
|
||||
URL string `json:"url"`
|
||||
}
|
||||
|
||||
// ToProtoApp converts the app into the protobuf form used by
// CreateSubAgentRequest. Optional string fields are only set when
// non-empty so the server can tell "unset" apart from "empty"; the
// bool/int fields are always sent. An unrecognized OpenIn or Share
// value is an error rather than being silently dropped.
func (app SubAgentApp) ToProtoApp() (*agentproto.CreateSubAgentRequest_App, error) {
	proto := agentproto.CreateSubAgentRequest_App{
		Slug:      app.Slug,
		External:  &app.External,
		Hidden:    &app.Hidden,
		Order:     &app.Order,
		Subdomain: &app.Subdomain,
	}

	if app.Command != "" {
		proto.Command = &app.Command
	}
	if app.DisplayName != "" {
		proto.DisplayName = &app.DisplayName
	}
	if app.Group != "" {
		proto.Group = &app.Group
	}
	if app.Icon != "" {
		proto.Icon = &app.Icon
	}
	if app.URL != "" {
		proto.Url = &app.URL
	}

	// A health check is considered configured only when its URL is set.
	if app.HealthCheck.URL != "" {
		proto.Healthcheck = &agentproto.CreateSubAgentRequest_App_Healthcheck{
			Interval:  app.HealthCheck.Interval,
			Threshold: app.HealthCheck.Threshold,
			Url:       app.HealthCheck.URL,
		}
	}

	if app.OpenIn != "" {
		switch app.OpenIn {
		case codersdk.WorkspaceAppOpenInSlimWindow:
			proto.OpenIn = agentproto.CreateSubAgentRequest_App_SLIM_WINDOW.Enum()
		case codersdk.WorkspaceAppOpenInTab:
			proto.OpenIn = agentproto.CreateSubAgentRequest_App_TAB.Enum()
		default:
			return nil, xerrors.Errorf("unexpected codersdk.WorkspaceAppOpenIn: %#v", app.OpenIn)
		}
	}

	if app.Share != "" {
		switch app.Share {
		case codersdk.WorkspaceAppSharingLevelAuthenticated:
			proto.Share = agentproto.CreateSubAgentRequest_App_AUTHENTICATED.Enum()
		case codersdk.WorkspaceAppSharingLevelOwner:
			proto.Share = agentproto.CreateSubAgentRequest_App_OWNER.Enum()
		case codersdk.WorkspaceAppSharingLevelPublic:
			proto.Share = agentproto.CreateSubAgentRequest_App_PUBLIC.Enum()
		case codersdk.WorkspaceAppSharingLevelOrganization:
			proto.Share = agentproto.CreateSubAgentRequest_App_ORGANIZATION.Enum()
		default:
			return nil, xerrors.Errorf("unexpected codersdk.WorkspaceAppSharingLevel: %#v", app.Share)
		}
	}

	return &proto, nil
}
|
||||
|
||||
// SubAgentHealthCheck configures an app health check. It is only sent
// to the server when URL is non-empty (see SubAgentApp.ToProtoApp).
type SubAgentHealthCheck struct {
	Interval  int32  `json:"interval"`  // polling interval, in seconds per convention -- TODO confirm
	Threshold int32  `json:"threshold"` // consecutive failures before unhealthy -- TODO confirm
	URL       string `json:"url"`
}
|
||||
|
||||
// SubAgentClient is an interface for managing sub agents and allows
// changing the implementation without having to deal with the
// agentproto package directly.
type SubAgentClient interface {
	// List returns a list of all agents.
	List(ctx context.Context) ([]SubAgent, error)
	// Create adds a new agent.
	Create(ctx context.Context, agent SubAgent) (SubAgent, error)
	// Delete removes an agent by its ID.
	Delete(ctx context.Context, id uuid.UUID) error
}
|
||||
|
||||
// subAgentAPIClient implements SubAgentClient on top of the agent
// DRPC API. Construct it via NewSubAgentClientFromAPI.
type subAgentAPIClient struct {
	logger slog.Logger
	api    agentproto.DRPCAgentClient26
}

var _ SubAgentClient = (*subAgentAPIClient)(nil)
|
||||
|
||||
func NewSubAgentClientFromAPI(logger slog.Logger, agentAPI agentproto.DRPCAgentClient26) SubAgentClient {
|
||||
if agentAPI == nil {
|
||||
panic("developer error: agentAPI cannot be nil")
|
||||
}
|
||||
return &subAgentAPIClient{
|
||||
logger: logger.Named("subagentclient"),
|
||||
api: agentAPI,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *subAgentAPIClient) List(ctx context.Context) ([]SubAgent, error) {
|
||||
a.logger.Debug(ctx, "listing sub agents")
|
||||
resp, err := a.api.ListSubAgents(ctx, &agentproto.ListSubAgentsRequest{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
agents := make([]SubAgent, len(resp.Agents))
|
||||
for i, agent := range resp.Agents {
|
||||
id, err := uuid.FromBytes(agent.GetId())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
authToken, err := uuid.FromBytes(agent.GetAuthToken())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
agents[i] = SubAgent{
|
||||
ID: id,
|
||||
Name: agent.GetName(),
|
||||
AuthToken: authToken,
|
||||
}
|
||||
}
|
||||
return agents, nil
|
||||
}
|
||||
|
||||
func (a *subAgentAPIClient) Create(ctx context.Context, agent SubAgent) (_ SubAgent, err error) {
|
||||
a.logger.Debug(ctx, "creating sub agent", slog.F("name", agent.Name), slog.F("directory", agent.Directory))
|
||||
|
||||
displayApps := make([]agentproto.CreateSubAgentRequest_DisplayApp, 0, len(agent.DisplayApps))
|
||||
for _, displayApp := range agent.DisplayApps {
|
||||
var app agentproto.CreateSubAgentRequest_DisplayApp
|
||||
switch displayApp {
|
||||
case codersdk.DisplayAppPortForward:
|
||||
app = agentproto.CreateSubAgentRequest_PORT_FORWARDING_HELPER
|
||||
case codersdk.DisplayAppSSH:
|
||||
app = agentproto.CreateSubAgentRequest_SSH_HELPER
|
||||
case codersdk.DisplayAppVSCodeDesktop:
|
||||
app = agentproto.CreateSubAgentRequest_VSCODE
|
||||
case codersdk.DisplayAppVSCodeInsiders:
|
||||
app = agentproto.CreateSubAgentRequest_VSCODE_INSIDERS
|
||||
case codersdk.DisplayAppWebTerminal:
|
||||
app = agentproto.CreateSubAgentRequest_WEB_TERMINAL
|
||||
default:
|
||||
return SubAgent{}, xerrors.Errorf("unexpected codersdk.DisplayApp: %#v", displayApp)
|
||||
}
|
||||
|
||||
displayApps = append(displayApps, app)
|
||||
}
|
||||
|
||||
apps := make([]*agentproto.CreateSubAgentRequest_App, 0, len(agent.Apps))
|
||||
for _, app := range agent.Apps {
|
||||
protoApp, err := app.ToProtoApp()
|
||||
if err != nil {
|
||||
return SubAgent{}, xerrors.Errorf("convert app: %w", err)
|
||||
}
|
||||
|
||||
apps = append(apps, protoApp)
|
||||
}
|
||||
|
||||
resp, err := a.api.CreateSubAgent(ctx, &agentproto.CreateSubAgentRequest{
|
||||
Name: agent.Name,
|
||||
Directory: agent.Directory,
|
||||
Architecture: agent.Architecture,
|
||||
OperatingSystem: agent.OperatingSystem,
|
||||
DisplayApps: displayApps,
|
||||
Apps: apps,
|
||||
})
|
||||
if err != nil {
|
||||
return SubAgent{}, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
// Best effort.
|
||||
_, _ = a.api.DeleteSubAgent(ctx, &agentproto.DeleteSubAgentRequest{
|
||||
Id: resp.GetAgent().GetId(),
|
||||
})
|
||||
}
|
||||
}()
|
||||
|
||||
agent.Name = resp.GetAgent().GetName()
|
||||
agent.ID, err = uuid.FromBytes(resp.GetAgent().GetId())
|
||||
if err != nil {
|
||||
return SubAgent{}, err
|
||||
}
|
||||
agent.AuthToken, err = uuid.FromBytes(resp.GetAgent().GetAuthToken())
|
||||
if err != nil {
|
||||
return SubAgent{}, err
|
||||
}
|
||||
|
||||
for _, appError := range resp.GetAppCreationErrors() {
|
||||
app := apps[appError.GetIndex()]
|
||||
|
||||
a.logger.Warn(ctx, "unable to create app",
|
||||
slog.F("agent_name", agent.Name),
|
||||
slog.F("agent_id", agent.ID),
|
||||
slog.F("directory", agent.Directory),
|
||||
slog.F("app_slug", app.Slug),
|
||||
slog.F("field", appError.GetField()),
|
||||
slog.F("error", appError.GetError()),
|
||||
)
|
||||
}
|
||||
|
||||
return agent, nil
|
||||
}
|
||||
|
||||
func (a *subAgentAPIClient) Delete(ctx context.Context, id uuid.UUID) error {
|
||||
a.logger.Debug(ctx, "deleting sub agent", slog.F("id", id.String()))
|
||||
_, err := a.api.DeleteSubAgent(ctx, &agentproto.DeleteSubAgentRequest{
|
||||
Id: id[:],
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// noopSubAgentClient is a SubAgentClient that does nothing: List
// reports no agents, and Create/Delete return errors.
type noopSubAgentClient struct{}

var _ SubAgentClient = noopSubAgentClient{}
|
||||
|
||||
func (noopSubAgentClient) List(_ context.Context) ([]SubAgent, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (noopSubAgentClient) Create(_ context.Context, _ SubAgent) (SubAgent, error) {
|
||||
return SubAgent{}, xerrors.New("noopSubAgentClient does not support creating sub agents")
|
||||
}
|
||||
|
||||
func (noopSubAgentClient) Delete(_ context.Context, _ uuid.UUID) error {
|
||||
return xerrors.New("noopSubAgentClient does not support deleting sub agents")
|
||||
}
|
||||
@@ -1,308 +0,0 @@
|
||||
package agentcontainers_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/agent/agentcontainers"
|
||||
"github.com/coder/coder/v2/agent/agenttest"
|
||||
agentproto "github.com/coder/coder/v2/agent/proto"
|
||||
"github.com/coder/coder/v2/coderd/util/ptr"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
"github.com/coder/coder/v2/tailnet"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
func TestSubAgentClient_CreateWithDisplayApps(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("CreateWithDisplayApps", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
displayApps []codersdk.DisplayApp
|
||||
expectedApps []agentproto.CreateSubAgentRequest_DisplayApp
|
||||
}{
|
||||
{
|
||||
name: "single display app",
|
||||
displayApps: []codersdk.DisplayApp{codersdk.DisplayAppVSCodeDesktop},
|
||||
expectedApps: []agentproto.CreateSubAgentRequest_DisplayApp{
|
||||
agentproto.CreateSubAgentRequest_VSCODE,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple display apps",
|
||||
displayApps: []codersdk.DisplayApp{
|
||||
codersdk.DisplayAppVSCodeDesktop,
|
||||
codersdk.DisplayAppSSH,
|
||||
codersdk.DisplayAppPortForward,
|
||||
},
|
||||
expectedApps: []agentproto.CreateSubAgentRequest_DisplayApp{
|
||||
agentproto.CreateSubAgentRequest_VSCODE,
|
||||
agentproto.CreateSubAgentRequest_SSH_HELPER,
|
||||
agentproto.CreateSubAgentRequest_PORT_FORWARDING_HELPER,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "all display apps",
|
||||
displayApps: []codersdk.DisplayApp{
|
||||
codersdk.DisplayAppPortForward,
|
||||
codersdk.DisplayAppSSH,
|
||||
codersdk.DisplayAppVSCodeDesktop,
|
||||
codersdk.DisplayAppVSCodeInsiders,
|
||||
codersdk.DisplayAppWebTerminal,
|
||||
},
|
||||
expectedApps: []agentproto.CreateSubAgentRequest_DisplayApp{
|
||||
agentproto.CreateSubAgentRequest_PORT_FORWARDING_HELPER,
|
||||
agentproto.CreateSubAgentRequest_SSH_HELPER,
|
||||
agentproto.CreateSubAgentRequest_VSCODE,
|
||||
agentproto.CreateSubAgentRequest_VSCODE_INSIDERS,
|
||||
agentproto.CreateSubAgentRequest_WEB_TERMINAL,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no display apps",
|
||||
displayApps: []codersdk.DisplayApp{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
logger := testutil.Logger(t)
|
||||
statsCh := make(chan *agentproto.Stats)
|
||||
|
||||
agentAPI := agenttest.NewClient(t, logger, uuid.New(), agentsdk.Manifest{}, statsCh, tailnet.NewCoordinator(logger))
|
||||
|
||||
agentClient, _, err := agentAPI.ConnectRPC26(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
subAgentClient := agentcontainers.NewSubAgentClientFromAPI(logger, agentClient)
|
||||
|
||||
// When: We create a sub agent with display apps.
|
||||
subAgent, err := subAgentClient.Create(ctx, agentcontainers.SubAgent{
|
||||
Name: "sub-agent-" + tt.name,
|
||||
Directory: "/workspaces/coder",
|
||||
Architecture: "amd64",
|
||||
OperatingSystem: "linux",
|
||||
DisplayApps: tt.displayApps,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
displayApps, err := agentAPI.GetSubAgentDisplayApps(subAgent.ID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Then: We expect the apps to be created.
|
||||
require.Equal(t, tt.expectedApps, displayApps)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("CreateWithApps", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
apps []agentcontainers.SubAgentApp
|
||||
expectedApps []*agentproto.CreateSubAgentRequest_App
|
||||
}{
|
||||
{
|
||||
name: "SlugOnly",
|
||||
apps: []agentcontainers.SubAgentApp{
|
||||
{
|
||||
Slug: "code-server",
|
||||
},
|
||||
},
|
||||
expectedApps: []*agentproto.CreateSubAgentRequest_App{
|
||||
{
|
||||
Slug: "code-server",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "AllFields",
|
||||
apps: []agentcontainers.SubAgentApp{
|
||||
{
|
||||
Slug: "jupyter",
|
||||
Command: "jupyter lab --port=8888",
|
||||
DisplayName: "Jupyter Lab",
|
||||
External: false,
|
||||
Group: "Development",
|
||||
HealthCheck: agentcontainers.SubAgentHealthCheck{
|
||||
Interval: 30,
|
||||
Threshold: 3,
|
||||
URL: "http://localhost:8888/api",
|
||||
},
|
||||
Hidden: false,
|
||||
Icon: "/icon/jupyter.svg",
|
||||
OpenIn: codersdk.WorkspaceAppOpenInTab,
|
||||
Order: int32(1),
|
||||
Share: codersdk.WorkspaceAppSharingLevelAuthenticated,
|
||||
Subdomain: true,
|
||||
URL: "http://localhost:8888",
|
||||
},
|
||||
},
|
||||
expectedApps: []*agentproto.CreateSubAgentRequest_App{
|
||||
{
|
||||
Slug: "jupyter",
|
||||
Command: ptr.Ref("jupyter lab --port=8888"),
|
||||
DisplayName: ptr.Ref("Jupyter Lab"),
|
||||
External: ptr.Ref(false),
|
||||
Group: ptr.Ref("Development"),
|
||||
Healthcheck: &agentproto.CreateSubAgentRequest_App_Healthcheck{
|
||||
Interval: 30,
|
||||
Threshold: 3,
|
||||
Url: "http://localhost:8888/api",
|
||||
},
|
||||
Hidden: ptr.Ref(false),
|
||||
Icon: ptr.Ref("/icon/jupyter.svg"),
|
||||
OpenIn: agentproto.CreateSubAgentRequest_App_TAB.Enum(),
|
||||
Order: ptr.Ref(int32(1)),
|
||||
Share: agentproto.CreateSubAgentRequest_App_AUTHENTICATED.Enum(),
|
||||
Subdomain: ptr.Ref(true),
|
||||
Url: ptr.Ref("http://localhost:8888"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "AllSharingLevels",
|
||||
apps: []agentcontainers.SubAgentApp{
|
||||
{
|
||||
Slug: "owner-app",
|
||||
Share: codersdk.WorkspaceAppSharingLevelOwner,
|
||||
},
|
||||
{
|
||||
Slug: "authenticated-app",
|
||||
Share: codersdk.WorkspaceAppSharingLevelAuthenticated,
|
||||
},
|
||||
{
|
||||
Slug: "public-app",
|
||||
Share: codersdk.WorkspaceAppSharingLevelPublic,
|
||||
},
|
||||
{
|
||||
Slug: "organization-app",
|
||||
Share: codersdk.WorkspaceAppSharingLevelOrganization,
|
||||
},
|
||||
},
|
||||
expectedApps: []*agentproto.CreateSubAgentRequest_App{
|
||||
{
|
||||
Slug: "owner-app",
|
||||
Share: agentproto.CreateSubAgentRequest_App_OWNER.Enum(),
|
||||
},
|
||||
{
|
||||
Slug: "authenticated-app",
|
||||
Share: agentproto.CreateSubAgentRequest_App_AUTHENTICATED.Enum(),
|
||||
},
|
||||
{
|
||||
Slug: "public-app",
|
||||
Share: agentproto.CreateSubAgentRequest_App_PUBLIC.Enum(),
|
||||
},
|
||||
{
|
||||
Slug: "organization-app",
|
||||
Share: agentproto.CreateSubAgentRequest_App_ORGANIZATION.Enum(),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "WithHealthCheck",
|
||||
apps: []agentcontainers.SubAgentApp{
|
||||
{
|
||||
Slug: "health-app",
|
||||
HealthCheck: agentcontainers.SubAgentHealthCheck{
|
||||
Interval: 60,
|
||||
Threshold: 5,
|
||||
URL: "http://localhost:3000/health",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedApps: []*agentproto.CreateSubAgentRequest_App{
|
||||
{
|
||||
Slug: "health-app",
|
||||
Healthcheck: &agentproto.CreateSubAgentRequest_App_Healthcheck{
|
||||
Interval: 60,
|
||||
Threshold: 5,
|
||||
Url: "http://localhost:3000/health",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
logger := testutil.Logger(t)
|
||||
statsCh := make(chan *agentproto.Stats)
|
||||
|
||||
agentAPI := agenttest.NewClient(t, logger, uuid.New(), agentsdk.Manifest{}, statsCh, tailnet.NewCoordinator(logger))
|
||||
|
||||
agentClient, _, err := agentAPI.ConnectRPC26(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
subAgentClient := agentcontainers.NewSubAgentClientFromAPI(logger, agentClient)
|
||||
|
||||
// When: We create a sub agent with display apps.
|
||||
subAgent, err := subAgentClient.Create(ctx, agentcontainers.SubAgent{
|
||||
Name: "sub-agent-" + tt.name,
|
||||
Directory: "/workspaces/coder",
|
||||
Architecture: "amd64",
|
||||
OperatingSystem: "linux",
|
||||
Apps: tt.apps,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
apps, err := agentAPI.GetSubAgentApps(subAgent.ID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Then: We expect the apps to be created.
|
||||
require.Len(t, apps, len(tt.expectedApps))
|
||||
for i, expectedApp := range tt.expectedApps {
|
||||
actualApp := apps[i]
|
||||
|
||||
assert.Equal(t, expectedApp.Slug, actualApp.Slug)
|
||||
assert.Equal(t, expectedApp.Command, actualApp.Command)
|
||||
assert.Equal(t, expectedApp.DisplayName, actualApp.DisplayName)
|
||||
assert.Equal(t, ptr.NilToEmpty(expectedApp.External), ptr.NilToEmpty(actualApp.External))
|
||||
assert.Equal(t, expectedApp.Group, actualApp.Group)
|
||||
assert.Equal(t, ptr.NilToEmpty(expectedApp.Hidden), ptr.NilToEmpty(actualApp.Hidden))
|
||||
assert.Equal(t, expectedApp.Icon, actualApp.Icon)
|
||||
assert.Equal(t, ptr.NilToEmpty(expectedApp.Order), ptr.NilToEmpty(actualApp.Order))
|
||||
assert.Equal(t, ptr.NilToEmpty(expectedApp.Subdomain), ptr.NilToEmpty(actualApp.Subdomain))
|
||||
assert.Equal(t, expectedApp.Url, actualApp.Url)
|
||||
|
||||
if expectedApp.OpenIn != nil {
|
||||
require.NotNil(t, actualApp.OpenIn)
|
||||
assert.Equal(t, *expectedApp.OpenIn, *actualApp.OpenIn)
|
||||
} else {
|
||||
assert.Equal(t, expectedApp.OpenIn, actualApp.OpenIn)
|
||||
}
|
||||
|
||||
if expectedApp.Share != nil {
|
||||
require.NotNil(t, actualApp.Share)
|
||||
assert.Equal(t, *expectedApp.Share, *actualApp.Share)
|
||||
} else {
|
||||
assert.Equal(t, expectedApp.Share, actualApp.Share)
|
||||
}
|
||||
|
||||
if expectedApp.Healthcheck != nil {
|
||||
require.NotNil(t, expectedApp.Healthcheck)
|
||||
assert.Equal(t, expectedApp.Healthcheck.Interval, actualApp.Healthcheck.Interval)
|
||||
assert.Equal(t, expectedApp.Healthcheck.Threshold, actualApp.Healthcheck.Threshold)
|
||||
assert.Equal(t, expectedApp.Healthcheck.Url, actualApp.Healthcheck.Url)
|
||||
} else {
|
||||
assert.Equal(t, expectedApp.Healthcheck, actualApp.Healthcheck)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -1,64 +0,0 @@
|
||||
@devcontainers/cli 0.75.0. Node.js v23.9.0. darwin 24.4.0 arm64.
|
||||
Resolving Feature dependencies for 'ghcr.io/devcontainers/features/docker-in-docker:2'...
|
||||
Soft-dependency 'ghcr.io/devcontainers/features/common-utils' is not required. Removing from installation order...
|
||||
Files to omit: ''
|
||||
Run: docker buildx build --load --build-context dev_containers_feature_content_source=/var/folders/1y/cm8mblxd7_x9cljwl_jvfprh0000gn/T/devcontainercli/container-features/0.75.0-1744102171193 --build-arg _DEV_CONTAINERS_BASE_IMAGE=mcr.microsoft.com/devcontainers/javascript-node:1-18-bullseye --build-arg _DEV_CONTAINERS_IMAGE_USER=root --build-arg _DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp --target dev_containers_target_stage -f /var/folders/1y/cm8mblxd7_x9cljwl_jvfprh0000gn/T/devcontainercli/container-features/0.75.0-1744102171193/Dockerfile.extended -t vsc-devcontainers-template-starter-81d8f17e32abef6d434cbb5a37fe05e5c8a6f8ccede47a61197f002dcbf60566-features /var/folders/1y/cm8mblxd7_x9cljwl_jvfprh0000gn/T/devcontainercli/empty-folder
|
||||
#0 building with "orbstack" instance using docker driver
|
||||
|
||||
#1 [internal] load build definition from Dockerfile.extended
|
||||
#1 transferring dockerfile: 3.09kB done
|
||||
#1 DONE 0.0s
|
||||
|
||||
#2 resolve image config for docker-image://docker.io/docker/dockerfile:1.4
|
||||
#2 DONE 1.3s
|
||||
#3 docker-image://docker.io/docker/dockerfile:1.4@sha256:9ba7531bd80fb0a858632727cf7a112fbfd19b17e94c4e84ced81e24ef1a0dbc
|
||||
#3 CACHED
|
||||
|
||||
#4 [internal] load .dockerignore
|
||||
#4 transferring context: 2B done
|
||||
#4 DONE 0.0s
|
||||
|
||||
#5 [internal] load metadata for mcr.microsoft.com/devcontainers/javascript-node:1-18-bullseye
|
||||
#5 DONE 0.0s
|
||||
|
||||
#6 [context dev_containers_feature_content_source] load .dockerignore
|
||||
#6 transferring dev_containers_feature_content_source: 2B done
|
||||
#6 DONE 0.0s
|
||||
|
||||
#7 [dev_containers_feature_content_normalize 1/3] FROM mcr.microsoft.com/devcontainers/javascript-node:1-18-bullseye
|
||||
#7 DONE 0.0s
|
||||
|
||||
#8 [context dev_containers_feature_content_source] load from client
|
||||
#8 transferring dev_containers_feature_content_source: 82.11kB 0.0s done
|
||||
#8 DONE 0.0s
|
||||
|
||||
#9 [dev_containers_feature_content_normalize 2/3] COPY --from=dev_containers_feature_content_source devcontainer-features.builtin.env /tmp/build-features/
|
||||
#9 CACHED
|
||||
|
||||
#10 [dev_containers_target_stage 2/5] RUN mkdir -p /tmp/dev-container-features
|
||||
#10 CACHED
|
||||
|
||||
#11 [dev_containers_target_stage 3/5] COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
|
||||
#11 CACHED
|
||||
|
||||
#12 [dev_containers_target_stage 4/5] RUN echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
|
||||
#12 CACHED
|
||||
|
||||
#13 [dev_containers_feature_content_normalize 3/3] RUN chmod -R 0755 /tmp/build-features/
|
||||
#13 CACHED
|
||||
|
||||
#14 [dev_containers_target_stage 5/5] RUN --mount=type=bind,from=dev_containers_feature_content_source,source=docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features && chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 && cd /tmp/dev-container-features/docker-in-docker_0 && chmod +x ./devcontainer-features-install.sh && ./devcontainer-features-install.sh && rm -rf /tmp/dev-container-features/docker-in-docker_0
|
||||
#14 CACHED
|
||||
|
||||
#15 exporting to image
|
||||
#15 exporting layers done
|
||||
#15 writing image sha256:275dc193c905d448ef3945e3fc86220cc315fe0cb41013988d6ff9f8d6ef2357 done
|
||||
#15 naming to docker.io/library/vsc-devcontainers-template-starter-81d8f17e32abef6d434cbb5a37fe05e5c8a6f8ccede47a61197f002dcbf60566-features done
|
||||
#15 DONE 0.0s
|
||||
Run: docker buildx build --load --build-context dev_containers_feature_content_source=/var/folders/1y/cm8mblxd7_x9cljwl_jvfprh0000gn/T/devcontainercli/container-features/0.75.0-1744102171193 --build-arg _DEV_CONTAINERS_BASE_IMAGE=mcr.microsoft.com/devcontainers/javascript-node:1-18-bullseye --build-arg _DEV_CONTAINERS_IMAGE_USER=root --build-arg _DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp --target dev_containers_target_stage -f /var/folders/1y/cm8mblxd7_x9cljwl_jvfprh0000gn/T/devcontainercli/container-features/0.75.0-1744102171193/Dockerfile.extended -t vsc-devcontainers-template-starter-81d8f17e32abef6d434cbb5a37fe05e5c8a6f8ccede47a61197f002dcbf60566-features /var/folders/1y/cm8mblxd7_x9cljwl_jvfprh0000gn/T/devcontainercli/empty-folder
|
||||
Run: docker run --sig-proxy=false -a STDOUT -a STDERR --mount type=bind,source=/code/devcontainers-template-starter,target=/workspaces/devcontainers-template-starter,consistency=cached --mount type=volume,src=dind-var-lib-docker-0pctifo8bbg3pd06g3j5s9ae8j7lp5qfcd67m25kuahurel7v7jm,dst=/var/lib/docker -l devcontainer.local_folder=/code/devcontainers-template-starter -l devcontainer.config_file=/code/devcontainers-template-starter/.devcontainer/devcontainer.json --privileged --entrypoint /bin/sh vsc-devcontainers-template-starter-81d8f17e32abef6d434cbb5a37fe05e5c8a6f8ccede47a61197f002dcbf60566-features -c echo Container started
|
||||
Container started
|
||||
Not setting dockerd DNS manually.
|
||||
[1mRunning the postCreateCommand from devcontainer.json...[0m
|
||||
added 1 package in 784ms
|
||||
{"outcome":"success","containerId":"bc72db8d0c4c4e941bd9ffc341aee64a18d3397fd45b87cd93d4746150967ba8","remoteUser":"node","remoteWorkspaceFolder":"/workspaces/devcontainers-template-starter"}
|
||||
Generated
Vendored
-2
@@ -1,2 +0,0 @@
|
||||
{"type":"text","level":3,"timestamp":1749557935646,"text":"@devcontainers/cli 0.75.0. Node.js v20.16.0. linux 6.8.0-60-generic x64."}
|
||||
{"type":"text","level":2,"timestamp":1749557935646,"text":"Error: Dev container config (/home/coder/.devcontainer/devcontainer.json) not found.\n at v7 (/usr/local/nvm/versions/node/v20.16.0/lib/node_modules/@devcontainers/cli/dist/spec-node/devContainersSpecCLI.js:668:6918)\n at async /usr/local/nvm/versions/node/v20.16.0/lib/node_modules/@devcontainers/cli/dist/spec-node/devContainersSpecCLI.js:484:1188"}
|
||||
Generated
Vendored
-8
@@ -1,8 +0,0 @@
|
||||
{"type":"text","level":3,"timestamp":1749557820014,"text":"@devcontainers/cli 0.75.0. Node.js v20.16.0. linux 6.8.0-60-generic x64."}
|
||||
{"type":"start","level":2,"timestamp":1749557820014,"text":"Run: git rev-parse --show-cdup"}
|
||||
{"type":"stop","level":2,"timestamp":1749557820023,"text":"Run: git rev-parse --show-cdup","startTimestamp":1749557820014}
|
||||
{"type":"start","level":2,"timestamp":1749557820023,"text":"Run: docker ps -q -a --filter label=devcontainer.local_folder=/home/coder/coder --filter label=devcontainer.config_file=/home/coder/coder/.devcontainer/devcontainer.json"}
|
||||
{"type":"stop","level":2,"timestamp":1749557820039,"text":"Run: docker ps -q -a --filter label=devcontainer.local_folder=/home/coder/coder --filter label=devcontainer.config_file=/home/coder/coder/.devcontainer/devcontainer.json","startTimestamp":1749557820023}
|
||||
{"type":"start","level":2,"timestamp":1749557820039,"text":"Run: docker ps -q -a --filter label=devcontainer.local_folder=/home/coder/coder"}
|
||||
{"type":"stop","level":2,"timestamp":1749557820054,"text":"Run: docker ps -q -a --filter label=devcontainer.local_folder=/home/coder/coder","startTimestamp":1749557820039}
|
||||
{"mergedConfiguration":{"customizations":{"coder":[{"displayApps":{"vscode":true,"web_terminal":true}},{"displayApps":{"vscode_insiders":true,"web_terminal":false}}]}}}
|
||||
Generated
Vendored
-8
@@ -1,8 +0,0 @@
|
||||
{"type":"text","level":3,"timestamp":1749557820014,"text":"@devcontainers/cli 0.75.0. Node.js v20.16.0. linux 6.8.0-60-generic x64."}
|
||||
{"type":"start","level":2,"timestamp":1749557820014,"text":"Run: git rev-parse --show-cdup"}
|
||||
{"type":"stop","level":2,"timestamp":1749557820023,"text":"Run: git rev-parse --show-cdup","startTimestamp":1749557820014}
|
||||
{"type":"start","level":2,"timestamp":1749557820023,"text":"Run: docker ps -q -a --filter label=devcontainer.local_folder=/home/coder/coder --filter label=devcontainer.config_file=/home/coder/coder/.devcontainer/devcontainer.json"}
|
||||
{"type":"stop","level":2,"timestamp":1749557820039,"text":"Run: docker ps -q -a --filter label=devcontainer.local_folder=/home/coder/coder --filter label=devcontainer.config_file=/home/coder/coder/.devcontainer/devcontainer.json","startTimestamp":1749557820023}
|
||||
{"type":"start","level":2,"timestamp":1749557820039,"text":"Run: docker ps -q -a --filter label=devcontainer.local_folder=/home/coder/coder"}
|
||||
{"type":"stop","level":2,"timestamp":1749557820054,"text":"Run: docker ps -q -a --filter label=devcontainer.local_folder=/home/coder/coder","startTimestamp":1749557820039}
|
||||
{"mergedConfiguration":{"customizations":{}}}
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
@@ -89,34 +88,24 @@ func TestFSNotifyWatcher(t *testing.T) {
|
||||
break
|
||||
}
|
||||
|
||||
// TODO(DanielleMaywood):
|
||||
// Unfortunately it appears this atomic-rename phase of the test is flakey on macOS.
|
||||
//
|
||||
// This test flake could be indicative of an issue that may present itself
|
||||
// in a running environment. Fortunately, we only use this (as of 2025-07-29)
|
||||
// for our dev container integration. We do not expect the host workspace
|
||||
// (where this is used), to ever be run on macOS, as containers are a linux
|
||||
// paradigm.
|
||||
if runtime.GOOS != "darwin" {
|
||||
err = os.WriteFile(testFile+".atomic", []byte(`{"test": "atomic"}`), 0o600)
|
||||
require.NoError(t, err, "write new atomic test file failed")
|
||||
err = os.WriteFile(testFile+".atomic", []byte(`{"test": "atomic"}`), 0o600)
|
||||
require.NoError(t, err, "write new atomic test file failed")
|
||||
|
||||
err = os.Rename(testFile+".atomic", testFile)
|
||||
require.NoError(t, err, "rename atomic test file failed")
|
||||
err = os.Rename(testFile+".atomic", testFile)
|
||||
require.NoError(t, err, "rename atomic test file failed")
|
||||
|
||||
// Verify that we receive the event we want.
|
||||
for {
|
||||
event, err := wut.Next(ctx)
|
||||
require.NoError(t, err, "next event failed")
|
||||
require.NotNil(t, event, "want non-nil event")
|
||||
if !event.Has(fsnotify.Create) {
|
||||
t.Logf("Ignoring event: %s", event)
|
||||
continue
|
||||
}
|
||||
require.Truef(t, event.Has(fsnotify.Create), "want create event: %s", event.String())
|
||||
require.Equal(t, event.Name, testFile, "want event for test file")
|
||||
break
|
||||
// Verify that we receive the event we want.
|
||||
for {
|
||||
event, err := wut.Next(ctx)
|
||||
require.NoError(t, err, "next event failed")
|
||||
require.NotNil(t, event, "want non-nil event")
|
||||
if !event.Has(fsnotify.Create) {
|
||||
t.Logf("Ignoring event: %s", event)
|
||||
continue
|
||||
}
|
||||
require.Truef(t, event.Has(fsnotify.Create), "want create event: %s", event.String())
|
||||
require.Equal(t, event.Name, testFile, "want event for test file")
|
||||
break
|
||||
}
|
||||
|
||||
// Test removing the file from the watcher.
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -79,6 +80,21 @@ func New(opts Options) *Runner {
|
||||
|
||||
type ScriptCompletedFunc func(context.Context, *proto.WorkspaceAgentScriptCompletedRequest) (*proto.WorkspaceAgentScriptCompletedResponse, error)
|
||||
|
||||
type runnerScript struct {
|
||||
runOnPostStart bool
|
||||
codersdk.WorkspaceAgentScript
|
||||
}
|
||||
|
||||
func toRunnerScript(scripts ...codersdk.WorkspaceAgentScript) []runnerScript {
|
||||
var rs []runnerScript
|
||||
for _, s := range scripts {
|
||||
rs = append(rs, runnerScript{
|
||||
WorkspaceAgentScript: s,
|
||||
})
|
||||
}
|
||||
return rs
|
||||
}
|
||||
|
||||
type Runner struct {
|
||||
Options
|
||||
|
||||
@@ -88,7 +104,8 @@ type Runner struct {
|
||||
closed chan struct{}
|
||||
closeMutex sync.Mutex
|
||||
cron *cron.Cron
|
||||
scripts []codersdk.WorkspaceAgentScript
|
||||
initialized atomic.Bool
|
||||
scripts []runnerScript
|
||||
dataDir string
|
||||
scriptCompleted ScriptCompletedFunc
|
||||
|
||||
@@ -96,9 +113,6 @@ type Runner struct {
|
||||
// execute startup scripts, and scripts on a cron schedule. Both will increment
|
||||
// this counter.
|
||||
scriptsExecuted *prometheus.CounterVec
|
||||
|
||||
initMutex sync.Mutex
|
||||
initialized bool
|
||||
}
|
||||
|
||||
// DataDir returns the directory where scripts data is stored.
|
||||
@@ -123,17 +137,28 @@ func (r *Runner) RegisterMetrics(reg prometheus.Registerer) {
|
||||
// InitOption describes an option for the runner initialization.
|
||||
type InitOption func(*Runner)
|
||||
|
||||
// WithPostStartScripts adds scripts that should be run after the workspace
|
||||
// start scripts but before the workspace is marked as started.
|
||||
func WithPostStartScripts(scripts ...codersdk.WorkspaceAgentScript) InitOption {
|
||||
return func(r *Runner) {
|
||||
for _, s := range scripts {
|
||||
r.scripts = append(r.scripts, runnerScript{
|
||||
runOnPostStart: true,
|
||||
WorkspaceAgentScript: s,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Init initializes the runner with the provided scripts.
|
||||
// It also schedules any scripts that have a schedule.
|
||||
// This function must be called before Execute.
|
||||
func (r *Runner) Init(scripts []codersdk.WorkspaceAgentScript, scriptCompleted ScriptCompletedFunc, opts ...InitOption) error {
|
||||
r.initMutex.Lock()
|
||||
defer r.initMutex.Unlock()
|
||||
if r.initialized {
|
||||
if r.initialized.Load() {
|
||||
return xerrors.New("init: already initialized")
|
||||
}
|
||||
r.initialized = true
|
||||
r.scripts = scripts
|
||||
r.initialized.Store(true)
|
||||
r.scripts = toRunnerScript(scripts...)
|
||||
r.scriptCompleted = scriptCompleted
|
||||
for _, opt := range opts {
|
||||
opt(r)
|
||||
@@ -149,8 +174,9 @@ func (r *Runner) Init(scripts []codersdk.WorkspaceAgentScript, scriptCompleted S
|
||||
if script.Cron == "" {
|
||||
continue
|
||||
}
|
||||
script := script
|
||||
_, err := r.cron.AddFunc(script.Cron, func() {
|
||||
err := r.trackRun(r.cronCtx, script, ExecuteCronScripts)
|
||||
err := r.trackRun(r.cronCtx, script.WorkspaceAgentScript, ExecuteCronScripts)
|
||||
if err != nil {
|
||||
r.Logger.Warn(context.Background(), "run agent script on schedule", slog.Error(err))
|
||||
}
|
||||
@@ -194,28 +220,18 @@ type ExecuteOption int
|
||||
const (
|
||||
ExecuteAllScripts ExecuteOption = iota
|
||||
ExecuteStartScripts
|
||||
ExecutePostStartScripts
|
||||
ExecuteStopScripts
|
||||
ExecuteCronScripts
|
||||
)
|
||||
|
||||
// Execute runs a set of scripts according to a filter.
|
||||
func (r *Runner) Execute(ctx context.Context, option ExecuteOption) error {
|
||||
initErr := func() error {
|
||||
r.initMutex.Lock()
|
||||
defer r.initMutex.Unlock()
|
||||
if !r.initialized {
|
||||
return xerrors.New("execute: not initialized")
|
||||
}
|
||||
return nil
|
||||
}()
|
||||
if initErr != nil {
|
||||
return initErr
|
||||
}
|
||||
|
||||
var eg errgroup.Group
|
||||
for _, script := range r.scripts {
|
||||
runScript := (option == ExecuteStartScripts && script.RunOnStart) ||
|
||||
(option == ExecuteStopScripts && script.RunOnStop) ||
|
||||
(option == ExecutePostStartScripts && script.runOnPostStart) ||
|
||||
(option == ExecuteCronScripts && script.Cron != "") ||
|
||||
option == ExecuteAllScripts
|
||||
|
||||
@@ -223,8 +239,9 @@ func (r *Runner) Execute(ctx context.Context, option ExecuteOption) error {
|
||||
continue
|
||||
}
|
||||
|
||||
script := script
|
||||
eg.Go(func() error {
|
||||
err := r.trackRun(ctx, script, option)
|
||||
err := r.trackRun(ctx, script.WorkspaceAgentScript, option)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("run agent script %q: %w", script.LogSourceID, err)
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -143,12 +144,6 @@ func TestScriptReportsTiming(t *testing.T) {
|
||||
|
||||
timing := timings[0]
|
||||
require.Equal(t, int32(0), timing.ExitCode)
|
||||
if assert.True(t, timing.Start.IsValid(), "start time should be valid") {
|
||||
require.NotZero(t, timing.Start.AsTime(), "start time should not be zero")
|
||||
}
|
||||
if assert.True(t, timing.End.IsValid(), "end time should be valid") {
|
||||
require.NotZero(t, timing.End.AsTime(), "end time should not be zero")
|
||||
}
|
||||
require.GreaterOrEqual(t, timing.End.AsTime(), timing.Start.AsTime())
|
||||
}
|
||||
|
||||
@@ -176,6 +171,11 @@ func TestExecuteOptions(t *testing.T) {
|
||||
Script: "echo stop",
|
||||
RunOnStop: true,
|
||||
}
|
||||
postStartScript := codersdk.WorkspaceAgentScript{
|
||||
ID: uuid.New(),
|
||||
LogSourceID: uuid.New(),
|
||||
Script: "echo poststart",
|
||||
}
|
||||
regularScript := codersdk.WorkspaceAgentScript{
|
||||
ID: uuid.New(),
|
||||
LogSourceID: uuid.New(),
|
||||
@@ -187,9 +187,10 @@ func TestExecuteOptions(t *testing.T) {
|
||||
stopScript,
|
||||
regularScript,
|
||||
}
|
||||
allScripts := append(slices.Clone(scripts), postStartScript)
|
||||
|
||||
scriptByID := func(t *testing.T, id uuid.UUID) codersdk.WorkspaceAgentScript {
|
||||
for _, script := range scripts {
|
||||
for _, script := range allScripts {
|
||||
if script.ID == id {
|
||||
return script
|
||||
}
|
||||
@@ -199,9 +200,10 @@ func TestExecuteOptions(t *testing.T) {
|
||||
}
|
||||
|
||||
wantOutput := map[uuid.UUID]string{
|
||||
startScript.ID: "start",
|
||||
stopScript.ID: "stop",
|
||||
regularScript.ID: "regular",
|
||||
startScript.ID: "start",
|
||||
stopScript.ID: "stop",
|
||||
postStartScript.ID: "poststart",
|
||||
regularScript.ID: "regular",
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
@@ -212,13 +214,18 @@ func TestExecuteOptions(t *testing.T) {
|
||||
{
|
||||
name: "ExecuteAllScripts",
|
||||
option: agentscripts.ExecuteAllScripts,
|
||||
wantRun: []uuid.UUID{startScript.ID, stopScript.ID, regularScript.ID},
|
||||
wantRun: []uuid.UUID{startScript.ID, stopScript.ID, regularScript.ID, postStartScript.ID},
|
||||
},
|
||||
{
|
||||
name: "ExecuteStartScripts",
|
||||
option: agentscripts.ExecuteStartScripts,
|
||||
wantRun: []uuid.UUID{startScript.ID},
|
||||
},
|
||||
{
|
||||
name: "ExecutePostStartScripts",
|
||||
option: agentscripts.ExecutePostStartScripts,
|
||||
wantRun: []uuid.UUID{postStartScript.ID},
|
||||
},
|
||||
{
|
||||
name: "ExecuteStopScripts",
|
||||
option: agentscripts.ExecuteStopScripts,
|
||||
@@ -247,6 +254,7 @@ func TestExecuteOptions(t *testing.T) {
|
||||
err := runner.Init(
|
||||
scripts,
|
||||
aAPI.ScriptCompleted,
|
||||
agentscripts.WithPostStartScripts(postStartScript),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -260,7 +268,7 @@ func TestExecuteOptions(t *testing.T) {
|
||||
"script %s should have run when using filter %s", scriptByID(t, id).Script, tc.name)
|
||||
}
|
||||
|
||||
for _, script := range scripts {
|
||||
for _, script := range allScripts {
|
||||
if _, ok := gotRun[script.ID]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -1,146 +0,0 @@
|
||||
package agentsocket
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
"storj.io/drpc"
|
||||
"storj.io/drpc/drpcconn"
|
||||
|
||||
"github.com/coder/coder/v2/agent/agentsocket/proto"
|
||||
"github.com/coder/coder/v2/agent/unit"
|
||||
)
|
||||
|
||||
// Option represents a configuration option for NewClient.
|
||||
type Option func(*options)
|
||||
|
||||
type options struct {
|
||||
path string
|
||||
}
|
||||
|
||||
// WithPath sets the socket path. If not provided or empty, the client will
|
||||
// auto-discover the default socket path.
|
||||
func WithPath(path string) Option {
|
||||
return func(opts *options) {
|
||||
if path == "" {
|
||||
return
|
||||
}
|
||||
opts.path = path
|
||||
}
|
||||
}
|
||||
|
||||
// Client provides a client for communicating with the workspace agentsocket API.
|
||||
type Client struct {
|
||||
client proto.DRPCAgentSocketClient
|
||||
conn drpc.Conn
|
||||
}
|
||||
|
||||
// NewClient creates a new socket client and opens a connection to the socket.
|
||||
// If path is not provided via WithPath or is empty, it will auto-discover the
|
||||
// default socket path.
|
||||
func NewClient(ctx context.Context, opts ...Option) (*Client, error) {
|
||||
options := &options{}
|
||||
for _, opt := range opts {
|
||||
opt(options)
|
||||
}
|
||||
|
||||
conn, err := dialSocket(ctx, options.path)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("connect to socket: %w", err)
|
||||
}
|
||||
|
||||
drpcConn := drpcconn.New(conn)
|
||||
client := proto.NewDRPCAgentSocketClient(drpcConn)
|
||||
|
||||
return &Client{
|
||||
client: client,
|
||||
conn: drpcConn,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close closes the socket connection.
|
||||
func (c *Client) Close() error {
|
||||
return c.conn.Close()
|
||||
}
|
||||
|
||||
// Ping sends a ping request to the agent.
|
||||
func (c *Client) Ping(ctx context.Context) error {
|
||||
_, err := c.client.Ping(ctx, &proto.PingRequest{})
|
||||
return err
|
||||
}
|
||||
|
||||
// SyncStart starts a unit in the dependency graph.
|
||||
func (c *Client) SyncStart(ctx context.Context, unitName unit.ID) error {
|
||||
_, err := c.client.SyncStart(ctx, &proto.SyncStartRequest{
|
||||
Unit: string(unitName),
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// SyncWant declares a dependency between units.
|
||||
func (c *Client) SyncWant(ctx context.Context, unitName, dependsOn unit.ID) error {
|
||||
_, err := c.client.SyncWant(ctx, &proto.SyncWantRequest{
|
||||
Unit: string(unitName),
|
||||
DependsOn: string(dependsOn),
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// SyncComplete marks a unit as complete in the dependency graph.
|
||||
func (c *Client) SyncComplete(ctx context.Context, unitName unit.ID) error {
|
||||
_, err := c.client.SyncComplete(ctx, &proto.SyncCompleteRequest{
|
||||
Unit: string(unitName),
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// SyncReady requests whether a unit is ready to be started. That is, all dependencies are satisfied.
|
||||
func (c *Client) SyncReady(ctx context.Context, unitName unit.ID) (bool, error) {
|
||||
resp, err := c.client.SyncReady(ctx, &proto.SyncReadyRequest{
|
||||
Unit: string(unitName),
|
||||
})
|
||||
return resp.Ready, err
|
||||
}
|
||||
|
||||
// SyncStatus gets the status of a unit and its dependencies.
|
||||
func (c *Client) SyncStatus(ctx context.Context, unitName unit.ID) (SyncStatusResponse, error) {
|
||||
resp, err := c.client.SyncStatus(ctx, &proto.SyncStatusRequest{
|
||||
Unit: string(unitName),
|
||||
})
|
||||
if err != nil {
|
||||
return SyncStatusResponse{}, err
|
||||
}
|
||||
|
||||
var dependencies []DependencyInfo
|
||||
for _, dep := range resp.Dependencies {
|
||||
dependencies = append(dependencies, DependencyInfo{
|
||||
DependsOn: unit.ID(dep.DependsOn),
|
||||
RequiredStatus: unit.Status(dep.RequiredStatus),
|
||||
CurrentStatus: unit.Status(dep.CurrentStatus),
|
||||
IsSatisfied: dep.IsSatisfied,
|
||||
})
|
||||
}
|
||||
|
||||
return SyncStatusResponse{
|
||||
UnitName: unitName,
|
||||
Status: unit.Status(resp.Status),
|
||||
IsReady: resp.IsReady,
|
||||
Dependencies: dependencies,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SyncStatusResponse contains the status information for a unit.
|
||||
type SyncStatusResponse struct {
|
||||
UnitName unit.ID `table:"unit,default_sort" json:"unit_name"`
|
||||
Status unit.Status `table:"status" json:"status"`
|
||||
IsReady bool `table:"ready" json:"is_ready"`
|
||||
Dependencies []DependencyInfo `table:"dependencies" json:"dependencies"`
|
||||
}
|
||||
|
||||
// DependencyInfo contains information about a unit dependency.
|
||||
type DependencyInfo struct {
|
||||
DependsOn unit.ID `table:"depends on,default_sort" json:"depends_on"`
|
||||
RequiredStatus unit.Status `table:"required status" json:"required_status"`
|
||||
CurrentStatus unit.Status `table:"current status" json:"current_status"`
|
||||
IsSatisfied bool `table:"satisfied" json:"is_satisfied"`
|
||||
}
|
||||
@@ -1,968 +0,0 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.30.0
|
||||
// protoc v4.23.4
|
||||
// source: agent/agentsocket/proto/agentsocket.proto
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type PingRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *PingRequest) Reset() {
|
||||
*x = PingRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *PingRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*PingRequest) ProtoMessage() {}
|
||||
|
||||
func (x *PingRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PingRequest.ProtoReflect.Descriptor instead.
|
||||
func (*PingRequest) Descriptor() ([]byte, []int) {
|
||||
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
type PingResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *PingResponse) Reset() {
|
||||
*x = PingResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *PingResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*PingResponse) ProtoMessage() {}
|
||||
|
||||
func (x *PingResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PingResponse.ProtoReflect.Descriptor instead.
|
||||
func (*PingResponse) Descriptor() ([]byte, []int) {
|
||||
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
type SyncStartRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
}
|
||||
|
||||
func (x *SyncStartRequest) Reset() {
|
||||
*x = SyncStartRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *SyncStartRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SyncStartRequest) ProtoMessage() {}
|
||||
|
||||
func (x *SyncStartRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SyncStartRequest.ProtoReflect.Descriptor instead.
|
||||
func (*SyncStartRequest) Descriptor() ([]byte, []int) {
|
||||
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *SyncStartRequest) GetUnit() string {
|
||||
if x != nil {
|
||||
return x.Unit
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type SyncStartResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *SyncStartResponse) Reset() {
|
||||
*x = SyncStartResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *SyncStartResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SyncStartResponse) ProtoMessage() {}
|
||||
|
||||
func (x *SyncStartResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SyncStartResponse.ProtoReflect.Descriptor instead.
|
||||
func (*SyncStartResponse) Descriptor() ([]byte, []int) {
|
||||
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
type SyncWantRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
DependsOn string `protobuf:"bytes,2,opt,name=depends_on,json=dependsOn,proto3" json:"depends_on,omitempty"`
|
||||
}
|
||||
|
||||
func (x *SyncWantRequest) Reset() {
|
||||
*x = SyncWantRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *SyncWantRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SyncWantRequest) ProtoMessage() {}
|
||||
|
||||
func (x *SyncWantRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SyncWantRequest.ProtoReflect.Descriptor instead.
|
||||
func (*SyncWantRequest) Descriptor() ([]byte, []int) {
|
||||
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *SyncWantRequest) GetUnit() string {
|
||||
if x != nil {
|
||||
return x.Unit
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *SyncWantRequest) GetDependsOn() string {
|
||||
if x != nil {
|
||||
return x.DependsOn
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type SyncWantResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *SyncWantResponse) Reset() {
|
||||
*x = SyncWantResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *SyncWantResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SyncWantResponse) ProtoMessage() {}
|
||||
|
||||
func (x *SyncWantResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SyncWantResponse.ProtoReflect.Descriptor instead.
|
||||
func (*SyncWantResponse) Descriptor() ([]byte, []int) {
|
||||
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
type SyncCompleteRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
}
|
||||
|
||||
func (x *SyncCompleteRequest) Reset() {
|
||||
*x = SyncCompleteRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *SyncCompleteRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SyncCompleteRequest) ProtoMessage() {}
|
||||
|
||||
func (x *SyncCompleteRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[6]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SyncCompleteRequest.ProtoReflect.Descriptor instead.
|
||||
func (*SyncCompleteRequest) Descriptor() ([]byte, []int) {
|
||||
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
func (x *SyncCompleteRequest) GetUnit() string {
|
||||
if x != nil {
|
||||
return x.Unit
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type SyncCompleteResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *SyncCompleteResponse) Reset() {
|
||||
*x = SyncCompleteResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *SyncCompleteResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SyncCompleteResponse) ProtoMessage() {}
|
||||
|
||||
func (x *SyncCompleteResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[7]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SyncCompleteResponse.ProtoReflect.Descriptor instead.
|
||||
func (*SyncCompleteResponse) Descriptor() ([]byte, []int) {
|
||||
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{7}
|
||||
}
|
||||
|
||||
type SyncReadyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
}
|
||||
|
||||
func (x *SyncReadyRequest) Reset() {
|
||||
*x = SyncReadyRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *SyncReadyRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SyncReadyRequest) ProtoMessage() {}
|
||||
|
||||
func (x *SyncReadyRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[8]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SyncReadyRequest.ProtoReflect.Descriptor instead.
|
||||
func (*SyncReadyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
func (x *SyncReadyRequest) GetUnit() string {
|
||||
if x != nil {
|
||||
return x.Unit
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type SyncReadyResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Ready bool `protobuf:"varint,1,opt,name=ready,proto3" json:"ready,omitempty"`
|
||||
}
|
||||
|
||||
func (x *SyncReadyResponse) Reset() {
|
||||
*x = SyncReadyResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[9]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *SyncReadyResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SyncReadyResponse) ProtoMessage() {}
|
||||
|
||||
func (x *SyncReadyResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[9]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SyncReadyResponse.ProtoReflect.Descriptor instead.
|
||||
func (*SyncReadyResponse) Descriptor() ([]byte, []int) {
|
||||
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{9}
|
||||
}
|
||||
|
||||
func (x *SyncReadyResponse) GetReady() bool {
|
||||
if x != nil {
|
||||
return x.Ready
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type SyncStatusRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
}
|
||||
|
||||
func (x *SyncStatusRequest) Reset() {
|
||||
*x = SyncStatusRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[10]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *SyncStatusRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SyncStatusRequest) ProtoMessage() {}
|
||||
|
||||
func (x *SyncStatusRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[10]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SyncStatusRequest.ProtoReflect.Descriptor instead.
|
||||
func (*SyncStatusRequest) Descriptor() ([]byte, []int) {
|
||||
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{10}
|
||||
}
|
||||
|
||||
func (x *SyncStatusRequest) GetUnit() string {
|
||||
if x != nil {
|
||||
return x.Unit
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type DependencyInfo struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Unit string `protobuf:"bytes,1,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
DependsOn string `protobuf:"bytes,2,opt,name=depends_on,json=dependsOn,proto3" json:"depends_on,omitempty"`
|
||||
RequiredStatus string `protobuf:"bytes,3,opt,name=required_status,json=requiredStatus,proto3" json:"required_status,omitempty"`
|
||||
CurrentStatus string `protobuf:"bytes,4,opt,name=current_status,json=currentStatus,proto3" json:"current_status,omitempty"`
|
||||
IsSatisfied bool `protobuf:"varint,5,opt,name=is_satisfied,json=isSatisfied,proto3" json:"is_satisfied,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DependencyInfo) Reset() {
|
||||
*x = DependencyInfo{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[11]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DependencyInfo) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DependencyInfo) ProtoMessage() {}
|
||||
|
||||
func (x *DependencyInfo) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[11]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DependencyInfo.ProtoReflect.Descriptor instead.
|
||||
func (*DependencyInfo) Descriptor() ([]byte, []int) {
|
||||
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{11}
|
||||
}
|
||||
|
||||
func (x *DependencyInfo) GetUnit() string {
|
||||
if x != nil {
|
||||
return x.Unit
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *DependencyInfo) GetDependsOn() string {
|
||||
if x != nil {
|
||||
return x.DependsOn
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *DependencyInfo) GetRequiredStatus() string {
|
||||
if x != nil {
|
||||
return x.RequiredStatus
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *DependencyInfo) GetCurrentStatus() string {
|
||||
if x != nil {
|
||||
return x.CurrentStatus
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *DependencyInfo) GetIsSatisfied() bool {
|
||||
if x != nil {
|
||||
return x.IsSatisfied
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type SyncStatusResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
|
||||
IsReady bool `protobuf:"varint,2,opt,name=is_ready,json=isReady,proto3" json:"is_ready,omitempty"`
|
||||
Dependencies []*DependencyInfo `protobuf:"bytes,3,rep,name=dependencies,proto3" json:"dependencies,omitempty"`
|
||||
}
|
||||
|
||||
func (x *SyncStatusResponse) Reset() {
|
||||
*x = SyncStatusResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[12]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *SyncStatusResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SyncStatusResponse) ProtoMessage() {}
|
||||
|
||||
func (x *SyncStatusResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_agent_agentsocket_proto_agentsocket_proto_msgTypes[12]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SyncStatusResponse.ProtoReflect.Descriptor instead.
|
||||
func (*SyncStatusResponse) Descriptor() ([]byte, []int) {
|
||||
return file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP(), []int{12}
|
||||
}
|
||||
|
||||
func (x *SyncStatusResponse) GetStatus() string {
|
||||
if x != nil {
|
||||
return x.Status
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *SyncStatusResponse) GetIsReady() bool {
|
||||
if x != nil {
|
||||
return x.IsReady
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *SyncStatusResponse) GetDependencies() []*DependencyInfo {
|
||||
if x != nil {
|
||||
return x.Dependencies
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_agent_agentsocket_proto_agentsocket_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_agent_agentsocket_proto_agentsocket_proto_rawDesc = []byte{
|
||||
0x0a, 0x29, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63,
|
||||
0x6b, 0x65, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73,
|
||||
0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x63, 0x6f, 0x64,
|
||||
0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x76,
|
||||
0x31, 0x22, 0x0d, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x22, 0x0e, 0x0a, 0x0c, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x22, 0x26, 0x0a, 0x10, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x22, 0x13, 0x0a, 0x11, 0x53, 0x79, 0x6e, 0x63,
|
||||
0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x44, 0x0a,
|
||||
0x0f, 0x53, 0x79, 0x6e, 0x63, 0x57, 0x61, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
|
||||
0x75, 0x6e, 0x69, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x73, 0x5f,
|
||||
0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64,
|
||||
0x73, 0x4f, 0x6e, 0x22, 0x12, 0x0a, 0x10, 0x53, 0x79, 0x6e, 0x63, 0x57, 0x61, 0x6e, 0x74, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x0a, 0x13, 0x53, 0x79, 0x6e, 0x63, 0x43,
|
||||
0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12,
|
||||
0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e,
|
||||
0x69, 0x74, 0x22, 0x16, 0x0a, 0x14, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65,
|
||||
0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x0a, 0x10, 0x53, 0x79,
|
||||
0x6e, 0x63, 0x52, 0x65, 0x61, 0x64, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12,
|
||||
0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e,
|
||||
0x69, 0x74, 0x22, 0x29, 0x0a, 0x11, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x61, 0x64, 0x79, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x22, 0x27, 0x0a,
|
||||
0x11, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x22, 0xb6, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x70, 0x65, 0x6e,
|
||||
0x64, 0x65, 0x6e, 0x63, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69,
|
||||
0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x12, 0x1d, 0x0a,
|
||||
0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x73, 0x5f, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x09, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x73, 0x4f, 0x6e, 0x12, 0x27, 0x0a, 0x0f,
|
||||
0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
|
||||
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x53,
|
||||
0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74,
|
||||
0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63,
|
||||
0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x0c,
|
||||
0x69, 0x73, 0x5f, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01,
|
||||
0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x53, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x64, 0x22,
|
||||
0x91, 0x01, 0x0a, 0x12, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19,
|
||||
0x0a, 0x08, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
|
||||
0x52, 0x07, 0x69, 0x73, 0x52, 0x65, 0x61, 0x64, 0x79, 0x12, 0x48, 0x0a, 0x0c, 0x64, 0x65, 0x70,
|
||||
0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32,
|
||||
0x24, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63,
|
||||
0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63,
|
||||
0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0c, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63,
|
||||
0x69, 0x65, 0x73, 0x32, 0xbb, 0x04, 0x0a, 0x0b, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x63,
|
||||
0x6b, 0x65, 0x74, 0x12, 0x4d, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x21, 0x2e, 0x63, 0x6f,
|
||||
0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22,
|
||||
0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b,
|
||||
0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x12, 0x5c, 0x0a, 0x09, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12,
|
||||
0x26, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63,
|
||||
0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e,
|
||||
0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53,
|
||||
0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x12, 0x59, 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, 0x57, 0x61, 0x6e, 0x74, 0x12, 0x25, 0x2e, 0x63,
|
||||
0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x57, 0x61, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e,
|
||||
0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x57,
|
||||
0x61, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x0c, 0x53,
|
||||
0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x29, 0x2e, 0x63, 0x6f,
|
||||
0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61,
|
||||
0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79,
|
||||
0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x12, 0x5c, 0x0a, 0x09, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x61, 0x64, 0x79, 0x12,
|
||||
0x26, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63,
|
||||
0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x61, 0x64, 0x79,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e,
|
||||
0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53,
|
||||
0x79, 0x6e, 0x63, 0x52, 0x65, 0x61, 0x64, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x12, 0x5f, 0x0a, 0x0a, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x27,
|
||||
0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b,
|
||||
0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e,
|
||||
0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53,
|
||||
0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
|
||||
0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x61,
|
||||
0x67, 0x65, 0x6e, 0x74, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74,
|
||||
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_agent_agentsocket_proto_agentsocket_proto_rawDescOnce sync.Once
|
||||
file_agent_agentsocket_proto_agentsocket_proto_rawDescData = file_agent_agentsocket_proto_agentsocket_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_agent_agentsocket_proto_agentsocket_proto_rawDescGZIP() []byte {
|
||||
file_agent_agentsocket_proto_agentsocket_proto_rawDescOnce.Do(func() {
|
||||
file_agent_agentsocket_proto_agentsocket_proto_rawDescData = protoimpl.X.CompressGZIP(file_agent_agentsocket_proto_agentsocket_proto_rawDescData)
|
||||
})
|
||||
return file_agent_agentsocket_proto_agentsocket_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_agent_agentsocket_proto_agentsocket_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
|
||||
var file_agent_agentsocket_proto_agentsocket_proto_goTypes = []interface{}{
|
||||
(*PingRequest)(nil), // 0: coder.agentsocket.v1.PingRequest
|
||||
(*PingResponse)(nil), // 1: coder.agentsocket.v1.PingResponse
|
||||
(*SyncStartRequest)(nil), // 2: coder.agentsocket.v1.SyncStartRequest
|
||||
(*SyncStartResponse)(nil), // 3: coder.agentsocket.v1.SyncStartResponse
|
||||
(*SyncWantRequest)(nil), // 4: coder.agentsocket.v1.SyncWantRequest
|
||||
(*SyncWantResponse)(nil), // 5: coder.agentsocket.v1.SyncWantResponse
|
||||
(*SyncCompleteRequest)(nil), // 6: coder.agentsocket.v1.SyncCompleteRequest
|
||||
(*SyncCompleteResponse)(nil), // 7: coder.agentsocket.v1.SyncCompleteResponse
|
||||
(*SyncReadyRequest)(nil), // 8: coder.agentsocket.v1.SyncReadyRequest
|
||||
(*SyncReadyResponse)(nil), // 9: coder.agentsocket.v1.SyncReadyResponse
|
||||
(*SyncStatusRequest)(nil), // 10: coder.agentsocket.v1.SyncStatusRequest
|
||||
(*DependencyInfo)(nil), // 11: coder.agentsocket.v1.DependencyInfo
|
||||
(*SyncStatusResponse)(nil), // 12: coder.agentsocket.v1.SyncStatusResponse
|
||||
}
|
||||
var file_agent_agentsocket_proto_agentsocket_proto_depIdxs = []int32{
|
||||
11, // 0: coder.agentsocket.v1.SyncStatusResponse.dependencies:type_name -> coder.agentsocket.v1.DependencyInfo
|
||||
0, // 1: coder.agentsocket.v1.AgentSocket.Ping:input_type -> coder.agentsocket.v1.PingRequest
|
||||
2, // 2: coder.agentsocket.v1.AgentSocket.SyncStart:input_type -> coder.agentsocket.v1.SyncStartRequest
|
||||
4, // 3: coder.agentsocket.v1.AgentSocket.SyncWant:input_type -> coder.agentsocket.v1.SyncWantRequest
|
||||
6, // 4: coder.agentsocket.v1.AgentSocket.SyncComplete:input_type -> coder.agentsocket.v1.SyncCompleteRequest
|
||||
8, // 5: coder.agentsocket.v1.AgentSocket.SyncReady:input_type -> coder.agentsocket.v1.SyncReadyRequest
|
||||
10, // 6: coder.agentsocket.v1.AgentSocket.SyncStatus:input_type -> coder.agentsocket.v1.SyncStatusRequest
|
||||
1, // 7: coder.agentsocket.v1.AgentSocket.Ping:output_type -> coder.agentsocket.v1.PingResponse
|
||||
3, // 8: coder.agentsocket.v1.AgentSocket.SyncStart:output_type -> coder.agentsocket.v1.SyncStartResponse
|
||||
5, // 9: coder.agentsocket.v1.AgentSocket.SyncWant:output_type -> coder.agentsocket.v1.SyncWantResponse
|
||||
7, // 10: coder.agentsocket.v1.AgentSocket.SyncComplete:output_type -> coder.agentsocket.v1.SyncCompleteResponse
|
||||
9, // 11: coder.agentsocket.v1.AgentSocket.SyncReady:output_type -> coder.agentsocket.v1.SyncReadyResponse
|
||||
12, // 12: coder.agentsocket.v1.AgentSocket.SyncStatus:output_type -> coder.agentsocket.v1.SyncStatusResponse
|
||||
7, // [7:13] is the sub-list for method output_type
|
||||
1, // [1:7] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
1, // [1:1] is the sub-list for extension extendee
|
||||
0, // [0:1] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_agent_agentsocket_proto_agentsocket_proto_init() }
|
||||
func file_agent_agentsocket_proto_agentsocket_proto_init() {
|
||||
if File_agent_agentsocket_proto_agentsocket_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PingRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PingResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncStartRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncStartResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncWantRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncWantResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncCompleteRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncCompleteResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncReadyRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncReadyResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncStatusRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DependencyInfo); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_agent_agentsocket_proto_agentsocket_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SyncStatusResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_agent_agentsocket_proto_agentsocket_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 13,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_agent_agentsocket_proto_agentsocket_proto_goTypes,
|
||||
DependencyIndexes: file_agent_agentsocket_proto_agentsocket_proto_depIdxs,
|
||||
MessageInfos: file_agent_agentsocket_proto_agentsocket_proto_msgTypes,
|
||||
}.Build()
|
||||
File_agent_agentsocket_proto_agentsocket_proto = out.File
|
||||
file_agent_agentsocket_proto_agentsocket_proto_rawDesc = nil
|
||||
file_agent_agentsocket_proto_agentsocket_proto_goTypes = nil
|
||||
file_agent_agentsocket_proto_agentsocket_proto_depIdxs = nil
|
||||
}
|
||||
@@ -1,69 +0,0 @@
|
||||
syntax = "proto3";
|
||||
option go_package = "github.com/coder/coder/v2/agent/agentsocket/proto";
|
||||
|
||||
package coder.agentsocket.v1;
|
||||
|
||||
message PingRequest {}
|
||||
|
||||
message PingResponse {}
|
||||
|
||||
message SyncStartRequest {
|
||||
string unit = 1;
|
||||
}
|
||||
|
||||
message SyncStartResponse {}
|
||||
|
||||
message SyncWantRequest {
|
||||
string unit = 1;
|
||||
string depends_on = 2;
|
||||
}
|
||||
|
||||
message SyncWantResponse {}
|
||||
|
||||
message SyncCompleteRequest {
|
||||
string unit = 1;
|
||||
}
|
||||
|
||||
message SyncCompleteResponse {}
|
||||
|
||||
message SyncReadyRequest {
|
||||
string unit = 1;
|
||||
}
|
||||
|
||||
message SyncReadyResponse {
|
||||
bool ready = 1;
|
||||
}
|
||||
|
||||
message SyncStatusRequest {
|
||||
string unit = 1;
|
||||
}
|
||||
|
||||
message DependencyInfo {
|
||||
string unit = 1;
|
||||
string depends_on = 2;
|
||||
string required_status = 3;
|
||||
string current_status = 4;
|
||||
bool is_satisfied = 5;
|
||||
}
|
||||
|
||||
message SyncStatusResponse {
|
||||
string status = 1;
|
||||
bool is_ready = 2;
|
||||
repeated DependencyInfo dependencies = 3;
|
||||
}
|
||||
|
||||
// AgentSocket provides direct access to the agent over local IPC.
|
||||
service AgentSocket {
|
||||
// Ping the agent to check if it is alive.
|
||||
rpc Ping(PingRequest) returns (PingResponse);
|
||||
// Report the start of a unit.
|
||||
rpc SyncStart(SyncStartRequest) returns (SyncStartResponse);
|
||||
// Declare a dependency between units.
|
||||
rpc SyncWant(SyncWantRequest) returns (SyncWantResponse);
|
||||
// Report the completion of a unit.
|
||||
rpc SyncComplete(SyncCompleteRequest) returns (SyncCompleteResponse);
|
||||
// Request whether a unit is ready to be started. That is, all dependencies are satisfied.
|
||||
rpc SyncReady(SyncReadyRequest) returns (SyncReadyResponse);
|
||||
// Get the status of a unit and list its dependencies.
|
||||
rpc SyncStatus(SyncStatusRequest) returns (SyncStatusResponse);
|
||||
}
|
||||
@@ -1,311 +0,0 @@
|
||||
// Code generated by protoc-gen-go-drpc. DO NOT EDIT.
|
||||
// protoc-gen-go-drpc version: v0.0.34
|
||||
// source: agent/agentsocket/proto/agentsocket.proto
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
context "context"
|
||||
errors "errors"
|
||||
protojson "google.golang.org/protobuf/encoding/protojson"
|
||||
proto "google.golang.org/protobuf/proto"
|
||||
drpc "storj.io/drpc"
|
||||
drpcerr "storj.io/drpc/drpcerr"
|
||||
)
|
||||
|
||||
type drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto struct{}
|
||||
|
||||
func (drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto) Marshal(msg drpc.Message) ([]byte, error) {
|
||||
return proto.Marshal(msg.(proto.Message))
|
||||
}
|
||||
|
||||
func (drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto) MarshalAppend(buf []byte, msg drpc.Message) ([]byte, error) {
|
||||
return proto.MarshalOptions{}.MarshalAppend(buf, msg.(proto.Message))
|
||||
}
|
||||
|
||||
func (drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto) Unmarshal(buf []byte, msg drpc.Message) error {
|
||||
return proto.Unmarshal(buf, msg.(proto.Message))
|
||||
}
|
||||
|
||||
func (drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto) JSONMarshal(msg drpc.Message) ([]byte, error) {
|
||||
return protojson.Marshal(msg.(proto.Message))
|
||||
}
|
||||
|
||||
func (drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto) JSONUnmarshal(buf []byte, msg drpc.Message) error {
|
||||
return protojson.Unmarshal(buf, msg.(proto.Message))
|
||||
}
|
||||
|
||||
type DRPCAgentSocketClient interface {
|
||||
DRPCConn() drpc.Conn
|
||||
|
||||
Ping(ctx context.Context, in *PingRequest) (*PingResponse, error)
|
||||
SyncStart(ctx context.Context, in *SyncStartRequest) (*SyncStartResponse, error)
|
||||
SyncWant(ctx context.Context, in *SyncWantRequest) (*SyncWantResponse, error)
|
||||
SyncComplete(ctx context.Context, in *SyncCompleteRequest) (*SyncCompleteResponse, error)
|
||||
SyncReady(ctx context.Context, in *SyncReadyRequest) (*SyncReadyResponse, error)
|
||||
SyncStatus(ctx context.Context, in *SyncStatusRequest) (*SyncStatusResponse, error)
|
||||
}
|
||||
|
||||
type drpcAgentSocketClient struct {
|
||||
cc drpc.Conn
|
||||
}
|
||||
|
||||
func NewDRPCAgentSocketClient(cc drpc.Conn) DRPCAgentSocketClient {
|
||||
return &drpcAgentSocketClient{cc}
|
||||
}
|
||||
|
||||
func (c *drpcAgentSocketClient) DRPCConn() drpc.Conn { return c.cc }
|
||||
|
||||
func (c *drpcAgentSocketClient) Ping(ctx context.Context, in *PingRequest) (*PingResponse, error) {
|
||||
out := new(PingResponse)
|
||||
err := c.cc.Invoke(ctx, "/coder.agentsocket.v1.AgentSocket/Ping", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentSocketClient) SyncStart(ctx context.Context, in *SyncStartRequest) (*SyncStartResponse, error) {
|
||||
out := new(SyncStartResponse)
|
||||
err := c.cc.Invoke(ctx, "/coder.agentsocket.v1.AgentSocket/SyncStart", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentSocketClient) SyncWant(ctx context.Context, in *SyncWantRequest) (*SyncWantResponse, error) {
|
||||
out := new(SyncWantResponse)
|
||||
err := c.cc.Invoke(ctx, "/coder.agentsocket.v1.AgentSocket/SyncWant", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentSocketClient) SyncComplete(ctx context.Context, in *SyncCompleteRequest) (*SyncCompleteResponse, error) {
|
||||
out := new(SyncCompleteResponse)
|
||||
err := c.cc.Invoke(ctx, "/coder.agentsocket.v1.AgentSocket/SyncComplete", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentSocketClient) SyncReady(ctx context.Context, in *SyncReadyRequest) (*SyncReadyResponse, error) {
|
||||
out := new(SyncReadyResponse)
|
||||
err := c.cc.Invoke(ctx, "/coder.agentsocket.v1.AgentSocket/SyncReady", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentSocketClient) SyncStatus(ctx context.Context, in *SyncStatusRequest) (*SyncStatusResponse, error) {
|
||||
out := new(SyncStatusResponse)
|
||||
err := c.cc.Invoke(ctx, "/coder.agentsocket.v1.AgentSocket/SyncStatus", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
type DRPCAgentSocketServer interface {
|
||||
Ping(context.Context, *PingRequest) (*PingResponse, error)
|
||||
SyncStart(context.Context, *SyncStartRequest) (*SyncStartResponse, error)
|
||||
SyncWant(context.Context, *SyncWantRequest) (*SyncWantResponse, error)
|
||||
SyncComplete(context.Context, *SyncCompleteRequest) (*SyncCompleteResponse, error)
|
||||
SyncReady(context.Context, *SyncReadyRequest) (*SyncReadyResponse, error)
|
||||
SyncStatus(context.Context, *SyncStatusRequest) (*SyncStatusResponse, error)
|
||||
}
|
||||
|
||||
type DRPCAgentSocketUnimplementedServer struct{}
|
||||
|
||||
func (s *DRPCAgentSocketUnimplementedServer) Ping(context.Context, *PingRequest) (*PingResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentSocketUnimplementedServer) SyncStart(context.Context, *SyncStartRequest) (*SyncStartResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentSocketUnimplementedServer) SyncWant(context.Context, *SyncWantRequest) (*SyncWantResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentSocketUnimplementedServer) SyncComplete(context.Context, *SyncCompleteRequest) (*SyncCompleteResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentSocketUnimplementedServer) SyncReady(context.Context, *SyncReadyRequest) (*SyncReadyResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentSocketUnimplementedServer) SyncStatus(context.Context, *SyncStatusRequest) (*SyncStatusResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
type DRPCAgentSocketDescription struct{}
|
||||
|
||||
func (DRPCAgentSocketDescription) NumMethods() int { return 6 }
|
||||
|
||||
func (DRPCAgentSocketDescription) Method(n int) (string, drpc.Encoding, drpc.Receiver, interface{}, bool) {
|
||||
switch n {
|
||||
case 0:
|
||||
return "/coder.agentsocket.v1.AgentSocket/Ping", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCAgentSocketServer).
|
||||
Ping(
|
||||
ctx,
|
||||
in1.(*PingRequest),
|
||||
)
|
||||
}, DRPCAgentSocketServer.Ping, true
|
||||
case 1:
|
||||
return "/coder.agentsocket.v1.AgentSocket/SyncStart", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCAgentSocketServer).
|
||||
SyncStart(
|
||||
ctx,
|
||||
in1.(*SyncStartRequest),
|
||||
)
|
||||
}, DRPCAgentSocketServer.SyncStart, true
|
||||
case 2:
|
||||
return "/coder.agentsocket.v1.AgentSocket/SyncWant", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCAgentSocketServer).
|
||||
SyncWant(
|
||||
ctx,
|
||||
in1.(*SyncWantRequest),
|
||||
)
|
||||
}, DRPCAgentSocketServer.SyncWant, true
|
||||
case 3:
|
||||
return "/coder.agentsocket.v1.AgentSocket/SyncComplete", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCAgentSocketServer).
|
||||
SyncComplete(
|
||||
ctx,
|
||||
in1.(*SyncCompleteRequest),
|
||||
)
|
||||
}, DRPCAgentSocketServer.SyncComplete, true
|
||||
case 4:
|
||||
return "/coder.agentsocket.v1.AgentSocket/SyncReady", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCAgentSocketServer).
|
||||
SyncReady(
|
||||
ctx,
|
||||
in1.(*SyncReadyRequest),
|
||||
)
|
||||
}, DRPCAgentSocketServer.SyncReady, true
|
||||
case 5:
|
||||
return "/coder.agentsocket.v1.AgentSocket/SyncStatus", drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCAgentSocketServer).
|
||||
SyncStatus(
|
||||
ctx,
|
||||
in1.(*SyncStatusRequest),
|
||||
)
|
||||
}, DRPCAgentSocketServer.SyncStatus, true
|
||||
default:
|
||||
return "", nil, nil, nil, false
|
||||
}
|
||||
}
|
||||
|
||||
func DRPCRegisterAgentSocket(mux drpc.Mux, impl DRPCAgentSocketServer) error {
|
||||
return mux.Register(impl, DRPCAgentSocketDescription{})
|
||||
}
|
||||
|
||||
type DRPCAgentSocket_PingStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*PingResponse) error
|
||||
}
|
||||
|
||||
type drpcAgentSocket_PingStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgentSocket_PingStream) SendAndClose(m *PingResponse) error {
|
||||
if err := x.MsgSend(m, drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
||||
|
||||
type DRPCAgentSocket_SyncStartStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*SyncStartResponse) error
|
||||
}
|
||||
|
||||
type drpcAgentSocket_SyncStartStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgentSocket_SyncStartStream) SendAndClose(m *SyncStartResponse) error {
|
||||
if err := x.MsgSend(m, drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
||||
|
||||
type DRPCAgentSocket_SyncWantStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*SyncWantResponse) error
|
||||
}
|
||||
|
||||
type drpcAgentSocket_SyncWantStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgentSocket_SyncWantStream) SendAndClose(m *SyncWantResponse) error {
|
||||
if err := x.MsgSend(m, drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
||||
|
||||
type DRPCAgentSocket_SyncCompleteStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*SyncCompleteResponse) error
|
||||
}
|
||||
|
||||
type drpcAgentSocket_SyncCompleteStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgentSocket_SyncCompleteStream) SendAndClose(m *SyncCompleteResponse) error {
|
||||
if err := x.MsgSend(m, drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
||||
|
||||
type DRPCAgentSocket_SyncReadyStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*SyncReadyResponse) error
|
||||
}
|
||||
|
||||
type drpcAgentSocket_SyncReadyStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgentSocket_SyncReadyStream) SendAndClose(m *SyncReadyResponse) error {
|
||||
if err := x.MsgSend(m, drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
||||
|
||||
type DRPCAgentSocket_SyncStatusStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*SyncStatusResponse) error
|
||||
}
|
||||
|
||||
type drpcAgentSocket_SyncStatusStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgentSocket_SyncStatusStream) SendAndClose(m *SyncStatusResponse) error {
|
||||
if err := x.MsgSend(m, drpcEncoding_File_agent_agentsocket_proto_agentsocket_proto{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
package proto
|
||||
|
||||
import "github.com/coder/coder/v2/apiversion"
|
||||
|
||||
// Version history:
|
||||
//
|
||||
// API v1.0:
|
||||
// - Initial release
|
||||
// - Ping
|
||||
// - Sync operations: SyncStart, SyncWant, SyncComplete, SyncWait, SyncStatus
|
||||
|
||||
const (
|
||||
CurrentMajor = 1
|
||||
CurrentMinor = 0
|
||||
)
|
||||
|
||||
var CurrentVersion = apiversion.New(CurrentMajor, CurrentMinor)
|
||||
@@ -1,138 +0,0 @@
|
||||
package agentsocket
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
"storj.io/drpc/drpcmux"
|
||||
"storj.io/drpc/drpcserver"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"github.com/coder/coder/v2/agent/agentsocket/proto"
|
||||
"github.com/coder/coder/v2/agent/unit"
|
||||
"github.com/coder/coder/v2/codersdk/drpcsdk"
|
||||
)
|
||||
|
||||
// Server provides access to the DRPCAgentSocketService via a Unix domain socket.
// Do not invoke Server{} directly. Use NewServer() instead.
type Server struct {
	logger     slog.Logger
	path       string             // filesystem path of the Unix domain socket
	drpcServer *drpcserver.Server // serves DRPC traffic on the listener
	service    *DRPCAgentSocketService

	// mu guards listener; the fields below it are written once during
	// construction (NewServer) and in Close.
	mu       sync.Mutex
	listener net.Listener // nil once Close() has run
	ctx      context.Context
	cancel   context.CancelFunc // cancels ctx; closing it ends all connections
	wg       sync.WaitGroup     // tracks the accept goroutine
}
|
||||
|
||||
// NewServer creates a new agent socket server.
|
||||
func NewServer(logger slog.Logger, opts ...Option) (*Server, error) {
|
||||
options := &options{}
|
||||
for _, opt := range opts {
|
||||
opt(options)
|
||||
}
|
||||
|
||||
logger = logger.Named("agentsocket-server")
|
||||
server := &Server{
|
||||
logger: logger,
|
||||
path: options.path,
|
||||
service: &DRPCAgentSocketService{
|
||||
logger: logger,
|
||||
unitManager: unit.NewManager(),
|
||||
},
|
||||
}
|
||||
|
||||
mux := drpcmux.New()
|
||||
err := proto.DRPCRegisterAgentSocket(mux, server.service)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to register drpc service: %w", err)
|
||||
}
|
||||
|
||||
server.drpcServer = drpcserver.NewWithOptions(mux, drpcserver.Options{
|
||||
Manager: drpcsdk.DefaultDRPCOptions(nil),
|
||||
Log: func(err error) {
|
||||
if errors.Is(err, context.Canceled) ||
|
||||
errors.Is(err, context.DeadlineExceeded) {
|
||||
return
|
||||
}
|
||||
logger.Debug(context.Background(), "drpc server error", slog.Error(err))
|
||||
},
|
||||
})
|
||||
|
||||
listener, err := createSocket(server.path)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("create socket: %w", err)
|
||||
}
|
||||
|
||||
server.listener = listener
|
||||
|
||||
// This context is canceled by server.Close().
|
||||
// canceling it will close all connections.
|
||||
server.ctx, server.cancel = context.WithCancel(context.Background())
|
||||
|
||||
server.logger.Info(server.ctx, "agent socket server started", slog.F("path", server.path))
|
||||
|
||||
server.wg.Add(1)
|
||||
go func() {
|
||||
defer server.wg.Done()
|
||||
server.acceptConnections()
|
||||
}()
|
||||
|
||||
return server, nil
|
||||
}
|
||||
|
||||
// Close stops the server and cleans up resources.
|
||||
func (s *Server) Close() error {
|
||||
s.mu.Lock()
|
||||
|
||||
if s.listener == nil {
|
||||
s.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
s.logger.Info(s.ctx, "stopping agent socket server")
|
||||
|
||||
s.cancel()
|
||||
|
||||
if err := s.listener.Close(); err != nil {
|
||||
s.logger.Warn(s.ctx, "error closing socket listener", slog.Error(err))
|
||||
}
|
||||
|
||||
s.listener = nil
|
||||
|
||||
s.mu.Unlock()
|
||||
|
||||
// Wait for all connections to finish
|
||||
s.wg.Wait()
|
||||
|
||||
if err := cleanupSocket(s.path); err != nil {
|
||||
s.logger.Warn(s.ctx, "error cleaning up socket file", slog.Error(err))
|
||||
}
|
||||
|
||||
s.logger.Info(s.ctx, "agent socket server stopped")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) acceptConnections() {
|
||||
// In an edge case, Close() might race with acceptConnections() and set s.listener to nil.
|
||||
// Therefore, we grab a copy of the listener under a lock. We might still get a nil listener,
|
||||
// but then we know close has already run and we can return early.
|
||||
s.mu.Lock()
|
||||
listener := s.listener
|
||||
s.mu.Unlock()
|
||||
if listener == nil {
|
||||
return
|
||||
}
|
||||
|
||||
err := s.drpcServer.Serve(s.ctx, listener)
|
||||
if err != nil {
|
||||
s.logger.Warn(s.ctx, "error serving drpc server", slog.Error(err))
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user