Compare commits
579 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 5137f715f0 | |||
| 1171ce7add | |||
| b3e3521274 | |||
| 029c92fede | |||
| db71c0fa54 | |||
| 5cfa34b31e | |||
| e044d3b752 | |||
| 0b7d68dc3f | |||
| 5b071f4d94 | |||
| 52b87a28b0 | |||
| db9104c02e | |||
| fe867d02e0 | |||
| 20dff2aa5d | |||
| 19e4a86711 | |||
| e2e56d7d4f | |||
| bfc588955c | |||
| 3ffe7f55aa | |||
| 97f7a35a47 | |||
| e0d34ca6f7 | |||
| 24080b121c | |||
| fbda21a9f2 | |||
| e8be092af0 | |||
| c1451ca4da | |||
| 016b3ef5a2 | |||
| d2d7628522 | |||
| 7c4fbe5bae | |||
| f2606a78dd | |||
| 83e1349c2c | |||
| 280d38d4b8 | |||
| 3ab4800a18 | |||
| e84d89353f | |||
| ff61475239 | |||
| c35b560c87 | |||
| 213b768785 | |||
| a6901ae2c5 | |||
| 56cbd47082 | |||
| 45e9d93d37 | |||
| 5647e87207 | |||
| 307186325f | |||
| 28a0242c27 | |||
| e46431078c | |||
| eb781751b8 | |||
| 838ab8de7e | |||
| 2e86b76fb8 | |||
| 3f6096b0d7 | |||
| 7924bb2a56 | |||
| e63de9a259 | |||
| 0801760956 | |||
| a495952349 | |||
| 58c2ce17da | |||
| fa91992976 | |||
| 89d8a293f0 | |||
| 211e59bf65 | |||
| a41cbb0f03 | |||
| 1e49190e12 | |||
| a58e4febb9 | |||
| 9a4e1100fa | |||
| b36071c6bb | |||
| 30f032d282 | |||
| ad3fed72bc | |||
| 545cb9a7cc | |||
| e6e65fdc64 | |||
| 4672700ef6 | |||
| 06394a5b8c | |||
| 81ed112cd3 | |||
| fad457420b | |||
| 32c93a887e | |||
| 43411d20ba | |||
| 133dc66143 | |||
| 0cd4842d18 | |||
| df7ed18e1b | |||
| 5b0e6bfa2a | |||
| b779655f01 | |||
| 82f7b0cef4 | |||
| eb81fcf1e1 | |||
| a3432b4265 | |||
| c3eb68a585 | |||
| d82ed008f2 | |||
| 3924b294fb | |||
| 12f728189c | |||
| b7bdb17460 | |||
| 41ed581460 | |||
| fd43985e94 | |||
| c60c75c833 | |||
| f2a91157a9 | |||
| 4f7ae6461b | |||
| ef4d1b68e1 | |||
| 8b8a763ca9 | |||
| bf3b35b1e2 | |||
| 43ba3146a9 | |||
| 6800fc8477 | |||
| 6b4d908e7e | |||
| e52d848d05 | |||
| dba0dfa859 | |||
| 0181e036f6 | |||
| 19c0cfdabf | |||
| 2471f3b9a8 | |||
| f67c5cf72b | |||
| 689da5b7c1 | |||
| 007b2b8db0 | |||
| cab8ffa54a | |||
| b32a0a9af6 | |||
| 41dbe7de4e | |||
| 8afbc8f7f5 | |||
| edeb9bb42a | |||
| 2883cad6ad | |||
| dde21cebcc | |||
| b02796655e | |||
| 197cd935cf | |||
| d07fa9c62f | |||
| 45c07317c0 | |||
| 3ce7b2ebe6 | |||
| ba3b835339 | |||
| b7ea330aea | |||
| e37bbe6208 | |||
| 6775a86785 | |||
| 3e5d292135 | |||
| 4612c28d99 | |||
| 486d1fb697 | |||
| 6823194683 | |||
| 2c7ad1c094 | |||
| 8d9157dc35 | |||
| 50575e1a9a | |||
| 37f6b38d53 | |||
| 8488afa8df | |||
| d8e95001e8 | |||
| ebd6c1b573 | |||
| 716759aacf | |||
| 167c759149 | |||
| d8467c11ad | |||
| 6d66cb246d | |||
| 78517cab52 | |||
| cb89bc1729 | |||
| 1f7c63cf1b | |||
| 9d8578e0e3 | |||
| 2c7394bb3d | |||
| 2b19a2369f | |||
| 4ca4736411 | |||
| 918a82436e | |||
| 02696f2df9 | |||
| b4ca1d6579 | |||
| f0969f99ad | |||
| e73a202aed | |||
| be31b2e4d7 | |||
| ce49a55f56 | |||
| 8221544514 | |||
| dbbf8acc26 | |||
| 51687c74c8 | |||
| 228cbec99b | |||
| 1e349f0d50 | |||
| 8aea6040c8 | |||
| 091fdd6761 | |||
| 5d2e87f1a7 | |||
| b34ecf1e9e | |||
| 941e3873a8 | |||
| c0d68a4c2c | |||
| 567ecca61b | |||
| 667ee41165 | |||
| 8a6bfc9d28 | |||
| 2947b827fb | |||
| dd01bde9b6 | |||
| 44f9613bf2 | |||
| 2bc11d2e63 | |||
| 43488b44ce | |||
| b376b2cd13 | |||
| f6891bc465 | |||
| 088fd0b904 | |||
| 2c86d0bed0 | |||
| 38ed816207 | |||
| 53453c06a1 | |||
| 81a3b36884 | |||
| feaa9894a4 | |||
| f66e802fae | |||
| 876d448d69 | |||
| 3dcbf63cbe | |||
| 0f47b58bfb | |||
| 2e4e0b2d2c | |||
| a235644046 | |||
| fab343a2e9 | |||
| f0b4badf74 | |||
| 5fad611020 | |||
| dd1f8331de | |||
| 1b2ed5bc9b | |||
| e300b036be | |||
| dca8125263 | |||
| 695f57f7ff | |||
| b07b40b346 | |||
| d70f9ea26c | |||
| dff53d0787 | |||
| 185400db11 | |||
| 1e6ea6133c | |||
| a42b6c185d | |||
| b8e9262c51 | |||
| ccd5e1a749 | |||
| 2f54f769be | |||
| 3883d7181d | |||
| 2443a9f861 | |||
| 676e215a91 | |||
| 70cede8f7a | |||
| b212bd4ac5 | |||
| dbadae5a9c | |||
| 0536b58b48 | |||
| baf3bf6b9c | |||
| 28eca2e53f | |||
| d9a169556a | |||
| 6b3c4c00a2 | |||
| 49ed66c7ad | |||
| cbcf7561e5 | |||
| 427572199e | |||
| c82e878b50 | |||
| 8e684c8195 | |||
| 60d0aa6930 | |||
| 2aa79369a2 | |||
| 432925df31 | |||
| 6fe84025aa | |||
| 13b89f79df | |||
| 153abd5003 | |||
| 122cbaa134 | |||
| 15875a76ae | |||
| 9ad96288b2 | |||
| 7f62085a02 | |||
| d49bcc93fe | |||
| b267497c6d | |||
| 46d95cb0f0 | |||
| 812fb95273 | |||
| 612e67a53b | |||
| d9ccd97d36 | |||
| 571d358e4b | |||
| 0cab6e7763 | |||
| 967db2801b | |||
| 12a4b114de | |||
| d016f93de8 | |||
| 329aa45c16 | |||
| 0a16bda786 | |||
| 99151183bc | |||
| 433be7b16d | |||
| 07895006d9 | |||
| 4f9292859d | |||
| 5b2f43619b | |||
| d41f9f8b47 | |||
| 2e8ab2aeaf | |||
| e4d7b0b664 | |||
| 2b574e2b2d | |||
| d374becdeb | |||
| 88f4490ad6 | |||
| cb6c0f3cbb | |||
| 2b71e38b31 | |||
| f431aa53d2 | |||
| 2dc565d5de | |||
| 48d69c9e60 | |||
| e9c12c30cf | |||
| afbda2235c | |||
| 52901e1219 | |||
| 18c4a98865 | |||
| 19b6d194fc | |||
| 452668c893 | |||
| 14bd489af6 | |||
| 3416f6dfb5 | |||
| 6808daef0f | |||
| 74c5261013 | |||
| 1f6e39c0b0 | |||
| a4d74b8b44 | |||
| c634a38bd7 | |||
| 4cb94d1347 | |||
| 54c3fc63d9 | |||
| 20525c8b2e | |||
| abb2c7656a | |||
| 0534f8f59b | |||
| f28df8e7b8 | |||
| 0babc3c555 | |||
| 707d0e97d9 | |||
| f441ad66e1 | |||
| 3a0a4ddfcd | |||
| 4548ad7cef | |||
| 78283a7fb9 | |||
| 82d5130b07 | |||
| b73397e08c | |||
| 6c67add2d9 | |||
| d5ddcbdda0 | |||
| 7029ccfbdf | |||
| 3530d39740 | |||
| dd161b172e | |||
| e73901cf56 | |||
| 411ce46442 | |||
| b501046cf9 | |||
| 61be4dfe5a | |||
| dbdcad0d09 | |||
| 34841cf2b7 | |||
| 2c6e0f7d0a | |||
| a7c27cad26 | |||
| f342d10c31 | |||
| 78df68348a | |||
| e311e9ec24 | |||
| 491e0e3abf | |||
| 65c726eb50 | |||
| 7f39ff854e | |||
| 614c17924c | |||
| 6ecba0fda7 | |||
| d58239b9ec | |||
| ddf5569b10 | |||
| a20ec6659d | |||
| 89c13c2212 | |||
| 8dd003ba5e | |||
| 60c01555b9 | |||
| a9c0c01629 | |||
| f20cc66c04 | |||
| b25e5dc90b | |||
| b73d9d788b | |||
| 8d1cfbce8f | |||
| 51b58cfc98 | |||
| 782fe84c7c | |||
| 214123d476 | |||
| 1c2f9e3199 | |||
| 8cd8901db5 | |||
| 26b5390f4b | |||
| ad3eb4bb75 | |||
| d0ac4cb4b1 | |||
| e51eeb67ce | |||
| 7fa70ce159 | |||
| 4590149810 | |||
| 5d5b5aa074 | |||
| 048dc0450f | |||
| abafc0863c | |||
| 7060069034 | |||
| e6dc9eeffc | |||
| ace188bfc2 | |||
| 5229d7fd3a | |||
| d8df87d5ae | |||
| 6b3f599438 | |||
| 9b6433e3a7 | |||
| 92ef0baff3 | |||
| df4f34ac15 | |||
| fbec79f35d | |||
| 2895c108c2 | |||
| 5173bce5cc | |||
| 5c48cb4447 | |||
| a8c25180db | |||
| 148eb90bda | |||
| 9b864ed700 | |||
| cfe35f54b4 | |||
| 328a383f15 | |||
| 3aef070959 | |||
| 2c3ebc50cb | |||
| d19a762589 | |||
| 0f17d7c144 | |||
| 875cae1fc9 | |||
| e448c10122 | |||
| befb42b6fd | |||
| e6f11a383a | |||
| 20c2dda13f | |||
| b508c325b1 | |||
| 8999d5785a | |||
| 24aa223399 | |||
| 4121121797 | |||
| 71f87d054f | |||
| fc249fab1e | |||
| 3dd35e019b | |||
| ba955f44d0 | |||
| 88c1ee6d52 | |||
| 111ac3de8a | |||
| fefe02c2df | |||
| 9f3a955ebf | |||
| 0e5eecd7da | |||
| ced6ae01b7 | |||
| f47ecb54aa | |||
| 198b56c137 | |||
| c130f8d6d0 | |||
| 10204ba829 | |||
| 9ac44aa74f | |||
| 8ddc8b3447 | |||
| bd17290ff4 | |||
| 38163edf2f | |||
| 9d310388e5 | |||
| 290180b104 | |||
| 6085b92fae | |||
| 34c9661f1b | |||
| 1516c6636b | |||
| a8ce099638 | |||
| b568344fe1 | |||
| 3ae438b968 | |||
| acda90236d | |||
| f623153438 | |||
| f3ffcba63b | |||
| 3091f8f70c | |||
| c14c1cce13 | |||
| cb22df9bea | |||
| fbfd192370 | |||
| 4894eda711 | |||
| 90b6e86555 | |||
| ef70165a8a | |||
| 4f08330297 | |||
| 4965f1853b | |||
| dc4b1ef406 | |||
| 530be2f96a | |||
| 1b20b3cfa8 | |||
| e0afee1b85 | |||
| f4de2b64ec | |||
| 3f4791c9de | |||
| 4a0ca8aa5b | |||
| 1fe5c969c7 | |||
| 75ab16d19a | |||
| 76e7a1d06b | |||
| 33761c9c7d | |||
| 652097ed3a | |||
| fbd34139b5 | |||
| b69c237b8a | |||
| e4211ccb40 | |||
| f400d8a0c5 | |||
| be0436afbe | |||
| 715bbd3edd | |||
| 5f0417d14e | |||
| a4f1319108 | |||
| 177affbe4b | |||
| 9c5b631323 | |||
| 8290fee3f7 | |||
| 61fac2dcfc | |||
| 076db31486 | |||
| ad3abe350f | |||
| 8a7f0e9eb9 | |||
| 473585de6c | |||
| e71c53d4d0 | |||
| ed7e43b54c | |||
| e23873ff8f | |||
| cf8ee78547 | |||
| 645c4bd612 | |||
| a328d20bcb | |||
| 2cf2904515 | |||
| 63a4f5f4a7 | |||
| aded7b1513 | |||
| 71153e2317 | |||
| 26740cf00d | |||
| 057b43a935 | |||
| f418983f23 | |||
| de196b89b6 | |||
| 7f26111c01 | |||
| 861ae1a23a | |||
| 4f3925d0b3 | |||
| 4316c1c862 | |||
| 9e4558ae3a | |||
| 43a867441a | |||
| 1dd3eb603b | |||
| 0a550815e9 | |||
| 8441c36dfb | |||
| 651d14ea68 | |||
| 64398def48 | |||
| e36503afd2 | |||
| b0aa91bf27 | |||
| f5c4826e4c | |||
| 8c3828b531 | |||
| b83a8ce76d | |||
| 4208c30d32 | |||
| f84485d2c4 | |||
| c87deb868b | |||
| 14925e71a7 | |||
| a9797fa391 | |||
| e976f50415 | |||
| ee15adda4b | |||
| a5c409dfee | |||
| 7162dc7e14 | |||
| ca6e6213bf | |||
| 0cb875cba5 | |||
| 04dd663680 | |||
| ddaf913088 | |||
| 44bb958114 | |||
| 4277ca02e5 | |||
| bb5acb0332 | |||
| 95e5419626 | |||
| 5b9e26a13f | |||
| 55fb6b663a | |||
| 06d91bee34 | |||
| 26c3c1226e | |||
| e36b606498 | |||
| 744c73394a | |||
| 23f02651f9 | |||
| 6588494abd | |||
| 84dc001f7e | |||
| 311d1dc576 | |||
| b86e2e4cd4 | |||
| 7d63dc2b02 | |||
| bb4ce87242 | |||
| 21dc93c8a3 | |||
| 08844d03fb | |||
| ca353cb81c | |||
| c9aeea6f64 | |||
| 03045bd47a | |||
| 01ceb84a22 | |||
| 716b86b380 | |||
| 2dce4151ba | |||
| e756baa0c4 | |||
| ae20df4229 | |||
| d2b8a93638 | |||
| 921b6eb4ee | |||
| 839a16e299 | |||
| ac9c16864c | |||
| e756a95759 | |||
| b8449d5894 | |||
| 725cda9463 | |||
| af1c74d62d | |||
| 0c993ea329 | |||
| 5c49ce0194 | |||
| b5405dc424 | |||
| 7f70a23844 | |||
| b3e6a461ed | |||
| 5284d974ef | |||
| ec7d7595ff | |||
| 59c7c340a3 | |||
| cac29e0b4d | |||
| 95ce697e3a | |||
| 94eb9b8db1 | |||
| 6882e8e524 | |||
| f4026edd71 | |||
| 3200b85d87 | |||
| 8d5a13d768 | |||
| a7c671ca07 | |||
| 90573a6e99 | |||
| 0bf156cde3 | |||
| eaf9176bc5 | |||
| e491217a12 | |||
| 9d2b805fb7 | |||
| 7fc1a65b14 | |||
| fdf035cd06 | |||
| fc1d823cae | |||
| 8fe3dcf18a | |||
| 5abfe5afd0 | |||
| 7a8da08124 | |||
| 6b7858c516 | |||
| 9d3785def8 | |||
| 2a6fd90140 | |||
| c2e3648484 | |||
| 3b50530a63 | |||
| 7fecd39e23 | |||
| 99fda4a8e2 | |||
| 51aa32cfcf | |||
| 6ae8bfed94 | |||
| 35e7d7854a | |||
| edcbd4f394 | |||
| ea578ceabb | |||
| 0ddd54d34b | |||
| fdc9097d6c | |||
| e7fd2cb1a6 | |||
| 670ee4d54f | |||
| 39fbf74c7d | |||
| eac155aec2 | |||
| 7732ac475a | |||
| 6b2aee4133 | |||
| d8592bf09a | |||
| 4af8446f48 | |||
| 1286904de8 | |||
| 09f7b8e88c | |||
| 1a2aea3a6b | |||
| 6683ad989a | |||
| 8f1b4fb061 | |||
| a7243b3f3b | |||
| 1372bf82f5 | |||
| 57c9d88703 | |||
| 5ebb702e00 | |||
| 9dbc913798 | |||
| ed5567ba28 | |||
| ac322724b0 | |||
| 3d9bfdd5dc | |||
| 1ba5169109 | |||
| d33526108f | |||
| f5f150d568 | |||
| b799014832 | |||
| 9c9319f81e | |||
| ab2904a676 | |||
| 557adab224 | |||
| 21f87313bd | |||
| 42c21d400f | |||
| f677c4470b | |||
| b8c7b56fda | |||
| c4f590581e | |||
| 997493d4ae | |||
| 1ad998ee3a | |||
| 504cedf15a | |||
| 9b73020f11 | |||
| c93fe8ddbe | |||
| fe05fd1e6e | |||
| 2b5e02f5b2 |
@@ -4,9 +4,10 @@
|
||||
|
||||
"features": {
|
||||
// See all possible options here https://github.com/devcontainers/features/tree/main/src/docker-in-docker
|
||||
"ghcr.io/devcontainers/features/docker-in-docker:2": {}
|
||||
"ghcr.io/devcontainers/features/docker-in-docker:2": {
|
||||
"moby": "false"
|
||||
}
|
||||
},
|
||||
// SYS_PTRACE to enable go debugging
|
||||
// without --priviliged the Github Codespace build fails (not required otherwise)
|
||||
"runArgs": ["--cap-add=SYS_PTRACE", "--privileged"]
|
||||
"runArgs": ["--cap-add=SYS_PTRACE"]
|
||||
}
|
||||
|
||||
@@ -4,61 +4,15 @@ description: |
|
||||
inputs:
|
||||
version:
|
||||
description: "The Go version to use."
|
||||
default: "1.20.10"
|
||||
default: "1.21.5"
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Cache go toolchain
|
||||
uses: buildjet/cache@v3
|
||||
with:
|
||||
path: |
|
||||
${{ runner.tool_cache }}/go/${{ inputs.version }}
|
||||
key: gotoolchain-${{ runner.os }}-${{ inputs.version }}
|
||||
restore-keys: |
|
||||
gotoolchain-${{ runner.os }}-
|
||||
|
||||
- name: Setup Go
|
||||
uses: buildjet/setup-go@v4
|
||||
with:
|
||||
# We do our own caching for implementation clarity.
|
||||
cache: false
|
||||
go-version: ${{ inputs.version }}
|
||||
|
||||
- name: Get cache dirs
|
||||
shell: bash
|
||||
run: |
|
||||
set -x
|
||||
echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_ENV
|
||||
echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_ENV
|
||||
|
||||
# We split up GOMODCACHE from GOCACHE because the latter must be invalidated
|
||||
# on code change, but the former can be kept.
|
||||
- name: Cache $GOMODCACHE
|
||||
uses: buildjet/cache@v3
|
||||
with:
|
||||
path: |
|
||||
${{ env.GOMODCACHE }}
|
||||
key: gomodcache-${{ runner.os }}-${{ hashFiles('**/go.sum') }}-${{ github.job }}
|
||||
# restore-keys aren't used because it causes the cache to grow
|
||||
# infinitely. go.sum changes very infrequently, so rebuilding from
|
||||
# scratch every now and then isn't terrible.
|
||||
|
||||
- name: Cache $GOCACHE
|
||||
uses: buildjet/cache@v3
|
||||
with:
|
||||
path: |
|
||||
${{ env.GOCACHE }}
|
||||
# Job name must be included in the key for effective test cache reuse.
|
||||
# The key format is intentionally different than GOMODCACHE, because any
|
||||
# time a Go file changes we invalidate this cache, whereas GOMODCACHE is
|
||||
# only invalidated when go.sum changes.
|
||||
# The number in the key is incremented when the cache gets too large,
|
||||
# since this technically grows without bound.
|
||||
key: gocache2-${{ runner.os }}-${{ github.job }}-${{ hashFiles('**/*.go', 'go.**') }}
|
||||
restore-keys: |
|
||||
gocache2-${{ runner.os }}-${{ github.job }}-
|
||||
gocache2-${{ runner.os }}-
|
||||
|
||||
- name: Install gotestsum
|
||||
shell: bash
|
||||
run: go install gotest.tools/gotestsum@latest
|
||||
|
||||
@@ -7,4 +7,4 @@ runs:
|
||||
- name: Setup sqlc
|
||||
uses: sqlc-dev/setup-sqlc@v4
|
||||
with:
|
||||
sqlc-version: "1.20.0"
|
||||
sqlc-version: "1.24.0"
|
||||
|
||||
@@ -5,7 +5,7 @@ runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Install Terraform
|
||||
uses: hashicorp/setup-terraform@v2
|
||||
uses: hashicorp/setup-terraform@v3
|
||||
with:
|
||||
terraform_version: 1.5.5
|
||||
terraform_version: 1.5.7
|
||||
terraform_wrapper: false
|
||||
|
||||
+8
-33
@@ -44,13 +44,9 @@ updates:
|
||||
update-types:
|
||||
- version-update:semver-patch
|
||||
groups:
|
||||
otel:
|
||||
go:
|
||||
patterns:
|
||||
- "go.nhat.io/otelsql"
|
||||
- "go.opentelemetry.io/otel*"
|
||||
golang-x:
|
||||
patterns:
|
||||
- "golang.org/x/*"
|
||||
- "*"
|
||||
|
||||
# Update our Dockerfile.
|
||||
- package-ecosystem: "docker"
|
||||
@@ -66,10 +62,6 @@ updates:
|
||||
# We need to coordinate terraform updates with the version hardcoded in
|
||||
# our Go code.
|
||||
- dependency-name: "terraform"
|
||||
groups:
|
||||
scripts-docker:
|
||||
patterns:
|
||||
- "*"
|
||||
|
||||
- package-ecosystem: "npm"
|
||||
directory: "/site/"
|
||||
@@ -94,30 +86,9 @@ updates:
|
||||
- version-update:semver-major
|
||||
open-pull-requests-limit: 15
|
||||
groups:
|
||||
react:
|
||||
site:
|
||||
patterns:
|
||||
- "react*"
|
||||
- "@types/react*"
|
||||
xterm:
|
||||
patterns:
|
||||
- "xterm*"
|
||||
xstate:
|
||||
patterns:
|
||||
- "xstate"
|
||||
- "@xstate*"
|
||||
mui:
|
||||
patterns:
|
||||
- "@mui*"
|
||||
storybook:
|
||||
patterns:
|
||||
- "@storybook*"
|
||||
- "storybook*"
|
||||
eslint:
|
||||
patterns:
|
||||
- "eslint*"
|
||||
- "@eslint*"
|
||||
- "@typescript-eslint/eslint-plugin"
|
||||
- "@typescript-eslint/parser"
|
||||
- "*"
|
||||
|
||||
- package-ecosystem: "npm"
|
||||
directory: "/offlinedocs/"
|
||||
@@ -140,6 +111,10 @@ updates:
|
||||
- dependency-name: "@types/node"
|
||||
update-types:
|
||||
- version-update:semver-major
|
||||
groups:
|
||||
offlinedocs:
|
||||
patterns:
|
||||
- "*"
|
||||
|
||||
# Update dogfood.
|
||||
- package-ecosystem: "terraform"
|
||||
|
||||
@@ -0,0 +1,27 @@
|
||||
app = "paris-coder"
|
||||
primary_region = "cdg"
|
||||
|
||||
[experimental]
|
||||
entrypoint = ["/bin/sh", "-c", "CODER_DERP_SERVER_RELAY_URL=\"http://[${FLY_PRIVATE_IP}]:3000\" /opt/coder wsproxy server"]
|
||||
auto_rollback = true
|
||||
|
||||
[build]
|
||||
image = "ghcr.io/coder/coder-preview:main"
|
||||
|
||||
[env]
|
||||
CODER_ACCESS_URL = "https://paris.fly.dev.coder.com"
|
||||
CODER_HTTP_ADDRESS = "0.0.0.0:3000"
|
||||
CODER_PRIMARY_ACCESS_URL = "https://dev.coder.com"
|
||||
CODER_WILDCARD_ACCESS_URL = "*--apps.paris.fly.dev.coder.com"
|
||||
|
||||
[http_service]
|
||||
internal_port = 3000
|
||||
force_https = true
|
||||
auto_stop_machines = true
|
||||
auto_start_machines = true
|
||||
min_machines_running = 0
|
||||
|
||||
[[vm]]
|
||||
cpu_kind = "shared"
|
||||
cpus = 2
|
||||
memory_mb = 512
|
||||
@@ -0,0 +1,27 @@
|
||||
app = "sao-paulo-coder"
|
||||
primary_region = "gru"
|
||||
|
||||
[experimental]
|
||||
entrypoint = ["/bin/sh", "-c", "CODER_DERP_SERVER_RELAY_URL=\"http://[${FLY_PRIVATE_IP}]:3000\" /opt/coder wsproxy server"]
|
||||
auto_rollback = true
|
||||
|
||||
[build]
|
||||
image = "ghcr.io/coder/coder-preview:main"
|
||||
|
||||
[env]
|
||||
CODER_ACCESS_URL = "https://sao-paulo.fly.dev.coder.com"
|
||||
CODER_HTTP_ADDRESS = "0.0.0.0:3000"
|
||||
CODER_PRIMARY_ACCESS_URL = "https://dev.coder.com"
|
||||
CODER_WILDCARD_ACCESS_URL = "*--apps.sao-paulo.fly.dev.coder.com"
|
||||
|
||||
[http_service]
|
||||
internal_port = 3000
|
||||
force_https = true
|
||||
auto_stop_machines = true
|
||||
auto_start_machines = true
|
||||
min_machines_running = 0
|
||||
|
||||
[[vm]]
|
||||
cpu_kind = "shared"
|
||||
cpus = 2
|
||||
memory_mb = 512
|
||||
@@ -0,0 +1,27 @@
|
||||
app = "sydney-coder"
|
||||
primary_region = "syd"
|
||||
|
||||
[experimental]
|
||||
entrypoint = ["/bin/sh", "-c", "CODER_DERP_SERVER_RELAY_URL=\"http://[${FLY_PRIVATE_IP}]:3000\" /opt/coder wsproxy server"]
|
||||
auto_rollback = true
|
||||
|
||||
[build]
|
||||
image = "ghcr.io/coder/coder-preview:main"
|
||||
|
||||
[env]
|
||||
CODER_ACCESS_URL = "https://sydney.fly.dev.coder.com"
|
||||
CODER_HTTP_ADDRESS = "0.0.0.0:3000"
|
||||
CODER_PRIMARY_ACCESS_URL = "https://dev.coder.com"
|
||||
CODER_WILDCARD_ACCESS_URL = "*--apps.sydney.fly.dev.coder.com"
|
||||
|
||||
[http_service]
|
||||
internal_port = 3000
|
||||
force_https = true
|
||||
auto_stop_machines = true
|
||||
auto_start_machines = true
|
||||
min_machines_running = 0
|
||||
|
||||
[[vm]]
|
||||
cpu_kind = "shared"
|
||||
cpus = 2
|
||||
memory_mb = 512
|
||||
+262
-138
@@ -31,10 +31,12 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
docs-only: ${{ steps.filter.outputs.docs_count == steps.filter.outputs.all_count }}
|
||||
docs: ${{ steps.filter.outputs.docs }}
|
||||
go: ${{ steps.filter.outputs.go }}
|
||||
ts: ${{ steps.filter.outputs.ts }}
|
||||
k8s: ${{ steps.filter.outputs.k8s }}
|
||||
ci: ${{ steps.filter.outputs.ci }}
|
||||
db: ${{ steps.filter.outputs.db }}
|
||||
offlinedocs-only: ${{ steps.filter.outputs.offlinedocs_count == steps.filter.outputs.all_count }}
|
||||
offlinedocs: ${{ steps.filter.outputs.offlinedocs }}
|
||||
steps:
|
||||
@@ -56,6 +58,12 @@ jobs:
|
||||
- "examples/web-server/**"
|
||||
- "examples/monitoring/**"
|
||||
- "examples/lima/**"
|
||||
db:
|
||||
- "**.sql"
|
||||
- "coderd/database/queries/**"
|
||||
- "coderd/database/migrations"
|
||||
- "coderd/database/sqlc.yaml"
|
||||
- "coderd/database/dump.sql"
|
||||
go:
|
||||
- "**.sql"
|
||||
- "**.go"
|
||||
@@ -136,7 +144,7 @@ jobs:
|
||||
|
||||
# Check for any typos
|
||||
- name: Check for typos
|
||||
uses: crate-ci/typos@v1.16.19
|
||||
uses: crate-ci/typos@v1.16.25
|
||||
with:
|
||||
config: .github/workflows/typos.toml
|
||||
|
||||
@@ -220,7 +228,7 @@ jobs:
|
||||
with:
|
||||
# This doesn't need caching. It's super fast anyways!
|
||||
cache: false
|
||||
go-version: 1.20.10
|
||||
go-version: 1.21.5
|
||||
|
||||
- name: Install shfmt
|
||||
run: go install mvdan.cc/sh/v3/cmd/shfmt@v3.7.0
|
||||
@@ -291,14 +299,9 @@ jobs:
|
||||
gotestsum --junitfile="gotests.xml" --jsonfile="gotests.json" \
|
||||
--packages="./..." -- $PARALLEL_FLAG -short -failfast $COVERAGE_FLAGS
|
||||
|
||||
- name: Print test stats
|
||||
if: success() || failure()
|
||||
run: |
|
||||
# Artifacts are not available after rerunning a job,
|
||||
# so we need to print the test stats to the log.
|
||||
go run ./scripts/ci-report/main.go gotests.json | tee gotests_stats.json
|
||||
|
||||
- name: Upload test stats to Datadog
|
||||
timeout-minutes: 1
|
||||
continue-on-error: true
|
||||
uses: ./.github/actions/upload-datadog
|
||||
if: success() || failure()
|
||||
with:
|
||||
@@ -319,7 +322,9 @@ jobs:
|
||||
|
||||
test-go-pg:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
needs: changes
|
||||
needs:
|
||||
- changes
|
||||
- sqlc-vet # No point in testing the DB if the queries are invalid
|
||||
if: needs.changes.outputs.go == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
# This timeout must be greater than the timeout set by `go test` in
|
||||
# `make test-postgres` to ensure we receive a trace of running
|
||||
@@ -343,14 +348,9 @@ jobs:
|
||||
export TS_DEBUG_DISCO=true
|
||||
make test-postgres
|
||||
|
||||
- name: Print test stats
|
||||
if: success() || failure()
|
||||
run: |
|
||||
# Artifacts are not available after rerunning a job,
|
||||
# so we need to print the test stats to the log.
|
||||
go run ./scripts/ci-report/main.go gotests.json | tee gotests_stats.json
|
||||
|
||||
- name: Upload test stats to Datadog
|
||||
timeout-minutes: 1
|
||||
continue-on-error: true
|
||||
uses: ./.github/actions/upload-datadog
|
||||
if: success() || failure()
|
||||
with:
|
||||
@@ -391,105 +391,13 @@ jobs:
|
||||
gotestsum --junitfile="gotests.xml" -- -race ./...
|
||||
|
||||
- name: Upload test stats to Datadog
|
||||
timeout-minutes: 1
|
||||
continue-on-error: true
|
||||
uses: ./.github/actions/upload-datadog
|
||||
if: always()
|
||||
with:
|
||||
api-key: ${{ secrets.DATADOG_API_KEY }}
|
||||
|
||||
deploy:
|
||||
name: "deploy"
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-16vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
timeout-minutes: 30
|
||||
needs: changes
|
||||
if: |
|
||||
github.ref == 'refs/heads/main' && !github.event.pull_request.head.repo.fork
|
||||
&& needs.changes.outputs.docs-only == 'false'
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Authenticate to Google Cloud
|
||||
uses: google-github-actions/auth@v1
|
||||
with:
|
||||
workload_identity_provider: projects/573722524737/locations/global/workloadIdentityPools/github/providers/github
|
||||
service_account: coder-ci@coder-dogfood.iam.gserviceaccount.com
|
||||
|
||||
- name: Set up Google Cloud SDK
|
||||
uses: google-github-actions/setup-gcloud@v1
|
||||
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Install goimports
|
||||
run: go install golang.org/x/tools/cmd/goimports@latest
|
||||
- name: Install nfpm
|
||||
run: go install github.com/goreleaser/nfpm/v2/cmd/nfpm@v2.16.0
|
||||
|
||||
- name: Install zstd
|
||||
run: sudo apt-get install -y zstd
|
||||
|
||||
- name: Build Release
|
||||
run: |
|
||||
set -euo pipefail
|
||||
go mod download
|
||||
|
||||
version="$(./scripts/version.sh)"
|
||||
make gen/mark-fresh
|
||||
make -j \
|
||||
build/coder_"$version"_windows_amd64.zip \
|
||||
build/coder_"$version"_linux_amd64.{tar.gz,deb}
|
||||
|
||||
- name: Install Release
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
regions=(
|
||||
# gcp-region-id instance-name systemd-service-name
|
||||
"us-central1-a coder coder"
|
||||
"australia-southeast1-b coder-sydney coder-workspace-proxy"
|
||||
"europe-west3-c coder-europe coder-workspace-proxy"
|
||||
"southamerica-east1-b coder-brazil coder-workspace-proxy"
|
||||
)
|
||||
|
||||
deb_pkg="./build/coder_$(./scripts/version.sh)_linux_amd64.deb"
|
||||
if [ ! -f "$deb_pkg" ]; then
|
||||
echo "deb package not found: $deb_pkg"
|
||||
ls -l ./build
|
||||
exit 1
|
||||
fi
|
||||
|
||||
gcloud config set project coder-dogfood
|
||||
for region in "${regions[@]}"; do
|
||||
echo "::group::$region"
|
||||
set -- $region
|
||||
|
||||
set -x
|
||||
gcloud config set compute/zone "$1"
|
||||
gcloud compute scp "$deb_pkg" "${2}:/tmp/coder.deb"
|
||||
gcloud compute ssh "$2" -- /bin/sh -c "set -eux; sudo dpkg -i --force-confdef /tmp/coder.deb; sudo systemctl daemon-reload; sudo service '$3' restart"
|
||||
set +x
|
||||
|
||||
echo "::endgroup::"
|
||||
done
|
||||
|
||||
- name: Upload build artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: coder
|
||||
path: |
|
||||
./build/*.zip
|
||||
./build/*.tar.gz
|
||||
./build/*.deb
|
||||
retention-days: 7
|
||||
|
||||
test-js:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
needs: changes
|
||||
@@ -572,7 +480,7 @@ jobs:
|
||||
|
||||
- name: Upload Playwright Failed Tests
|
||||
if: always() && github.actor != 'dependabot[bot]' && runner.os == 'Linux' && !github.event.pull_request.head.repo.fork
|
||||
uses: actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: failed-test-videos
|
||||
path: ./site/test-results/**/*.webm
|
||||
@@ -580,7 +488,7 @@ jobs:
|
||||
|
||||
- name: Upload pprof dumps
|
||||
if: always() && github.actor != 'dependabot[bot]' && runner.os == 'Linux' && !github.event.pull_request.head.repo.fork
|
||||
uses: actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: debug-pprof-dumps
|
||||
path: ./site/test-results/**/debug-pprof-*.txt
|
||||
@@ -607,7 +515,7 @@ jobs:
|
||||
# the check to pass. This is desired in PRs, but not in mainline.
|
||||
- name: Publish to Chromatic (non-mainline)
|
||||
if: github.ref != 'refs/heads/main' && github.repository_owner == 'coder'
|
||||
uses: chromaui/action@v1
|
||||
uses: chromaui/action@v10
|
||||
env:
|
||||
NODE_OPTIONS: "--max_old_space_size=4096"
|
||||
STORYBOOK: true
|
||||
@@ -635,7 +543,7 @@ jobs:
|
||||
# infinitely "in progress" in mainline unless we re-review each build.
|
||||
- name: Publish to Chromatic (mainline)
|
||||
if: github.ref == 'refs/heads/main' && github.repository_owner == 'coder'
|
||||
uses: chromaui/action@v1
|
||||
uses: chromaui/action@v10
|
||||
env:
|
||||
NODE_OPTIONS: "--max_old_space_size=4096"
|
||||
STORYBOOK: true
|
||||
@@ -655,7 +563,8 @@ jobs:
|
||||
name: offlinedocs
|
||||
needs: changes
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
if: needs.changes.outputs.offlinedocs == 'true' || needs.changes.outputs.ci == 'true'
|
||||
if: needs.changes.outputs.offlinedocs == 'true' || needs.changes.outputs.ci == 'true' || needs.changes.outputs.docs == 'true'
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
@@ -668,11 +577,25 @@ jobs:
|
||||
with:
|
||||
directory: offlinedocs
|
||||
|
||||
- name: Install Protoc
|
||||
run: |
|
||||
mkdir -p /tmp/proto
|
||||
pushd /tmp/proto
|
||||
curl -L -o protoc.zip https://github.com/protocolbuffers/protobuf/releases/download/v23.3/protoc-23.3-linux-x86_64.zip
|
||||
unzip protoc.zip
|
||||
cp -r ./bin/* /usr/local/bin
|
||||
cp -r ./include /usr/local/bin/include
|
||||
popd
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Install go tools
|
||||
run: |
|
||||
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.30
|
||||
go install storj.io/drpc/cmd/protoc-gen-go-drpc@v0.0.33
|
||||
go install golang.org/x/tools/cmd/goimports@latest
|
||||
go install github.com/mikefarah/yq/v4@v4.30.6
|
||||
go install github.com/golang/mock/mockgen@v1.6.0
|
||||
|
||||
- name: Setup sqlc
|
||||
@@ -704,6 +627,7 @@ jobs:
|
||||
- test-js
|
||||
- test-e2e
|
||||
- offlinedocs
|
||||
- sqlc-vet
|
||||
# Allow this job to run even if the needed jobs fail, are skipped or
|
||||
# cancelled.
|
||||
if: always()
|
||||
@@ -718,6 +642,8 @@ jobs:
|
||||
echo "- test-go-pg: ${{ needs.test-go-pg.result }}"
|
||||
echo "- test-go-race: ${{ needs.test-go-race.result }}"
|
||||
echo "- test-js: ${{ needs.test-js.result }}"
|
||||
echo "- test-e2e: ${{ needs.test-e2e.result }}"
|
||||
echo "- offlinedocs: ${{ needs.offlinedocs.result }}"
|
||||
echo
|
||||
|
||||
# We allow skipped jobs to pass, but not failed or cancelled jobs.
|
||||
@@ -728,29 +654,23 @@ jobs:
|
||||
|
||||
echo "Required checks have passed"
|
||||
|
||||
build-main-image:
|
||||
# This build and publihes ghcr.io/coder/coder-preview:main for each merge commit to main branch.
|
||||
# We are only building this for amd64 plateform. (>95% pulls are for amd64)
|
||||
build:
|
||||
# This builds and publishes ghcr.io/coder/coder-preview:main for each commit
|
||||
# to main branch. We are only building this for amd64 platform. (>95% pulls
|
||||
# are for amd64)
|
||||
needs: changes
|
||||
if: github.ref == 'refs/heads/main' && needs.changes.outputs.docs-only == 'false'
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
env:
|
||||
DOCKER_CLI_EXPERIMENTAL: "enabled"
|
||||
outputs:
|
||||
IMAGE: ghcr.io/coder/coder-preview:${{ steps.build-docker.outputs.tag }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Setup sqlc
|
||||
uses: ./.github/actions/setup-sqlc
|
||||
|
||||
- name: GHCR Login
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
@@ -758,27 +678,51 @@ jobs:
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build and push Linux amd64 Docker image
|
||||
id: build_and_push
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Install nfpm
|
||||
run: go install github.com/goreleaser/nfpm/v2/cmd/nfpm@v2.16.0
|
||||
|
||||
- name: Install zstd
|
||||
run: sudo apt-get install -y zstd
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
go mod download
|
||||
make gen/mark-fresh
|
||||
export DOCKER_IMAGE_NO_PREREQUISITES=true
|
||||
|
||||
version="$(./scripts/version.sh)"
|
||||
make gen/mark-fresh
|
||||
make -j \
|
||||
build/coder_linux_amd64 \
|
||||
build/coder_"$version"_windows_amd64.zip \
|
||||
build/coder_"$version"_linux_amd64.{tar.gz,deb}
|
||||
|
||||
- name: Build and Push Linux amd64 Docker Image
|
||||
id: build-docker
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
version="$(./scripts/version.sh)"
|
||||
tag="main-$(echo "$version" | sed 's/+/-/g')"
|
||||
|
||||
export CODER_IMAGE_BUILD_BASE_TAG="$(CODER_IMAGE_BASE=coder-base ./scripts/image_tag.sh --version "$version")"
|
||||
make -j build/coder_linux_amd64
|
||||
./scripts/build_docker.sh \
|
||||
--arch amd64 \
|
||||
--target ghcr.io/coder/coder-preview:main \
|
||||
--target "ghcr.io/coder/coder-preview:$tag" \
|
||||
--version $version \
|
||||
--push \
|
||||
build/coder_linux_amd64
|
||||
|
||||
# Tag image with new package tag and push
|
||||
tag=$(echo "$version" | sed 's/+/-/g')
|
||||
docker tag ghcr.io/coder/coder-preview:main ghcr.io/coder/coder-preview:main-$tag
|
||||
docker push ghcr.io/coder/coder-preview:main-$tag
|
||||
# Tag as main
|
||||
docker tag "ghcr.io/coder/coder-preview:$tag" ghcr.io/coder/coder-preview:main
|
||||
docker push ghcr.io/coder/coder-preview:main
|
||||
|
||||
# Store the tag in an output variable so we can use it in other jobs
|
||||
echo "tag=$tag" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Prune old images
|
||||
uses: vlaurin/action-ghcr-prune@v0.5.0
|
||||
@@ -790,3 +734,183 @@ jobs:
|
||||
keep-tags-regexes: ^pr
|
||||
prune-tags-regexes: ^main-
|
||||
prune-untagged: true
|
||||
|
||||
- name: Upload build artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: coder
|
||||
path: |
|
||||
./build/*.zip
|
||||
./build/*.tar.gz
|
||||
./build/*.deb
|
||||
retention-days: 7
|
||||
|
||||
deploy:
|
||||
name: "deploy"
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
needs:
|
||||
- changes
|
||||
- build
|
||||
if: |
|
||||
github.ref == 'refs/heads/main' && !github.event.pull_request.head.repo.fork
|
||||
&& needs.changes.outputs.docs-only == 'false'
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Authenticate to Google Cloud
|
||||
uses: google-github-actions/auth@v2
|
||||
with:
|
||||
workload_identity_provider: projects/573722524737/locations/global/workloadIdentityPools/github/providers/github
|
||||
service_account: coder-ci@coder-dogfood.iam.gserviceaccount.com
|
||||
|
||||
- name: Set up Google Cloud SDK
|
||||
uses: google-github-actions/setup-gcloud@v2
|
||||
|
||||
- name: Set up Flux CLI
|
||||
uses: fluxcd/flux2/action@main
|
||||
with:
|
||||
# Keep this up to date with the version of flux installed in dogfood cluster
|
||||
version: "2.2.1"
|
||||
|
||||
- name: Get Cluster Credentials
|
||||
uses: "google-github-actions/get-gke-credentials@v2"
|
||||
with:
|
||||
cluster_name: dogfood-v2
|
||||
location: us-central1-a
|
||||
project_id: coder-dogfood-v2
|
||||
|
||||
- name: Reconcile Flux
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
flux --namespace flux-system reconcile source git flux-system
|
||||
flux --namespace flux-system reconcile source git coder-main
|
||||
flux --namespace flux-system reconcile kustomization flux-system
|
||||
flux --namespace flux-system reconcile kustomization coder
|
||||
flux --namespace flux-system reconcile source chart coder-coder
|
||||
flux --namespace flux-system reconcile source chart coder-coder-provisioner
|
||||
flux --namespace coder reconcile helmrelease coder
|
||||
flux --namespace coder reconcile helmrelease coder-provisioner
|
||||
|
||||
# Just updating Flux is usually not enough. The Helm release may get
|
||||
# redeployed, but unless something causes the Deployment to update the
|
||||
# pods won't be recreated. It's important that the pods get recreated,
|
||||
# since we use `imagePullPolicy: Always` to ensure we're running the
|
||||
# latest image.
|
||||
- name: Rollout Deployment
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
kubectl --namespace coder rollout restart deployment/coder
|
||||
kubectl --namespace coder rollout status deployment/coder
|
||||
kubectl --namespace coder rollout restart deployment/coder-provisioner
|
||||
kubectl --namespace coder rollout status deployment/coder-provisioner
|
||||
|
||||
deploy-wsproxies:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
if: github.ref == 'refs/heads/main' && !github.event.pull_request.head.repo.fork
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Setup flyctl
|
||||
uses: superfly/flyctl-actions/setup-flyctl@master
|
||||
|
||||
- name: Deploy workspace proxies
|
||||
run: |
|
||||
flyctl deploy --image "$IMAGE" --app paris-coder --config ./.github/fly-wsproxies/paris-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_PARIS" --yes
|
||||
flyctl deploy --image "$IMAGE" --app sydney-coder --config ./.github/fly-wsproxies/sydney-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_SYDNEY" --yes
|
||||
flyctl deploy --image "$IMAGE" --app sao-paulo-coder --config ./.github/fly-wsproxies/sao-paulo-coder.toml --env "CODER_PROXY_SESSION_TOKEN=$TOKEN_SAO_PAULO" --yes
|
||||
env:
|
||||
FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }}
|
||||
IMAGE: ${{ needs.build.outputs.IMAGE }}
|
||||
TOKEN_PARIS: ${{ secrets.FLY_PARIS_CODER_PROXY_SESSION_TOKEN }}
|
||||
TOKEN_SYDNEY: ${{ secrets.FLY_SYDNEY_CODER_PROXY_SESSION_TOKEN }}
|
||||
TOKEN_SAO_PAULO: ${{ secrets.FLY_SAO_PAULO_CODER_PROXY_SESSION_TOKEN }}
|
||||
|
||||
deploy-legacy-proxies:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
needs: build
|
||||
if: github.ref == 'refs/heads/main' && !github.event.pull_request.head.repo.fork
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Authenticate to Google Cloud
|
||||
uses: google-github-actions/auth@v2
|
||||
with:
|
||||
workload_identity_provider: projects/573722524737/locations/global/workloadIdentityPools/github/providers/github
|
||||
service_account: coder-ci@coder-dogfood.iam.gserviceaccount.com
|
||||
|
||||
- name: Set up Google Cloud SDK
|
||||
uses: google-github-actions/setup-gcloud@v2
|
||||
|
||||
- name: Download build artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: coder
|
||||
path: ./build
|
||||
|
||||
- name: Install Release
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
regions=(
|
||||
# gcp-region-id instance-name systemd-service-name
|
||||
"australia-southeast1-b coder-sydney coder-workspace-proxy"
|
||||
"europe-west3-c coder-europe coder-workspace-proxy"
|
||||
"southamerica-east1-b coder-brazil coder-workspace-proxy"
|
||||
)
|
||||
|
||||
deb_pkg=$(find ./build -name "coder_*_linux_amd64.deb" -print -quit)
|
||||
if [ -z "$deb_pkg" ]; then
|
||||
echo "deb package $deb_pkg not found"
|
||||
ls -l ./build
|
||||
exit 1
|
||||
fi
|
||||
|
||||
gcloud config set project coder-dogfood
|
||||
for region in "${regions[@]}"; do
|
||||
echo "::group::$region"
|
||||
set -- $region
|
||||
|
||||
set -x
|
||||
gcloud config set compute/zone "$1"
|
||||
gcloud compute scp "$deb_pkg" "${2}:/tmp/coder.deb"
|
||||
gcloud compute ssh "$2" -- /bin/sh -c "set -eux; sudo dpkg -i --force-confdef /tmp/coder.deb; sudo systemctl daemon-reload; sudo service '$3' restart"
|
||||
set +x
|
||||
|
||||
echo "::endgroup::"
|
||||
done
|
||||
|
||||
# sqlc-vet runs a postgres docker container, runs Coder migrations, and then
|
||||
# runs sqlc-vet to ensure all queries are valid. This catches any mistakes
|
||||
# in migrations or sqlc queries that makes a query unable to be prepared.
|
||||
sqlc-vet:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
needs: changes
|
||||
if: needs.changes.outputs.db == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 1
|
||||
# We need golang to run the migration main.go
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Setup sqlc
|
||||
uses: ./.github/actions/setup-sqlc
|
||||
|
||||
- name: Setup and run sqlc vet
|
||||
run: |
|
||||
make sqlc-vet
|
||||
|
||||
@@ -55,7 +55,7 @@ jobs:
|
||||
if: ${{ github.event_name == 'pull_request_target' && success() && !github.event.pull_request.draft }}
|
||||
steps:
|
||||
- name: release-labels
|
||||
uses: actions/github-script@v6
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
# This script ensures PR title and labels are in sync:
|
||||
#
|
||||
|
||||
@@ -5,15 +5,11 @@ on:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- "flake.nix"
|
||||
- "flake.lock"
|
||||
- "dogfood/**"
|
||||
- ".github/workflows/dogfood.yaml"
|
||||
# Uncomment these lines when testing with CI.
|
||||
# pull_request:
|
||||
# paths:
|
||||
# - "flake.nix"
|
||||
# - "flake.lock"
|
||||
# - "dogfood/**"
|
||||
# - ".github/workflows/dogfood.yaml"
|
||||
workflow_dispatch:
|
||||
@@ -27,7 +23,7 @@ jobs:
|
||||
|
||||
- name: Get branch name
|
||||
id: branch-name
|
||||
uses: tj-actions/branch-names@v6.5
|
||||
uses: tj-actions/branch-names@v8
|
||||
|
||||
- name: "Branch name to Docker tag name"
|
||||
id: docker-tag-name
|
||||
@@ -37,13 +33,8 @@ jobs:
|
||||
tag=${tag//\//--}
|
||||
echo "tag=${tag}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Install Nix
|
||||
uses: DeterminateSystems/nix-installer-action@v6
|
||||
|
||||
- name: Run the Magic Nix Cache
|
||||
uses: DeterminateSystems/magic-nix-cache-action@v2
|
||||
|
||||
- run: nix build .#devEnvImage && ./result | docker load
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v3
|
||||
@@ -51,10 +42,15 @@ jobs:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
|
||||
- name: Tag and Push
|
||||
run: |
|
||||
docker tag codercom/oss-dogfood:latest codercom/oss-dogfood:${{ steps.docker-tag-name.outputs.tag }}
|
||||
docker push codercom/oss-dogfood -a
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: "{{defaultContext}}:dogfood"
|
||||
pull: true
|
||||
push: true
|
||||
tags: "codercom/oss-dogfood:${{ steps.docker-tag-name.outputs.tag }},codercom/oss-dogfood:latest"
|
||||
cache-from: type=registry,ref=codercom/oss-dogfood:latest
|
||||
cache-to: type=inline
|
||||
|
||||
deploy_template:
|
||||
needs: deploy_image
|
||||
|
||||
@@ -9,10 +9,6 @@ on:
|
||||
- main
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
pr_number:
|
||||
description: "PR number"
|
||||
type: number
|
||||
required: true
|
||||
experiments:
|
||||
description: "Experiments to enable"
|
||||
required: false
|
||||
@@ -355,6 +351,7 @@ jobs:
|
||||
- name: Install/Upgrade Helm chart
|
||||
run: |
|
||||
set -euo pipefail
|
||||
helm dependency update --skip-refresh ./helm/coder
|
||||
helm upgrade --install "pr${{ env.PR_NUMBER }}" ./helm/coder \
|
||||
--namespace "pr${{ env.PR_NUMBER }}" \
|
||||
--values ./pr-deploy-values.yaml \
|
||||
|
||||
@@ -281,13 +281,13 @@ jobs:
|
||||
CODER_GPG_RELEASE_KEY_BASE64: ${{ secrets.GPG_RELEASE_KEY_BASE64 }}
|
||||
|
||||
- name: Authenticate to Google Cloud
|
||||
uses: google-github-actions/auth@v1
|
||||
uses: google-github-actions/auth@v2
|
||||
with:
|
||||
workload_identity_provider: ${{ secrets.GCP_WORKLOAD_ID_PROVIDER }}
|
||||
service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
|
||||
|
||||
- name: Setup GCloud SDK
|
||||
uses: "google-github-actions/setup-gcloud@v1"
|
||||
uses: "google-github-actions/setup-gcloud@v2"
|
||||
|
||||
- name: Publish Helm Chart
|
||||
if: ${{ !inputs.dry_run }}
|
||||
@@ -306,7 +306,7 @@ jobs:
|
||||
|
||||
- name: Upload artifacts to actions (if dry-run)
|
||||
if: ${{ inputs.dry_run }}
|
||||
uses: actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: release-artifacts
|
||||
path: |
|
||||
@@ -434,27 +434,26 @@ jobs:
|
||||
|
||||
$release_assets = gh release view --repo coder/coder "v${version}" --json assets | `
|
||||
ConvertFrom-Json
|
||||
# Get the installer URL from the release assets.
|
||||
$installer_url = $release_assets.assets | `
|
||||
# Get the installer URLs from the release assets.
|
||||
$amd64_installer_url = $release_assets.assets | `
|
||||
Where-Object name -Match ".*_windows_amd64_installer.exe$" | `
|
||||
Select -ExpandProperty url
|
||||
$amd64_zip_url = $release_assets.assets | `
|
||||
Where-Object name -Match ".*_windows_amd64.zip$" | `
|
||||
Select -ExpandProperty url
|
||||
$arm64_zip_url = $release_assets.assets | `
|
||||
Where-Object name -Match ".*_windows_arm64.zip$" | `
|
||||
Select -ExpandProperty url
|
||||
|
||||
echo "Installer URL: ${installer_url}"
|
||||
echo "amd64 Installer URL: ${amd64_installer_url}"
|
||||
echo "amd64 zip URL: ${amd64_zip_url}"
|
||||
echo "arm64 zip URL: ${arm64_zip_url}"
|
||||
echo "Package version: ${version}"
|
||||
|
||||
# The URL "|X64" suffix forces the architecture as it cannot be
|
||||
# sniffed properly from the URL. wingetcreate checks both the URL and
|
||||
# binary magic bytes for the architecture and they need to both match,
|
||||
# but they only check for `x64`, `win64` and `_64` in the URL. Our URL
|
||||
# contains `amd64` which doesn't match sadly.
|
||||
#
|
||||
# wingetcreate will still do the binary magic bytes check, so if we
|
||||
# accidentally change the architecture of the installer, it will fail
|
||||
# submission.
|
||||
.\wingetcreate.exe update Coder.Coder `
|
||||
--submit `
|
||||
--version "${version}" `
|
||||
--urls "${installer_url}|X64" `
|
||||
--urls "${amd64_installer_url}" "${amd64_zip_url}" "${arm64_zip_url}" `
|
||||
--token "$env:WINGET_GH_TOKEN"
|
||||
|
||||
env:
|
||||
@@ -481,65 +480,28 @@ jobs:
|
||||
# different repo.
|
||||
GH_TOKEN: ${{ secrets.CDRCI_GITHUB_TOKEN }}
|
||||
|
||||
publish-chocolatey:
|
||||
name: Publish to Chocolatey
|
||||
runs-on: windows-latest
|
||||
# publish-sqlc pushes the latest schema to sqlc cloud.
|
||||
# At present these pushes cannot be tagged, so the last push is always the latest.
|
||||
publish-sqlc:
|
||||
name: "Publish to schema sqlc cloud"
|
||||
runs-on: "ubuntu-latest"
|
||||
needs: release
|
||||
if: ${{ !inputs.dry_run }}
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
fetch-depth: 1
|
||||
|
||||
# Same reason as for release.
|
||||
- name: Fetch git tags
|
||||
run: git fetch --tags --force
|
||||
# We need golang to run the migration main.go
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
# From https://chocolatey.org
|
||||
- name: Install Chocolatey
|
||||
- name: Setup sqlc
|
||||
uses: ./.github/actions/setup-sqlc
|
||||
|
||||
- name: Push schema to sqlc cloud
|
||||
# Don't block a release on this
|
||||
continue-on-error: true
|
||||
run: |
|
||||
Set-ExecutionPolicy Bypass -Scope Process -Force
|
||||
[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072
|
||||
|
||||
iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
|
||||
|
||||
- name: Build chocolatey package
|
||||
run: |
|
||||
cd scripts/chocolatey
|
||||
|
||||
# The package version is the same as the tag minus the leading "v".
|
||||
# The version in this output already has the leading "v" removed but
|
||||
# we do it again to be safe.
|
||||
$version = "${{ needs.release.outputs.version }}".Trim('v')
|
||||
|
||||
$release_assets = gh release view --repo coder/coder "v${version}" --json assets | `
|
||||
ConvertFrom-Json
|
||||
|
||||
# Get the URL for the Windows ZIP from the release assets.
|
||||
$zip_url = $release_assets.assets | `
|
||||
Where-Object name -Match ".*_windows_amd64.zip$" | `
|
||||
Select -ExpandProperty url
|
||||
|
||||
echo "ZIP URL: ${zip_url}"
|
||||
echo "Package version: ${version}"
|
||||
|
||||
echo "Downloading ZIP..."
|
||||
Invoke-WebRequest $zip_url -OutFile assets.zip
|
||||
|
||||
echo "Extracting ZIP..."
|
||||
Expand-Archive assets.zip -DestinationPath assets/
|
||||
|
||||
# No need to specify nuspec if there's only one in the directory.
|
||||
choco pack --version=$version binary_path=assets/coder.exe
|
||||
|
||||
choco apikey --api-key $env:CHOCO_API_KEY --source https://push.chocolatey.org/
|
||||
|
||||
# No need to specify nupkg if there's only one in the directory.
|
||||
choco push --source https://push.chocolatey.org/
|
||||
|
||||
env:
|
||||
CHOCO_API_KEY: ${{ secrets.CHOCO_API_KEY }}
|
||||
# We need a GitHub token for the gh CLI to function under GitHub Actions
|
||||
GH_TOKEN: ${{ secrets.CDRCI_GITHUB_TOKEN }}
|
||||
make sqlc-push
|
||||
|
||||
@@ -29,7 +29,7 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v2
|
||||
uses: github/codeql-action/init@v3
|
||||
with:
|
||||
languages: go, javascript
|
||||
|
||||
@@ -42,7 +42,7 @@ jobs:
|
||||
rm Makefile
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v2
|
||||
uses: github/codeql-action/analyze@v3
|
||||
|
||||
- name: Send Slack notification on failure
|
||||
if: ${{ failure() }}
|
||||
@@ -122,7 +122,7 @@ jobs:
|
||||
image_name: ${{ steps.build.outputs.image }}
|
||||
|
||||
- name: Run Trivy vulnerability scanner
|
||||
uses: aquasecurity/trivy-action@fbd16365eb88e12433951383f5e99bd901fc618f
|
||||
uses: aquasecurity/trivy-action@91713af97dc80187565512baba96e4364e983601
|
||||
with:
|
||||
image-ref: ${{ steps.build.outputs.image }}
|
||||
format: sarif
|
||||
@@ -130,13 +130,13 @@ jobs:
|
||||
severity: "CRITICAL,HIGH"
|
||||
|
||||
- name: Upload Trivy scan results to GitHub Security tab
|
||||
uses: github/codeql-action/upload-sarif@v2
|
||||
uses: github/codeql-action/upload-sarif@v3
|
||||
with:
|
||||
sarif_file: trivy-results.sarif
|
||||
category: "Trivy"
|
||||
|
||||
- name: Upload Trivy scan results as an artifact
|
||||
uses: actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: trivy
|
||||
path: trivy-results.sarif
|
||||
|
||||
@@ -13,7 +13,7 @@ jobs:
|
||||
actions: write
|
||||
steps:
|
||||
- name: stale
|
||||
uses: actions/stale@v8.0.0
|
||||
uses: actions/stale@v9.0.0
|
||||
with:
|
||||
stale-issue-label: "stale"
|
||||
stale-pr-label: "stale"
|
||||
@@ -30,6 +30,52 @@ jobs:
|
||||
operations-per-run: 60
|
||||
# Start with the oldest issues, always.
|
||||
ascending: true
|
||||
- name: "Close old issues labeled likely-no"
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
script: |
|
||||
const thirtyDaysAgo = new Date(new Date().setDate(new Date().getDate() - 30));
|
||||
console.log(`Looking for issues labeled with 'likely-no' more than 30 days ago, which is after ${thirtyDaysAgo.toISOString()}`);
|
||||
|
||||
const issues = await github.rest.issues.listForRepo({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
labels: 'likely-no',
|
||||
state: 'open',
|
||||
});
|
||||
|
||||
console.log(`Found ${issues.data.length} open issues labeled with 'likely-no'`);
|
||||
|
||||
for (const issue of issues.data) {
|
||||
console.log(`Checking issue #${issue.number} created at ${issue.created_at}`);
|
||||
|
||||
const timeline = await github.rest.issues.listEventsForTimeline({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: issue.number,
|
||||
});
|
||||
|
||||
const labelEvent = timeline.data.find(event => event.event === 'labeled' && event.label.name === 'likely-no');
|
||||
|
||||
if (labelEvent) {
|
||||
console.log(`Issue #${issue.number} was labeled with 'likely-no' at ${labelEvent.created_at}`);
|
||||
|
||||
if (new Date(labelEvent.created_at) < thirtyDaysAgo) {
|
||||
console.log(`Issue #${issue.number} is older than 30 days with 'likely-no' label, closing issue.`);
|
||||
await github.rest.issues.update({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: issue.number,
|
||||
state: 'closed',
|
||||
state_reason: 'not planned'
|
||||
});
|
||||
}
|
||||
} else {
|
||||
console.log(`Issue #${issue.number} does not have a 'likely-no' label event in its timeline.`);
|
||||
}
|
||||
}
|
||||
|
||||
branches:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
@@ -52,8 +98,8 @@ jobs:
|
||||
with:
|
||||
token: ${{ github.token }}
|
||||
repository: ${{ github.repository }}
|
||||
retain_days: 1
|
||||
keep_minimum_runs: 1
|
||||
retain_days: 30
|
||||
keep_minimum_runs: 30
|
||||
delete_workflow_pattern: pr-cleanup.yaml
|
||||
|
||||
- name: Delete PR Deploy workflow skipped runs
|
||||
@@ -61,7 +107,6 @@ jobs:
|
||||
with:
|
||||
token: ${{ github.token }}
|
||||
repository: ${{ github.repository }}
|
||||
retain_days: 0
|
||||
keep_minimum_runs: 0
|
||||
delete_run_by_conclusion_pattern: skipped
|
||||
retain_days: 30
|
||||
keep_minimum_runs: 30
|
||||
delete_workflow_pattern: pr-deploy.yaml
|
||||
|
||||
@@ -14,6 +14,7 @@ darcula = "darcula"
|
||||
Hashi = "Hashi"
|
||||
trialer = "trialer"
|
||||
encrypter = "encrypter"
|
||||
hel = "hel" # as in helsinki
|
||||
|
||||
[files]
|
||||
extend-exclude = [
|
||||
@@ -29,4 +30,5 @@ extend-exclude = [
|
||||
"**/*_test.go",
|
||||
"**/*.test.tsx",
|
||||
"**/pnpm-lock.yaml",
|
||||
"tailnet/testdata/**",
|
||||
]
|
||||
|
||||
@@ -20,7 +20,6 @@ yarn-error.log
|
||||
|
||||
# Front-end ignore patterns.
|
||||
.next/
|
||||
site/**/*.typegen.ts
|
||||
site/build-storybook.log
|
||||
site/coverage/
|
||||
site/storybook-static/
|
||||
|
||||
+2
-1
@@ -23,7 +23,6 @@ yarn-error.log
|
||||
|
||||
# Front-end ignore patterns.
|
||||
.next/
|
||||
site/**/*.typegen.ts
|
||||
site/build-storybook.log
|
||||
site/coverage/
|
||||
site/storybook-static/
|
||||
@@ -83,6 +82,8 @@ helm/**/templates/*.yaml
|
||||
|
||||
# Testdata shouldn't be formatted.
|
||||
scripts/apitypings/testdata/**/*.ts
|
||||
enterprise/tailnet/testdata/*.golden.html
|
||||
tailnet/testdata/*.golden.html
|
||||
|
||||
# Generated files shouldn't be formatted.
|
||||
site/e2e/provisionerGenerated.ts
|
||||
|
||||
@@ -8,6 +8,8 @@ helm/**/templates/*.yaml
|
||||
|
||||
# Testdata shouldn't be formatted.
|
||||
scripts/apitypings/testdata/**/*.ts
|
||||
enterprise/tailnet/testdata/*.golden.html
|
||||
tailnet/testdata/*.golden.html
|
||||
|
||||
# Generated files shouldn't be formatted.
|
||||
site/e2e/provisionerGenerated.ts
|
||||
|
||||
Vendored
+3
-4
@@ -18,9 +18,10 @@
|
||||
"coderdenttest",
|
||||
"coderdtest",
|
||||
"codersdk",
|
||||
"contravariance",
|
||||
"cronstrue",
|
||||
"databasefake",
|
||||
"dbfake",
|
||||
"dbmem",
|
||||
"dbgen",
|
||||
"dbtype",
|
||||
"DERP",
|
||||
@@ -170,7 +171,7 @@
|
||||
"wsconncache",
|
||||
"wsjson",
|
||||
"xerrors",
|
||||
"xstate",
|
||||
"xlarge",
|
||||
"yamux"
|
||||
],
|
||||
"cSpell.ignorePaths": ["site/package.json", ".vscode/settings.json"],
|
||||
@@ -206,8 +207,6 @@
|
||||
"files.insertFinalNewline": true,
|
||||
"go.lintTool": "golangci-lint",
|
||||
"go.lintFlags": ["--fast"],
|
||||
"go.lintOnSave": "package",
|
||||
"go.coverOnSave": true,
|
||||
"go.coverageDecorator": {
|
||||
"type": "gutter",
|
||||
"coveredGutterStyle": "blockgreen",
|
||||
|
||||
@@ -50,7 +50,7 @@ endif
|
||||
# Note, all find statements should be written with `.` or `./path` as
|
||||
# the search path so that these exclusions match.
|
||||
FIND_EXCLUSIONS= \
|
||||
-not \( \( -path '*/.git/*' -o -path './build/*' -o -path './vendor/*' -o -path './.coderv2/*' -o -path '*/node_modules/*' -o -path '*/out/*' -o -path './coderd/apidoc/*' -o -path '*/.next/*' \) -prune \)
|
||||
-not \( \( -path '*/.git/*' -o -path './build/*' -o -path './vendor/*' -o -path './.coderv2/*' -o -path '*/node_modules/*' -o -path '*/out/*' -o -path './coderd/apidoc/*' -o -path '*/.next/*' -o -path '*/.terraform/*' \) -prune \)
|
||||
# Source files used for make targets, evaluated on use.
|
||||
GO_SRC_FILES := $(shell find . $(FIND_EXCLUSIONS) -type f -name '*.go' -not -name '*_test.go')
|
||||
# All the shell files in the repo, excluding ignored files.
|
||||
@@ -428,7 +428,8 @@ lint/ts:
|
||||
|
||||
lint/go:
|
||||
./scripts/check_enterprise_imports.sh
|
||||
go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.53.2
|
||||
linter_ver=$(shell egrep -o 'GOLANGCI_LINT_VERSION=\S+' dogfood/Dockerfile | cut -d '=' -f 2)
|
||||
go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$$linter_ver
|
||||
golangci-lint run
|
||||
.PHONY: lint/go
|
||||
|
||||
@@ -448,13 +449,15 @@ lint/helm:
|
||||
DB_GEN_FILES := \
|
||||
coderd/database/querier.go \
|
||||
coderd/database/unique_constraint.go \
|
||||
coderd/database/dbfake/dbfake.go \
|
||||
coderd/database/dbmem/dbmem.go \
|
||||
coderd/database/dbmetrics/dbmetrics.go \
|
||||
coderd/database/dbauthz/dbauthz.go \
|
||||
coderd/database/dbmock/dbmock.go
|
||||
|
||||
# all gen targets should be added here and to gen/mark-fresh
|
||||
gen: \
|
||||
tailnet/proto/tailnet.pb.go \
|
||||
agent/proto/agent.pb.go \
|
||||
provisionersdk/proto/provisioner.pb.go \
|
||||
provisionerd/proto/provisionerd.pb.go \
|
||||
coderd/database/dump.sql \
|
||||
@@ -479,6 +482,8 @@ gen: \
|
||||
# used during releases so we don't run generation scripts.
|
||||
gen/mark-fresh:
|
||||
files="\
|
||||
tailnet/proto/tailnet.pb.go \
|
||||
agent/proto/agent.pb.go \
|
||||
provisionersdk/proto/provisioner.pb.go \
|
||||
provisionerd/proto/provisionerd.pb.go \
|
||||
coderd/database/dump.sql \
|
||||
@@ -524,6 +529,22 @@ coderd/database/querier.go: coderd/database/sqlc.yaml coderd/database/dump.sql $
|
||||
coderd/database/dbmock/dbmock.go: coderd/database/db.go coderd/database/querier.go
|
||||
go generate ./coderd/database/dbmock/
|
||||
|
||||
tailnet/proto/tailnet.pb.go: tailnet/proto/tailnet.proto
|
||||
protoc \
|
||||
--go_out=. \
|
||||
--go_opt=paths=source_relative \
|
||||
--go-drpc_out=. \
|
||||
--go-drpc_opt=paths=source_relative \
|
||||
./tailnet/proto/tailnet.proto
|
||||
|
||||
agent/proto/agent.pb.go: agent/proto/agent.proto
|
||||
protoc \
|
||||
--go_out=. \
|
||||
--go_opt=paths=source_relative \
|
||||
--go-drpc_out=. \
|
||||
--go-drpc_opt=paths=source_relative \
|
||||
./agent/proto/agent.proto
|
||||
|
||||
provisionersdk/proto/provisioner.pb.go: provisionersdk/proto/provisioner.proto
|
||||
protoc \
|
||||
--go_out=. \
|
||||
@@ -567,7 +588,7 @@ docs/cli.md: scripts/clidocgen/main.go examples/examples.gen.json $(GO_SRC_FILES
|
||||
CI=true BASE_PATH="." go run ./scripts/clidocgen
|
||||
pnpm run format:write:only ./docs/cli.md ./docs/cli/*.md ./docs/manifest.json
|
||||
|
||||
docs/admin/audit-logs.md: scripts/auditdocgen/main.go enterprise/audit/table.go coderd/rbac/object_gen.go
|
||||
docs/admin/audit-logs.md: coderd/database/querier.go scripts/auditdocgen/main.go enterprise/audit/table.go coderd/rbac/object_gen.go
|
||||
go run scripts/auditdocgen/main.go
|
||||
pnpm run format:write:only ./docs/admin/audit-logs.md
|
||||
|
||||
@@ -575,7 +596,16 @@ coderd/apidoc/swagger.json: $(shell find ./scripts/apidocgen $(FIND_EXCLUSIONS)
|
||||
./scripts/apidocgen/generate.sh
|
||||
pnpm run format:write:only ./docs/api ./docs/manifest.json ./coderd/apidoc/swagger.json
|
||||
|
||||
update-golden-files: cli/testdata/.gen-golden helm/coder/tests/testdata/.gen-golden helm/provisioner/tests/testdata/.gen-golden scripts/ci-report/testdata/.gen-golden enterprise/cli/testdata/.gen-golden coderd/.gen-golden provisioner/terraform/testdata/.gen-golden
|
||||
update-golden-files: \
|
||||
cli/testdata/.gen-golden \
|
||||
helm/coder/tests/testdata/.gen-golden \
|
||||
helm/provisioner/tests/testdata/.gen-golden \
|
||||
scripts/ci-report/testdata/.gen-golden \
|
||||
enterprise/cli/testdata/.gen-golden \
|
||||
enterprise/tailnet/testdata/.gen-golden \
|
||||
tailnet/testdata/.gen-golden \
|
||||
coderd/.gen-golden \
|
||||
provisioner/terraform/testdata/.gen-golden
|
||||
.PHONY: update-golden-files
|
||||
|
||||
cli/testdata/.gen-golden: $(wildcard cli/testdata/*.golden) $(wildcard cli/*.tpl) $(GO_SRC_FILES) $(wildcard cli/*_test.go)
|
||||
@@ -586,6 +616,14 @@ enterprise/cli/testdata/.gen-golden: $(wildcard enterprise/cli/testdata/*.golden
|
||||
go test ./enterprise/cli -run="TestEnterpriseCommandHelp" -update
|
||||
touch "$@"
|
||||
|
||||
tailnet/testdata/.gen-golden: $(wildcard tailnet/testdata/*.golden.html) $(GO_SRC_FILES) $(wildcard tailnet/*_test.go)
|
||||
go test ./tailnet -run="TestDebugTemplate" -update
|
||||
touch "$@"
|
||||
|
||||
enterprise/tailnet/testdata/.gen-golden: $(wildcard enterprise/tailnet/testdata/*.golden.html) $(GO_SRC_FILES) $(wildcard enterprise/tailnet/*_test.go)
|
||||
go test ./enterprise/tailnet -run="TestDebugTemplate" -update
|
||||
touch "$@"
|
||||
|
||||
helm/coder/tests/testdata/.gen-golden: $(wildcard helm/coder/tests/testdata/*.yaml) $(wildcard helm/coder/tests/testdata/*.golden) $(GO_SRC_FILES) $(wildcard helm/coder/tests/*_test.go)
|
||||
go test ./helm/coder/tests -run=TestUpdateGoldenFiles -update
|
||||
touch "$@"
|
||||
@@ -670,6 +708,33 @@ test:
|
||||
gotestsum --format standard-quiet -- -v -short -count=1 ./...
|
||||
.PHONY: test
|
||||
|
||||
# sqlc-cloud-is-setup will fail if no SQLc auth token is set. Use this as a
|
||||
# dependency for any sqlc-cloud related targets.
|
||||
sqlc-cloud-is-setup:
|
||||
if [[ "$(SQLC_AUTH_TOKEN)" == "" ]]; then
|
||||
echo "ERROR: 'SQLC_AUTH_TOKEN' must be set to auth with sqlc cloud before running verify." 1>&2
|
||||
exit 1
|
||||
fi
|
||||
.PHONY: sqlc-cloud-is-setup
|
||||
|
||||
sqlc-push: sqlc-cloud-is-setup test-postgres-docker
|
||||
echo "--- sqlc push"
|
||||
SQLC_DATABASE_URL="postgresql://postgres:postgres@localhost:5432/$(shell go run scripts/migrate-ci/main.go)" \
|
||||
sqlc push -f coderd/database/sqlc.yaml && echo "Passed sqlc push"
|
||||
.PHONY: sqlc-push
|
||||
|
||||
sqlc-verify: sqlc-cloud-is-setup test-postgres-docker
|
||||
echo "--- sqlc verify"
|
||||
SQLC_DATABASE_URL="postgresql://postgres:postgres@localhost:5432/$(shell go run scripts/migrate-ci/main.go)" \
|
||||
sqlc verify -f coderd/database/sqlc.yaml && echo "Passed sqlc verify"
|
||||
.PHONY: sqlc-verify
|
||||
|
||||
sqlc-vet: test-postgres-docker
|
||||
echo "--- sqlc vet"
|
||||
SQLC_DATABASE_URL="postgresql://postgres:postgres@localhost:5432/$(shell go run scripts/migrate-ci/main.go)" \
|
||||
sqlc vet -f coderd/database/sqlc.yaml && echo "Passed sqlc vet"
|
||||
.PHONY: sqlc-vet
|
||||
|
||||
# When updating -timeout for this test, keep in sync with
|
||||
# test-go-postgres (.github/workflows/coder.yaml).
|
||||
# Do add coverage flags so that test caching works.
|
||||
|
||||
@@ -70,7 +70,7 @@ curl -L https://coder.com/install.sh | sh
|
||||
|
||||
You can run the install script with `--dry-run` to see the commands that will be used to install without executing them. You can modify the installation process by including flags. Run the install script with `--help` for reference.
|
||||
|
||||
> See [install](docs/install) for additional methods.
|
||||
> See [install](https://coder.com/docs/v2/latest/install) for additional methods.
|
||||
|
||||
Once installed, you can start a production deployment<sup>1</sup> with a single command:
|
||||
|
||||
|
||||
+36
-14
@@ -35,6 +35,8 @@ import (
|
||||
"tailscale.com/types/netlogtype"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"github.com/coder/retry"
|
||||
|
||||
"github.com/coder/coder/v2/agent/agentproc"
|
||||
"github.com/coder/coder/v2/agent/agentscripts"
|
||||
"github.com/coder/coder/v2/agent/agentssh"
|
||||
@@ -45,7 +47,6 @@ import (
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
"github.com/coder/coder/v2/tailnet"
|
||||
"github.com/coder/retry"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -68,6 +69,7 @@ type Options struct {
|
||||
EnvironmentVariables map[string]string
|
||||
Logger slog.Logger
|
||||
IgnorePorts map[int]string
|
||||
PortCacheDuration time.Duration
|
||||
SSHMaxTimeout time.Duration
|
||||
TailnetListenPort uint16
|
||||
Subsystems []codersdk.AgentSubsystem
|
||||
@@ -126,6 +128,9 @@ func New(options Options) Agent {
|
||||
if options.ServiceBannerRefreshInterval == 0 {
|
||||
options.ServiceBannerRefreshInterval = 2 * time.Minute
|
||||
}
|
||||
if options.PortCacheDuration == 0 {
|
||||
options.PortCacheDuration = 1 * time.Second
|
||||
}
|
||||
|
||||
prometheusRegistry := options.PrometheusRegistry
|
||||
if prometheusRegistry == nil {
|
||||
@@ -153,6 +158,7 @@ func New(options Options) Agent {
|
||||
lifecycleReported: make(chan codersdk.WorkspaceAgentLifecycle, 1),
|
||||
lifecycleStates: []agentsdk.PostLifecycleRequest{{State: codersdk.WorkspaceAgentLifecycleCreated}},
|
||||
ignorePorts: options.IgnorePorts,
|
||||
portCacheDuration: options.PortCacheDuration,
|
||||
connStatsChan: make(chan *agentsdk.Stats, 1),
|
||||
reportMetadataInterval: options.ReportMetadataInterval,
|
||||
serviceBannerRefreshInterval: options.ServiceBannerRefreshInterval,
|
||||
@@ -181,8 +187,9 @@ type agent struct {
|
||||
// ignorePorts tells the api handler which ports to ignore when
|
||||
// listing all listening ports. This is helpful to hide ports that
|
||||
// are used by the agent, that the user does not care about.
|
||||
ignorePorts map[int]string
|
||||
subsystems []codersdk.AgentSubsystem
|
||||
ignorePorts map[int]string
|
||||
portCacheDuration time.Duration
|
||||
subsystems []codersdk.AgentSubsystem
|
||||
|
||||
reconnectingPTYs sync.Map
|
||||
reconnectingPTYTimeout time.Duration
|
||||
@@ -216,8 +223,10 @@ type agent struct {
|
||||
connCountReconnectingPTY atomic.Int64
|
||||
|
||||
prometheusRegistry *prometheus.Registry
|
||||
metrics *agentMetrics
|
||||
syscaller agentproc.Syscaller
|
||||
// metrics are prometheus registered metrics that will be collected and
|
||||
// labeled in Coder with the agent + workspace.
|
||||
metrics *agentMetrics
|
||||
syscaller agentproc.Syscaller
|
||||
|
||||
// modifiedProcs is used for testing process priority management.
|
||||
modifiedProcs chan []*agentproc.Process
|
||||
@@ -246,6 +255,9 @@ func (a *agent) init(ctx context.Context) {
|
||||
Filesystem: a.filesystem,
|
||||
PatchLogs: a.client.PatchLogs,
|
||||
})
|
||||
// Register runner metrics. If the prom registry is nil, the metrics
|
||||
// will not report anywhere.
|
||||
a.scriptRunner.RegisterMetrics(a.prometheusRegistry)
|
||||
go a.runLoop(ctx)
|
||||
}
|
||||
|
||||
@@ -536,6 +548,14 @@ func (a *agent) reportMetadataLoop(ctx context.Context) {
|
||||
continue
|
||||
case <-report:
|
||||
if len(updatedMetadata) > 0 {
|
||||
select {
|
||||
case <-reportSemaphore:
|
||||
default:
|
||||
// If there's already a report in flight, don't send
|
||||
// another one, wait for next tick instead.
|
||||
continue
|
||||
}
|
||||
|
||||
metadata := make([]agentsdk.Metadata, 0, len(updatedMetadata))
|
||||
for key, result := range updatedMetadata {
|
||||
metadata = append(metadata, agentsdk.Metadata{
|
||||
@@ -545,14 +565,6 @@ func (a *agent) reportMetadataLoop(ctx context.Context) {
|
||||
delete(updatedMetadata, key)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-reportSemaphore:
|
||||
default:
|
||||
// If there's already a report in flight, don't send
|
||||
// another one, wait for next tick instead.
|
||||
continue
|
||||
}
|
||||
|
||||
go func() {
|
||||
ctx, cancel := context.WithTimeout(ctx, reportTimeout)
|
||||
defer func() {
|
||||
@@ -739,11 +751,14 @@ func (a *agent) run(ctx context.Context) error {
|
||||
return xerrors.Errorf("init script runner: %w", err)
|
||||
}
|
||||
err = a.trackConnGoroutine(func() {
|
||||
start := time.Now()
|
||||
err := a.scriptRunner.Execute(ctx, func(script codersdk.WorkspaceAgentScript) bool {
|
||||
return script.RunOnStart
|
||||
})
|
||||
// Measure the time immediately after the script has finished
|
||||
dur := time.Since(start).Seconds()
|
||||
if err != nil {
|
||||
a.logger.Warn(ctx, "startup script failed", slog.Error(err))
|
||||
a.logger.Warn(ctx, "startup script(s) failed", slog.Error(err))
|
||||
if errors.Is(err, agentscripts.ErrTimeout) {
|
||||
a.setLifecycle(ctx, codersdk.WorkspaceAgentLifecycleStartTimeout)
|
||||
} else {
|
||||
@@ -752,6 +767,12 @@ func (a *agent) run(ctx context.Context) error {
|
||||
} else {
|
||||
a.setLifecycle(ctx, codersdk.WorkspaceAgentLifecycleReady)
|
||||
}
|
||||
|
||||
label := "false"
|
||||
if err == nil {
|
||||
label = "true"
|
||||
}
|
||||
a.metrics.startupScriptSeconds.WithLabelValues(label).Set(dur)
|
||||
a.scriptRunner.StartCron()
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1465,6 +1486,7 @@ func (a *agent) Close() error {
|
||||
return script.RunOnStop
|
||||
})
|
||||
if err != nil {
|
||||
a.logger.Warn(ctx, "shutdown script(s) failed", slog.Error(err))
|
||||
if errors.Is(err, agentscripts.ErrTimeout) {
|
||||
lifecycleState = codersdk.WorkspaceAgentLifecycleShutdownTimeout
|
||||
} else {
|
||||
|
||||
+266
-319
@@ -1,11 +1,13 @@
|
||||
package agent_test
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
@@ -17,7 +19,6 @@ import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
@@ -25,7 +26,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
scp "github.com/bramvdbogaerde/go-scp"
|
||||
"github.com/bramvdbogaerde/go-scp"
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/google/uuid"
|
||||
"github.com/pion/udp"
|
||||
@@ -45,6 +46,7 @@ import (
|
||||
"cdr.dev/slog"
|
||||
"cdr.dev/slog/sloggers/sloghuman"
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
|
||||
"github.com/coder/coder/v2/agent"
|
||||
"github.com/coder/coder/v2/agent/agentproc"
|
||||
"github.com/coder/coder/v2/agent/agentproc/agentproctest"
|
||||
@@ -52,7 +54,6 @@ import (
|
||||
"github.com/coder/coder/v2/agent/agenttest"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
"github.com/coder/coder/v2/pty"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
"github.com/coder/coder/v2/tailnet"
|
||||
"github.com/coder/coder/v2/tailnet/tailnettest"
|
||||
@@ -153,7 +154,7 @@ func TestAgent_Stats_Magic(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, strings.TrimSpace(string(output)))
|
||||
})
|
||||
t.Run("Tracks", func(t *testing.T) {
|
||||
t.Run("TracksVSCode", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
if runtime.GOOS == "window" {
|
||||
t.Skip("Sleeping for infinity doesn't work on Windows")
|
||||
@@ -192,6 +193,77 @@ func TestAgent_Stats_Magic(t *testing.T) {
|
||||
err = session.Wait()
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("TracksJetBrains", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
if runtime.GOOS != "linux" {
|
||||
t.Skip("JetBrains tracking is only supported on Linux")
|
||||
}
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
// JetBrains tracking works by looking at the process name listening on the
|
||||
// forwarded port. If the process's command line includes the magic string
|
||||
// we are looking for, then we assume it is a JetBrains editor. So when we
|
||||
// connect to the port we must ensure the process includes that magic string
|
||||
// to fool the agent into thinking this is JetBrains. To do this we need to
|
||||
// spawn an external process (in this case a simple echo server) so we can
|
||||
// control the process name. The -D here is just to mimic how Java options
|
||||
// are set but is not necessary as the agent looks only for the magic
|
||||
// string itself anywhere in the command.
|
||||
_, b, _, ok := runtime.Caller(0)
|
||||
require.True(t, ok)
|
||||
dir := filepath.Join(filepath.Dir(b), "../scripts/echoserver/main.go")
|
||||
echoServerCmd := exec.Command("go", "run", dir,
|
||||
"-D", agentssh.MagicProcessCmdlineJetBrains)
|
||||
stdout, err := echoServerCmd.StdoutPipe()
|
||||
require.NoError(t, err)
|
||||
err = echoServerCmd.Start()
|
||||
require.NoError(t, err)
|
||||
defer echoServerCmd.Process.Kill()
|
||||
|
||||
// The echo server prints its port as the first line.
|
||||
sc := bufio.NewScanner(stdout)
|
||||
sc.Scan()
|
||||
remotePort := sc.Text()
|
||||
|
||||
//nolint:dogsled
|
||||
conn, _, stats, _, _ := setupAgent(t, agentsdk.Manifest{}, 0)
|
||||
sshClient, err := conn.SSHClient(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
tunneledConn, err := sshClient.Dial("tcp", fmt.Sprintf("127.0.0.1:%s", remotePort))
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
// always close on failure of test
|
||||
_ = conn.Close()
|
||||
_ = tunneledConn.Close()
|
||||
})
|
||||
|
||||
var s *agentsdk.Stats
|
||||
require.Eventuallyf(t, func() bool {
|
||||
var ok bool
|
||||
s, ok = <-stats
|
||||
return ok && s.ConnectionCount > 0 &&
|
||||
s.SessionCountJetBrains == 1
|
||||
}, testutil.WaitLong, testutil.IntervalFast,
|
||||
"never saw stats with conn open: %+v", s,
|
||||
)
|
||||
|
||||
// Kill the server and connection after checking for the echo.
|
||||
requireEcho(t, tunneledConn)
|
||||
_ = echoServerCmd.Process.Kill()
|
||||
_ = tunneledConn.Close()
|
||||
|
||||
require.Eventuallyf(t, func() bool {
|
||||
var ok bool
|
||||
s, ok = <-stats
|
||||
return ok && s.ConnectionCount == 0 &&
|
||||
s.SessionCountJetBrains == 0
|
||||
}, testutil.WaitLong, testutil.IntervalFast,
|
||||
"never saw stats after conn closes: %+v", s,
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
func TestAgent_SessionExec(t *testing.T) {
|
||||
@@ -350,8 +422,13 @@ func TestAgent_Session_TTY_MOTD(t *testing.T) {
|
||||
unexpected: []string{},
|
||||
},
|
||||
{
|
||||
name: "Trim",
|
||||
manifest: agentsdk.Manifest{},
|
||||
name: "Trim",
|
||||
// Enable motd since it will be printed after the banner,
|
||||
// this ensures that we can test for an exact mount of
|
||||
// newlines.
|
||||
manifest: agentsdk.Manifest{
|
||||
MOTDFile: name,
|
||||
},
|
||||
banner: codersdk.ServiceBannerConfig{
|
||||
Enabled: true,
|
||||
Message: "\n\n\n\n\n\nbanner\n\n\n\n\n\n",
|
||||
@@ -375,6 +452,7 @@ func TestAgent_Session_TTY_MOTD(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
//nolint:tparallel // Sub tests need to run sequentially.
|
||||
func TestAgent_Session_TTY_MOTD_Update(t *testing.T) {
|
||||
t.Parallel()
|
||||
if runtime.GOOS == "windows" {
|
||||
@@ -434,33 +512,38 @@ func TestAgent_Session_TTY_MOTD_Update(t *testing.T) {
|
||||
}
|
||||
//nolint:dogsled // Allow the blank identifiers.
|
||||
conn, client, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, setSBInterval)
|
||||
for _, test := range tests {
|
||||
|
||||
sshClient, err := conn.SSHClient(ctx)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
_ = sshClient.Close()
|
||||
})
|
||||
|
||||
//nolint:paralleltest // These tests need to swap the banner func.
|
||||
for i, test := range tests {
|
||||
test := test
|
||||
// Set new banner func and wait for the agent to call it to update the
|
||||
// banner.
|
||||
ready := make(chan struct{}, 2)
|
||||
client.SetServiceBannerFunc(func() (codersdk.ServiceBannerConfig, error) {
|
||||
select {
|
||||
case ready <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
return test.banner, nil
|
||||
})
|
||||
<-ready
|
||||
<-ready // Wait for two updates to ensure the value has propagated.
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
// Set new banner func and wait for the agent to call it to update the
|
||||
// banner.
|
||||
ready := make(chan struct{}, 2)
|
||||
client.SetServiceBannerFunc(func() (codersdk.ServiceBannerConfig, error) {
|
||||
select {
|
||||
case ready <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
return test.banner, nil
|
||||
})
|
||||
<-ready
|
||||
<-ready // Wait for two updates to ensure the value has propagated.
|
||||
|
||||
sshClient, err := conn.SSHClient(ctx)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
_ = sshClient.Close()
|
||||
})
|
||||
session, err := sshClient.NewSession()
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
_ = session.Close()
|
||||
})
|
||||
session, err := sshClient.NewSession()
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
_ = session.Close()
|
||||
})
|
||||
|
||||
testSessionOutput(t, session, test.expected, test.unexpected, nil)
|
||||
testSessionOutput(t, session, test.expected, test.unexpected, nil)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -637,150 +720,57 @@ func TestAgent_Session_TTY_HugeOutputIsNotLost(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
//nolint:paralleltest // This test reserves a port.
|
||||
func TestAgent_TCPLocalForwarding(t *testing.T) {
|
||||
random, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
_ = random.Close()
|
||||
tcpAddr, valid := random.Addr().(*net.TCPAddr)
|
||||
require.True(t, valid)
|
||||
randomPort := tcpAddr.Port
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
|
||||
local, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
rl, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
defer local.Close()
|
||||
tcpAddr, valid = local.Addr().(*net.TCPAddr)
|
||||
defer rl.Close()
|
||||
tcpAddr, valid := rl.Addr().(*net.TCPAddr)
|
||||
require.True(t, valid)
|
||||
remotePort := tcpAddr.Port
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
conn, err := local.Accept()
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
b := make([]byte, 4)
|
||||
_, err = conn.Read(b)
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
_, err = conn.Write(b)
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
}()
|
||||
go echoOnce(t, rl)
|
||||
|
||||
_, proc := setupSSHCommand(t, []string{"-L", fmt.Sprintf("%d:127.0.0.1:%d", randomPort, remotePort)}, []string{"sleep", "5"})
|
||||
sshClient := setupAgentSSHClient(ctx, t)
|
||||
|
||||
go func() {
|
||||
err := proc.Wait()
|
||||
select {
|
||||
case <-done:
|
||||
default:
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}()
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
conn, err := net.Dial("tcp", "127.0.0.1:"+strconv.Itoa(randomPort))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
defer conn.Close()
|
||||
_, err = conn.Write([]byte("test"))
|
||||
if !assert.NoError(t, err) {
|
||||
return false
|
||||
}
|
||||
b := make([]byte, 4)
|
||||
_, err = conn.Read(b)
|
||||
if !assert.NoError(t, err) {
|
||||
return false
|
||||
}
|
||||
if !assert.Equal(t, "test", string(b)) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}, testutil.WaitLong, testutil.IntervalSlow)
|
||||
|
||||
<-done
|
||||
|
||||
_ = proc.Kill()
|
||||
conn, err := sshClient.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", remotePort))
|
||||
require.NoError(t, err)
|
||||
defer conn.Close()
|
||||
requireEcho(t, conn)
|
||||
}
|
||||
|
||||
//nolint:paralleltest // This test reserves a port.
|
||||
func TestAgent_TCPRemoteForwarding(t *testing.T) {
|
||||
random, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
_ = random.Close()
|
||||
tcpAddr, valid := random.Addr().(*net.TCPAddr)
|
||||
require.True(t, valid)
|
||||
randomPort := tcpAddr.Port
|
||||
t.Parallel()
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
sshClient := setupAgentSSHClient(ctx, t)
|
||||
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
defer l.Close()
|
||||
tcpAddr, valid = l.Addr().(*net.TCPAddr)
|
||||
require.True(t, valid)
|
||||
localPort := tcpAddr.Port
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
|
||||
conn, err := l.Accept()
|
||||
localhost := netip.MustParseAddr("127.0.0.1")
|
||||
var randomPort uint16
|
||||
var ll net.Listener
|
||||
var err error
|
||||
for {
|
||||
randomPort = pickRandomPort()
|
||||
addr := net.TCPAddrFromAddrPort(netip.AddrPortFrom(localhost, randomPort))
|
||||
ll, err = sshClient.ListenTCP(addr)
|
||||
if err != nil {
|
||||
return
|
||||
t.Logf("error remote forwarding: %s", err.Error())
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Fatal("timed out getting random listener")
|
||||
default:
|
||||
continue
|
||||
}
|
||||
}
|
||||
defer conn.Close()
|
||||
b := make([]byte, 4)
|
||||
_, err = conn.Read(b)
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
_, err = conn.Write(b)
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
}()
|
||||
break
|
||||
}
|
||||
defer ll.Close()
|
||||
go echoOnce(t, ll)
|
||||
|
||||
_, proc := setupSSHCommand(t, []string{"-R", fmt.Sprintf("127.0.0.1:%d:127.0.0.1:%d", randomPort, localPort)}, []string{"sleep", "5"})
|
||||
|
||||
go func() {
|
||||
err := proc.Wait()
|
||||
select {
|
||||
case <-done:
|
||||
default:
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}()
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", randomPort))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
defer conn.Close()
|
||||
_, err = conn.Write([]byte("test"))
|
||||
if !assert.NoError(t, err) {
|
||||
return false
|
||||
}
|
||||
b := make([]byte, 4)
|
||||
_, err = conn.Read(b)
|
||||
if !assert.NoError(t, err) {
|
||||
return false
|
||||
}
|
||||
if !assert.Equal(t, "test", string(b)) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}, testutil.WaitLong, testutil.IntervalSlow)
|
||||
|
||||
<-done
|
||||
|
||||
_ = proc.Kill()
|
||||
conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", randomPort))
|
||||
require.NoError(t, err)
|
||||
defer conn.Close()
|
||||
requireEcho(t, conn)
|
||||
}
|
||||
|
||||
func TestAgent_UnixLocalForwarding(t *testing.T) {
|
||||
@@ -788,52 +778,18 @@ func TestAgent_UnixLocalForwarding(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("unix domain sockets are not fully supported on Windows")
|
||||
}
|
||||
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
tmpdir := tempDirUnixSocket(t)
|
||||
remoteSocketPath := filepath.Join(tmpdir, "remote-socket")
|
||||
localSocketPath := filepath.Join(tmpdir, "local-socket")
|
||||
|
||||
l, err := net.Listen("unix", remoteSocketPath)
|
||||
require.NoError(t, err)
|
||||
defer l.Close()
|
||||
go echoOnce(t, l)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
sshClient := setupAgentSSHClient(ctx, t)
|
||||
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
b := make([]byte, 4)
|
||||
_, err = conn.Read(b)
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
_, err = conn.Write(b)
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
_, proc := setupSSHCommand(t, []string{"-L", fmt.Sprintf("%s:%s", localSocketPath, remoteSocketPath)}, []string{"sleep", "5"})
|
||||
|
||||
go func() {
|
||||
err := proc.Wait()
|
||||
select {
|
||||
case <-done:
|
||||
default:
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}()
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
_, err := os.Stat(localSocketPath)
|
||||
return err == nil
|
||||
}, testutil.WaitLong, testutil.IntervalFast)
|
||||
|
||||
conn, err := net.Dial("unix", localSocketPath)
|
||||
conn, err := sshClient.Dial("unix", remoteSocketPath)
|
||||
require.NoError(t, err)
|
||||
defer conn.Close()
|
||||
_, err = conn.Write([]byte("test"))
|
||||
@@ -843,9 +799,6 @@ func TestAgent_UnixLocalForwarding(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "test", string(b))
|
||||
_ = conn.Close()
|
||||
<-done
|
||||
|
||||
_ = proc.Kill()
|
||||
}
|
||||
|
||||
func TestAgent_UnixRemoteForwarding(t *testing.T) {
|
||||
@@ -856,66 +809,19 @@ func TestAgent_UnixRemoteForwarding(t *testing.T) {
|
||||
|
||||
tmpdir := tempDirUnixSocket(t)
|
||||
remoteSocketPath := filepath.Join(tmpdir, "remote-socket")
|
||||
localSocketPath := filepath.Join(tmpdir, "local-socket")
|
||||
|
||||
l, err := net.Listen("unix", localSocketPath)
|
||||
ctx := testutil.Context(t, testutil.WaitLong)
|
||||
sshClient := setupAgentSSHClient(ctx, t)
|
||||
|
||||
l, err := sshClient.ListenUnix(remoteSocketPath)
|
||||
require.NoError(t, err)
|
||||
defer l.Close()
|
||||
go echoOnce(t, l)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
b := make([]byte, 4)
|
||||
_, err = conn.Read(b)
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
_, err = conn.Write(b)
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
_, proc := setupSSHCommand(t, []string{"-R", fmt.Sprintf("%s:%s", remoteSocketPath, localSocketPath)}, []string{"sleep", "5"})
|
||||
|
||||
go func() {
|
||||
err := proc.Wait()
|
||||
select {
|
||||
case <-done:
|
||||
default:
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}()
|
||||
|
||||
// It's possible that the socket is created but the server is not ready to
|
||||
// accept connections yet. We need to retry until we can connect.
|
||||
//
|
||||
// Note that we wait long here because if the tailnet connection has trouble
|
||||
// connecting, it could take 5 seconds or more to reconnect.
|
||||
var conn net.Conn
|
||||
require.Eventually(t, func() bool {
|
||||
var err error
|
||||
conn, err = net.Dial("unix", remoteSocketPath)
|
||||
return err == nil
|
||||
}, testutil.WaitLong, testutil.IntervalFast)
|
||||
conn, err := net.Dial("unix", remoteSocketPath)
|
||||
require.NoError(t, err)
|
||||
defer conn.Close()
|
||||
_, err = conn.Write([]byte("test"))
|
||||
require.NoError(t, err)
|
||||
b := make([]byte, 4)
|
||||
_, err = conn.Read(b)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "test", string(b))
|
||||
_ = conn.Close()
|
||||
|
||||
<-done
|
||||
|
||||
_ = proc.Kill()
|
||||
requireEcho(t, conn)
|
||||
}
|
||||
|
||||
func TestAgent_SFTP(t *testing.T) {
|
||||
@@ -1714,32 +1620,34 @@ func TestAgent_Dial(t *testing.T) {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Setup listener
|
||||
// The purpose of this test is to ensure that a client can dial a
|
||||
// listener in the workspace over tailnet.
|
||||
l := c.setup(t)
|
||||
defer l.Close()
|
||||
go func() {
|
||||
for {
|
||||
c, err := l.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
done := make(chan struct{})
|
||||
defer func() {
|
||||
l.Close()
|
||||
<-done
|
||||
}()
|
||||
|
||||
go testAccept(t, c)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
|
||||
go func() {
|
||||
defer close(done)
|
||||
c, err := l.Accept()
|
||||
if assert.NoError(t, err, "accept connection") {
|
||||
defer c.Close()
|
||||
testAccept(ctx, t, c)
|
||||
}
|
||||
}()
|
||||
|
||||
//nolint:dogsled
|
||||
conn, _, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0)
|
||||
require.True(t, conn.AwaitReachable(context.Background()))
|
||||
conn1, err := conn.DialContext(context.Background(), l.Addr().Network(), l.Addr().String())
|
||||
agentConn, _, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0)
|
||||
require.True(t, agentConn.AwaitReachable(ctx))
|
||||
conn, err := agentConn.DialContext(ctx, l.Addr().Network(), l.Addr().String())
|
||||
require.NoError(t, err)
|
||||
defer conn1.Close()
|
||||
conn2, err := conn.DialContext(context.Background(), l.Addr().Network(), l.Addr().String())
|
||||
require.NoError(t, err)
|
||||
defer conn2.Close()
|
||||
testDial(t, conn2)
|
||||
testDial(t, conn1)
|
||||
time.Sleep(150 * time.Millisecond)
|
||||
defer conn.Close()
|
||||
testDial(ctx, t, conn)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -2052,50 +1960,14 @@ func TestAgent_DebugServer(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func setupSSHCommand(t *testing.T, beforeArgs []string, afterArgs []string) (*ptytest.PTYCmd, pty.Process) {
|
||||
//nolint:dogsled
|
||||
// setupAgentSSHClient creates an agent, dials it, and sets up an ssh.Client for it
|
||||
func setupAgentSSHClient(ctx context.Context, t *testing.T) *ssh.Client {
|
||||
//nolint: dogsled
|
||||
agentConn, _, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0)
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
sshClient, err := agentConn.SSHClient(ctx)
|
||||
require.NoError(t, err)
|
||||
waitGroup := sync.WaitGroup{}
|
||||
go func() {
|
||||
defer listener.Close()
|
||||
for {
|
||||
conn, err := listener.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
ssh, err := agentConn.SSH(ctx)
|
||||
cancel()
|
||||
if err != nil {
|
||||
_ = conn.Close()
|
||||
return
|
||||
}
|
||||
waitGroup.Add(1)
|
||||
go func() {
|
||||
agentssh.Bicopy(context.Background(), conn, ssh)
|
||||
waitGroup.Done()
|
||||
}()
|
||||
}
|
||||
}()
|
||||
t.Cleanup(func() {
|
||||
_ = listener.Close()
|
||||
waitGroup.Wait()
|
||||
})
|
||||
tcpAddr, valid := listener.Addr().(*net.TCPAddr)
|
||||
require.True(t, valid)
|
||||
args := append(beforeArgs,
|
||||
"-o", "HostName "+tcpAddr.IP.String(),
|
||||
"-o", "Port "+strconv.Itoa(tcpAddr.Port),
|
||||
"-o", "StrictHostKeyChecking=no",
|
||||
"-o", "UserKnownHostsFile=/dev/null",
|
||||
"host",
|
||||
)
|
||||
args = append(args, afterArgs...)
|
||||
cmd := pty.Command("ssh", args...)
|
||||
return ptytest.Start(t, cmd)
|
||||
t.Cleanup(func() { sshClient.Close() })
|
||||
return sshClient
|
||||
}
|
||||
|
||||
func setupSSHSession(
|
||||
@@ -2205,22 +2077,41 @@ func setupAgent(t *testing.T, metadata agentsdk.Manifest, ptyTimeout time.Durati
|
||||
|
||||
var dialTestPayload = []byte("dean-was-here123")
|
||||
|
||||
func testDial(t *testing.T, c net.Conn) {
|
||||
func testDial(ctx context.Context, t *testing.T, c net.Conn) {
|
||||
t.Helper()
|
||||
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
err := c.SetDeadline(deadline)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := c.SetDeadline(time.Time{})
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
}
|
||||
|
||||
assertWritePayload(t, c, dialTestPayload)
|
||||
assertReadPayload(t, c, dialTestPayload)
|
||||
}
|
||||
|
||||
func testAccept(t *testing.T, c net.Conn) {
|
||||
func testAccept(ctx context.Context, t *testing.T, c net.Conn) {
|
||||
t.Helper()
|
||||
defer c.Close()
|
||||
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
err := c.SetDeadline(deadline)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := c.SetDeadline(time.Time{})
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
}
|
||||
|
||||
assertReadPayload(t, c, dialTestPayload)
|
||||
assertWritePayload(t, c, dialTestPayload)
|
||||
}
|
||||
|
||||
func assertReadPayload(t *testing.T, r io.Reader, payload []byte) {
|
||||
t.Helper()
|
||||
b := make([]byte, len(payload)+16)
|
||||
n, err := r.Read(b)
|
||||
assert.NoError(t, err, "read payload")
|
||||
@@ -2229,6 +2120,7 @@ func assertReadPayload(t *testing.T, r io.Reader, payload []byte) {
|
||||
}
|
||||
|
||||
func assertWritePayload(t *testing.T, w io.Writer, payload []byte) {
|
||||
t.Helper()
|
||||
n, err := w.Write(payload)
|
||||
assert.NoError(t, err, "write payload")
|
||||
assert.Equal(t, len(payload), n, "payload length does not match")
|
||||
@@ -2345,6 +2237,17 @@ func TestAgent_Metrics_SSH(t *testing.T) {
|
||||
Type: agentsdk.AgentMetricTypeCounter,
|
||||
Value: 0,
|
||||
},
|
||||
{
|
||||
Name: "coderd_agentstats_startup_script_seconds",
|
||||
Type: agentsdk.AgentMetricTypeGauge,
|
||||
Value: 0,
|
||||
Labels: []agentsdk.AgentMetricLabel{
|
||||
{
|
||||
Name: "success",
|
||||
Value: "true",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var actual []*promgo.MetricFamily
|
||||
@@ -2569,3 +2472,47 @@ func (s *syncWriter) Write(p []byte) (int, error) {
|
||||
defer s.mu.Unlock()
|
||||
return s.w.Write(p)
|
||||
}
|
||||
|
||||
// pickRandomPort picks a random port number for the ephemeral range. We do this entirely randomly
|
||||
// instead of opening a listener and closing it to find a port that is likely to be free, since
|
||||
// sometimes the OS reallocates the port very quickly.
|
||||
func pickRandomPort() uint16 {
|
||||
const (
|
||||
// Overlap of windows, linux in https://en.wikipedia.org/wiki/Ephemeral_port
|
||||
min = 49152
|
||||
max = 60999
|
||||
)
|
||||
n := max - min
|
||||
x := rand.Intn(n) //nolint: gosec
|
||||
return uint16(min + x)
|
||||
}
|
||||
|
||||
// echoOnce accepts a single connection, reads 4 bytes and echos them back
|
||||
func echoOnce(t *testing.T, ll net.Listener) {
|
||||
t.Helper()
|
||||
conn, err := ll.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
b := make([]byte, 4)
|
||||
_, err = conn.Read(b)
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
_, err = conn.Write(b)
|
||||
if !assert.NoError(t, err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// requireEcho sends 4 bytes and requires the read response to match what was sent.
|
||||
func requireEcho(t *testing.T, conn net.Conn) {
|
||||
t.Helper()
|
||||
_, err := conn.Write([]byte("test"))
|
||||
require.NoError(t, err)
|
||||
b := make([]byte, 4)
|
||||
_, err = conn.Read(b)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "test", string(b))
|
||||
}
|
||||
|
||||
@@ -7,18 +7,18 @@ import (
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
func (p *Process) Niceness(sc Syscaller) (int, error) {
|
||||
func (*Process) Niceness(Syscaller) (int, error) {
|
||||
return 0, errUnimplemented
|
||||
}
|
||||
|
||||
func (p *Process) SetNiceness(sc Syscaller, score int) error {
|
||||
func (*Process) SetNiceness(Syscaller, int) error {
|
||||
return errUnimplemented
|
||||
}
|
||||
|
||||
func (p *Process) Cmd() string {
|
||||
func (*Process) Cmd() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func List(fs afero.Fs, syscaller Syscaller) ([]*Process, error) {
|
||||
func List(afero.Fs, Syscaller) ([]*Process, error) {
|
||||
return nil, errUnimplemented
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ type Syscaller interface {
|
||||
Kill(pid int32, sig syscall.Signal) error
|
||||
}
|
||||
|
||||
// nolint: unused // used on some but no all platforms
|
||||
const defaultProcDir = "/proc"
|
||||
|
||||
type Process struct {
|
||||
|
||||
@@ -17,14 +17,14 @@ var errUnimplemented = xerrors.New("unimplemented")
|
||||
|
||||
type nopSyscaller struct{}
|
||||
|
||||
func (nopSyscaller) SetPriority(pid int32, priority int) error {
|
||||
func (nopSyscaller) SetPriority(int32, int) error {
|
||||
return errUnimplemented
|
||||
}
|
||||
|
||||
func (nopSyscaller) GetPriority(pid int32) (int, error) {
|
||||
func (nopSyscaller) GetPriority(int32) (int, error) {
|
||||
return 0, errUnimplemented
|
||||
}
|
||||
|
||||
func (nopSyscaller) Kill(pid int32, sig syscall.Signal) error {
|
||||
func (nopSyscaller) Kill(int32, syscall.Signal) error {
|
||||
return errUnimplemented
|
||||
}
|
||||
|
||||
@@ -13,12 +13,14 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/robfig/cron/v3"
|
||||
"github.com/spf13/afero"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
|
||||
"github.com/coder/coder/v2/agent/agentssh"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
@@ -27,6 +29,14 @@ import (
|
||||
var (
|
||||
// ErrTimeout is returned when a script times out.
|
||||
ErrTimeout = xerrors.New("script timed out")
|
||||
// ErrOutputPipesOpen is returned when a script exits leaving the output
|
||||
// pipe(s) (stdout, stderr) open. This happens because we set WaitDelay on
|
||||
// the command, which gives us two things:
|
||||
//
|
||||
// 1. The ability to ensure that a script exits (this is important for e.g.
|
||||
// blocking login, and avoiding doing so indefinitely)
|
||||
// 2. Improved command cancellation on timeout
|
||||
ErrOutputPipesOpen = xerrors.New("script exited without closing output pipes")
|
||||
|
||||
parser = cron.NewParser(cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.DowOptional)
|
||||
)
|
||||
@@ -49,6 +59,11 @@ func New(opts Options) *Runner {
|
||||
cronCtxCancel: cronCtxCancel,
|
||||
cron: cron.New(cron.WithParser(parser)),
|
||||
closed: make(chan struct{}),
|
||||
scriptsExecuted: prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: "agent",
|
||||
Subsystem: "scripts",
|
||||
Name: "executed_total",
|
||||
}, []string{"success"}),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -63,6 +78,19 @@ type Runner struct {
|
||||
cron *cron.Cron
|
||||
initialized atomic.Bool
|
||||
scripts []codersdk.WorkspaceAgentScript
|
||||
|
||||
// scriptsExecuted includes all scripts executed by the workspace agent. Agents
|
||||
// execute startup scripts, and scripts on a cron schedule. Both will increment
|
||||
// this counter.
|
||||
scriptsExecuted *prometheus.CounterVec
|
||||
}
|
||||
|
||||
func (r *Runner) RegisterMetrics(reg prometheus.Registerer) {
|
||||
if reg == nil {
|
||||
// If no registry, do nothing.
|
||||
return
|
||||
}
|
||||
reg.MustRegister(r.scriptsExecuted)
|
||||
}
|
||||
|
||||
// Init initializes the runner with the provided scripts.
|
||||
@@ -82,7 +110,7 @@ func (r *Runner) Init(scripts []codersdk.WorkspaceAgentScript) error {
|
||||
}
|
||||
script := script
|
||||
_, err := r.cron.AddFunc(script.Cron, func() {
|
||||
err := r.run(r.cronCtx, script)
|
||||
err := r.trackRun(r.cronCtx, script)
|
||||
if err != nil {
|
||||
r.Logger.Warn(context.Background(), "run agent script on schedule", slog.Error(err))
|
||||
}
|
||||
@@ -97,7 +125,26 @@ func (r *Runner) Init(scripts []codersdk.WorkspaceAgentScript) error {
|
||||
// StartCron starts the cron scheduler.
|
||||
// This is done async to allow for the caller to execute scripts prior.
|
||||
func (r *Runner) StartCron() {
|
||||
r.cron.Start()
|
||||
// cron.Start() and cron.Stop() does not guarantee that the cron goroutine
|
||||
// has exited by the time the `cron.Stop()` context returns, so we need to
|
||||
// track it manually.
|
||||
err := r.trackCommandGoroutine(func() {
|
||||
// Since this is run async, in quick unit tests, it is possible the
|
||||
// Close() function gets called before we even start the cron.
|
||||
// In these cases, the Run() will never end.
|
||||
// So if we are closed, we just return, and skip the Run() entirely.
|
||||
select {
|
||||
case <-r.cronCtx.Done():
|
||||
// The cronCtx is canceled before cron.Close() happens. So if the ctx is
|
||||
// canceled, then Close() will be called, or it is about to be called.
|
||||
// So do nothing!
|
||||
default:
|
||||
r.cron.Run()
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
r.Logger.Warn(context.Background(), "start cron failed", slog.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
// Execute runs a set of scripts according to a filter.
|
||||
@@ -115,7 +162,7 @@ func (r *Runner) Execute(ctx context.Context, filter func(script codersdk.Worksp
|
||||
}
|
||||
script := script
|
||||
eg.Go(func() error {
|
||||
err := r.run(ctx, script)
|
||||
err := r.trackRun(ctx, script)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("run agent script %q: %w", script.LogSourceID, err)
|
||||
}
|
||||
@@ -125,6 +172,17 @@ func (r *Runner) Execute(ctx context.Context, filter func(script codersdk.Worksp
|
||||
return eg.Wait()
|
||||
}
|
||||
|
||||
// trackRun wraps "run" with metrics.
|
||||
func (r *Runner) trackRun(ctx context.Context, script codersdk.WorkspaceAgentScript) error {
|
||||
err := r.run(ctx, script)
|
||||
if err != nil {
|
||||
r.scriptsExecuted.WithLabelValues("false").Add(1)
|
||||
} else {
|
||||
r.scriptsExecuted.WithLabelValues("true").Add(1)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// run executes the provided script with the timeout.
|
||||
// If the timeout is exceeded, the process is sent an interrupt signal.
|
||||
// If the process does not exit after a few seconds, it is forcefully killed.
|
||||
@@ -240,7 +298,22 @@ func (r *Runner) run(ctx context.Context, script codersdk.WorkspaceAgentScript)
|
||||
err = cmdCtx.Err()
|
||||
case err = <-cmdDone:
|
||||
}
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
switch {
|
||||
case errors.Is(err, exec.ErrWaitDelay):
|
||||
err = ErrOutputPipesOpen
|
||||
message := fmt.Sprintf("script exited successfully, but output pipes were not closed after %s", cmd.WaitDelay)
|
||||
details := fmt.Sprint(
|
||||
"This usually means a child process was started with references to stdout or stderr. As a result, this " +
|
||||
"process may now have been terminated. Consider redirecting the output or using a separate " +
|
||||
"\"coder_script\" for the process, see " +
|
||||
"https://coder.com/docs/v2/latest/templates/troubleshooting#startup-script-issues for more information.",
|
||||
)
|
||||
// Inform the user by propagating the message via log writers.
|
||||
_, _ = fmt.Fprintf(cmd.Stderr, "WARNING: %s. %s\n", message, details)
|
||||
// Also log to agent logs for ease of debugging.
|
||||
r.Logger.Warn(ctx, message, slog.F("details", details), slog.Error(err))
|
||||
|
||||
case errors.Is(err, context.DeadlineExceeded):
|
||||
err = ErrTimeout
|
||||
}
|
||||
return err
|
||||
@@ -253,8 +326,9 @@ func (r *Runner) Close() error {
|
||||
return nil
|
||||
}
|
||||
close(r.closed)
|
||||
// Must cancel the cron ctx BEFORE stopping the cron.
|
||||
r.cronCtxCancel()
|
||||
r.cron.Stop()
|
||||
<-r.cron.Stop().Done()
|
||||
r.cmdCloseWait.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -53,6 +53,15 @@ func TestTimeout(t *testing.T) {
|
||||
require.ErrorIs(t, runner.Execute(context.Background(), nil), agentscripts.ErrTimeout)
|
||||
}
|
||||
|
||||
// TestCronClose exists because cron.Run() can happen after cron.Close().
|
||||
// If this happens, there used to be a deadlock.
|
||||
func TestCronClose(t *testing.T) {
|
||||
t.Parallel()
|
||||
runner := agentscripts.New(agentscripts.Options{})
|
||||
runner.StartCron()
|
||||
require.NoError(t, runner.Close(), "close runner")
|
||||
}
|
||||
|
||||
func setup(t *testing.T, patchLogs func(ctx context.Context, req agentsdk.PatchLogs) error) *agentscripts.Runner {
|
||||
t.Helper()
|
||||
if patchLogs == nil {
|
||||
|
||||
+122
-38
@@ -19,6 +19,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/gliderlabs/ssh"
|
||||
"github.com/google/uuid"
|
||||
"github.com/kballard/go-shellquote"
|
||||
"github.com/pkg/sftp"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
@@ -46,8 +47,12 @@ const (
|
||||
MagicSessionTypeEnvironmentVariable = "CODER_SSH_SESSION_TYPE"
|
||||
// MagicSessionTypeVSCode is set in the SSH config by the VS Code extension to identify itself.
|
||||
MagicSessionTypeVSCode = "vscode"
|
||||
// MagicSessionTypeJetBrains is set in the SSH config by the JetBrains extension to identify itself.
|
||||
// MagicSessionTypeJetBrains is set in the SSH config by the JetBrains
|
||||
// extension to identify itself.
|
||||
MagicSessionTypeJetBrains = "jetbrains"
|
||||
// MagicProcessCmdlineJetBrains is a string in a process's command line that
|
||||
// uniquely identifies it as JetBrains software.
|
||||
MagicProcessCmdlineJetBrains = "idea.vendor.name=JetBrains"
|
||||
)
|
||||
|
||||
type Server struct {
|
||||
@@ -110,7 +115,11 @@ func NewServer(ctx context.Context, logger slog.Logger, prometheusRegistry *prom
|
||||
|
||||
srv := &ssh.Server{
|
||||
ChannelHandlers: map[string]ssh.ChannelHandler{
|
||||
"direct-tcpip": ssh.DirectTCPIPHandler,
|
||||
"direct-tcpip": func(srv *ssh.Server, conn *gossh.ServerConn, newChan gossh.NewChannel, ctx ssh.Context) {
|
||||
// Wrapper is designed to find and track JetBrains Gateway connections.
|
||||
wrapped := NewJetbrainsChannelWatcher(ctx, s.logger, newChan, &s.connCountJetBrains)
|
||||
ssh.DirectTCPIPHandler(srv, conn, wrapped, ctx)
|
||||
},
|
||||
"direct-streamlocal@openssh.com": directStreamLocalHandler,
|
||||
"session": ssh.DefaultSessionHandler,
|
||||
},
|
||||
@@ -141,7 +150,7 @@ func NewServer(ctx context.Context, logger slog.Logger, prometheusRegistry *prom
|
||||
},
|
||||
ReversePortForwardingCallback: func(ctx ssh.Context, bindHost string, bindPort uint32) bool {
|
||||
// Allow reverse port forwarding all!
|
||||
s.logger.Debug(ctx, "local port forward",
|
||||
s.logger.Debug(ctx, "reverse port forward",
|
||||
slog.F("bind_host", bindHost),
|
||||
slog.F("bind_port", bindPort))
|
||||
return true
|
||||
@@ -192,9 +201,16 @@ func (s *Server) ConnStats() ConnStats {
|
||||
}
|
||||
|
||||
func (s *Server) sessionHandler(session ssh.Session) {
|
||||
logger := s.logger.With(slog.F("remote_addr", session.RemoteAddr()), slog.F("local_addr", session.LocalAddr()))
|
||||
logger.Info(session.Context(), "handling ssh session")
|
||||
ctx := session.Context()
|
||||
logger := s.logger.With(
|
||||
slog.F("remote_addr", session.RemoteAddr()),
|
||||
slog.F("local_addr", session.LocalAddr()),
|
||||
// Assigning a random uuid for each session is useful for tracking
|
||||
// logs for the same ssh session.
|
||||
slog.F("id", uuid.NewString()),
|
||||
)
|
||||
logger.Info(ctx, "handling ssh session")
|
||||
|
||||
if !s.trackSession(session, true) {
|
||||
// See (*Server).Close() for why we call Close instead of Exit.
|
||||
_ = session.Close()
|
||||
@@ -218,7 +234,7 @@ func (s *Server) sessionHandler(session ssh.Session) {
|
||||
switch ss := session.Subsystem(); ss {
|
||||
case "":
|
||||
case "sftp":
|
||||
s.sftpHandler(session)
|
||||
s.sftpHandler(logger, session)
|
||||
return
|
||||
default:
|
||||
logger.Warn(ctx, "unsupported subsystem", slog.F("subsystem", ss))
|
||||
@@ -226,11 +242,32 @@ func (s *Server) sessionHandler(session ssh.Session) {
|
||||
return
|
||||
}
|
||||
|
||||
err := s.sessionStart(session, extraEnv)
|
||||
err := s.sessionStart(logger, session, extraEnv)
|
||||
var exitError *exec.ExitError
|
||||
if xerrors.As(err, &exitError) {
|
||||
logger.Info(ctx, "ssh session returned", slog.Error(exitError))
|
||||
_ = session.Exit(exitError.ExitCode())
|
||||
code := exitError.ExitCode()
|
||||
if code == -1 {
|
||||
// If we return -1 here, it will be transmitted as an
|
||||
// uint32(4294967295). This exit code is nonsense, so
|
||||
// instead we return 255 (same as OpenSSH). This is
|
||||
// also the same exit code that the shell returns for
|
||||
// -1.
|
||||
//
|
||||
// For signals, we could consider sending 128+signal
|
||||
// instead (however, OpenSSH doesn't seem to do this).
|
||||
code = 255
|
||||
}
|
||||
logger.Info(ctx, "ssh session returned",
|
||||
slog.Error(exitError),
|
||||
slog.F("process_exit_code", exitError.ExitCode()),
|
||||
slog.F("exit_code", code),
|
||||
)
|
||||
|
||||
// TODO(mafredri): For signal exit, there's also an "exit-signal"
|
||||
// request (session.Exit sends "exit-status"), however, since it's
|
||||
// not implemented on the session interface and not used by
|
||||
// OpenSSH, we'll leave it for now.
|
||||
_ = session.Exit(code)
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
@@ -244,7 +281,7 @@ func (s *Server) sessionHandler(session ssh.Session) {
|
||||
_ = session.Exit(0)
|
||||
}
|
||||
|
||||
func (s *Server) sessionStart(session ssh.Session, extraEnv []string) (retErr error) {
|
||||
func (s *Server) sessionStart(logger slog.Logger, session ssh.Session, extraEnv []string) (retErr error) {
|
||||
ctx := session.Context()
|
||||
env := append(session.Environ(), extraEnv...)
|
||||
var magicType string
|
||||
@@ -252,23 +289,23 @@ func (s *Server) sessionStart(session ssh.Session, extraEnv []string) (retErr er
|
||||
if !strings.HasPrefix(kv, MagicSessionTypeEnvironmentVariable) {
|
||||
continue
|
||||
}
|
||||
magicType = strings.TrimPrefix(kv, MagicSessionTypeEnvironmentVariable+"=")
|
||||
magicType = strings.ToLower(strings.TrimPrefix(kv, MagicSessionTypeEnvironmentVariable+"="))
|
||||
env = append(env[:index], env[index+1:]...)
|
||||
}
|
||||
|
||||
// Always force lowercase checking to be case-insensitive.
|
||||
switch strings.ToLower(magicType) {
|
||||
case strings.ToLower(MagicSessionTypeVSCode):
|
||||
switch magicType {
|
||||
case MagicSessionTypeVSCode:
|
||||
s.connCountVSCode.Add(1)
|
||||
defer s.connCountVSCode.Add(-1)
|
||||
case strings.ToLower(MagicSessionTypeJetBrains):
|
||||
s.connCountJetBrains.Add(1)
|
||||
defer s.connCountJetBrains.Add(-1)
|
||||
case MagicSessionTypeJetBrains:
|
||||
// Do nothing here because JetBrains launches hundreds of ssh sessions.
|
||||
// We instead track JetBrains in the single persistent tcp forwarding channel.
|
||||
case "":
|
||||
s.connCountSSHSession.Add(1)
|
||||
defer s.connCountSSHSession.Add(-1)
|
||||
default:
|
||||
s.logger.Warn(ctx, "invalid magic ssh session type specified", slog.F("type", magicType))
|
||||
logger.Warn(ctx, "invalid magic ssh session type specified", slog.F("type", magicType))
|
||||
}
|
||||
|
||||
magicTypeLabel := magicTypeMetricLabel(magicType)
|
||||
@@ -301,12 +338,12 @@ func (s *Server) sessionStart(session ssh.Session, extraEnv []string) (retErr er
|
||||
}
|
||||
|
||||
if isPty {
|
||||
return s.startPTYSession(session, magicTypeLabel, cmd, sshPty, windowSize)
|
||||
return s.startPTYSession(logger, session, magicTypeLabel, cmd, sshPty, windowSize)
|
||||
}
|
||||
return s.startNonPTYSession(session, magicTypeLabel, cmd.AsExec())
|
||||
return s.startNonPTYSession(logger, session, magicTypeLabel, cmd.AsExec())
|
||||
}
|
||||
|
||||
func (s *Server) startNonPTYSession(session ssh.Session, magicTypeLabel string, cmd *exec.Cmd) error {
|
||||
func (s *Server) startNonPTYSession(logger slog.Logger, session ssh.Session, magicTypeLabel string, cmd *exec.Cmd) error {
|
||||
s.metrics.sessionsTotal.WithLabelValues(magicTypeLabel, "no").Add(1)
|
||||
|
||||
cmd.Stdout = session
|
||||
@@ -330,6 +367,17 @@ func (s *Server) startNonPTYSession(session ssh.Session, magicTypeLabel string,
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "no", "start_command").Add(1)
|
||||
return xerrors.Errorf("start: %w", err)
|
||||
}
|
||||
sigs := make(chan ssh.Signal, 1)
|
||||
session.Signals(sigs)
|
||||
defer func() {
|
||||
session.Signals(nil)
|
||||
close(sigs)
|
||||
}()
|
||||
go func() {
|
||||
for sig := range sigs {
|
||||
s.handleSignal(logger, sig, cmd.Process, magicTypeLabel)
|
||||
}
|
||||
}()
|
||||
return cmd.Wait()
|
||||
}
|
||||
|
||||
@@ -340,9 +388,10 @@ type ptySession interface {
|
||||
Context() ssh.Context
|
||||
DisablePTYEmulation()
|
||||
RawCommand() string
|
||||
Signals(chan<- ssh.Signal)
|
||||
}
|
||||
|
||||
func (s *Server) startPTYSession(session ptySession, magicTypeLabel string, cmd *pty.Cmd, sshPty ssh.Pty, windowSize <-chan ssh.Window) (retErr error) {
|
||||
func (s *Server) startPTYSession(logger slog.Logger, session ptySession, magicTypeLabel string, cmd *pty.Cmd, sshPty ssh.Pty, windowSize <-chan ssh.Window) (retErr error) {
|
||||
s.metrics.sessionsTotal.WithLabelValues(magicTypeLabel, "yes").Add(1)
|
||||
|
||||
ctx := session.Context()
|
||||
@@ -355,7 +404,7 @@ func (s *Server) startPTYSession(session ptySession, magicTypeLabel string, cmd
|
||||
if serviceBanner != nil {
|
||||
err := showServiceBanner(session, serviceBanner)
|
||||
if err != nil {
|
||||
s.logger.Error(ctx, "agent failed to show service banner", slog.Error(err))
|
||||
logger.Error(ctx, "agent failed to show service banner", slog.Error(err))
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "service_banner").Add(1)
|
||||
}
|
||||
}
|
||||
@@ -366,11 +415,11 @@ func (s *Server) startPTYSession(session ptySession, magicTypeLabel string, cmd
|
||||
if manifest != nil {
|
||||
err := showMOTD(s.fs, session, manifest.MOTDFile)
|
||||
if err != nil {
|
||||
s.logger.Error(ctx, "agent failed to show MOTD", slog.Error(err))
|
||||
logger.Error(ctx, "agent failed to show MOTD", slog.Error(err))
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "motd").Add(1)
|
||||
}
|
||||
} else {
|
||||
s.logger.Warn(ctx, "metadata lookup failed, unable to show MOTD")
|
||||
logger.Warn(ctx, "metadata lookup failed, unable to show MOTD")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -379,7 +428,7 @@ func (s *Server) startPTYSession(session ptySession, magicTypeLabel string, cmd
|
||||
// The pty package sets `SSH_TTY` on supported platforms.
|
||||
ptty, process, err := pty.Start(cmd, pty.WithPTYOption(
|
||||
pty.WithSSHRequest(sshPty),
|
||||
pty.WithLogger(slog.Stdlib(ctx, s.logger, slog.LevelInfo)),
|
||||
pty.WithLogger(slog.Stdlib(ctx, logger, slog.LevelInfo)),
|
||||
))
|
||||
if err != nil {
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "start_command").Add(1)
|
||||
@@ -388,20 +437,43 @@ func (s *Server) startPTYSession(session ptySession, magicTypeLabel string, cmd
|
||||
defer func() {
|
||||
closeErr := ptty.Close()
|
||||
if closeErr != nil {
|
||||
s.logger.Warn(ctx, "failed to close tty", slog.Error(closeErr))
|
||||
logger.Warn(ctx, "failed to close tty", slog.Error(closeErr))
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "close").Add(1)
|
||||
if retErr == nil {
|
||||
retErr = closeErr
|
||||
}
|
||||
}
|
||||
}()
|
||||
sigs := make(chan ssh.Signal, 1)
|
||||
session.Signals(sigs)
|
||||
defer func() {
|
||||
session.Signals(nil)
|
||||
close(sigs)
|
||||
}()
|
||||
go func() {
|
||||
for win := range windowSize {
|
||||
resizeErr := ptty.Resize(uint16(win.Height), uint16(win.Width))
|
||||
// If the pty is closed, then command has exited, no need to log.
|
||||
if resizeErr != nil && !errors.Is(resizeErr, pty.ErrClosed) {
|
||||
s.logger.Warn(ctx, "failed to resize tty", slog.Error(resizeErr))
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "resize").Add(1)
|
||||
for {
|
||||
if sigs == nil && windowSize == nil {
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case sig, ok := <-sigs:
|
||||
if !ok {
|
||||
sigs = nil
|
||||
continue
|
||||
}
|
||||
s.handleSignal(logger, sig, process, magicTypeLabel)
|
||||
case win, ok := <-windowSize:
|
||||
if !ok {
|
||||
windowSize = nil
|
||||
continue
|
||||
}
|
||||
resizeErr := ptty.Resize(uint16(win.Height), uint16(win.Width))
|
||||
// If the pty is closed, then command has exited, no need to log.
|
||||
if resizeErr != nil && !errors.Is(resizeErr, pty.ErrClosed) {
|
||||
logger.Warn(ctx, "failed to resize tty", slog.Error(resizeErr))
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "resize").Add(1)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -422,7 +494,7 @@ func (s *Server) startPTYSession(session ptySession, magicTypeLabel string, cmd
|
||||
// 2. The client hangs up, which cancels the command's Context, and go will
|
||||
// kill the command's process. This then has the same effect as (1).
|
||||
n, err := io.Copy(session, ptty.OutputReader())
|
||||
s.logger.Debug(ctx, "copy output done", slog.F("bytes", n), slog.Error(err))
|
||||
logger.Debug(ctx, "copy output done", slog.F("bytes", n), slog.Error(err))
|
||||
if err != nil {
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "output_io_copy").Add(1)
|
||||
return xerrors.Errorf("copy error: %w", err)
|
||||
@@ -435,7 +507,7 @@ func (s *Server) startPTYSession(session ptySession, magicTypeLabel string, cmd
|
||||
// ExitErrors just mean the command we run returned a non-zero exit code, which is normal
|
||||
// and not something to be concerned about. But, if it's something else, we should log it.
|
||||
if err != nil && !xerrors.As(err, &exitErr) {
|
||||
s.logger.Warn(ctx, "process wait exited with error", slog.Error(err))
|
||||
logger.Warn(ctx, "process wait exited with error", slog.Error(err))
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "wait").Add(1)
|
||||
}
|
||||
if err != nil {
|
||||
@@ -444,7 +516,19 @@ func (s *Server) startPTYSession(session ptySession, magicTypeLabel string, cmd
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) sftpHandler(session ssh.Session) {
|
||||
func (s *Server) handleSignal(logger slog.Logger, ssig ssh.Signal, signaler interface{ Signal(os.Signal) error }, magicTypeLabel string) {
|
||||
ctx := context.Background()
|
||||
sig := osSignalFrom(ssig)
|
||||
logger = logger.With(slog.F("ssh_signal", ssig), slog.F("signal", sig.String()))
|
||||
logger.Info(ctx, "received signal from client")
|
||||
err := signaler.Signal(sig)
|
||||
if err != nil {
|
||||
logger.Warn(ctx, "signaling the process failed", slog.Error(err))
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "signal").Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) sftpHandler(logger slog.Logger, session ssh.Session) {
|
||||
s.metrics.sftpConnectionsTotal.Add(1)
|
||||
|
||||
ctx := session.Context()
|
||||
@@ -460,14 +544,14 @@ func (s *Server) sftpHandler(session ssh.Session) {
|
||||
// directory so that SFTP connections land there.
|
||||
homedir, err := userHomeDir()
|
||||
if err != nil {
|
||||
s.logger.Warn(ctx, "get sftp working directory failed, unable to get home dir", slog.Error(err))
|
||||
logger.Warn(ctx, "get sftp working directory failed, unable to get home dir", slog.Error(err))
|
||||
} else {
|
||||
opts = append(opts, sftp.WithServerWorkingDirectory(homedir))
|
||||
}
|
||||
|
||||
server, err := sftp.NewServer(session, opts...)
|
||||
if err != nil {
|
||||
s.logger.Debug(ctx, "initialize sftp server", slog.Error(err))
|
||||
logger.Debug(ctx, "initialize sftp server", slog.Error(err))
|
||||
return
|
||||
}
|
||||
defer server.Close()
|
||||
@@ -485,7 +569,7 @@ func (s *Server) sftpHandler(session ssh.Session) {
|
||||
_ = session.Exit(0)
|
||||
return
|
||||
}
|
||||
s.logger.Warn(ctx, "sftp server closed with error", slog.Error(err))
|
||||
logger.Warn(ctx, "sftp server closed with error", slog.Error(err))
|
||||
s.metrics.sftpServerErrors.Add(1)
|
||||
_ = session.Exit(1)
|
||||
}
|
||||
|
||||
@@ -63,7 +63,7 @@ func Test_sessionStart_orphan(t *testing.T) {
|
||||
// we don't really care what the error is here. In the larger scenario,
|
||||
// the client has disconnected, so we can't return any error information
|
||||
// to them.
|
||||
_ = s.startPTYSession(sess, "ssh", cmd, ptyInfo, windowSize)
|
||||
_ = s.startPTYSession(logger, sess, "ssh", cmd, ptyInfo, windowSize)
|
||||
}()
|
||||
|
||||
readDone := make(chan struct{})
|
||||
@@ -114,6 +114,11 @@ type testSSHContext struct {
|
||||
context.Context
|
||||
}
|
||||
|
||||
var (
|
||||
_ gliderssh.Context = testSSHContext{}
|
||||
_ ptySession = &testSession{}
|
||||
)
|
||||
|
||||
func newTestSession(ctx context.Context) (toClient *io.PipeReader, fromClient *io.PipeWriter, s ptySession) {
|
||||
toClient, fromPty := io.Pipe()
|
||||
toPty, fromClient := io.Pipe()
|
||||
@@ -144,6 +149,10 @@ func (s *testSession) Write(p []byte) (n int, err error) {
|
||||
return s.fromPty.Write(p)
|
||||
}
|
||||
|
||||
func (*testSession) Signals(_ chan<- gliderssh.Signal) {
|
||||
// Not implemented, but will be called.
|
||||
}
|
||||
|
||||
func (testSSHContext) Lock() {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
@@ -3,8 +3,10 @@
|
||||
package agentssh_test
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"runtime"
|
||||
"strings"
|
||||
@@ -24,6 +26,7 @@ import (
|
||||
"github.com/coder/coder/v2/agent/agentssh"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
@@ -57,8 +60,8 @@ func TestNewServer_ServeClient(t *testing.T) {
|
||||
|
||||
var b bytes.Buffer
|
||||
sess, err := c.NewSession()
|
||||
sess.Stdout = &b
|
||||
require.NoError(t, err)
|
||||
sess.Stdout = &b
|
||||
err = sess.Start("echo hello")
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -139,6 +142,7 @@ func TestNewServer_CloseActiveConnections(t *testing.T) {
|
||||
defer wg.Done()
|
||||
c := sshClient(t, ln.Addr().String())
|
||||
sess, err := c.NewSession()
|
||||
assert.NoError(t, err)
|
||||
sess.Stdin = pty.Input()
|
||||
sess.Stdout = pty.Output()
|
||||
sess.Stderr = pty.Output()
|
||||
@@ -159,6 +163,159 @@ func TestNewServer_CloseActiveConnections(t *testing.T) {
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestNewServer_Signal(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("Stdout", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := context.Background()
|
||||
logger := slogtest.Make(t, nil)
|
||||
s, err := agentssh.NewServer(ctx, logger, prometheus.NewRegistry(), afero.NewMemMapFs(), 0, "")
|
||||
require.NoError(t, err)
|
||||
defer s.Close()
|
||||
|
||||
// The assumption is that these are set before serving SSH connections.
|
||||
s.AgentToken = func() string { return "" }
|
||||
s.Manifest = atomic.NewPointer(&agentsdk.Manifest{})
|
||||
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
err := s.Serve(ln)
|
||||
assert.Error(t, err) // Server is closed.
|
||||
}()
|
||||
defer func() {
|
||||
err := s.Close()
|
||||
require.NoError(t, err)
|
||||
<-done
|
||||
}()
|
||||
|
||||
c := sshClient(t, ln.Addr().String())
|
||||
|
||||
sess, err := c.NewSession()
|
||||
require.NoError(t, err)
|
||||
r, err := sess.StdoutPipe()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Perform multiple sleeps since the interrupt signal doesn't propagate to
|
||||
// the process group, this lets us exit early.
|
||||
sleeps := strings.Repeat("sleep 1 && ", int(testutil.WaitMedium.Seconds()))
|
||||
err = sess.Start(fmt.Sprintf("echo hello && %s echo bye", sleeps))
|
||||
require.NoError(t, err)
|
||||
|
||||
sc := bufio.NewScanner(r)
|
||||
for sc.Scan() {
|
||||
t.Log(sc.Text())
|
||||
if strings.Contains(sc.Text(), "hello") {
|
||||
break
|
||||
}
|
||||
}
|
||||
require.NoError(t, sc.Err())
|
||||
|
||||
err = sess.Signal(ssh.SIGKILL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assumption, signal propagates and the command exists, closing stdout.
|
||||
for sc.Scan() {
|
||||
t.Log(sc.Text())
|
||||
require.NotContains(t, sc.Text(), "bye")
|
||||
}
|
||||
require.NoError(t, sc.Err())
|
||||
|
||||
err = sess.Wait()
|
||||
exitErr := &ssh.ExitError{}
|
||||
require.ErrorAs(t, err, &exitErr)
|
||||
wantCode := 255
|
||||
if runtime.GOOS == "windows" {
|
||||
wantCode = 1
|
||||
}
|
||||
require.Equal(t, wantCode, exitErr.ExitStatus())
|
||||
})
|
||||
t.Run("PTY", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := context.Background()
|
||||
logger := slogtest.Make(t, nil)
|
||||
s, err := agentssh.NewServer(ctx, logger, prometheus.NewRegistry(), afero.NewMemMapFs(), 0, "")
|
||||
require.NoError(t, err)
|
||||
defer s.Close()
|
||||
|
||||
// The assumption is that these are set before serving SSH connections.
|
||||
s.AgentToken = func() string { return "" }
|
||||
s.Manifest = atomic.NewPointer(&agentsdk.Manifest{})
|
||||
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
err := s.Serve(ln)
|
||||
assert.Error(t, err) // Server is closed.
|
||||
}()
|
||||
defer func() {
|
||||
err := s.Close()
|
||||
require.NoError(t, err)
|
||||
<-done
|
||||
}()
|
||||
|
||||
c := sshClient(t, ln.Addr().String())
|
||||
|
||||
pty := ptytest.New(t)
|
||||
|
||||
sess, err := c.NewSession()
|
||||
require.NoError(t, err)
|
||||
r, err := sess.StdoutPipe()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Note, we request pty but don't use ptytest here because we can't
|
||||
// easily test for no text before EOF.
|
||||
sess.Stdin = pty.Input()
|
||||
sess.Stderr = pty.Output()
|
||||
|
||||
err = sess.RequestPty("xterm", 80, 80, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Perform multiple sleeps since the interrupt signal doesn't propagate to
|
||||
// the process group, this lets us exit early.
|
||||
sleeps := strings.Repeat("sleep 1 && ", int(testutil.WaitMedium.Seconds()))
|
||||
err = sess.Start(fmt.Sprintf("echo hello && %s echo bye", sleeps))
|
||||
require.NoError(t, err)
|
||||
|
||||
sc := bufio.NewScanner(r)
|
||||
for sc.Scan() {
|
||||
t.Log(sc.Text())
|
||||
if strings.Contains(sc.Text(), "hello") {
|
||||
break
|
||||
}
|
||||
}
|
||||
require.NoError(t, sc.Err())
|
||||
|
||||
err = sess.Signal(ssh.SIGKILL)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assumption, signal propagates and the command exists, closing stdout.
|
||||
for sc.Scan() {
|
||||
t.Log(sc.Text())
|
||||
require.NotContains(t, sc.Text(), "bye")
|
||||
}
|
||||
require.NoError(t, sc.Err())
|
||||
|
||||
err = sess.Wait()
|
||||
exitErr := &ssh.ExitError{}
|
||||
require.ErrorAs(t, err, &exitErr)
|
||||
wantCode := 255
|
||||
if runtime.GOOS == "windows" {
|
||||
wantCode = 1
|
||||
}
|
||||
require.Equal(t, wantCode, exitErr.ExitStatus())
|
||||
})
|
||||
}
|
||||
|
||||
func sshClient(t *testing.T, addr string) *ssh.Client {
|
||||
conn, err := net.Dial("tcp", addr)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -37,6 +37,7 @@ type forwardedUnixHandler struct {
|
||||
}
|
||||
|
||||
func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server, req *gossh.Request) (bool, []byte) {
|
||||
h.log.Debug(ctx, "handling SSH unix forward")
|
||||
h.Lock()
|
||||
if h.forwards == nil {
|
||||
h.forwards = make(map[string]net.Listener)
|
||||
@@ -47,22 +48,25 @@ func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server,
|
||||
h.log.Warn(ctx, "SSH unix forward request from client with no gossh connection")
|
||||
return false, nil
|
||||
}
|
||||
log := h.log.With(slog.F("remote_addr", conn.RemoteAddr()))
|
||||
|
||||
switch req.Type {
|
||||
case "streamlocal-forward@openssh.com":
|
||||
var reqPayload streamLocalForwardPayload
|
||||
err := gossh.Unmarshal(req.Payload, &reqPayload)
|
||||
if err != nil {
|
||||
h.log.Warn(ctx, "parse streamlocal-forward@openssh.com request payload from client", slog.Error(err))
|
||||
h.log.Warn(ctx, "parse streamlocal-forward@openssh.com request (SSH unix forward) payload from client", slog.Error(err))
|
||||
return false, nil
|
||||
}
|
||||
|
||||
addr := reqPayload.SocketPath
|
||||
log = log.With(slog.F("socket_path", addr))
|
||||
log.Debug(ctx, "request begin SSH unix forward")
|
||||
h.Lock()
|
||||
_, ok := h.forwards[addr]
|
||||
h.Unlock()
|
||||
if ok {
|
||||
h.log.Warn(ctx, "SSH unix forward request for socket path that is already being forwarded (maybe to another client?)",
|
||||
log.Warn(ctx, "SSH unix forward request for socket path that is already being forwarded (maybe to another client?)",
|
||||
slog.F("socket_path", addr),
|
||||
)
|
||||
return false, nil
|
||||
@@ -72,9 +76,8 @@ func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server,
|
||||
parentDir := filepath.Dir(addr)
|
||||
err = os.MkdirAll(parentDir, 0o700)
|
||||
if err != nil {
|
||||
h.log.Warn(ctx, "create parent dir for SSH unix forward request",
|
||||
log.Warn(ctx, "create parent dir for SSH unix forward request",
|
||||
slog.F("parent_dir", parentDir),
|
||||
slog.F("socket_path", addr),
|
||||
slog.Error(err),
|
||||
)
|
||||
return false, nil
|
||||
@@ -82,12 +85,13 @@ func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server,
|
||||
|
||||
ln, err := net.Listen("unix", addr)
|
||||
if err != nil {
|
||||
h.log.Warn(ctx, "listen on Unix socket for SSH unix forward request",
|
||||
log.Warn(ctx, "listen on Unix socket for SSH unix forward request",
|
||||
slog.F("socket_path", addr),
|
||||
slog.Error(err),
|
||||
)
|
||||
return false, nil
|
||||
}
|
||||
log.Debug(ctx, "SSH unix forward listening on socket")
|
||||
|
||||
// The listener needs to successfully start before it can be added to
|
||||
// the map, so we don't have to worry about checking for an existing
|
||||
@@ -97,6 +101,7 @@ func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server,
|
||||
h.Lock()
|
||||
h.forwards[addr] = ln
|
||||
h.Unlock()
|
||||
log.Debug(ctx, "SSH unix forward added to cache")
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
go func() {
|
||||
@@ -110,14 +115,15 @@ func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server,
|
||||
c, err := ln.Accept()
|
||||
if err != nil {
|
||||
if !xerrors.Is(err, net.ErrClosed) {
|
||||
h.log.Warn(ctx, "accept on local Unix socket for SSH unix forward request",
|
||||
slog.F("socket_path", addr),
|
||||
log.Warn(ctx, "accept on local Unix socket for SSH unix forward request",
|
||||
slog.Error(err),
|
||||
)
|
||||
}
|
||||
// closed below
|
||||
log.Debug(ctx, "SSH unix forward listener closed")
|
||||
break
|
||||
}
|
||||
log.Debug(ctx, "accepted SSH unix forward connection")
|
||||
payload := gossh.Marshal(&forwardedStreamLocalPayload{
|
||||
SocketPath: addr,
|
||||
})
|
||||
@@ -125,7 +131,7 @@ func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server,
|
||||
go func() {
|
||||
ch, reqs, err := conn.OpenChannel("forwarded-streamlocal@openssh.com", payload)
|
||||
if err != nil {
|
||||
h.log.Warn(ctx, "open SSH channel to forward Unix connection to client",
|
||||
h.log.Warn(ctx, "open SSH unix forward channel to client",
|
||||
slog.F("socket_path", addr),
|
||||
slog.Error(err),
|
||||
)
|
||||
@@ -143,6 +149,7 @@ func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server,
|
||||
delete(h.forwards, addr)
|
||||
}
|
||||
h.Unlock()
|
||||
log.Debug(ctx, "SSH unix forward listener removed from cache", slog.F("path", addr))
|
||||
_ = ln.Close()
|
||||
}()
|
||||
|
||||
@@ -152,9 +159,10 @@ func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server,
|
||||
var reqPayload streamLocalForwardPayload
|
||||
err := gossh.Unmarshal(req.Payload, &reqPayload)
|
||||
if err != nil {
|
||||
h.log.Warn(ctx, "parse cancel-streamlocal-forward@openssh.com request payload from client", slog.Error(err))
|
||||
h.log.Warn(ctx, "parse cancel-streamlocal-forward@openssh.com (SSH unix forward) request payload from client", slog.Error(err))
|
||||
return false, nil
|
||||
}
|
||||
log.Debug(ctx, "request to cancel SSH unix forward", slog.F("path", reqPayload.SocketPath))
|
||||
h.Lock()
|
||||
ln, ok := h.forwards[reqPayload.SocketPath]
|
||||
h.Unlock()
|
||||
|
||||
@@ -0,0 +1,90 @@
|
||||
package agentssh
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/gliderlabs/ssh"
|
||||
"go.uber.org/atomic"
|
||||
gossh "golang.org/x/crypto/ssh"
|
||||
|
||||
"cdr.dev/slog"
|
||||
)
|
||||
|
||||
// localForwardChannelData is copied from the ssh package.
|
||||
type localForwardChannelData struct {
|
||||
DestAddr string
|
||||
DestPort uint32
|
||||
|
||||
OriginAddr string
|
||||
OriginPort uint32
|
||||
}
|
||||
|
||||
// JetbrainsChannelWatcher is used to track JetBrains port forwarded (Gateway)
|
||||
// channels. If the port forward is something other than JetBrains, this struct
|
||||
// is a noop.
|
||||
type JetbrainsChannelWatcher struct {
|
||||
gossh.NewChannel
|
||||
jetbrainsCounter *atomic.Int64
|
||||
}
|
||||
|
||||
func NewJetbrainsChannelWatcher(ctx ssh.Context, logger slog.Logger, newChannel gossh.NewChannel, counter *atomic.Int64) gossh.NewChannel {
|
||||
d := localForwardChannelData{}
|
||||
if err := gossh.Unmarshal(newChannel.ExtraData(), &d); err != nil {
|
||||
// If the data fails to unmarshal, do nothing.
|
||||
logger.Warn(ctx, "failed to unmarshal port forward data", slog.Error(err))
|
||||
return newChannel
|
||||
}
|
||||
|
||||
// If we do get a port, we should be able to get the matching PID and from
|
||||
// there look up the invocation.
|
||||
cmdline, err := getListeningPortProcessCmdline(d.DestPort)
|
||||
if err != nil {
|
||||
logger.Warn(ctx, "failed to inspect port",
|
||||
slog.F("destination_port", d.DestPort),
|
||||
slog.Error(err))
|
||||
return newChannel
|
||||
}
|
||||
|
||||
// If this is not JetBrains, then we do not need to do anything special. We
|
||||
// attempt to match on something that appears unique to JetBrains software.
|
||||
if !strings.Contains(strings.ToLower(cmdline), strings.ToLower(MagicProcessCmdlineJetBrains)) {
|
||||
return newChannel
|
||||
}
|
||||
|
||||
logger.Debug(ctx, "discovered forwarded JetBrains process",
|
||||
slog.F("destination_port", d.DestPort))
|
||||
|
||||
return &JetbrainsChannelWatcher{
|
||||
NewChannel: newChannel,
|
||||
jetbrainsCounter: counter,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *JetbrainsChannelWatcher) Accept() (gossh.Channel, <-chan *gossh.Request, error) {
|
||||
c, r, err := w.NewChannel.Accept()
|
||||
if err != nil {
|
||||
return c, r, err
|
||||
}
|
||||
w.jetbrainsCounter.Add(1)
|
||||
|
||||
return &ChannelOnClose{
|
||||
Channel: c,
|
||||
done: func() {
|
||||
w.jetbrainsCounter.Add(-1)
|
||||
},
|
||||
}, r, err
|
||||
}
|
||||
|
||||
type ChannelOnClose struct {
|
||||
gossh.Channel
|
||||
// once ensures close only decrements the counter once.
|
||||
// Because close can be called multiple times.
|
||||
once sync.Once
|
||||
done func()
|
||||
}
|
||||
|
||||
func (c *ChannelOnClose) Close() error {
|
||||
c.once.Do(c.done)
|
||||
return c.Channel.Close()
|
||||
}
|
||||
@@ -0,0 +1,37 @@
|
||||
//go:build linux
|
||||
|
||||
package agentssh
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/cakturk/go-netstat/netstat"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
func getListeningPortProcessCmdline(port uint32) (string, error) {
|
||||
tabs, err := netstat.TCPSocks(func(s *netstat.SockTabEntry) bool {
|
||||
return s.LocalAddr != nil && uint32(s.LocalAddr.Port) == port
|
||||
})
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("inspect port %d: %w", port, err)
|
||||
}
|
||||
if len(tabs) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Defensive check.
|
||||
if tabs[0].Process == nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// The process name provided by go-netstat does not include the full command
|
||||
// line so grab that instead.
|
||||
pid := tabs[0].Process.Pid
|
||||
data, err := os.ReadFile(fmt.Sprintf("/proc/%d/cmdline", pid))
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("read /proc/%d/cmdline: %w", pid, err)
|
||||
}
|
||||
return string(data), nil
|
||||
}
|
||||
@@ -0,0 +1,9 @@
|
||||
//go:build !linux
|
||||
|
||||
package agentssh
|
||||
|
||||
func getListeningPortProcessCmdline(uint32) (string, error) {
|
||||
// We are not worrying about other platforms at the moment because Gateway
|
||||
// only supports Linux anyway.
|
||||
return "", nil
|
||||
}
|
||||
@@ -0,0 +1,45 @@
|
||||
//go:build !windows
|
||||
|
||||
package agentssh
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/gliderlabs/ssh"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func osSignalFrom(sig ssh.Signal) os.Signal {
|
||||
switch sig {
|
||||
case ssh.SIGABRT:
|
||||
return unix.SIGABRT
|
||||
case ssh.SIGALRM:
|
||||
return unix.SIGALRM
|
||||
case ssh.SIGFPE:
|
||||
return unix.SIGFPE
|
||||
case ssh.SIGHUP:
|
||||
return unix.SIGHUP
|
||||
case ssh.SIGILL:
|
||||
return unix.SIGILL
|
||||
case ssh.SIGINT:
|
||||
return unix.SIGINT
|
||||
case ssh.SIGKILL:
|
||||
return unix.SIGKILL
|
||||
case ssh.SIGPIPE:
|
||||
return unix.SIGPIPE
|
||||
case ssh.SIGQUIT:
|
||||
return unix.SIGQUIT
|
||||
case ssh.SIGSEGV:
|
||||
return unix.SIGSEGV
|
||||
case ssh.SIGTERM:
|
||||
return unix.SIGTERM
|
||||
case ssh.SIGUSR1:
|
||||
return unix.SIGUSR1
|
||||
case ssh.SIGUSR2:
|
||||
return unix.SIGUSR2
|
||||
|
||||
// Unhandled, use sane fallback.
|
||||
default:
|
||||
return unix.SIGKILL
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,15 @@
|
||||
package agentssh
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/gliderlabs/ssh"
|
||||
)
|
||||
|
||||
func osSignalFrom(sig ssh.Signal) os.Signal {
|
||||
switch sig {
|
||||
// Signals are not supported on Windows.
|
||||
default:
|
||||
return os.Kill
|
||||
}
|
||||
}
|
||||
@@ -24,7 +24,7 @@ func NewClient(t testing.TB,
|
||||
agentID uuid.UUID,
|
||||
manifest agentsdk.Manifest,
|
||||
statsChan chan *agentsdk.Stats,
|
||||
coordinator tailnet.Coordinator,
|
||||
coordinator tailnet.CoordinatorV1,
|
||||
) *Client {
|
||||
if manifest.AgentID == uuid.Nil {
|
||||
manifest.AgentID = agentID
|
||||
@@ -47,7 +47,7 @@ type Client struct {
|
||||
manifest agentsdk.Manifest
|
||||
metadata map[string]agentsdk.Metadata
|
||||
statsChan chan *agentsdk.Stats
|
||||
coordinator tailnet.Coordinator
|
||||
coordinator tailnet.CoordinatorV1
|
||||
LastWorkspaceAgent func()
|
||||
PatchWorkspaceLogs func() error
|
||||
GetServiceBannerFunc func() (codersdk.ServiceBannerConfig, error)
|
||||
|
||||
+18
-5
@@ -26,17 +26,30 @@ func (a *agent) apiHandler() http.Handler {
|
||||
cpy[k] = b
|
||||
}
|
||||
|
||||
lp := &listeningPortsHandler{ignorePorts: cpy}
|
||||
cacheDuration := 1 * time.Second
|
||||
if a.portCacheDuration > 0 {
|
||||
cacheDuration = a.portCacheDuration
|
||||
}
|
||||
|
||||
lp := &listeningPortsHandler{
|
||||
ignorePorts: cpy,
|
||||
cacheDuration: cacheDuration,
|
||||
}
|
||||
r.Get("/api/v0/listening-ports", lp.handler)
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
type listeningPortsHandler struct {
|
||||
mut sync.Mutex
|
||||
ports []codersdk.WorkspaceAgentListeningPort
|
||||
mtime time.Time
|
||||
ignorePorts map[int]string
|
||||
ignorePorts map[int]string
|
||||
cacheDuration time.Duration
|
||||
|
||||
//nolint: unused // used on some but not all platforms
|
||||
mut sync.Mutex
|
||||
//nolint: unused // used on some but not all platforms
|
||||
ports []codersdk.WorkspaceAgentListeningPort
|
||||
//nolint: unused // used on some but not all platforms
|
||||
mtime time.Time
|
||||
}
|
||||
|
||||
// handler returns a list of listening ports. This is tested by coderd's
|
||||
|
||||
@@ -17,6 +17,9 @@ import (
|
||||
type agentMetrics struct {
|
||||
connectionsTotal prometheus.Counter
|
||||
reconnectingPTYErrors *prometheus.CounterVec
|
||||
// startupScriptSeconds is the time in seconds that the start script(s)
|
||||
// took to run. This is reported once per agent.
|
||||
startupScriptSeconds *prometheus.GaugeVec
|
||||
}
|
||||
|
||||
func newAgentMetrics(registerer prometheus.Registerer) *agentMetrics {
|
||||
@@ -35,9 +38,18 @@ func newAgentMetrics(registerer prometheus.Registerer) *agentMetrics {
|
||||
)
|
||||
registerer.MustRegister(reconnectingPTYErrors)
|
||||
|
||||
startupScriptSeconds := prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Namespace: "coderd",
|
||||
Subsystem: "agentstats",
|
||||
Name: "startup_script_seconds",
|
||||
Help: "Amount of time taken to run the startup script in seconds.",
|
||||
}, []string{"success"})
|
||||
registerer.MustRegister(startupScriptSeconds)
|
||||
|
||||
return &agentMetrics{
|
||||
connectionsTotal: connectionsTotal,
|
||||
reconnectingPTYErrors: reconnectingPTYErrors,
|
||||
startupScriptSeconds: startupScriptSeconds,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ func (lp *listeningPortsHandler) getListeningPorts() ([]codersdk.WorkspaceAgentL
|
||||
lp.mut.Lock()
|
||||
defer lp.mut.Unlock()
|
||||
|
||||
if time.Since(lp.mtime) < time.Second {
|
||||
if time.Since(lp.mtime) < lp.cacheDuration {
|
||||
// copy
|
||||
ports := make([]codersdk.WorkspaceAgentListeningPort, len(lp.ports))
|
||||
copy(ports, lp.ports)
|
||||
|
||||
@@ -4,7 +4,7 @@ package agent
|
||||
|
||||
import "github.com/coder/coder/v2/codersdk"
|
||||
|
||||
func (lp *listeningPortsHandler) getListeningPorts() ([]codersdk.WorkspaceAgentListeningPort, error) {
|
||||
func (*listeningPortsHandler) getListeningPorts() ([]codersdk.WorkspaceAgentListeningPort, error) {
|
||||
// Can't scan for ports on non-linux or non-windows_amd64 systems at the
|
||||
// moment. The UI will not show any "no ports found" message to the user, so
|
||||
// the user won't suspect a thing.
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,262 @@
|
||||
syntax = "proto3";
|
||||
option go_package = "github.com/coder/coder/v2/agent/proto";
|
||||
|
||||
package coder.agent.v2;
|
||||
|
||||
import "tailnet/proto/tailnet.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
import "google/protobuf/duration.proto";
|
||||
|
||||
message WorkspaceApp {
|
||||
bytes id = 1;
|
||||
string url = 2;
|
||||
bool external = 3;
|
||||
string slug = 4;
|
||||
string display_name = 5;
|
||||
string command = 6;
|
||||
string icon = 7;
|
||||
bool subdomain = 8;
|
||||
string subdomain_name = 9;
|
||||
|
||||
enum SharingLevel {
|
||||
SHARING_LEVEL_UNSPECIFIED = 0;
|
||||
OWNER = 1;
|
||||
AUTHENTICATED = 2;
|
||||
PUBLIC = 3;
|
||||
}
|
||||
SharingLevel sharing_level = 10;
|
||||
|
||||
message Healthcheck {
|
||||
string url = 1;
|
||||
google.protobuf.Duration interval = 2;
|
||||
int32 threshold = 3;
|
||||
}
|
||||
Healthcheck healthcheck = 11;
|
||||
|
||||
enum Health {
|
||||
HEALTH_UNSPECIFIED = 0;
|
||||
DISABLED = 1;
|
||||
INITIALIZING = 2;
|
||||
HEALTHY = 3;
|
||||
UNHEALTHY = 4;
|
||||
}
|
||||
Health health = 12;
|
||||
}
|
||||
|
||||
message WorkspaceAgentScript {
|
||||
bytes log_source_id = 1;
|
||||
string log_path = 2;
|
||||
string script = 3;
|
||||
string cron = 4;
|
||||
bool run_on_start = 5;
|
||||
bool run_on_stop = 6;
|
||||
bool start_blocks_login = 7;
|
||||
google.protobuf.Duration timeout = 8;
|
||||
}
|
||||
|
||||
message WorkspaceAgentMetadata {
|
||||
message Result {
|
||||
google.protobuf.Timestamp collected_at = 1;
|
||||
int64 age = 2;
|
||||
string value = 3;
|
||||
string error = 4;
|
||||
}
|
||||
Result result = 1;
|
||||
|
||||
message Description {
|
||||
string display_name = 1;
|
||||
string key = 2;
|
||||
string script = 3;
|
||||
google.protobuf.Duration interval = 4;
|
||||
google.protobuf.Duration timeout = 5;
|
||||
}
|
||||
Description description = 2;
|
||||
}
|
||||
|
||||
message Manifest {
|
||||
bytes agent_id = 1;
|
||||
string owner_username = 13;
|
||||
bytes workspace_id = 14;
|
||||
uint32 git_auth_configs = 2;
|
||||
map<string, string> environment_variables = 3;
|
||||
string directory = 4;
|
||||
string vs_code_port_proxy_uri = 5;
|
||||
string motd_path = 6;
|
||||
bool disable_direct_connections = 7;
|
||||
bool derp_force_websockets = 8;
|
||||
|
||||
coder.tailnet.v2.DERPMap derp_map = 9;
|
||||
repeated WorkspaceAgentScript scripts = 10;
|
||||
repeated WorkspaceApp apps = 11;
|
||||
repeated WorkspaceAgentMetadata.Description metadata = 12;
|
||||
}
|
||||
|
||||
message GetManifestRequest {}
|
||||
|
||||
message ServiceBanner {
|
||||
bool enabled = 1;
|
||||
string message = 2;
|
||||
string background_color = 3;
|
||||
}
|
||||
|
||||
message GetServiceBannerRequest {}
|
||||
|
||||
message Stats {
|
||||
// ConnectionsByProto is a count of connections by protocol.
|
||||
map<string, int64> connections_by_proto = 1;
|
||||
// ConnectionCount is the number of connections received by an agent.
|
||||
int64 connection_count = 2;
|
||||
// ConnectionMedianLatencyMS is the median latency of all connections in milliseconds.
|
||||
double connection_median_latency_ms = 3;
|
||||
// RxPackets is the number of received packets.
|
||||
int64 rx_packets = 4;
|
||||
// RxBytes is the number of received bytes.
|
||||
int64 rx_bytes = 5;
|
||||
// TxPackets is the number of transmitted bytes.
|
||||
int64 tx_packets = 6;
|
||||
// TxBytes is the number of transmitted bytes.
|
||||
int64 tx_bytes = 7;
|
||||
|
||||
// SessionCountVSCode is the number of connections received by an agent
|
||||
// that are from our VS Code extension.
|
||||
int64 session_count_vscode = 8;
|
||||
// SessionCountJetBrains is the number of connections received by an agent
|
||||
// that are from our JetBrains extension.
|
||||
int64 session_count_jetbrains = 9;
|
||||
// SessionCountReconnectingPTY is the number of connections received by an agent
|
||||
// that are from the reconnecting web terminal.
|
||||
int64 session_count_reconnecting_pty = 10;
|
||||
// SessionCountSSH is the number of connections received by an agent
|
||||
// that are normal, non-tagged SSH sessions.
|
||||
int64 session_count_ssh = 11;
|
||||
|
||||
message Metric {
|
||||
string name = 1;
|
||||
|
||||
enum Type {
|
||||
TYPE_UNSPECIFIED = 0;
|
||||
COUNTER = 1;
|
||||
GAUGE = 2;
|
||||
}
|
||||
Type type = 2;
|
||||
|
||||
double value = 3;
|
||||
|
||||
message Label {
|
||||
string name = 1;
|
||||
string value = 2;
|
||||
}
|
||||
repeated Label labels = 4;
|
||||
}
|
||||
repeated Metric metrics = 12;
|
||||
}
|
||||
|
||||
message UpdateStatsRequest{
|
||||
Stats stats = 1;
|
||||
}
|
||||
|
||||
message UpdateStatsResponse {
|
||||
google.protobuf.Duration report_interval = 1;
|
||||
}
|
||||
|
||||
message Lifecycle {
|
||||
enum State {
|
||||
STATE_UNSPECIFIED = 0;
|
||||
CREATED = 1;
|
||||
STARTING = 2;
|
||||
START_TIMEOUT = 3;
|
||||
START_ERROR = 4;
|
||||
READY = 5;
|
||||
SHUTTING_DOWN = 6;
|
||||
SHUTDOWN_TIMEOUT = 7;
|
||||
SHUTDOWN_ERROR = 8;
|
||||
OFF = 9;
|
||||
}
|
||||
State state = 1;
|
||||
google.protobuf.Timestamp changed_at = 2;
|
||||
}
|
||||
|
||||
message UpdateLifecycleRequest {
|
||||
Lifecycle lifecycle = 1;
|
||||
}
|
||||
|
||||
enum AppHealth {
|
||||
APP_HEALTH_UNSPECIFIED = 0;
|
||||
DISABLED = 1;
|
||||
INITIALIZING = 2;
|
||||
HEALTHY = 3;
|
||||
UNHEALTHY = 4;
|
||||
}
|
||||
|
||||
message BatchUpdateAppHealthRequest {
|
||||
message HealthUpdate {
|
||||
bytes id = 1;
|
||||
AppHealth health = 2;
|
||||
}
|
||||
repeated HealthUpdate updates = 1;
|
||||
}
|
||||
|
||||
message BatchUpdateAppHealthResponse {}
|
||||
|
||||
message Startup {
|
||||
string version = 1;
|
||||
string expanded_directory = 2;
|
||||
enum Subsystem {
|
||||
SUBSYSTEM_UNSPECIFIED = 0;
|
||||
ENVBOX = 1;
|
||||
ENVBUILDER = 2;
|
||||
EXECTRACE = 3;
|
||||
}
|
||||
repeated Subsystem subsystems = 3;
|
||||
}
|
||||
|
||||
message UpdateStartupRequest{
|
||||
Startup startup = 1;
|
||||
}
|
||||
|
||||
message Metadata {
|
||||
string key = 1;
|
||||
WorkspaceAgentMetadata.Result result = 2;
|
||||
}
|
||||
|
||||
message BatchUpdateMetadataRequest {
|
||||
repeated Metadata metadata = 2;
|
||||
}
|
||||
|
||||
message BatchUpdateMetadataResponse {}
|
||||
|
||||
message Log {
|
||||
google.protobuf.Timestamp created_at = 1;
|
||||
string output = 2;
|
||||
|
||||
enum Level {
|
||||
LEVEL_UNSPECIFIED = 0;
|
||||
TRACE = 1;
|
||||
DEBUG = 2;
|
||||
INFO = 3;
|
||||
WARN = 4;
|
||||
ERROR = 5;
|
||||
}
|
||||
Level level = 3;
|
||||
}
|
||||
|
||||
message BatchCreateLogsRequest {
|
||||
bytes log_source_id = 1;
|
||||
repeated Log logs = 2;
|
||||
}
|
||||
|
||||
message BatchCreateLogsResponse {}
|
||||
|
||||
service Agent {
|
||||
rpc GetManifest(GetManifestRequest) returns (Manifest);
|
||||
rpc GetServiceBanner(GetServiceBannerRequest) returns (ServiceBanner);
|
||||
rpc UpdateStats(UpdateStatsRequest) returns (UpdateStatsResponse);
|
||||
rpc UpdateLifecycle(UpdateLifecycleRequest) returns (Lifecycle);
|
||||
rpc BatchUpdateAppHealths(BatchUpdateAppHealthRequest) returns (BatchUpdateAppHealthResponse);
|
||||
rpc UpdateStartup(UpdateStartupRequest) returns (Startup);
|
||||
rpc BatchUpdateMetadata(BatchUpdateMetadataRequest) returns (BatchUpdateMetadataResponse);
|
||||
rpc BatchCreateLogs(BatchCreateLogsRequest) returns (BatchCreateLogsResponse);
|
||||
|
||||
rpc StreamDERPMaps(tailnet.v2.StreamDERPMapsRequest) returns (stream tailnet.v2.DERPMap);
|
||||
rpc CoordinateTailnet(stream tailnet.v2.CoordinateRequest) returns (stream tailnet.v2.CoordinateResponse);
|
||||
}
|
||||
@@ -0,0 +1,539 @@
|
||||
// Code generated by protoc-gen-go-drpc. DO NOT EDIT.
|
||||
// protoc-gen-go-drpc version: v0.0.33
|
||||
// source: agent/proto/agent.proto
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
context "context"
|
||||
errors "errors"
|
||||
proto1 "github.com/coder/coder/v2/tailnet/proto"
|
||||
protojson "google.golang.org/protobuf/encoding/protojson"
|
||||
proto "google.golang.org/protobuf/proto"
|
||||
drpc "storj.io/drpc"
|
||||
drpcerr "storj.io/drpc/drpcerr"
|
||||
)
|
||||
|
||||
type drpcEncoding_File_agent_proto_agent_proto struct{}
|
||||
|
||||
func (drpcEncoding_File_agent_proto_agent_proto) Marshal(msg drpc.Message) ([]byte, error) {
|
||||
return proto.Marshal(msg.(proto.Message))
|
||||
}
|
||||
|
||||
func (drpcEncoding_File_agent_proto_agent_proto) MarshalAppend(buf []byte, msg drpc.Message) ([]byte, error) {
|
||||
return proto.MarshalOptions{}.MarshalAppend(buf, msg.(proto.Message))
|
||||
}
|
||||
|
||||
func (drpcEncoding_File_agent_proto_agent_proto) Unmarshal(buf []byte, msg drpc.Message) error {
|
||||
return proto.Unmarshal(buf, msg.(proto.Message))
|
||||
}
|
||||
|
||||
func (drpcEncoding_File_agent_proto_agent_proto) JSONMarshal(msg drpc.Message) ([]byte, error) {
|
||||
return protojson.Marshal(msg.(proto.Message))
|
||||
}
|
||||
|
||||
func (drpcEncoding_File_agent_proto_agent_proto) JSONUnmarshal(buf []byte, msg drpc.Message) error {
|
||||
return protojson.Unmarshal(buf, msg.(proto.Message))
|
||||
}
|
||||
|
||||
type DRPCAgentClient interface {
|
||||
DRPCConn() drpc.Conn
|
||||
|
||||
GetManifest(ctx context.Context, in *GetManifestRequest) (*Manifest, error)
|
||||
GetServiceBanner(ctx context.Context, in *GetServiceBannerRequest) (*ServiceBanner, error)
|
||||
UpdateStats(ctx context.Context, in *UpdateStatsRequest) (*UpdateStatsResponse, error)
|
||||
UpdateLifecycle(ctx context.Context, in *UpdateLifecycleRequest) (*Lifecycle, error)
|
||||
BatchUpdateAppHealths(ctx context.Context, in *BatchUpdateAppHealthRequest) (*BatchUpdateAppHealthResponse, error)
|
||||
UpdateStartup(ctx context.Context, in *UpdateStartupRequest) (*Startup, error)
|
||||
BatchUpdateMetadata(ctx context.Context, in *BatchUpdateMetadataRequest) (*BatchUpdateMetadataResponse, error)
|
||||
BatchCreateLogs(ctx context.Context, in *BatchCreateLogsRequest) (*BatchCreateLogsResponse, error)
|
||||
StreamDERPMaps(ctx context.Context, in *proto1.StreamDERPMapsRequest) (DRPCAgent_StreamDERPMapsClient, error)
|
||||
CoordinateTailnet(ctx context.Context) (DRPCAgent_CoordinateTailnetClient, error)
|
||||
}
|
||||
|
||||
type drpcAgentClient struct {
|
||||
cc drpc.Conn
|
||||
}
|
||||
|
||||
func NewDRPCAgentClient(cc drpc.Conn) DRPCAgentClient {
|
||||
return &drpcAgentClient{cc}
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) DRPCConn() drpc.Conn { return c.cc }
|
||||
|
||||
func (c *drpcAgentClient) GetManifest(ctx context.Context, in *GetManifestRequest) (*Manifest, error) {
|
||||
out := new(Manifest)
|
||||
err := c.cc.Invoke(ctx, "/coder.agent.v2.Agent/GetManifest", drpcEncoding_File_agent_proto_agent_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) GetServiceBanner(ctx context.Context, in *GetServiceBannerRequest) (*ServiceBanner, error) {
|
||||
out := new(ServiceBanner)
|
||||
err := c.cc.Invoke(ctx, "/coder.agent.v2.Agent/GetServiceBanner", drpcEncoding_File_agent_proto_agent_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) UpdateStats(ctx context.Context, in *UpdateStatsRequest) (*UpdateStatsResponse, error) {
|
||||
out := new(UpdateStatsResponse)
|
||||
err := c.cc.Invoke(ctx, "/coder.agent.v2.Agent/UpdateStats", drpcEncoding_File_agent_proto_agent_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) UpdateLifecycle(ctx context.Context, in *UpdateLifecycleRequest) (*Lifecycle, error) {
|
||||
out := new(Lifecycle)
|
||||
err := c.cc.Invoke(ctx, "/coder.agent.v2.Agent/UpdateLifecycle", drpcEncoding_File_agent_proto_agent_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) BatchUpdateAppHealths(ctx context.Context, in *BatchUpdateAppHealthRequest) (*BatchUpdateAppHealthResponse, error) {
|
||||
out := new(BatchUpdateAppHealthResponse)
|
||||
err := c.cc.Invoke(ctx, "/coder.agent.v2.Agent/BatchUpdateAppHealths", drpcEncoding_File_agent_proto_agent_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) UpdateStartup(ctx context.Context, in *UpdateStartupRequest) (*Startup, error) {
|
||||
out := new(Startup)
|
||||
err := c.cc.Invoke(ctx, "/coder.agent.v2.Agent/UpdateStartup", drpcEncoding_File_agent_proto_agent_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) BatchUpdateMetadata(ctx context.Context, in *BatchUpdateMetadataRequest) (*BatchUpdateMetadataResponse, error) {
|
||||
out := new(BatchUpdateMetadataResponse)
|
||||
err := c.cc.Invoke(ctx, "/coder.agent.v2.Agent/BatchUpdateMetadata", drpcEncoding_File_agent_proto_agent_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) BatchCreateLogs(ctx context.Context, in *BatchCreateLogsRequest) (*BatchCreateLogsResponse, error) {
|
||||
out := new(BatchCreateLogsResponse)
|
||||
err := c.cc.Invoke(ctx, "/coder.agent.v2.Agent/BatchCreateLogs", drpcEncoding_File_agent_proto_agent_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) StreamDERPMaps(ctx context.Context, in *proto1.StreamDERPMapsRequest) (DRPCAgent_StreamDERPMapsClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, "/coder.agent.v2.Agent/StreamDERPMaps", drpcEncoding_File_agent_proto_agent_proto{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &drpcAgent_StreamDERPMapsClient{stream}
|
||||
if err := x.MsgSend(in, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := x.CloseSend(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type DRPCAgent_StreamDERPMapsClient interface {
|
||||
drpc.Stream
|
||||
Recv() (*proto1.DERPMap, error)
|
||||
}
|
||||
|
||||
type drpcAgent_StreamDERPMapsClient struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgent_StreamDERPMapsClient) GetStream() drpc.Stream {
|
||||
return x.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgent_StreamDERPMapsClient) Recv() (*proto1.DERPMap, error) {
|
||||
m := new(proto1.DERPMap)
|
||||
if err := x.MsgRecv(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (x *drpcAgent_StreamDERPMapsClient) RecvMsg(m *proto1.DERPMap) error {
|
||||
return x.MsgRecv(m, drpcEncoding_File_agent_proto_agent_proto{})
|
||||
}
|
||||
|
||||
func (c *drpcAgentClient) CoordinateTailnet(ctx context.Context) (DRPCAgent_CoordinateTailnetClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, "/coder.agent.v2.Agent/CoordinateTailnet", drpcEncoding_File_agent_proto_agent_proto{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &drpcAgent_CoordinateTailnetClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type DRPCAgent_CoordinateTailnetClient interface {
|
||||
drpc.Stream
|
||||
Send(*proto1.CoordinateRequest) error
|
||||
Recv() (*proto1.CoordinateResponse, error)
|
||||
}
|
||||
|
||||
type drpcAgent_CoordinateTailnetClient struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgent_CoordinateTailnetClient) GetStream() drpc.Stream {
|
||||
return x.Stream
|
||||
}
|
||||
|
||||
func (x *drpcAgent_CoordinateTailnetClient) Send(m *proto1.CoordinateRequest) error {
|
||||
return x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{})
|
||||
}
|
||||
|
||||
func (x *drpcAgent_CoordinateTailnetClient) Recv() (*proto1.CoordinateResponse, error) {
|
||||
m := new(proto1.CoordinateResponse)
|
||||
if err := x.MsgRecv(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (x *drpcAgent_CoordinateTailnetClient) RecvMsg(m *proto1.CoordinateResponse) error {
|
||||
return x.MsgRecv(m, drpcEncoding_File_agent_proto_agent_proto{})
|
||||
}
|
||||
|
||||
type DRPCAgentServer interface {
|
||||
GetManifest(context.Context, *GetManifestRequest) (*Manifest, error)
|
||||
GetServiceBanner(context.Context, *GetServiceBannerRequest) (*ServiceBanner, error)
|
||||
UpdateStats(context.Context, *UpdateStatsRequest) (*UpdateStatsResponse, error)
|
||||
UpdateLifecycle(context.Context, *UpdateLifecycleRequest) (*Lifecycle, error)
|
||||
BatchUpdateAppHealths(context.Context, *BatchUpdateAppHealthRequest) (*BatchUpdateAppHealthResponse, error)
|
||||
UpdateStartup(context.Context, *UpdateStartupRequest) (*Startup, error)
|
||||
BatchUpdateMetadata(context.Context, *BatchUpdateMetadataRequest) (*BatchUpdateMetadataResponse, error)
|
||||
BatchCreateLogs(context.Context, *BatchCreateLogsRequest) (*BatchCreateLogsResponse, error)
|
||||
StreamDERPMaps(*proto1.StreamDERPMapsRequest, DRPCAgent_StreamDERPMapsStream) error
|
||||
CoordinateTailnet(DRPCAgent_CoordinateTailnetStream) error
|
||||
}
|
||||
|
||||
type DRPCAgentUnimplementedServer struct{}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) GetManifest(context.Context, *GetManifestRequest) (*Manifest, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) GetServiceBanner(context.Context, *GetServiceBannerRequest) (*ServiceBanner, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) UpdateStats(context.Context, *UpdateStatsRequest) (*UpdateStatsResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) UpdateLifecycle(context.Context, *UpdateLifecycleRequest) (*Lifecycle, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) BatchUpdateAppHealths(context.Context, *BatchUpdateAppHealthRequest) (*BatchUpdateAppHealthResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) UpdateStartup(context.Context, *UpdateStartupRequest) (*Startup, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) BatchUpdateMetadata(context.Context, *BatchUpdateMetadataRequest) (*BatchUpdateMetadataResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) BatchCreateLogs(context.Context, *BatchCreateLogsRequest) (*BatchCreateLogsResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) StreamDERPMaps(*proto1.StreamDERPMapsRequest, DRPCAgent_StreamDERPMapsStream) error {
|
||||
return drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCAgentUnimplementedServer) CoordinateTailnet(DRPCAgent_CoordinateTailnetStream) error {
|
||||
return drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
// DRPCAgentDescription describes the Agent service to the drpc mux: how many
// RPCs it exposes and, per index, the route, encoding, and receiver adapter.
// NOTE(review): machine-generated dispatch table — keep method indices and
// route strings exactly in sync with the generator output.
type DRPCAgentDescription struct{}

// NumMethods reports the number of RPCs in the Agent service.
func (DRPCAgentDescription) NumMethods() int { return 10 }

// Method returns, for RPC index n: the route string, the proto encoding, a
// receiver closure that casts srv/inputs to concrete types and invokes the
// handler, the method value itself, and true. Unknown indices return false.
func (DRPCAgentDescription) Method(n int) (string, drpc.Encoding, drpc.Receiver, interface{}, bool) {
	switch n {
	case 0:
		return "/coder.agent.v2.Agent/GetManifest", drpcEncoding_File_agent_proto_agent_proto{},
			func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
				return srv.(DRPCAgentServer).
					GetManifest(
						ctx,
						in1.(*GetManifestRequest),
					)
			}, DRPCAgentServer.GetManifest, true
	case 1:
		return "/coder.agent.v2.Agent/GetServiceBanner", drpcEncoding_File_agent_proto_agent_proto{},
			func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
				return srv.(DRPCAgentServer).
					GetServiceBanner(
						ctx,
						in1.(*GetServiceBannerRequest),
					)
			}, DRPCAgentServer.GetServiceBanner, true
	case 2:
		return "/coder.agent.v2.Agent/UpdateStats", drpcEncoding_File_agent_proto_agent_proto{},
			func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
				return srv.(DRPCAgentServer).
					UpdateStats(
						ctx,
						in1.(*UpdateStatsRequest),
					)
			}, DRPCAgentServer.UpdateStats, true
	case 3:
		return "/coder.agent.v2.Agent/UpdateLifecycle", drpcEncoding_File_agent_proto_agent_proto{},
			func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
				return srv.(DRPCAgentServer).
					UpdateLifecycle(
						ctx,
						in1.(*UpdateLifecycleRequest),
					)
			}, DRPCAgentServer.UpdateLifecycle, true
	case 4:
		return "/coder.agent.v2.Agent/BatchUpdateAppHealths", drpcEncoding_File_agent_proto_agent_proto{},
			func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
				return srv.(DRPCAgentServer).
					BatchUpdateAppHealths(
						ctx,
						in1.(*BatchUpdateAppHealthRequest),
					)
			}, DRPCAgentServer.BatchUpdateAppHealths, true
	case 5:
		return "/coder.agent.v2.Agent/UpdateStartup", drpcEncoding_File_agent_proto_agent_proto{},
			func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
				return srv.(DRPCAgentServer).
					UpdateStartup(
						ctx,
						in1.(*UpdateStartupRequest),
					)
			}, DRPCAgentServer.UpdateStartup, true
	case 6:
		return "/coder.agent.v2.Agent/BatchUpdateMetadata", drpcEncoding_File_agent_proto_agent_proto{},
			func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
				return srv.(DRPCAgentServer).
					BatchUpdateMetadata(
						ctx,
						in1.(*BatchUpdateMetadataRequest),
					)
			}, DRPCAgentServer.BatchUpdateMetadata, true
	case 7:
		return "/coder.agent.v2.Agent/BatchCreateLogs", drpcEncoding_File_agent_proto_agent_proto{},
			func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
				return srv.(DRPCAgentServer).
					BatchCreateLogs(
						ctx,
						in1.(*BatchCreateLogsRequest),
					)
			}, DRPCAgentServer.BatchCreateLogs, true
	case 8:
		// Server-streaming RPC: the handler writes to the stream (in2) and the
		// receiver returns a nil message alongside the handler's error.
		return "/coder.agent.v2.Agent/StreamDERPMaps", drpcEncoding_File_agent_proto_agent_proto{},
			func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
				return nil, srv.(DRPCAgentServer).
					StreamDERPMaps(
						in1.(*proto1.StreamDERPMapsRequest),
						&drpcAgent_StreamDERPMapsStream{in2.(drpc.Stream)},
					)
			}, DRPCAgentServer.StreamDERPMaps, true
	case 9:
		// Streaming RPC with no request message: the stream itself arrives as in1.
		return "/coder.agent.v2.Agent/CoordinateTailnet", drpcEncoding_File_agent_proto_agent_proto{},
			func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
				return nil, srv.(DRPCAgentServer).
					CoordinateTailnet(
						&drpcAgent_CoordinateTailnetStream{in1.(drpc.Stream)},
					)
			}, DRPCAgentServer.CoordinateTailnet, true
	default:
		return "", nil, nil, nil, false
	}
}
|
||||
|
||||
// DRPCRegisterAgent registers impl as the Agent service implementation on the
// given drpc mux, using the generated service description for routing.
func DRPCRegisterAgent(mux drpc.Mux, impl DRPCAgentServer) error {
	return mux.Register(impl, DRPCAgentDescription{})
}
|
||||
|
||||
// NOTE(review): machine-generated stream adapters for the Agent service's
// unary RPCs. Each RPC gets an interface (used by callers) and a thin struct
// wrapping drpc.Stream whose SendAndClose writes the single response message
// and then closes the send side of the stream.

// DRPCAgent_GetManifestStream is the server stream for the GetManifest RPC.
type DRPCAgent_GetManifestStream interface {
	drpc.Stream
	SendAndClose(*Manifest) error
}

type drpcAgent_GetManifestStream struct {
	drpc.Stream
}

// SendAndClose sends the final Manifest and closes the send side.
func (x *drpcAgent_GetManifestStream) SendAndClose(m *Manifest) error {
	if err := x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
		return err
	}
	return x.CloseSend()
}

// DRPCAgent_GetServiceBannerStream is the server stream for the GetServiceBanner RPC.
type DRPCAgent_GetServiceBannerStream interface {
	drpc.Stream
	SendAndClose(*ServiceBanner) error
}

type drpcAgent_GetServiceBannerStream struct {
	drpc.Stream
}

// SendAndClose sends the final ServiceBanner and closes the send side.
func (x *drpcAgent_GetServiceBannerStream) SendAndClose(m *ServiceBanner) error {
	if err := x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
		return err
	}
	return x.CloseSend()
}

// DRPCAgent_UpdateStatsStream is the server stream for the UpdateStats RPC.
type DRPCAgent_UpdateStatsStream interface {
	drpc.Stream
	SendAndClose(*UpdateStatsResponse) error
}

type drpcAgent_UpdateStatsStream struct {
	drpc.Stream
}

// SendAndClose sends the final UpdateStatsResponse and closes the send side.
func (x *drpcAgent_UpdateStatsStream) SendAndClose(m *UpdateStatsResponse) error {
	if err := x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
		return err
	}
	return x.CloseSend()
}

// DRPCAgent_UpdateLifecycleStream is the server stream for the UpdateLifecycle RPC.
type DRPCAgent_UpdateLifecycleStream interface {
	drpc.Stream
	SendAndClose(*Lifecycle) error
}

type drpcAgent_UpdateLifecycleStream struct {
	drpc.Stream
}

// SendAndClose sends the final Lifecycle and closes the send side.
func (x *drpcAgent_UpdateLifecycleStream) SendAndClose(m *Lifecycle) error {
	if err := x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
		return err
	}
	return x.CloseSend()
}

// DRPCAgent_BatchUpdateAppHealthsStream is the server stream for the BatchUpdateAppHealths RPC.
type DRPCAgent_BatchUpdateAppHealthsStream interface {
	drpc.Stream
	SendAndClose(*BatchUpdateAppHealthResponse) error
}

type drpcAgent_BatchUpdateAppHealthsStream struct {
	drpc.Stream
}

// SendAndClose sends the final BatchUpdateAppHealthResponse and closes the send side.
func (x *drpcAgent_BatchUpdateAppHealthsStream) SendAndClose(m *BatchUpdateAppHealthResponse) error {
	if err := x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
		return err
	}
	return x.CloseSend()
}

// DRPCAgent_UpdateStartupStream is the server stream for the UpdateStartup RPC.
type DRPCAgent_UpdateStartupStream interface {
	drpc.Stream
	SendAndClose(*Startup) error
}

type drpcAgent_UpdateStartupStream struct {
	drpc.Stream
}

// SendAndClose sends the final Startup and closes the send side.
func (x *drpcAgent_UpdateStartupStream) SendAndClose(m *Startup) error {
	if err := x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
		return err
	}
	return x.CloseSend()
}

// DRPCAgent_BatchUpdateMetadataStream is the server stream for the BatchUpdateMetadata RPC.
type DRPCAgent_BatchUpdateMetadataStream interface {
	drpc.Stream
	SendAndClose(*BatchUpdateMetadataResponse) error
}

type drpcAgent_BatchUpdateMetadataStream struct {
	drpc.Stream
}

// SendAndClose sends the final BatchUpdateMetadataResponse and closes the send side.
func (x *drpcAgent_BatchUpdateMetadataStream) SendAndClose(m *BatchUpdateMetadataResponse) error {
	if err := x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
		return err
	}
	return x.CloseSend()
}

// DRPCAgent_BatchCreateLogsStream is the server stream for the BatchCreateLogs RPC.
type DRPCAgent_BatchCreateLogsStream interface {
	drpc.Stream
	SendAndClose(*BatchCreateLogsResponse) error
}

type drpcAgent_BatchCreateLogsStream struct {
	drpc.Stream
}

// SendAndClose sends the final BatchCreateLogsResponse and closes the send side.
func (x *drpcAgent_BatchCreateLogsStream) SendAndClose(m *BatchCreateLogsResponse) error {
	if err := x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
		return err
	}
	return x.CloseSend()
}
|
||||
|
||||
// NOTE(review): machine-generated stream adapters for the Agent service's
// streaming RPCs.

// DRPCAgent_StreamDERPMapsStream is the server-streaming stream for
// StreamDERPMaps; the handler pushes DERP map updates with Send.
type DRPCAgent_StreamDERPMapsStream interface {
	drpc.Stream
	Send(*proto1.DERPMap) error
}

type drpcAgent_StreamDERPMapsStream struct {
	drpc.Stream
}

// Send writes one DERPMap message to the stream using the generated encoding.
func (x *drpcAgent_StreamDERPMapsStream) Send(m *proto1.DERPMap) error {
	return x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{})
}

// DRPCAgent_CoordinateTailnetStream is the bidirectional stream for
// CoordinateTailnet: the handler sends CoordinateResponse messages and
// receives CoordinateRequest messages.
type DRPCAgent_CoordinateTailnetStream interface {
	drpc.Stream
	Send(*proto1.CoordinateResponse) error
	Recv() (*proto1.CoordinateRequest, error)
}

type drpcAgent_CoordinateTailnetStream struct {
	drpc.Stream
}

// Send writes one CoordinateResponse message to the stream.
func (x *drpcAgent_CoordinateTailnetStream) Send(m *proto1.CoordinateResponse) error {
	return x.MsgSend(m, drpcEncoding_File_agent_proto_agent_proto{})
}

// Recv allocates and reads the next CoordinateRequest from the stream.
func (x *drpcAgent_CoordinateTailnetStream) Recv() (*proto1.CoordinateRequest, error) {
	m := new(proto1.CoordinateRequest)
	if err := x.MsgRecv(m, drpcEncoding_File_agent_proto_agent_proto{}); err != nil {
		return nil, err
	}
	return m, nil
}

// RecvMsg reads the next CoordinateRequest into a caller-supplied message.
func (x *drpcAgent_CoordinateTailnetStream) RecvMsg(m *proto1.CoordinateRequest) error {
	return x.MsgRecv(m, drpcEncoding_File_agent_proto_agent_proto{})
}
|
||||
@@ -0,0 +1,106 @@
|
||||
package proto
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
func SDKAgentMetadataDescriptionsFromProto(descriptions []*WorkspaceAgentMetadata_Description) []codersdk.WorkspaceAgentMetadataDescription {
|
||||
ret := make([]codersdk.WorkspaceAgentMetadataDescription, len(descriptions))
|
||||
for i, description := range descriptions {
|
||||
ret[i] = SDKAgentMetadataDescriptionFromProto(description)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func SDKAgentMetadataDescriptionFromProto(description *WorkspaceAgentMetadata_Description) codersdk.WorkspaceAgentMetadataDescription {
|
||||
return codersdk.WorkspaceAgentMetadataDescription{
|
||||
DisplayName: description.DisplayName,
|
||||
Key: description.Key,
|
||||
Script: description.Script,
|
||||
Interval: int64(description.Interval.AsDuration()),
|
||||
Timeout: int64(description.Timeout.AsDuration()),
|
||||
}
|
||||
}
|
||||
|
||||
func SDKAgentScriptsFromProto(protoScripts []*WorkspaceAgentScript) ([]codersdk.WorkspaceAgentScript, error) {
|
||||
ret := make([]codersdk.WorkspaceAgentScript, len(protoScripts))
|
||||
for i, protoScript := range protoScripts {
|
||||
app, err := SDKAgentScriptFromProto(protoScript)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse script %v: %w", i, err)
|
||||
}
|
||||
ret[i] = app
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func SDKAgentScriptFromProto(protoScript *WorkspaceAgentScript) (codersdk.WorkspaceAgentScript, error) {
|
||||
id, err := uuid.FromBytes(protoScript.LogSourceId)
|
||||
if err != nil {
|
||||
return codersdk.WorkspaceAgentScript{}, xerrors.Errorf("parse id: %w", err)
|
||||
}
|
||||
|
||||
return codersdk.WorkspaceAgentScript{
|
||||
LogSourceID: id,
|
||||
LogPath: protoScript.LogPath,
|
||||
Script: protoScript.Script,
|
||||
Cron: protoScript.Cron,
|
||||
RunOnStart: protoScript.RunOnStart,
|
||||
RunOnStop: protoScript.RunOnStop,
|
||||
StartBlocksLogin: protoScript.StartBlocksLogin,
|
||||
Timeout: protoScript.Timeout.AsDuration(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func SDKAppsFromProto(protoApps []*WorkspaceApp) ([]codersdk.WorkspaceApp, error) {
|
||||
ret := make([]codersdk.WorkspaceApp, len(protoApps))
|
||||
for i, protoApp := range protoApps {
|
||||
app, err := SDKAppFromProto(protoApp)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse app %v (%q): %w", i, protoApp.Slug, err)
|
||||
}
|
||||
ret[i] = app
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func SDKAppFromProto(protoApp *WorkspaceApp) (codersdk.WorkspaceApp, error) {
|
||||
id, err := uuid.FromBytes(protoApp.Id)
|
||||
if err != nil {
|
||||
return codersdk.WorkspaceApp{}, xerrors.Errorf("parse id: %w", err)
|
||||
}
|
||||
|
||||
var sharingLevel codersdk.WorkspaceAppSharingLevel = codersdk.WorkspaceAppSharingLevel(strings.ToLower(protoApp.SharingLevel.String()))
|
||||
if _, ok := codersdk.MapWorkspaceAppSharingLevels[sharingLevel]; !ok {
|
||||
return codersdk.WorkspaceApp{}, xerrors.Errorf("unknown app sharing level: %v (%q)", protoApp.SharingLevel, protoApp.SharingLevel.String())
|
||||
}
|
||||
|
||||
var health codersdk.WorkspaceAppHealth = codersdk.WorkspaceAppHealth(strings.ToLower(protoApp.Health.String()))
|
||||
if _, ok := codersdk.MapWorkspaceAppHealths[health]; !ok {
|
||||
return codersdk.WorkspaceApp{}, xerrors.Errorf("unknown app health: %v (%q)", protoApp.Health, protoApp.Health.String())
|
||||
}
|
||||
|
||||
return codersdk.WorkspaceApp{
|
||||
ID: id,
|
||||
URL: protoApp.Url,
|
||||
External: protoApp.External,
|
||||
Slug: protoApp.Slug,
|
||||
DisplayName: protoApp.DisplayName,
|
||||
Command: protoApp.Command,
|
||||
Icon: protoApp.Icon,
|
||||
Subdomain: protoApp.Subdomain,
|
||||
SubdomainName: protoApp.SubdomainName,
|
||||
SharingLevel: sharingLevel,
|
||||
Healthcheck: codersdk.Healthcheck{
|
||||
URL: protoApp.Healthcheck.Url,
|
||||
Interval: int32(protoApp.Healthcheck.Interval.AsDuration().Seconds()),
|
||||
Threshold: protoApp.Healthcheck.Threshold,
|
||||
},
|
||||
Health: health,
|
||||
}, nil
|
||||
}
|
||||
@@ -196,8 +196,8 @@ func (s *ptyState) waitForStateOrContext(ctx context.Context, state State) (Stat
|
||||
// until EOF or an error writing to ptty or reading from conn.
|
||||
func readConnLoop(ctx context.Context, conn net.Conn, ptty pty.PTYCmd, metrics *prometheus.CounterVec, logger slog.Logger) {
|
||||
decoder := json.NewDecoder(conn)
|
||||
var req codersdk.ReconnectingPTYRequest
|
||||
for {
|
||||
var req codersdk.ReconnectingPTYRequest
|
||||
err := decoder.Decode(&req)
|
||||
if xerrors.Is(err, io.EOF) {
|
||||
return
|
||||
|
||||
@@ -13,6 +13,10 @@ import (
|
||||
func Get(username string) (string, error) {
|
||||
// This command will output "UserShell: /bin/zsh" if successful, we
|
||||
// can ignore the error since we have fallback behavior.
|
||||
if !filepath.IsLocal(username) {
|
||||
return "", xerrors.Errorf("username is nonlocal path: %s", username)
|
||||
}
|
||||
//nolint: gosec // input checked above
|
||||
out, _ := exec.Command("dscl", ".", "-read", filepath.Join("/Users", username), "UserShell").Output()
|
||||
s, ok := strings.CutPrefix(string(out), "UserShell: ")
|
||||
if ok {
|
||||
|
||||
+6
-6
@@ -8,7 +8,6 @@ import (
|
||||
"net/http/pprof"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
@@ -117,7 +116,7 @@ func (r *RootCmd) workspaceAgent() *clibase.Cmd {
|
||||
defer logWriter.Close()
|
||||
|
||||
sinks = append(sinks, sloghuman.Sink(logWriter))
|
||||
logger := slog.Make(sinks...).Leveled(slog.LevelDebug)
|
||||
logger := inv.Logger.AppendSinks(sinks...).Leveled(slog.LevelDebug)
|
||||
|
||||
logger.Info(ctx, "spawning reaper process")
|
||||
// Do not start a reaper on the child process. It's important
|
||||
@@ -144,7 +143,7 @@ func (r *RootCmd) workspaceAgent() *clibase.Cmd {
|
||||
// Note that we don't want to handle these signals in the
|
||||
// process that runs as PID 1, that's why we do this after
|
||||
// the reaper forked.
|
||||
ctx, stopNotify := signal.NotifyContext(ctx, InterruptSignals...)
|
||||
ctx, stopNotify := inv.SignalNotifyContext(ctx, InterruptSignals...)
|
||||
defer stopNotify()
|
||||
|
||||
// DumpHandler does signal handling, so we call it after the
|
||||
@@ -154,13 +153,14 @@ func (r *RootCmd) workspaceAgent() *clibase.Cmd {
|
||||
logWriter := &lumberjackWriteCloseFixer{w: &lumberjack.Logger{
|
||||
Filename: filepath.Join(logDir, "coder-agent.log"),
|
||||
MaxSize: 5, // MB
|
||||
// Without this, rotated logs will never be deleted.
|
||||
MaxBackups: 1,
|
||||
// Per customer incident on November 17th, 2023, its helpful
|
||||
// to have the log of the last few restarts to debug a failing agent.
|
||||
MaxBackups: 10,
|
||||
}}
|
||||
defer logWriter.Close()
|
||||
|
||||
sinks = append(sinks, sloghuman.Sink(logWriter))
|
||||
logger := slog.Make(sinks...).Leveled(slog.LevelDebug)
|
||||
logger := inv.Logger.AppendSinks(sinks...).Leveled(slog.LevelDebug)
|
||||
|
||||
version := buildinfo.Version()
|
||||
logger.Info(ctx, "agent is starting now",
|
||||
|
||||
+63
-125
@@ -16,10 +16,11 @@ import (
|
||||
"github.com/coder/coder/v2/agent"
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/provisioner/echo"
|
||||
"github.com/coder/coder/v2/provisionersdk/proto"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
func TestWorkspaceAgent(t *testing.T) {
|
||||
@@ -28,83 +29,62 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
t.Run("LogDirectory", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
authToken := uuid.NewString()
|
||||
client := coderdtest.New(t, &coderdtest.Options{
|
||||
IncludeProvisionerDaemon: true,
|
||||
})
|
||||
client, db := coderdtest.NewWithDatabase(t, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: echo.ProvisionApplyWithAgent(authToken),
|
||||
})
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
r := dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).
|
||||
WithAgent().
|
||||
Do()
|
||||
logDir := t.TempDir()
|
||||
inv, _ := clitest.New(t,
|
||||
"agent",
|
||||
"--auth", "token",
|
||||
"--agent-token", authToken,
|
||||
"--agent-token", r.AgentToken,
|
||||
"--agent-url", client.URL.String(),
|
||||
"--log-dir", logDir,
|
||||
)
|
||||
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
|
||||
clitest.Start(t, inv)
|
||||
ctx := inv.Context()
|
||||
pty.ExpectMatchContext(ctx, "agent is starting now")
|
||||
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, r.Workspace.ID)
|
||||
|
||||
info, err := os.Stat(filepath.Join(logDir, "coder-agent.log"))
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, info.Size(), int64(0))
|
||||
require.Eventually(t, func() bool {
|
||||
info, err := os.Stat(filepath.Join(logDir, "coder-agent.log"))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return info.Size() > 0
|
||||
}, testutil.WaitLong, testutil.IntervalMedium)
|
||||
})
|
||||
|
||||
t.Run("Azure", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
instanceID := "instanceidentifier"
|
||||
certificates, metadataClient := coderdtest.NewAzureInstanceIdentity(t, instanceID)
|
||||
client := coderdtest.New(t, &coderdtest.Options{
|
||||
AzureCertificates: certificates,
|
||||
IncludeProvisionerDaemon: true,
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
AzureCertificates: certificates,
|
||||
})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: []*proto.Response{{
|
||||
Type: &proto.Response_Apply{
|
||||
Apply: &proto.ApplyComplete{
|
||||
Resources: []*proto.Resource{{
|
||||
Name: "somename",
|
||||
Type: "someinstance",
|
||||
Agents: []*proto.Agent{{
|
||||
Auth: &proto.Agent_InstanceId{
|
||||
InstanceId: instanceID,
|
||||
},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
})
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
r := dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).WithAgent(func(agents []*proto.Agent) []*proto.Agent {
|
||||
agents[0].Auth = &proto.Agent_InstanceId{InstanceId: instanceID}
|
||||
return agents
|
||||
}).Do()
|
||||
|
||||
inv, _ := clitest.New(t, "agent", "--auth", "azure-instance-identity", "--agent-url", client.URL.String())
|
||||
inv = inv.WithContext(
|
||||
//nolint:revive,staticcheck
|
||||
context.WithValue(inv.Context(), "azure-client", metadataClient),
|
||||
)
|
||||
|
||||
ctx := inv.Context()
|
||||
clitest.Start(t, inv)
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
workspace, err := client.Workspace(ctx, workspace.ID)
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, r.Workspace.ID)
|
||||
workspace, err := client.Workspace(ctx, r.Workspace.ID)
|
||||
require.NoError(t, err)
|
||||
resources := workspace.LatestBuild.Resources
|
||||
if assert.NotEmpty(t, workspace.LatestBuild.Resources) && assert.NotEmpty(t, resources[0].Agents) {
|
||||
@@ -120,43 +100,28 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
t.Parallel()
|
||||
instanceID := "instanceidentifier"
|
||||
certificates, metadataClient := coderdtest.NewAWSInstanceIdentity(t, instanceID)
|
||||
client := coderdtest.New(t, &coderdtest.Options{
|
||||
AWSCertificates: certificates,
|
||||
IncludeProvisionerDaemon: true,
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
AWSCertificates: certificates,
|
||||
})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: []*proto.Response{{
|
||||
Type: &proto.Response_Apply{
|
||||
Apply: &proto.ApplyComplete{
|
||||
Resources: []*proto.Resource{{
|
||||
Name: "somename",
|
||||
Type: "someinstance",
|
||||
Agents: []*proto.Agent{{
|
||||
Auth: &proto.Agent_InstanceId{
|
||||
InstanceId: instanceID,
|
||||
},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
})
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
r := dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).WithAgent(func(agents []*proto.Agent) []*proto.Agent {
|
||||
agents[0].Auth = &proto.Agent_InstanceId{InstanceId: instanceID}
|
||||
return agents
|
||||
}).Do()
|
||||
|
||||
inv, _ := clitest.New(t, "agent", "--auth", "aws-instance-identity", "--agent-url", client.URL.String())
|
||||
inv = inv.WithContext(
|
||||
//nolint:revive,staticcheck
|
||||
context.WithValue(inv.Context(), "aws-client", metadataClient),
|
||||
)
|
||||
|
||||
clitest.Start(t, inv)
|
||||
ctx := inv.Context()
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
workspace, err := client.Workspace(ctx, workspace.ID)
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, r.Workspace.ID)
|
||||
workspace, err := client.Workspace(ctx, r.Workspace.ID)
|
||||
require.NoError(t, err)
|
||||
resources := workspace.LatestBuild.Resources
|
||||
if assert.NotEmpty(t, resources) && assert.NotEmpty(t, resources[0].Agents) {
|
||||
@@ -172,38 +137,22 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
t.Parallel()
|
||||
instanceID := "instanceidentifier"
|
||||
validator, metadataClient := coderdtest.NewGoogleInstanceIdentity(t, instanceID, false)
|
||||
client := coderdtest.New(t, &coderdtest.Options{
|
||||
GoogleTokenValidator: validator,
|
||||
IncludeProvisionerDaemon: true,
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
GoogleTokenValidator: validator,
|
||||
})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: []*proto.Response{{
|
||||
Type: &proto.Response_Apply{
|
||||
Apply: &proto.ApplyComplete{
|
||||
Resources: []*proto.Resource{{
|
||||
Name: "somename",
|
||||
Type: "someinstance",
|
||||
Agents: []*proto.Agent{{
|
||||
Auth: &proto.Agent_InstanceId{
|
||||
InstanceId: instanceID,
|
||||
},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
})
|
||||
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, member, owner.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
member, memberUser := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
r := dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
OrganizationID: owner.OrganizationID,
|
||||
OwnerID: memberUser.ID,
|
||||
}).WithAgent(func(agents []*proto.Agent) []*proto.Agent {
|
||||
agents[0].Auth = &proto.Agent_InstanceId{InstanceId: instanceID}
|
||||
return agents
|
||||
}).Do()
|
||||
|
||||
inv, cfg := clitest.New(t, "agent", "--auth", "google-instance-identity", "--agent-url", client.URL.String())
|
||||
ptytest.New(t).Attach(inv)
|
||||
clitest.SetupConfig(t, member, cfg)
|
||||
|
||||
clitest.Start(t,
|
||||
inv.WithContext(
|
||||
//nolint:revive,staticcheck
|
||||
@@ -212,9 +161,8 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
)
|
||||
|
||||
ctx := inv.Context()
|
||||
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
workspace, err := client.Workspace(ctx, workspace.ID)
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, r.Workspace.ID)
|
||||
workspace, err := client.Workspace(ctx, r.Workspace.ID)
|
||||
require.NoError(t, err)
|
||||
resources := workspace.LatestBuild.Resources
|
||||
if assert.NotEmpty(t, resources) && assert.NotEmpty(t, resources[0].Agents) {
|
||||
@@ -244,37 +192,27 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
t.Run("PostStartup", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
authToken := uuid.NewString()
|
||||
client := coderdtest.New(t, &coderdtest.Options{
|
||||
IncludeProvisionerDaemon: true,
|
||||
})
|
||||
client, db := coderdtest.NewWithDatabase(t, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: echo.ProvisionApplyWithAgent(authToken),
|
||||
})
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
r := dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).WithAgent().Do()
|
||||
|
||||
logDir := t.TempDir()
|
||||
inv, _ := clitest.New(t,
|
||||
"agent",
|
||||
"--auth", "token",
|
||||
"--agent-token", authToken,
|
||||
"--agent-token", r.AgentToken,
|
||||
"--agent-url", client.URL.String(),
|
||||
"--log-dir", logDir,
|
||||
)
|
||||
// Set the subsystems for the agent.
|
||||
inv.Environ.Set(agent.EnvAgentSubsystem, fmt.Sprintf("%s,%s", codersdk.AgentSubsystemExectrace, codersdk.AgentSubsystemEnvbox))
|
||||
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
|
||||
clitest.Start(t, inv)
|
||||
pty.ExpectMatchContext(inv.Context(), "agent is starting now")
|
||||
|
||||
resources := coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
resources := coderdtest.AwaitWorkspaceAgents(t, client, r.Workspace.ID)
|
||||
require.Len(t, resources, 1)
|
||||
require.Len(t, resources[0].Agents, 1)
|
||||
require.Len(t, resources[0].Agents[0].Subsystems, 2)
|
||||
|
||||
@@ -0,0 +1,58 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/clibase"
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
func (r *RootCmd) autoupdate() *clibase.Cmd {
|
||||
client := new(codersdk.Client)
|
||||
cmd := &clibase.Cmd{
|
||||
Annotations: workspaceCommand,
|
||||
Use: "autoupdate <workspace> <always|never>",
|
||||
Short: "Toggle auto-update policy for a workspace",
|
||||
Middleware: clibase.Chain(
|
||||
clibase.RequireNArgs(2),
|
||||
r.InitClient(client),
|
||||
),
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
policy := strings.ToLower(inv.Args[1])
|
||||
err := validateAutoUpdatePolicy(policy)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("validate policy: %w", err)
|
||||
}
|
||||
|
||||
workspace, err := namedWorkspace(inv.Context(), client, inv.Args[0])
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get workspace: %w", err)
|
||||
}
|
||||
|
||||
err = client.UpdateWorkspaceAutomaticUpdates(inv.Context(), workspace.ID, codersdk.UpdateWorkspaceAutomaticUpdatesRequest{
|
||||
AutomaticUpdates: codersdk.AutomaticUpdates(policy),
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("update workspace automatic updates policy: %w", err)
|
||||
}
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "Updated workspace %q auto-update policy to %q\n", workspace.Name, policy)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Options = append(cmd.Options, cliui.SkipPromptOption())
|
||||
return cmd
|
||||
}
|
||||
|
||||
func validateAutoUpdatePolicy(arg string) error {
|
||||
switch codersdk.AutomaticUpdates(arg) {
|
||||
case codersdk.AutomaticUpdatesAlways, codersdk.AutomaticUpdatesNever:
|
||||
return nil
|
||||
default:
|
||||
return xerrors.Errorf("invalid option %q must be either of %q or %q", arg, codersdk.AutomaticUpdatesAlways, codersdk.AutomaticUpdatesNever)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,79 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
// TestAutoUpdate exercises the "autoupdate" CLI command end-to-end against a
// test coderd: the happy path flips a workspace's policy, and the error path
// rejects malformed arguments.
func TestAutoUpdate(t *testing.T) {
	t.Parallel()

	t.Run("OK", func(t *testing.T) {
		t.Parallel()

		// Build a real workspace owned by a non-admin member so the command
		// runs with ordinary permissions.
		client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
		owner := coderdtest.CreateFirstUser(t, client)
		member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
		version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil)
		coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
		template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
		workspace := coderdtest.CreateWorkspace(t, member, owner.OrganizationID, template.ID)
		coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
		// New workspaces start with the "never" policy.
		require.Equal(t, codersdk.AutomaticUpdatesNever, workspace.AutomaticUpdates)

		expectedPolicy := codersdk.AutomaticUpdatesAlways
		inv, root := clitest.New(t, "autoupdate", workspace.Name, string(expectedPolicy))
		clitest.SetupConfig(t, member, root)
		var buf bytes.Buffer
		inv.Stdout = &buf
		err := inv.Run()
		require.NoError(t, err)
		// The command confirms the change on stdout...
		require.Contains(t, buf.String(), fmt.Sprintf("Updated workspace %q auto-update policy to %q", workspace.Name, expectedPolicy))

		// ...and the policy is persisted server-side.
		workspace = coderdtest.MustWorkspace(t, client, workspace.ID)
		require.Equal(t, expectedPolicy, workspace.AutomaticUpdates)
	})

	t.Run("InvalidArgs", func(t *testing.T) {
		type testcase struct {
			Name          string
			Args          []string
			ErrorContains string
		}

		cases := []testcase{
			{
				// Missing policy argument trips RequireNArgs(2).
				Name:          "NoPolicy",
				Args:          []string{"autoupdate", "ws"},
				ErrorContains: "wanted 2 args but got 1",
			},
			{
				// Unrecognized policy trips validateAutoUpdatePolicy.
				Name:          "InvalidPolicy",
				Args:          []string{"autoupdate", "ws", "sometimes"},
				ErrorContains: `invalid option "sometimes" must be either of`,
			},
		}

		for _, c := range cases {
			c := c // capture range variable for the parallel subtest
			t.Run(c.Name, func(t *testing.T) {
				t.Parallel()
				client := coderdtest.New(t, nil)
				_ = coderdtest.CreateFirstUser(t, client)

				inv, root := clitest.New(t, c.Args...)
				clitest.SetupConfig(t, client, root)
				err := inv.Run()
				require.Error(t, err)
				require.Contains(t, err.Error(), c.ErrorContains)
			})
		}
	})
}
|
||||
@@ -7,9 +7,13 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"testing"
|
||||
"unicode"
|
||||
|
||||
"cdr.dev/slog"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"golang.org/x/exp/slices"
|
||||
"golang.org/x/xerrors"
|
||||
@@ -168,6 +172,7 @@ func (c *Cmd) Invoke(args ...string) *Invocation {
|
||||
Stdout: io.Discard,
|
||||
Stderr: io.Discard,
|
||||
Stdin: strings.NewReader(""),
|
||||
Logger: slog.Make(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -183,6 +188,11 @@ type Invocation struct {
|
||||
Stdout io.Writer
|
||||
Stderr io.Writer
|
||||
Stdin io.Reader
|
||||
Logger slog.Logger
|
||||
Net Net
|
||||
|
||||
// testing
|
||||
signalNotifyContext func(parent context.Context, signals ...os.Signal) (ctx context.Context, stop context.CancelFunc)
|
||||
}
|
||||
|
||||
// WithOS returns the invocation as a main package, filling in the invocation's unset
|
||||
@@ -194,6 +204,36 @@ func (inv *Invocation) WithOS() *Invocation {
|
||||
i.Stdin = os.Stdin
|
||||
i.Args = os.Args[1:]
|
||||
i.Environ = ParseEnviron(os.Environ(), "")
|
||||
i.Net = osNet{}
|
||||
})
|
||||
}
|
||||
|
||||
// WithTestSignalNotifyContext allows overriding the default implementation of SignalNotifyContext.
|
||||
// This should only be used in testing.
|
||||
func (inv *Invocation) WithTestSignalNotifyContext(
|
||||
_ testing.TB, // ensure we only call this from tests
|
||||
f func(parent context.Context, signals ...os.Signal) (ctx context.Context, stop context.CancelFunc),
|
||||
) *Invocation {
|
||||
return inv.with(func(i *Invocation) {
|
||||
i.signalNotifyContext = f
|
||||
})
|
||||
}
|
||||
|
||||
// SignalNotifyContext is equivalent to signal.NotifyContext, but supports being overridden in
|
||||
// tests.
|
||||
func (inv *Invocation) SignalNotifyContext(parent context.Context, signals ...os.Signal) (ctx context.Context, stop context.CancelFunc) {
|
||||
if inv.signalNotifyContext == nil {
|
||||
return signal.NotifyContext(parent, signals...)
|
||||
}
|
||||
return inv.signalNotifyContext(parent, signals...)
|
||||
}
|
||||
|
||||
func (inv *Invocation) WithTestParsedFlags(
|
||||
_ testing.TB, // ensure we only call this from tests
|
||||
parsedFlags *pflag.FlagSet,
|
||||
) *Invocation {
|
||||
return inv.with(func(i *Invocation) {
|
||||
i.parsedFlags = parsedFlags
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -0,0 +1,50 @@
|
||||
package clibase
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strconv"
|
||||
|
||||
"github.com/pion/udp"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// Net abstracts CLI commands interacting with the operating system networking.
|
||||
//
|
||||
// At present, it covers opening local listening sockets, since doing this
|
||||
// in testing is a challenge without flakes, since it's hard to pick a port we
|
||||
// know a priori will be free.
|
||||
type Net interface {
|
||||
// Listen has the same semantics as `net.Listen` but also supports `udp`
|
||||
Listen(network, address string) (net.Listener, error)
|
||||
}
|
||||
|
||||
// osNet is an implementation that call the real OS for networking.
|
||||
type osNet struct{}
|
||||
|
||||
func (osNet) Listen(network, address string) (net.Listener, error) {
|
||||
switch network {
|
||||
case "tcp", "tcp4", "tcp6", "unix", "unixpacket":
|
||||
return net.Listen(network, address)
|
||||
case "udp":
|
||||
host, port, err := net.SplitHostPort(address)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("split %q: %w", address, err)
|
||||
}
|
||||
|
||||
var portInt int
|
||||
portInt, err = strconv.Atoi(port)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse port %v from %q as int: %w", port, address, err)
|
||||
}
|
||||
|
||||
// Use pion here so that we get a stream-style net.Conn listener, instead
|
||||
// of a packet-oriented connection that can read and write to multiple
|
||||
// addresses.
|
||||
return udp.Listen(network, &net.UDPAddr{
|
||||
IP: net.ParseIP(host),
|
||||
Port: portInt,
|
||||
})
|
||||
default:
|
||||
return nil, xerrors.Errorf("unknown listen network %q", network)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,211 @@
|
||||
package clilog
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"cdr.dev/slog/sloggers/sloghuman"
|
||||
"cdr.dev/slog/sloggers/slogjson"
|
||||
"cdr.dev/slog/sloggers/slogstackdriver"
|
||||
"github.com/coder/coder/v2/cli/clibase"
|
||||
"github.com/coder/coder/v2/coderd/tracing"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
type (
|
||||
Option func(*Builder)
|
||||
Builder struct {
|
||||
Filter []string
|
||||
Human string
|
||||
JSON string
|
||||
Stackdriver string
|
||||
Trace bool
|
||||
Verbose bool
|
||||
}
|
||||
)
|
||||
|
||||
func New(opts ...Option) *Builder {
|
||||
b := &Builder{}
|
||||
for _, opt := range opts {
|
||||
opt(b)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func WithFilter(filters ...string) Option {
|
||||
return func(b *Builder) {
|
||||
b.Filter = filters
|
||||
}
|
||||
}
|
||||
|
||||
func WithHuman(loc string) Option {
|
||||
return func(b *Builder) {
|
||||
b.Human = loc
|
||||
}
|
||||
}
|
||||
|
||||
func WithJSON(loc string) Option {
|
||||
return func(b *Builder) {
|
||||
b.JSON = loc
|
||||
}
|
||||
}
|
||||
|
||||
func WithStackdriver(loc string) Option {
|
||||
return func(b *Builder) {
|
||||
b.Stackdriver = loc
|
||||
}
|
||||
}
|
||||
|
||||
func WithTrace() Option {
|
||||
return func(b *Builder) {
|
||||
b.Trace = true
|
||||
}
|
||||
}
|
||||
|
||||
func WithVerbose() Option {
|
||||
return func(b *Builder) {
|
||||
b.Verbose = true
|
||||
}
|
||||
}
|
||||
|
||||
func FromDeploymentValues(vals *codersdk.DeploymentValues) Option {
|
||||
return func(b *Builder) {
|
||||
b.Filter = vals.Logging.Filter.Value()
|
||||
b.Human = vals.Logging.Human.Value()
|
||||
b.JSON = vals.Logging.JSON.Value()
|
||||
b.Stackdriver = vals.Logging.Stackdriver.Value()
|
||||
b.Trace = vals.Trace.Enable.Value()
|
||||
b.Verbose = vals.Verbose.Value()
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Builder) Build(inv *clibase.Invocation) (log slog.Logger, closeLog func(), err error) {
|
||||
var (
|
||||
sinks = []slog.Sink{}
|
||||
closers = []func() error{}
|
||||
)
|
||||
defer func() {
|
||||
if err != nil {
|
||||
for _, closer := range closers {
|
||||
_ = closer()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
noopClose := func() {}
|
||||
|
||||
addSinkIfProvided := func(sinkFn func(io.Writer) slog.Sink, loc string) error {
|
||||
switch loc {
|
||||
case "":
|
||||
|
||||
case "/dev/stdout":
|
||||
sinks = append(sinks, sinkFn(inv.Stdout))
|
||||
|
||||
case "/dev/stderr":
|
||||
sinks = append(sinks, sinkFn(inv.Stderr))
|
||||
|
||||
default:
|
||||
fi, err := os.OpenFile(loc, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o644)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("open log file %q: %w", loc, err)
|
||||
}
|
||||
closers = append(closers, fi.Close)
|
||||
sinks = append(sinks, sinkFn(fi))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
err = addSinkIfProvided(sloghuman.Sink, b.Human)
|
||||
if err != nil {
|
||||
return slog.Logger{}, noopClose, xerrors.Errorf("add human sink: %w", err)
|
||||
}
|
||||
err = addSinkIfProvided(slogjson.Sink, b.JSON)
|
||||
if err != nil {
|
||||
return slog.Logger{}, noopClose, xerrors.Errorf("add json sink: %w", err)
|
||||
}
|
||||
err = addSinkIfProvided(slogstackdriver.Sink, b.Stackdriver)
|
||||
if err != nil {
|
||||
return slog.Logger{}, noopClose, xerrors.Errorf("add stackdriver sink: %w", err)
|
||||
}
|
||||
|
||||
if b.Trace {
|
||||
sinks = append(sinks, tracing.SlogSink{})
|
||||
}
|
||||
|
||||
// User should log to null device if they don't want logs.
|
||||
if len(sinks) == 0 {
|
||||
return slog.Logger{}, noopClose, xerrors.New("no loggers provided, use /dev/null to disable logging")
|
||||
}
|
||||
|
||||
filter := &debugFilterSink{next: sinks}
|
||||
|
||||
err = filter.compile(b.Filter)
|
||||
if err != nil {
|
||||
return slog.Logger{}, noopClose, xerrors.Errorf("compile filters: %w", err)
|
||||
}
|
||||
|
||||
level := slog.LevelInfo
|
||||
// Debug logging is always enabled if a filter is present.
|
||||
if b.Verbose || filter.re != nil {
|
||||
level = slog.LevelDebug
|
||||
}
|
||||
|
||||
return inv.Logger.AppendSinks(filter).Leveled(level), func() {
|
||||
for _, closer := range closers {
|
||||
_ = closer()
|
||||
}
|
||||
}, nil
|
||||
}
|
||||
|
||||
var _ slog.Sink = &debugFilterSink{}
|
||||
|
||||
type debugFilterSink struct {
|
||||
next []slog.Sink
|
||||
re *regexp.Regexp
|
||||
}
|
||||
|
||||
func (f *debugFilterSink) compile(res []string) error {
|
||||
if len(res) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var reb strings.Builder
|
||||
for i, re := range res {
|
||||
_, _ = fmt.Fprintf(&reb, "(%s)", re)
|
||||
if i != len(res)-1 {
|
||||
_, _ = reb.WriteRune('|')
|
||||
}
|
||||
}
|
||||
|
||||
re, err := regexp.Compile(reb.String())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("compile regex: %w", err)
|
||||
}
|
||||
f.re = re
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *debugFilterSink) LogEntry(ctx context.Context, ent slog.SinkEntry) {
|
||||
if ent.Level == slog.LevelDebug {
|
||||
logName := strings.Join(ent.LoggerNames, ".")
|
||||
if f.re != nil && !f.re.MatchString(logName) && !f.re.MatchString(ent.Message) {
|
||||
return
|
||||
}
|
||||
}
|
||||
for _, sink := range f.next {
|
||||
sink.LogEntry(ctx, ent)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *debugFilterSink) Sync() {
|
||||
for _, sink := range f.next {
|
||||
sink.Sync()
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,243 @@
|
||||
package clilog_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/coder/coder/v2/cli/clibase"
|
||||
"github.com/coder/coder/v2/cli/clilog"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestBuilder(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("NoConfiguration", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cmd := &clibase.Cmd{
|
||||
Use: "test",
|
||||
Handler: testHandler(t),
|
||||
}
|
||||
err := cmd.Invoke().Run()
|
||||
require.ErrorContains(t, err, "no loggers provided, use /dev/null to disable logging")
|
||||
})
|
||||
|
||||
t.Run("Verbose", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tempFile := filepath.Join(t.TempDir(), "test.log")
|
||||
cmd := &clibase.Cmd{
|
||||
Use: "test",
|
||||
Handler: testHandler(t,
|
||||
clilog.WithHuman(tempFile),
|
||||
clilog.WithVerbose(),
|
||||
),
|
||||
}
|
||||
err := cmd.Invoke().Run()
|
||||
require.NoError(t, err)
|
||||
assertLogs(t, tempFile, debugLog, infoLog, warnLog, filterLog)
|
||||
})
|
||||
|
||||
t.Run("WithFilter", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tempFile := filepath.Join(t.TempDir(), "test.log")
|
||||
cmd := &clibase.Cmd{
|
||||
Use: "test",
|
||||
Handler: testHandler(t,
|
||||
clilog.WithHuman(tempFile),
|
||||
// clilog.WithVerbose(), // implicit
|
||||
clilog.WithFilter("important debug message"),
|
||||
),
|
||||
}
|
||||
err := cmd.Invoke().Run()
|
||||
require.NoError(t, err)
|
||||
assertLogs(t, tempFile, infoLog, warnLog, filterLog)
|
||||
})
|
||||
|
||||
t.Run("WithHuman", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tempFile := filepath.Join(t.TempDir(), "test.log")
|
||||
cmd := &clibase.Cmd{
|
||||
Use: "test",
|
||||
Handler: testHandler(t, clilog.WithHuman(tempFile)),
|
||||
}
|
||||
err := cmd.Invoke().Run()
|
||||
require.NoError(t, err)
|
||||
assertLogs(t, tempFile, infoLog, warnLog)
|
||||
})
|
||||
|
||||
t.Run("WithJSON", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tempFile := filepath.Join(t.TempDir(), "test.log")
|
||||
cmd := &clibase.Cmd{
|
||||
Use: "test",
|
||||
Handler: testHandler(t, clilog.WithJSON(tempFile), clilog.WithVerbose()),
|
||||
}
|
||||
err := cmd.Invoke().Run()
|
||||
require.NoError(t, err)
|
||||
assertLogsJSON(t, tempFile, debug, debugLog, info, infoLog, warn, warnLog, debug, filterLog)
|
||||
})
|
||||
|
||||
t.Run("FromDeploymentValues", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("Defaults", func(t *testing.T) {
|
||||
stdoutPath := filepath.Join(t.TempDir(), "stdout")
|
||||
stderrPath := filepath.Join(t.TempDir(), "stderr")
|
||||
|
||||
stdout, err := os.OpenFile(stdoutPath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o644)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = stdout.Close() })
|
||||
|
||||
stderr, err := os.OpenFile(stderrPath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o644)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = stderr.Close() })
|
||||
|
||||
// Use the default deployment values.
|
||||
dv := coderdtest.DeploymentValues(t)
|
||||
cmd := &clibase.Cmd{
|
||||
Use: "test",
|
||||
Handler: testHandler(t, clilog.FromDeploymentValues(dv)),
|
||||
}
|
||||
inv := cmd.Invoke()
|
||||
inv.Stdout = stdout
|
||||
inv.Stderr = stderr
|
||||
err = inv.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
assertLogs(t, stdoutPath, "")
|
||||
assertLogs(t, stderrPath, infoLog, warnLog)
|
||||
})
|
||||
|
||||
t.Run("Override", func(t *testing.T) {
|
||||
tempFile := filepath.Join(t.TempDir(), "test.log")
|
||||
tempJSON := filepath.Join(t.TempDir(), "test.json")
|
||||
dv := &codersdk.DeploymentValues{
|
||||
Logging: codersdk.LoggingConfig{
|
||||
Filter: []string{"foo", "baz"},
|
||||
Human: clibase.String(tempFile),
|
||||
JSON: clibase.String(tempJSON),
|
||||
},
|
||||
Verbose: true,
|
||||
Trace: codersdk.TraceConfig{
|
||||
Enable: true,
|
||||
},
|
||||
}
|
||||
cmd := &clibase.Cmd{
|
||||
Use: "test",
|
||||
Handler: testHandler(t, clilog.FromDeploymentValues(dv)),
|
||||
}
|
||||
err := cmd.Invoke().Run()
|
||||
require.NoError(t, err)
|
||||
assertLogs(t, tempFile, infoLog, warnLog)
|
||||
assertLogsJSON(t, tempJSON, info, infoLog, warn, warnLog)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("NotFound", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tempFile := filepath.Join(t.TempDir(), "doesnotexist", "test.log")
|
||||
cmd := &clibase.Cmd{
|
||||
Use: "test",
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
logger, closeLog, err := clilog.New(
|
||||
clilog.WithFilter("foo", "baz"),
|
||||
clilog.WithHuman(tempFile),
|
||||
clilog.WithVerbose(),
|
||||
).Build(inv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closeLog()
|
||||
logger.Error(inv.Context(), "you will never see this")
|
||||
return nil
|
||||
},
|
||||
}
|
||||
err := cmd.Invoke().Run()
|
||||
require.ErrorIs(t, err, fs.ErrNotExist)
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
debug = "DEBUG"
|
||||
info = "INFO"
|
||||
warn = "WARN"
|
||||
debugLog = "this is a debug message"
|
||||
infoLog = "this is an info message"
|
||||
warnLog = "this is a warning message"
|
||||
filterLog = "this is an important debug message you want to see"
|
||||
)
|
||||
|
||||
func testHandler(t testing.TB, opts ...clilog.Option) clibase.HandlerFunc {
|
||||
t.Helper()
|
||||
|
||||
return func(inv *clibase.Invocation) error {
|
||||
logger, closeLog, err := clilog.New(opts...).Build(inv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closeLog()
|
||||
logger.Debug(inv.Context(), debugLog)
|
||||
logger.Info(inv.Context(), infoLog)
|
||||
logger.Warn(inv.Context(), warnLog)
|
||||
logger.Debug(inv.Context(), filterLog)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func assertLogs(t testing.TB, path string, expected ...string) {
|
||||
t.Helper()
|
||||
|
||||
data, err := os.ReadFile(path)
|
||||
require.NoError(t, err)
|
||||
|
||||
logs := strings.Split(strings.TrimSpace(string(data)), "\n")
|
||||
if !assert.Len(t, logs, len(expected)) {
|
||||
t.Logf(string(data))
|
||||
t.FailNow()
|
||||
}
|
||||
for i, log := range logs {
|
||||
require.Contains(t, log, expected[i])
|
||||
}
|
||||
}
|
||||
|
||||
func assertLogsJSON(t testing.TB, path string, levelExpected ...string) {
|
||||
t.Helper()
|
||||
|
||||
data, err := os.ReadFile(path)
|
||||
require.NoError(t, err)
|
||||
|
||||
if len(levelExpected)%2 != 0 {
|
||||
t.Errorf("levelExpected must be a list of level-message pairs")
|
||||
return
|
||||
}
|
||||
|
||||
logs := strings.Split(strings.TrimSpace(string(data)), "\n")
|
||||
if !assert.Len(t, logs, len(levelExpected)/2) {
|
||||
t.Logf(string(data))
|
||||
t.FailNow()
|
||||
}
|
||||
for i, log := range logs {
|
||||
var entry struct {
|
||||
Level string `json:"level"`
|
||||
Message string `json:"msg"`
|
||||
}
|
||||
err := json.NewDecoder(strings.NewReader(log)).Decode(&entry)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, levelExpected[2*i], entry.Level)
|
||||
require.Equal(t, levelExpected[2*i+1], entry.Message)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,2 @@
|
||||
// Package clilog provides a fluent API for configuring structured logging.
|
||||
package clilog
|
||||
@@ -44,6 +44,13 @@ const (
|
||||
cgroupV2MemoryStat = "/sys/fs/cgroup/memory.stat"
|
||||
)
|
||||
|
||||
const (
|
||||
// 9223372036854771712 is the highest positive signed 64-bit integer (263-1),
|
||||
// rounded down to multiples of 4096 (2^12), the most common page size on x86 systems.
|
||||
// This is used by docker to indicate no memory limit.
|
||||
UnlimitedMemory int64 = 9223372036854771712
|
||||
)
|
||||
|
||||
// ContainerCPU returns the CPU usage of the container cgroup.
|
||||
// This is calculated as difference of two samples of the
|
||||
// CPU usage of the container cgroup.
|
||||
@@ -271,6 +278,10 @@ func (s *Statter) cGroupV1Memory(p Prefix) (*Result, error) {
|
||||
// Nonetheless, if it is not, assume there is no limit set.
|
||||
maxUsageBytes = -1
|
||||
}
|
||||
// Set to unlimited if we detect the unlimited docker value.
|
||||
if maxUsageBytes == UnlimitedMemory {
|
||||
maxUsageBytes = -1
|
||||
}
|
||||
|
||||
// need a space after total_rss so we don't hit something else
|
||||
usageBytes, err := readInt64(s.fs, cgroupV1MemoryUsageBytes)
|
||||
|
||||
@@ -197,6 +197,18 @@ func TestStatter(t *testing.T) {
|
||||
assert.Nil(t, mem.Total)
|
||||
assert.Equal(t, "B", mem.Unit)
|
||||
})
|
||||
t.Run("ContainerMemory/NoLimit", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
fs := initFS(t, fsContainerCgroupV1DockerNoMemoryLimit)
|
||||
s, err := New(WithFS(fs), withNoWait)
|
||||
require.NoError(t, err)
|
||||
mem, err := s.ContainerMemory(PrefixDefault)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, mem)
|
||||
assert.Equal(t, 268435456.0, mem.Used)
|
||||
assert.Nil(t, mem.Total)
|
||||
assert.Equal(t, "B", mem.Unit)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("CGroupV2", func(t *testing.T) {
|
||||
@@ -384,6 +396,17 @@ proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
|
||||
cgroupV1MemoryUsageBytes: "536870912",
|
||||
cgroupV1MemoryStat: "total_inactive_file 268435456",
|
||||
}
|
||||
fsContainerCgroupV1DockerNoMemoryLimit = map[string]string{
|
||||
procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
|
||||
procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
|
||||
proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
|
||||
cgroupV1CPUAcctUsage: "0",
|
||||
cgroupV1CFSQuotaUs: "-1",
|
||||
cgroupV1CFSPeriodUs: "100000",
|
||||
cgroupV1MemoryMaxUsageBytes: "9223372036854771712",
|
||||
cgroupV1MemoryUsageBytes: "536870912",
|
||||
cgroupV1MemoryStat: "total_inactive_file 268435456",
|
||||
}
|
||||
fsContainerCgroupV1AltPath = map[string]string{
|
||||
procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
|
||||
procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
|
||||
|
||||
@@ -59,13 +59,18 @@ func NewWithCommand(
|
||||
t testing.TB, cmd *clibase.Cmd, args ...string,
|
||||
) (*clibase.Invocation, config.Root) {
|
||||
configDir := config.Root(t.TempDir())
|
||||
logger := slogtest.Make(t, nil)
|
||||
// I really would like to fail test on error logs, but realistically, turning on by default
|
||||
// in all our CLI tests is going to create a lot of flaky noise.
|
||||
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).
|
||||
Leveled(slog.LevelDebug).
|
||||
Named("cli")
|
||||
i := &clibase.Invocation{
|
||||
Command: cmd,
|
||||
Args: append([]string{"--global-config", string(configDir)}, args...),
|
||||
Stdin: io.LimitReader(nil, 0),
|
||||
Stdout: (&logWriter{prefix: "stdout", log: logger}),
|
||||
Stderr: (&logWriter{prefix: "stderr", log: logger}),
|
||||
Logger: logger,
|
||||
}
|
||||
t.Logf("invoking command: %s %s", cmd.Name(), strings.Join(i.Args, " "))
|
||||
|
||||
|
||||
@@ -0,0 +1,59 @@
|
||||
package clitest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type FakeSignalNotifier struct {
|
||||
sync.Mutex
|
||||
t *testing.T
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
signals []os.Signal
|
||||
stopped bool
|
||||
}
|
||||
|
||||
func NewFakeSignalNotifier(t *testing.T) *FakeSignalNotifier {
|
||||
fsn := &FakeSignalNotifier{t: t}
|
||||
return fsn
|
||||
}
|
||||
|
||||
func (f *FakeSignalNotifier) Stop() {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.stopped = true
|
||||
if f.cancel == nil {
|
||||
f.t.Error("stopped before started")
|
||||
return
|
||||
}
|
||||
f.cancel()
|
||||
}
|
||||
|
||||
func (f *FakeSignalNotifier) NotifyContext(parent context.Context, signals ...os.Signal) (ctx context.Context, stop context.CancelFunc) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.signals = signals
|
||||
f.ctx, f.cancel = context.WithCancel(parent)
|
||||
return f.ctx, f.Stop
|
||||
}
|
||||
|
||||
func (f *FakeSignalNotifier) Notify() {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
if f.cancel == nil {
|
||||
f.t.Error("notified before started")
|
||||
return
|
||||
}
|
||||
f.cancel()
|
||||
}
|
||||
|
||||
func (f *FakeSignalNotifier) AssertStopped() {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
assert.True(f.t, f.stopped)
|
||||
}
|
||||
+101
-43
@@ -5,12 +5,14 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
@@ -25,9 +27,31 @@ import (
|
||||
func TestAgent(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
waitLines := func(t *testing.T, output <-chan string, lines ...string) error {
|
||||
t.Helper()
|
||||
|
||||
var got []string
|
||||
outerLoop:
|
||||
for _, want := range lines {
|
||||
for {
|
||||
select {
|
||||
case line := <-output:
|
||||
got = append(got, line)
|
||||
if strings.Contains(line, want) {
|
||||
continue outerLoop
|
||||
}
|
||||
case <-time.After(testutil.WaitShort):
|
||||
assert.Failf(t, "timed out waiting for line", "want: %q; got: %q", want, got)
|
||||
return xerrors.Errorf("timed out waiting for line: %q; got: %q", want, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
iter []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error
|
||||
iter []func(context.Context, *testing.T, *codersdk.WorkspaceAgent, <-chan string, chan []codersdk.WorkspaceAgentLog) error
|
||||
logs chan []codersdk.WorkspaceAgentLog
|
||||
opts cliui.AgentOptions
|
||||
want []string
|
||||
@@ -38,12 +62,15 @@ func TestAgent(t *testing.T) {
|
||||
opts: cliui.AgentOptions{
|
||||
FetchInterval: time.Millisecond,
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
iter: []func(context.Context, *testing.T, *codersdk.WorkspaceAgent, <-chan string, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, _ <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnecting
|
||||
return nil
|
||||
},
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
func(_ context.Context, t *testing.T, agent *codersdk.WorkspaceAgent, output <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
return waitLines(t, output, "⧗ Waiting for the workspace agent to connect")
|
||||
},
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, _ <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnected
|
||||
agent.FirstConnectedAt = ptr.Ref(time.Now())
|
||||
return nil
|
||||
@@ -62,12 +89,15 @@ func TestAgent(t *testing.T) {
|
||||
opts: cliui.AgentOptions{
|
||||
FetchInterval: time.Millisecond,
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
iter: []func(context.Context, *testing.T, *codersdk.WorkspaceAgent, <-chan string, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, _ <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnecting
|
||||
return nil
|
||||
},
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
func(_ context.Context, t *testing.T, agent *codersdk.WorkspaceAgent, output <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
return waitLines(t, output, "⧗ Waiting for the workspace agent to connect")
|
||||
},
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, _ <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnected
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycleStartTimeout
|
||||
agent.FirstConnectedAt = ptr.Ref(time.Now())
|
||||
@@ -87,18 +117,24 @@ func TestAgent(t *testing.T) {
|
||||
opts: cliui.AgentOptions{
|
||||
FetchInterval: 1 * time.Millisecond,
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
iter: []func(context.Context, *testing.T, *codersdk.WorkspaceAgent, <-chan string, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, _ <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnecting
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycleStarting
|
||||
agent.StartedAt = ptr.Ref(time.Now())
|
||||
return nil
|
||||
},
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
func(_ context.Context, t *testing.T, agent *codersdk.WorkspaceAgent, output <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
return waitLines(t, output, "⧗ Waiting for the workspace agent to connect")
|
||||
},
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, _ <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentTimeout
|
||||
return nil
|
||||
},
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
func(_ context.Context, t *testing.T, agent *codersdk.WorkspaceAgent, output <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
return waitLines(t, output, "The workspace agent is having trouble connecting, wait for it to connect or restart your workspace.")
|
||||
},
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, _ <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnected
|
||||
agent.FirstConnectedAt = ptr.Ref(time.Now())
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycleReady
|
||||
@@ -120,8 +156,8 @@ func TestAgent(t *testing.T) {
|
||||
opts: cliui.AgentOptions{
|
||||
FetchInterval: 1 * time.Millisecond,
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
iter: []func(context.Context, *testing.T, *codersdk.WorkspaceAgent, <-chan string, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, _ <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentDisconnected
|
||||
agent.FirstConnectedAt = ptr.Ref(time.Now().Add(-1 * time.Minute))
|
||||
agent.LastConnectedAt = ptr.Ref(time.Now().Add(-1 * time.Minute))
|
||||
@@ -131,7 +167,10 @@ func TestAgent(t *testing.T) {
|
||||
agent.ReadyAt = ptr.Ref(time.Now())
|
||||
return nil
|
||||
},
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
func(_ context.Context, t *testing.T, agent *codersdk.WorkspaceAgent, output <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
return waitLines(t, output, "⧗ The workspace agent lost connection")
|
||||
},
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, _ <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnected
|
||||
agent.DisconnectedAt = nil
|
||||
agent.LastConnectedAt = ptr.Ref(time.Now())
|
||||
@@ -151,8 +190,8 @@ func TestAgent(t *testing.T) {
|
||||
FetchInterval: time.Millisecond,
|
||||
Wait: true,
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
iter: []func(context.Context, *testing.T, *codersdk.WorkspaceAgent, <-chan string, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, _ <-chan string, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnected
|
||||
agent.FirstConnectedAt = ptr.Ref(time.Now())
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycleStarting
|
||||
@@ -170,7 +209,7 @@ func TestAgent(t *testing.T) {
|
||||
}
|
||||
return nil
|
||||
},
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, _ <-chan string, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycleReady
|
||||
agent.ReadyAt = ptr.Ref(time.Now())
|
||||
logs <- []codersdk.WorkspaceAgentLog{
|
||||
@@ -195,8 +234,8 @@ func TestAgent(t *testing.T) {
|
||||
FetchInterval: time.Millisecond,
|
||||
Wait: true,
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
iter: []func(context.Context, *testing.T, *codersdk.WorkspaceAgent, <-chan string, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, output <-chan string, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnected
|
||||
agent.FirstConnectedAt = ptr.Ref(time.Now())
|
||||
agent.StartedAt = ptr.Ref(time.Now())
|
||||
@@ -224,8 +263,8 @@ func TestAgent(t *testing.T) {
|
||||
opts: cliui.AgentOptions{
|
||||
FetchInterval: time.Millisecond,
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
iter: []func(context.Context, *testing.T, *codersdk.WorkspaceAgent, <-chan string, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, output <-chan string, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentDisconnected
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycleOff
|
||||
return nil
|
||||
@@ -239,8 +278,8 @@ func TestAgent(t *testing.T) {
|
||||
FetchInterval: time.Millisecond,
|
||||
Wait: true,
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
iter: []func(context.Context, *testing.T, *codersdk.WorkspaceAgent, <-chan string, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, output <-chan string, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnected
|
||||
agent.FirstConnectedAt = ptr.Ref(time.Now())
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycleStarting
|
||||
@@ -253,7 +292,10 @@ func TestAgent(t *testing.T) {
|
||||
}
|
||||
return nil
|
||||
},
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
func(_ context.Context, t *testing.T, agent *codersdk.WorkspaceAgent, output <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
return waitLines(t, output, "Hello world")
|
||||
},
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, _ <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.ReadyAt = ptr.Ref(time.Now())
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycleShuttingDown
|
||||
return nil
|
||||
@@ -272,12 +314,15 @@ func TestAgent(t *testing.T) {
|
||||
FetchInterval: time.Millisecond,
|
||||
Wait: true,
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
iter: []func(context.Context, *testing.T, *codersdk.WorkspaceAgent, <-chan string, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, _ <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnecting
|
||||
return nil
|
||||
},
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
func(_ context.Context, t *testing.T, agent *codersdk.WorkspaceAgent, output <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
return waitLines(t, output, "⧗ Waiting for the workspace agent to connect")
|
||||
},
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, _ <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
return xerrors.New("bad")
|
||||
},
|
||||
},
|
||||
@@ -292,13 +337,16 @@ func TestAgent(t *testing.T) {
|
||||
FetchInterval: time.Millisecond,
|
||||
Wait: true,
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
iter: []func(context.Context, *testing.T, *codersdk.WorkspaceAgent, <-chan string, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, _ <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentTimeout
|
||||
agent.TroubleshootingURL = "https://troubleshoot"
|
||||
return nil
|
||||
},
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
func(_ context.Context, t *testing.T, agent *codersdk.WorkspaceAgent, output <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
return waitLines(t, output, "The workspace agent is having trouble connecting, wait for it to connect or restart your workspace.")
|
||||
},
|
||||
func(_ context.Context, _ *testing.T, agent *codersdk.WorkspaceAgent, output <-chan string, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
return xerrors.New("bad")
|
||||
},
|
||||
},
|
||||
@@ -317,21 +365,27 @@ func TestAgent(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
var buf bytes.Buffer
|
||||
r, w, err := os.Pipe()
|
||||
require.NoError(t, err, "create pipe failed")
|
||||
defer r.Close()
|
||||
defer w.Close()
|
||||
|
||||
agent := codersdk.WorkspaceAgent{
|
||||
ID: uuid.New(),
|
||||
Status: codersdk.WorkspaceAgentConnecting,
|
||||
CreatedAt: time.Now(),
|
||||
LifecycleState: codersdk.WorkspaceAgentLifecycleCreated,
|
||||
}
|
||||
output := make(chan string, 100) // Buffered to avoid blocking, overflow is discarded.
|
||||
logs := make(chan []codersdk.WorkspaceAgentLog, 1)
|
||||
|
||||
cmd := &clibase.Cmd{
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
tc.opts.Fetch = func(_ context.Context, _ uuid.UUID) (codersdk.WorkspaceAgent, error) {
|
||||
t.Log("iter", len(tc.iter))
|
||||
var err error
|
||||
if len(tc.iter) > 0 {
|
||||
err = tc.iter[0](ctx, &agent, logs)
|
||||
err = tc.iter[0](ctx, t, &agent, output, logs)
|
||||
tc.iter = tc.iter[1:]
|
||||
}
|
||||
return agent, err
|
||||
@@ -352,27 +406,25 @@ func TestAgent(t *testing.T) {
|
||||
close(fetchLogs)
|
||||
return fetchLogs, closeFunc(func() error { return nil }), nil
|
||||
}
|
||||
err := cliui.Agent(inv.Context(), &buf, uuid.Nil, tc.opts)
|
||||
err := cliui.Agent(inv.Context(), w, uuid.Nil, tc.opts)
|
||||
_ = w.Close()
|
||||
return err
|
||||
},
|
||||
}
|
||||
inv := cmd.Invoke()
|
||||
|
||||
w := clitest.StartWithWaiter(t, inv)
|
||||
if tc.wantErr {
|
||||
w.RequireError()
|
||||
} else {
|
||||
w.RequireSuccess()
|
||||
}
|
||||
waiter := clitest.StartWithWaiter(t, inv)
|
||||
|
||||
s := bufio.NewScanner(&buf)
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
line := s.Text()
|
||||
t.Log(line)
|
||||
select {
|
||||
case output <- line:
|
||||
default:
|
||||
t.Logf("output overflow: %s", line)
|
||||
}
|
||||
if len(tc.want) == 0 {
|
||||
for i := 0; i < 5; i++ {
|
||||
t.Log(line)
|
||||
}
|
||||
require.Fail(t, "unexpected line", line)
|
||||
}
|
||||
require.Contains(t, line, tc.want[0])
|
||||
@@ -382,6 +434,12 @@ func TestAgent(t *testing.T) {
|
||||
if len(tc.want) > 0 {
|
||||
require.Fail(t, "missing lines: "+strings.Join(tc.want, ", "))
|
||||
}
|
||||
|
||||
if tc.wantErr {
|
||||
waiter.RequireError()
|
||||
} else {
|
||||
waiter.RequireSuccess()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -0,0 +1,63 @@
|
||||
package cliui
|
||||
|
||||
import (
|
||||
"github.com/coder/coder/v2/cli/clibase"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
)
|
||||
|
||||
var defaultQuery = "owner:me"
|
||||
|
||||
// WorkspaceFilter wraps codersdk.WorkspaceFilter
|
||||
// and allows easy integration to a CLI command.
|
||||
// Example usage:
|
||||
//
|
||||
// func (r *RootCmd) MyCmd() *clibase.Cmd {
|
||||
// var (
|
||||
// filter cliui.WorkspaceFilter
|
||||
// ...
|
||||
// )
|
||||
// cmd := &clibase.Cmd{
|
||||
// ...
|
||||
// }
|
||||
// filter.AttachOptions(&cmd.Options)
|
||||
// ...
|
||||
// return cmd
|
||||
// }
|
||||
//
|
||||
// The above will add the following flags to the command:
|
||||
// --all
|
||||
// --search
|
||||
type WorkspaceFilter struct {
|
||||
searchQuery string
|
||||
all bool
|
||||
}
|
||||
|
||||
func (w *WorkspaceFilter) Filter() codersdk.WorkspaceFilter {
|
||||
var f codersdk.WorkspaceFilter
|
||||
if w.all {
|
||||
return f
|
||||
}
|
||||
f.FilterQuery = w.searchQuery
|
||||
if f.FilterQuery == "" {
|
||||
f.FilterQuery = defaultQuery
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
func (w *WorkspaceFilter) AttachOptions(opts *clibase.OptionSet) {
|
||||
*opts = append(*opts,
|
||||
clibase.Option{
|
||||
Flag: "all",
|
||||
FlagShorthand: "a",
|
||||
Description: "Specifies whether all workspaces will be listed or not.",
|
||||
|
||||
Value: clibase.BoolOf(&w.all),
|
||||
},
|
||||
clibase.Option{
|
||||
Flag: "search",
|
||||
Description: "Search for a workspace with a query.",
|
||||
Default: defaultQuery,
|
||||
Value: clibase.StringOf(&w.searchQuery),
|
||||
},
|
||||
)
|
||||
}
|
||||
+1
-1
@@ -71,7 +71,7 @@ func Prompt(inv *clibase.Invocation, opts PromptOptions) (string, error) {
|
||||
} else {
|
||||
renderedNo = Bold(ConfirmNo)
|
||||
}
|
||||
pretty.Fprintf(inv.Stdout, DefaultStyles.Placeholder, "(%s/%s)", renderedYes, renderedNo)
|
||||
pretty.Fprintf(inv.Stdout, DefaultStyles.Placeholder, "(%s/%s) ", renderedYes, renderedNo)
|
||||
} else if opts.Default != "" {
|
||||
_, _ = fmt.Fprint(inv.Stdout, pretty.Sprint(DefaultStyles.Placeholder, "("+opts.Default+") "))
|
||||
}
|
||||
|
||||
@@ -0,0 +1,40 @@
|
||||
package cliutil
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
hostname string
|
||||
hostnameOnce sync.Once
|
||||
)
|
||||
|
||||
// Hostname returns the hostname of the machine, lowercased,
|
||||
// with any trailing domain suffix stripped.
|
||||
// It is cached after the first call.
|
||||
// If the hostname cannot be determined, for any reason,
|
||||
// localhost will be returned instead.
|
||||
func Hostname() string {
|
||||
hostnameOnce.Do(func() { hostname = getHostname() })
|
||||
return hostname
|
||||
}
|
||||
|
||||
func getHostname() string {
|
||||
h, err := os.Hostname()
|
||||
if err != nil {
|
||||
// Something must be very wrong if this fails.
|
||||
// We'll just return localhost and hope for the best.
|
||||
return "localhost"
|
||||
}
|
||||
|
||||
// On some platforms, the hostname can be an FQDN. We only want the hostname.
|
||||
if idx := strings.Index(h, "."); idx != -1 {
|
||||
h = h[:idx]
|
||||
}
|
||||
|
||||
// For the sake of consistency, we also want to lowercase the hostname.
|
||||
// Per RFC 4343, DNS lookups must be case-insensitive.
|
||||
return strings.ToLower(h)
|
||||
}
|
||||
@@ -0,0 +1,99 @@
|
||||
package levenshtein
|
||||
|
||||
import (
|
||||
"golang.org/x/exp/constraints"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// Matches returns the closest matches to the needle from the haystack.
|
||||
// The maxDistance parameter is the maximum Matches distance to consider.
|
||||
// If no matches are found, an empty slice is returned.
|
||||
func Matches(needle string, maxDistance int, haystack ...string) (matches []string) {
|
||||
for _, hay := range haystack {
|
||||
if d, err := Distance(needle, hay, maxDistance); err == nil && d <= maxDistance {
|
||||
matches = append(matches, hay)
|
||||
}
|
||||
}
|
||||
|
||||
return matches
|
||||
}
|
||||
|
||||
var ErrMaxDist = xerrors.New("levenshtein: maxDist exceeded")
|
||||
|
||||
// Distance returns the edit distance between a and b using the
|
||||
// Wagner-Fischer algorithm.
|
||||
// A and B must be less than 255 characters long.
|
||||
// maxDist is the maximum distance to consider.
|
||||
// A value of -1 for maxDist means no maximum.
|
||||
func Distance(a, b string, maxDist int) (int, error) {
|
||||
if len(a) > 255 {
|
||||
return 0, xerrors.Errorf("levenshtein: a must be less than 255 characters long")
|
||||
}
|
||||
if len(b) > 255 {
|
||||
return 0, xerrors.Errorf("levenshtein: b must be less than 255 characters long")
|
||||
}
|
||||
m := uint8(len(a))
|
||||
n := uint8(len(b))
|
||||
|
||||
// Special cases for empty strings
|
||||
if m == 0 {
|
||||
return int(n), nil
|
||||
}
|
||||
if n == 0 {
|
||||
return int(m), nil
|
||||
}
|
||||
|
||||
// Allocate a matrix of size m+1 * n+1
|
||||
d := make([][]uint8, 0)
|
||||
var i, j uint8
|
||||
for i = 0; i < m+1; i++ {
|
||||
di := make([]uint8, n+1)
|
||||
d = append(d, di)
|
||||
}
|
||||
|
||||
// Source prefixes
|
||||
for i = 1; i < m+1; i++ {
|
||||
d[i][0] = i
|
||||
}
|
||||
|
||||
// Target prefixes
|
||||
for j = 1; j < n; j++ {
|
||||
d[0][j] = j // nolint:gosec // this cannot overflow
|
||||
}
|
||||
|
||||
// Compute the distance
|
||||
for j = 0; j < n; j++ {
|
||||
for i = 0; i < m; i++ {
|
||||
var subCost uint8
|
||||
// Equal
|
||||
if a[i] != b[j] {
|
||||
subCost = 1
|
||||
}
|
||||
// Don't forget: matrix is +1 size
|
||||
d[i+1][j+1] = min(
|
||||
d[i][j+1]+1, // deletion
|
||||
d[i+1][j]+1, // insertion
|
||||
d[i][j]+subCost, // substitution
|
||||
)
|
||||
// check maxDist on the diagonal
|
||||
if maxDist > -1 && i == j && d[i+1][j+1] > uint8(maxDist) {
|
||||
return int(d[i+1][j+1]), ErrMaxDist
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return int(d[m][n]), nil
|
||||
}
|
||||
|
||||
func min[T constraints.Ordered](ts ...T) T {
|
||||
if len(ts) == 0 {
|
||||
panic("min: no arguments")
|
||||
}
|
||||
m := ts[0]
|
||||
for _, t := range ts[1:] {
|
||||
if t < m {
|
||||
m = t
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
@@ -0,0 +1,194 @@
|
||||
package levenshtein_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliutil/levenshtein"
|
||||
)
|
||||
|
||||
func Test_Levenshtein_Matches(t *testing.T) {
|
||||
t.Parallel()
|
||||
for _, tt := range []struct {
|
||||
Name string
|
||||
Needle string
|
||||
MaxDistance int
|
||||
Haystack []string
|
||||
Expected []string
|
||||
}{
|
||||
{
|
||||
Name: "empty",
|
||||
Needle: "",
|
||||
MaxDistance: 0,
|
||||
Haystack: []string{},
|
||||
Expected: []string{},
|
||||
},
|
||||
{
|
||||
Name: "empty haystack",
|
||||
Needle: "foo",
|
||||
MaxDistance: 0,
|
||||
Haystack: []string{},
|
||||
Expected: []string{},
|
||||
},
|
||||
{
|
||||
Name: "empty needle",
|
||||
Needle: "",
|
||||
MaxDistance: 0,
|
||||
Haystack: []string{"foo"},
|
||||
Expected: []string{},
|
||||
},
|
||||
{
|
||||
Name: "exact match distance 0",
|
||||
Needle: "foo",
|
||||
MaxDistance: 0,
|
||||
Haystack: []string{"foo", "fob"},
|
||||
Expected: []string{"foo"},
|
||||
},
|
||||
{
|
||||
Name: "exact match distance 1",
|
||||
Needle: "foo",
|
||||
MaxDistance: 1,
|
||||
Haystack: []string{"foo", "bar"},
|
||||
Expected: []string{"foo"},
|
||||
},
|
||||
{
|
||||
Name: "not found",
|
||||
Needle: "foo",
|
||||
MaxDistance: 1,
|
||||
Haystack: []string{"bar"},
|
||||
Expected: []string{},
|
||||
},
|
||||
{
|
||||
Name: "1 deletion",
|
||||
Needle: "foo",
|
||||
MaxDistance: 1,
|
||||
Haystack: []string{"bar", "fo"},
|
||||
Expected: []string{"fo"},
|
||||
},
|
||||
{
|
||||
Name: "one deletion, two matches",
|
||||
Needle: "foo",
|
||||
MaxDistance: 1,
|
||||
Haystack: []string{"bar", "fo", "fou"},
|
||||
Expected: []string{"fo", "fou"},
|
||||
},
|
||||
{
|
||||
Name: "one deletion, one addition",
|
||||
Needle: "foo",
|
||||
MaxDistance: 1,
|
||||
Haystack: []string{"bar", "fo", "fou", "f"},
|
||||
Expected: []string{"fo", "fou"},
|
||||
},
|
||||
{
|
||||
Name: "distance 2",
|
||||
Needle: "foo",
|
||||
MaxDistance: 2,
|
||||
Haystack: []string{"bar", "boo", "boof"},
|
||||
Expected: []string{"boo", "boof"},
|
||||
},
|
||||
{
|
||||
Name: "longer input",
|
||||
Needle: "kuberenetes",
|
||||
MaxDistance: 5,
|
||||
Haystack: []string{"kubernetes", "kubeconfig", "kubectl", "kube"},
|
||||
Expected: []string{"kubernetes"},
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
t.Run(tt.Name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
actual := levenshtein.Matches(tt.Needle, tt.MaxDistance, tt.Haystack...)
|
||||
require.ElementsMatch(t, tt.Expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_Levenshtein_Distance(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, tt := range []struct {
|
||||
Name string
|
||||
A string
|
||||
B string
|
||||
MaxDist int
|
||||
Expected int
|
||||
Error string
|
||||
}{
|
||||
{
|
||||
Name: "empty",
|
||||
A: "",
|
||||
B: "",
|
||||
MaxDist: -1,
|
||||
Expected: 0,
|
||||
},
|
||||
{
|
||||
Name: "a empty",
|
||||
A: "",
|
||||
B: "foo",
|
||||
MaxDist: -1,
|
||||
Expected: 3,
|
||||
},
|
||||
{
|
||||
Name: "b empty",
|
||||
A: "foo",
|
||||
B: "",
|
||||
MaxDist: -1,
|
||||
Expected: 3,
|
||||
},
|
||||
{
|
||||
Name: "a is b",
|
||||
A: "foo",
|
||||
B: "foo",
|
||||
MaxDist: -1,
|
||||
Expected: 0,
|
||||
},
|
||||
{
|
||||
Name: "one addition",
|
||||
A: "foo",
|
||||
B: "fooo",
|
||||
MaxDist: -1,
|
||||
Expected: 1,
|
||||
},
|
||||
{
|
||||
Name: "one deletion",
|
||||
A: "fooo",
|
||||
B: "foo",
|
||||
MaxDist: -1,
|
||||
Expected: 1,
|
||||
},
|
||||
{
|
||||
Name: "one substitution",
|
||||
A: "foo",
|
||||
B: "fou",
|
||||
MaxDist: -1,
|
||||
Expected: 1,
|
||||
},
|
||||
{
|
||||
Name: "different strings entirely",
|
||||
A: "foo",
|
||||
B: "bar",
|
||||
MaxDist: -1,
|
||||
Expected: 3,
|
||||
},
|
||||
{
|
||||
Name: "different strings, max distance 2",
|
||||
A: "foo",
|
||||
B: "bar",
|
||||
MaxDist: 2,
|
||||
Error: levenshtein.ErrMaxDist.Error(),
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
t.Run(tt.Name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
actual, err := levenshtein.Distance(tt.A, tt.B, tt.MaxDist)
|
||||
if tt.Error == "" {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.Expected, actual)
|
||||
} else {
|
||||
require.EqualError(t, err, tt.Error)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,38 @@
|
||||
package cliutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type discardAfterClose struct {
|
||||
sync.Mutex
|
||||
wc io.WriteCloser
|
||||
closed bool
|
||||
}
|
||||
|
||||
// DiscardAfterClose is an io.WriteCloser that discards writes after it is closed without errors.
|
||||
// It is useful as a target for a slog.Sink such that an underlying WriteCloser, like a file, can
|
||||
// be cleaned up without race conditions from still-active loggers.
|
||||
func DiscardAfterClose(wc io.WriteCloser) io.WriteCloser {
|
||||
return &discardAfterClose{wc: wc}
|
||||
}
|
||||
|
||||
func (d *discardAfterClose) Write(p []byte) (n int, err error) {
|
||||
d.Lock()
|
||||
defer d.Unlock()
|
||||
if d.closed {
|
||||
return len(p), nil
|
||||
}
|
||||
return d.wc.Write(p)
|
||||
}
|
||||
|
||||
func (d *discardAfterClose) Close() error {
|
||||
d.Lock()
|
||||
defer d.Unlock()
|
||||
if d.closed {
|
||||
return nil
|
||||
}
|
||||
d.closed = true
|
||||
return d.wc.Close()
|
||||
}
|
||||
@@ -0,0 +1,54 @@
|
||||
package cliutil_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/cliutil"
|
||||
)
|
||||
|
||||
func TestDiscardAfterClose(t *testing.T) {
|
||||
t.Parallel()
|
||||
exErr := xerrors.New("test")
|
||||
fwc := &fakeWriteCloser{err: exErr}
|
||||
uut := cliutil.DiscardAfterClose(fwc)
|
||||
|
||||
n, err := uut.Write([]byte("one"))
|
||||
require.Equal(t, 3, n)
|
||||
require.NoError(t, err)
|
||||
|
||||
n, err = uut.Write([]byte("two"))
|
||||
require.Equal(t, 3, n)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = uut.Close()
|
||||
require.Equal(t, exErr, err)
|
||||
|
||||
n, err = uut.Write([]byte("three"))
|
||||
require.Equal(t, 5, n)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, fwc.writes, 2)
|
||||
require.EqualValues(t, "one", fwc.writes[0])
|
||||
require.EqualValues(t, "two", fwc.writes[1])
|
||||
}
|
||||
|
||||
type fakeWriteCloser struct {
|
||||
writes [][]byte
|
||||
closed bool
|
||||
err error
|
||||
}
|
||||
|
||||
func (f *fakeWriteCloser) Write(p []byte) (n int, err error) {
|
||||
q := make([]byte, len(p))
|
||||
copy(q, p)
|
||||
f.writes = append(f.writes, q)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (f *fakeWriteCloser) Close() error {
|
||||
f.closed = true
|
||||
return f.err
|
||||
}
|
||||
+24
-4
@@ -13,6 +13,7 @@ import (
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/cli/safeexec"
|
||||
@@ -46,9 +47,10 @@ const (
|
||||
// sshConfigOptions represents options that can be stored and read
|
||||
// from the coder config in ~/.ssh/coder.
|
||||
type sshConfigOptions struct {
|
||||
waitEnum string
|
||||
userHostPrefix string
|
||||
sshOptions []string
|
||||
waitEnum string
|
||||
userHostPrefix string
|
||||
sshOptions []string
|
||||
disableAutostart bool
|
||||
}
|
||||
|
||||
// addOptions expects options in the form of "option=value" or "option value".
|
||||
@@ -106,7 +108,7 @@ func (o sshConfigOptions) equal(other sshConfigOptions) bool {
|
||||
if !slices.Equal(opt1, opt2) {
|
||||
return false
|
||||
}
|
||||
return o.waitEnum == other.waitEnum && o.userHostPrefix == other.userHostPrefix
|
||||
return o.waitEnum == other.waitEnum && o.userHostPrefix == other.userHostPrefix && o.disableAutostart == other.disableAutostart
|
||||
}
|
||||
|
||||
func (o sshConfigOptions) asList() (list []string) {
|
||||
@@ -116,6 +118,9 @@ func (o sshConfigOptions) asList() (list []string) {
|
||||
if o.userHostPrefix != "" {
|
||||
list = append(list, fmt.Sprintf("ssh-host-prefix: %s", o.userHostPrefix))
|
||||
}
|
||||
if o.disableAutostart {
|
||||
list = append(list, fmt.Sprintf("disable-autostart: %v", o.disableAutostart))
|
||||
}
|
||||
for _, opt := range o.sshOptions {
|
||||
list = append(list, fmt.Sprintf("ssh-option: %s", opt))
|
||||
}
|
||||
@@ -392,6 +397,9 @@ func (r *RootCmd) configSSH() *clibase.Cmd {
|
||||
if sshConfigOpts.waitEnum != "auto" {
|
||||
flags += " --wait=" + sshConfigOpts.waitEnum
|
||||
}
|
||||
if sshConfigOpts.disableAutostart {
|
||||
flags += " --disable-autostart=true"
|
||||
}
|
||||
defaultOptions = append(defaultOptions, fmt.Sprintf(
|
||||
"ProxyCommand %s --global-config %s ssh --stdio%s %s",
|
||||
escapedCoderBinary, escapedGlobalConfig, flags, workspaceHostname,
|
||||
@@ -566,6 +574,13 @@ func (r *RootCmd) configSSH() *clibase.Cmd {
|
||||
Default: "auto",
|
||||
Value: clibase.EnumOf(&sshConfigOpts.waitEnum, "yes", "no", "auto"),
|
||||
},
|
||||
{
|
||||
Flag: "disable-autostart",
|
||||
Description: "Disable starting the workspace automatically when connecting via SSH.",
|
||||
Env: "CODER_CONFIGSSH_DISABLE_AUTOSTART",
|
||||
Value: clibase.BoolOf(&sshConfigOpts.disableAutostart),
|
||||
Default: "false",
|
||||
},
|
||||
{
|
||||
Flag: "force-unix-filepaths",
|
||||
Env: "CODER_CONFIGSSH_UNIX_FILEPATHS",
|
||||
@@ -602,6 +617,9 @@ func sshConfigWriteSectionHeader(w io.Writer, addNewline bool, o sshConfigOption
|
||||
if o.userHostPrefix != "" {
|
||||
_, _ = fmt.Fprintf(&ow, "# :%s=%s\n", "ssh-host-prefix", o.userHostPrefix)
|
||||
}
|
||||
if o.disableAutostart {
|
||||
_, _ = fmt.Fprintf(&ow, "# :%s=%v\n", "disable-autostart", o.disableAutostart)
|
||||
}
|
||||
for _, opt := range o.sshOptions {
|
||||
_, _ = fmt.Fprintf(&ow, "# :%s=%s\n", "ssh-option", opt)
|
||||
}
|
||||
@@ -634,6 +652,8 @@ func sshConfigParseLastOptions(r io.Reader) (o sshConfigOptions) {
|
||||
o.userHostPrefix = parts[1]
|
||||
case "ssh-option":
|
||||
o.sshOptions = append(o.sshOptions, parts[1])
|
||||
case "disable-autostart":
|
||||
o.disableAutostart, _ = strconv.ParseBool(parts[1])
|
||||
default:
|
||||
// Unknown option, ignore.
|
||||
}
|
||||
|
||||
+36
-61
@@ -22,8 +22,9 @@ import (
|
||||
"github.com/coder/coder/v2/agent/agenttest"
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/provisioner/echo"
|
||||
"github.com/coder/coder/v2/provisionersdk/proto"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
@@ -64,8 +65,7 @@ func TestConfigSSH(t *testing.T) {
|
||||
const hostname = "test-coder."
|
||||
const expectedKey = "ConnectionAttempts"
|
||||
const removeKey = "ConnectionTimeout"
|
||||
client := coderdtest.New(t, &coderdtest.Options{
|
||||
IncludeProvisionerDaemon: true,
|
||||
client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{
|
||||
ConfigSSH: codersdk.SSHConfigResponse{
|
||||
HostnamePrefix: hostname,
|
||||
SSHConfigOptions: map[string]string{
|
||||
@@ -76,32 +76,13 @@ func TestConfigSSH(t *testing.T) {
|
||||
},
|
||||
})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
authToken := uuid.NewString()
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionPlan: []*proto.Response{{
|
||||
Type: &proto.Response_Plan{
|
||||
Plan: &proto.PlanComplete{
|
||||
Resources: []*proto.Resource{{
|
||||
Name: "example",
|
||||
Type: "aws_instance",
|
||||
Agents: []*proto.Agent{{
|
||||
Id: uuid.NewString(),
|
||||
Name: "example",
|
||||
}},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
ProvisionApply: echo.ProvisionApplyWithAgent(authToken),
|
||||
})
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, member, owner.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
_ = agenttest.New(t, client.URL, authToken)
|
||||
resources := coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
member, memberUser := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
r := dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
OrganizationID: owner.OrganizationID,
|
||||
OwnerID: memberUser.ID,
|
||||
}).WithAgent().Do()
|
||||
_ = agenttest.New(t, client.URL, r.AgentToken)
|
||||
resources := coderdtest.AwaitWorkspaceAgents(t, client, r.Workspace.ID)
|
||||
agentConn, err := client.DialWorkspaceAgent(context.Background(), resources[0].Agents[0].ID, nil)
|
||||
require.NoError(t, err)
|
||||
defer agentConn.Close()
|
||||
@@ -172,7 +153,7 @@ func TestConfigSSH(t *testing.T) {
|
||||
|
||||
home := filepath.Dir(filepath.Dir(sshConfigFile))
|
||||
// #nosec
|
||||
sshCmd := exec.Command("ssh", "-F", sshConfigFile, hostname+workspace.Name, "echo", "test")
|
||||
sshCmd := exec.Command("ssh", "-F", sshConfigFile, hostname+r.Workspace.Name, "echo", "test")
|
||||
pty = ptytest.New(t)
|
||||
// Set HOME because coder config is included from ~/.ssh/coder.
|
||||
sshCmd.Env = append(sshCmd.Env, fmt.Sprintf("HOME=%s", home))
|
||||
@@ -213,13 +194,13 @@ func TestConfigSSH_FileWriteAndOptionsFlow(t *testing.T) {
|
||||
match, write string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args []string
|
||||
matches []match
|
||||
writeConfig writeConfig
|
||||
wantConfig wantConfig
|
||||
wantErr bool
|
||||
echoResponse *echo.Responses
|
||||
name string
|
||||
args []string
|
||||
matches []match
|
||||
writeConfig writeConfig
|
||||
wantConfig wantConfig
|
||||
wantErr bool
|
||||
hasAgent bool
|
||||
}{
|
||||
{
|
||||
name: "Config file is created",
|
||||
@@ -576,11 +557,8 @@ func TestConfigSSH_FileWriteAndOptionsFlow(t *testing.T) {
|
||||
args: []string{
|
||||
"-y", "--coder-binary-path", "/foo/bar/coder",
|
||||
},
|
||||
wantErr: false,
|
||||
echoResponse: &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: echo.ProvisionApplyWithAgent(""),
|
||||
},
|
||||
wantErr: false,
|
||||
hasAgent: true,
|
||||
wantConfig: wantConfig{
|
||||
regexMatch: "ProxyCommand /foo/bar/coder",
|
||||
},
|
||||
@@ -591,15 +569,14 @@ func TestConfigSSH_FileWriteAndOptionsFlow(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
client = coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user = coderdtest.CreateFirstUser(t, client)
|
||||
version = coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, tt.echoResponse)
|
||||
_ = coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
project = coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
workspace = coderdtest.CreateWorkspace(t, client, user.OrganizationID, project.ID)
|
||||
_ = coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
)
|
||||
client, db := coderdtest.NewWithDatabase(t, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
if tt.hasAgent {
|
||||
_ = dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).WithAgent().Do()
|
||||
}
|
||||
|
||||
// Prepare ssh config files.
|
||||
sshConfigName := sshConfigFileName(t)
|
||||
@@ -613,6 +590,7 @@ func TestConfigSSH_FileWriteAndOptionsFlow(t *testing.T) {
|
||||
}
|
||||
args = append(args, tt.args...)
|
||||
inv, root := clitest.New(t, args...)
|
||||
//nolint:gocritic // This has always ran with the admin user.
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
pty := ptytest.New(t)
|
||||
@@ -710,17 +688,14 @@ func TestConfigSSH_Hostnames(t *testing.T) {
|
||||
resources = append(resources, resource)
|
||||
}
|
||||
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
client, db := coderdtest.NewWithDatabase(t, nil)
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
// authToken := uuid.NewString()
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID,
|
||||
echo.WithResources(resources))
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, member, owner.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
member, memberUser := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
|
||||
r := dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
OrganizationID: owner.OrganizationID,
|
||||
OwnerID: memberUser.ID,
|
||||
}).Resource(resources...).Do()
|
||||
sshConfigFile := sshConfigFileName(t)
|
||||
|
||||
inv, root := clitest.New(t, "config-ssh", "--ssh-config-file", sshConfigFile)
|
||||
@@ -745,7 +720,7 @@ func TestConfigSSH_Hostnames(t *testing.T) {
|
||||
|
||||
var expectedHosts []string
|
||||
for _, hostnamePattern := range tt.expected {
|
||||
hostname := strings.ReplaceAll(hostnamePattern, "@", workspace.Name)
|
||||
hostname := strings.ReplaceAll(hostnamePattern, "@", r.Workspace.Name)
|
||||
expectedHosts = append(expectedHosts, hostname)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
package cli
|
||||
|
||||
const (
|
||||
timeFormat = "3:04PM MST"
|
||||
dateFormat = "Jan 2, 2006"
|
||||
)
|
||||
+57
-14
@@ -26,8 +26,9 @@ func (r *RootCmd) create() *clibase.Cmd {
|
||||
stopAfter time.Duration
|
||||
workspaceName string
|
||||
|
||||
parameterFlags workspaceParameterFlags
|
||||
autoUpdates string
|
||||
parameterFlags workspaceParameterFlags
|
||||
autoUpdates string
|
||||
copyParametersFrom string
|
||||
)
|
||||
client := new(codersdk.Client)
|
||||
cmd := &clibase.Cmd{
|
||||
@@ -76,7 +77,24 @@ func (r *RootCmd) create() *clibase.Cmd {
|
||||
return xerrors.Errorf("A workspace already exists named %q!", workspaceName)
|
||||
}
|
||||
|
||||
var sourceWorkspace codersdk.Workspace
|
||||
if copyParametersFrom != "" {
|
||||
sourceWorkspaceOwner, sourceWorkspaceName, err := splitNamedWorkspace(copyParametersFrom)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sourceWorkspace, err = client.WorkspaceByOwnerAndName(inv.Context(), sourceWorkspaceOwner, sourceWorkspaceName, codersdk.WorkspaceOptions{})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get source workspace: %w", err)
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "Coder will use the same template %q as the source workspace.\n", sourceWorkspace.TemplateName)
|
||||
templateName = sourceWorkspace.TemplateName
|
||||
}
|
||||
|
||||
var template codersdk.Template
|
||||
var templateVersionID uuid.UUID
|
||||
if templateName == "" {
|
||||
_, _ = fmt.Fprintln(inv.Stdout, pretty.Sprint(cliui.DefaultStyles.Wrap, "Select a template below to preview the provisioned infrastructure:"))
|
||||
|
||||
@@ -118,11 +136,19 @@ func (r *RootCmd) create() *clibase.Cmd {
|
||||
}
|
||||
|
||||
template = templateByName[option]
|
||||
templateVersionID = template.ActiveVersionID
|
||||
} else if sourceWorkspace.LatestBuild.TemplateVersionID != uuid.Nil {
|
||||
template, err = client.Template(inv.Context(), sourceWorkspace.TemplateID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get template by name: %w", err)
|
||||
}
|
||||
templateVersionID = sourceWorkspace.LatestBuild.TemplateVersionID
|
||||
} else {
|
||||
template, err = client.TemplateByName(inv.Context(), organization.ID, templateName)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get template by name: %w", err)
|
||||
}
|
||||
templateVersionID = template.ActiveVersionID
|
||||
}
|
||||
|
||||
var schedSpec *string
|
||||
@@ -134,18 +160,28 @@ func (r *RootCmd) create() *clibase.Cmd {
|
||||
schedSpec = ptr.Ref(sched.String())
|
||||
}
|
||||
|
||||
cliRichParameters, err := asWorkspaceBuildParameters(parameterFlags.richParameters)
|
||||
cliBuildParameters, err := asWorkspaceBuildParameters(parameterFlags.richParameters)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("can't parse given parameter values: %w", err)
|
||||
}
|
||||
|
||||
var sourceWorkspaceParameters []codersdk.WorkspaceBuildParameter
|
||||
if copyParametersFrom != "" {
|
||||
sourceWorkspaceParameters, err = client.WorkspaceBuildParameters(inv.Context(), sourceWorkspace.LatestBuild.ID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get source workspace build parameters: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
richParameters, err := prepWorkspaceBuild(inv, client, prepWorkspaceBuildArgs{
|
||||
Action: WorkspaceCreate,
|
||||
Template: template,
|
||||
NewWorkspaceName: workspaceName,
|
||||
Action: WorkspaceCreate,
|
||||
TemplateVersionID: templateVersionID,
|
||||
NewWorkspaceName: workspaceName,
|
||||
|
||||
RichParameterFile: parameterFlags.richParameterFile,
|
||||
RichParameters: cliRichParameters,
|
||||
RichParameters: cliBuildParameters,
|
||||
|
||||
SourceWorkspaceParameters: sourceWorkspaceParameters,
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("prepare build: %w", err)
|
||||
@@ -165,7 +201,7 @@ func (r *RootCmd) create() *clibase.Cmd {
|
||||
}
|
||||
|
||||
workspace, err := client.CreateWorkspace(inv.Context(), organization.ID, workspaceOwner, codersdk.CreateWorkspaceRequest{
|
||||
TemplateID: template.ID,
|
||||
TemplateVersionID: templateVersionID,
|
||||
Name: workspaceName,
|
||||
AutostartSchedule: schedSpec,
|
||||
TTLMillis: ttlMillis,
|
||||
@@ -217,6 +253,12 @@ func (r *RootCmd) create() *clibase.Cmd {
|
||||
Default: string(codersdk.AutomaticUpdatesNever),
|
||||
Value: clibase.StringOf(&autoUpdates),
|
||||
},
|
||||
clibase.Option{
|
||||
Flag: "copy-parameters-from",
|
||||
Env: "CODER_WORKSPACE_COPY_PARAMETERS_FROM",
|
||||
Description: "Specify the source workspace name to copy parameters from.",
|
||||
Value: clibase.StringOf(©ParametersFrom),
|
||||
},
|
||||
cliui.SkipPromptOption(),
|
||||
)
|
||||
cmd.Options = append(cmd.Options, parameterFlags.cliParameters()...)
|
||||
@@ -224,12 +266,12 @@ func (r *RootCmd) create() *clibase.Cmd {
|
||||
}
|
||||
|
||||
type prepWorkspaceBuildArgs struct {
|
||||
Action WorkspaceCLIAction
|
||||
Template codersdk.Template
|
||||
NewWorkspaceName string
|
||||
WorkspaceID uuid.UUID
|
||||
Action WorkspaceCLIAction
|
||||
TemplateVersionID uuid.UUID
|
||||
NewWorkspaceName string
|
||||
|
||||
LastBuildParameters []codersdk.WorkspaceBuildParameter
|
||||
LastBuildParameters []codersdk.WorkspaceBuildParameter
|
||||
SourceWorkspaceParameters []codersdk.WorkspaceBuildParameter
|
||||
|
||||
PromptBuildOptions bool
|
||||
BuildOptions []codersdk.WorkspaceBuildParameter
|
||||
@@ -244,7 +286,7 @@ type prepWorkspaceBuildArgs struct {
|
||||
func prepWorkspaceBuild(inv *clibase.Invocation, client *codersdk.Client, args prepWorkspaceBuildArgs) ([]codersdk.WorkspaceBuildParameter, error) {
|
||||
ctx := inv.Context()
|
||||
|
||||
templateVersion, err := client.TemplateVersion(ctx, args.Template.ActiveVersionID)
|
||||
templateVersion, err := client.TemplateVersion(ctx, args.TemplateVersionID)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get template version: %w", err)
|
||||
}
|
||||
@@ -264,6 +306,7 @@ func prepWorkspaceBuild(inv *clibase.Invocation, client *codersdk.Client, args p
|
||||
|
||||
resolver := new(ParameterResolver).
|
||||
WithLastBuildParameters(args.LastBuildParameters).
|
||||
WithSourceWorkspaceParameters(args.SourceWorkspaceParameters).
|
||||
WithPromptBuildOptions(args.PromptBuildOptions).
|
||||
WithBuildOptions(args.BuildOptions).
|
||||
WithPromptRichParameters(args.PromptRichParameters).
|
||||
|
||||
@@ -391,6 +391,149 @@ func TestCreateWithRichParameters(t *testing.T) {
|
||||
}
|
||||
<-doneChan
|
||||
})
|
||||
|
||||
t.Run("WrongParameterName/DidYouMean", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, echoResponses)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
|
||||
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
|
||||
wrongFirstParameterName := "frst-prameter"
|
||||
inv, root := clitest.New(t, "create", "my-workspace", "--template", template.Name,
|
||||
"--parameter", fmt.Sprintf("%s=%s", wrongFirstParameterName, firstParameterValue),
|
||||
"--parameter", fmt.Sprintf("%s=%s", secondParameterName, secondParameterValue),
|
||||
"--parameter", fmt.Sprintf("%s=%s", immutableParameterName, immutableParameterValue))
|
||||
clitest.SetupConfig(t, member, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
inv.Stdout = pty.Output()
|
||||
inv.Stderr = pty.Output()
|
||||
err := inv.Run()
|
||||
assert.ErrorContains(t, err, "parameter \""+wrongFirstParameterName+"\" is not present in the template")
|
||||
assert.ErrorContains(t, err, "Did you mean: "+firstParameterName)
|
||||
})
|
||||
|
||||
t.Run("CopyParameters", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, echoResponses)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
|
||||
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
|
||||
// Firstly, create a regular workspace using template with parameters.
|
||||
inv, root := clitest.New(t, "create", "my-workspace", "--template", template.Name, "-y",
|
||||
"--parameter", fmt.Sprintf("%s=%s", firstParameterName, firstParameterValue),
|
||||
"--parameter", fmt.Sprintf("%s=%s", secondParameterName, secondParameterValue),
|
||||
"--parameter", fmt.Sprintf("%s=%s", immutableParameterName, immutableParameterValue))
|
||||
clitest.SetupConfig(t, member, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
inv.Stdout = pty.Output()
|
||||
inv.Stderr = pty.Output()
|
||||
err := inv.Run()
|
||||
require.NoError(t, err, "can't create first workspace")
|
||||
|
||||
// Secondly, create a new workspace using parameters from the previous workspace.
|
||||
const otherWorkspace = "other-workspace"
|
||||
|
||||
inv, root = clitest.New(t, "create", "--copy-parameters-from", "my-workspace", otherWorkspace, "-y")
|
||||
clitest.SetupConfig(t, member, root)
|
||||
pty = ptytest.New(t).Attach(inv)
|
||||
inv.Stdout = pty.Output()
|
||||
inv.Stderr = pty.Output()
|
||||
err = inv.Run()
|
||||
require.NoError(t, err, "can't create a workspace based on the source workspace")
|
||||
|
||||
// Verify if the new workspace uses expected parameters.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
workspaces, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{
|
||||
Name: otherWorkspace,
|
||||
})
|
||||
require.NoError(t, err, "can't list available workspaces")
|
||||
require.Len(t, workspaces.Workspaces, 1)
|
||||
|
||||
otherWorkspaceLatestBuild := workspaces.Workspaces[0].LatestBuild
|
||||
|
||||
buildParameters, err := client.WorkspaceBuildParameters(ctx, otherWorkspaceLatestBuild.ID)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, buildParameters, 3)
|
||||
require.Contains(t, buildParameters, codersdk.WorkspaceBuildParameter{Name: firstParameterName, Value: firstParameterValue})
|
||||
require.Contains(t, buildParameters, codersdk.WorkspaceBuildParameter{Name: secondParameterName, Value: secondParameterValue})
|
||||
require.Contains(t, buildParameters, codersdk.WorkspaceBuildParameter{Name: immutableParameterName, Value: immutableParameterValue})
|
||||
})
|
||||
|
||||
t.Run("CopyParametersFromNotUpdatedWorkspace", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, echoResponses)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
|
||||
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
|
||||
// Firstly, create a regular workspace using template with parameters.
|
||||
inv, root := clitest.New(t, "create", "my-workspace", "--template", template.Name, "-y",
|
||||
"--parameter", fmt.Sprintf("%s=%s", firstParameterName, firstParameterValue),
|
||||
"--parameter", fmt.Sprintf("%s=%s", secondParameterName, secondParameterValue),
|
||||
"--parameter", fmt.Sprintf("%s=%s", immutableParameterName, immutableParameterValue))
|
||||
clitest.SetupConfig(t, member, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
inv.Stdout = pty.Output()
|
||||
inv.Stderr = pty.Output()
|
||||
err := inv.Run()
|
||||
require.NoError(t, err, "can't create first workspace")
|
||||
|
||||
// Secondly, update the template to the newer version.
|
||||
version2 := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, prepareEchoResponses([]*proto.RichParameter{
|
||||
{Name: "third_parameter", Type: "string", DefaultValue: "not-relevant"},
|
||||
}), func(ctvr *codersdk.CreateTemplateVersionRequest) {
|
||||
ctvr.TemplateID = template.ID
|
||||
})
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version2.ID)
|
||||
coderdtest.UpdateActiveTemplateVersion(t, client, template.ID, version2.ID)
|
||||
|
||||
// Thirdly, create a new workspace using parameters from the previous workspace.
|
||||
const otherWorkspace = "other-workspace"
|
||||
|
||||
inv, root = clitest.New(t, "create", "--copy-parameters-from", "my-workspace", otherWorkspace, "-y")
|
||||
clitest.SetupConfig(t, member, root)
|
||||
pty = ptytest.New(t).Attach(inv)
|
||||
inv.Stdout = pty.Output()
|
||||
inv.Stderr = pty.Output()
|
||||
err = inv.Run()
|
||||
require.NoError(t, err, "can't create a workspace based on the source workspace")
|
||||
|
||||
// Verify if the new workspace uses expected parameters.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
workspaces, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{
|
||||
Name: otherWorkspace,
|
||||
})
|
||||
require.NoError(t, err, "can't list available workspaces")
|
||||
require.Len(t, workspaces.Workspaces, 1)
|
||||
|
||||
otherWorkspaceLatestBuild := workspaces.Workspaces[0].LatestBuild
|
||||
require.Equal(t, version.ID, otherWorkspaceLatestBuild.TemplateVersionID)
|
||||
|
||||
buildParameters, err := client.WorkspaceBuildParameters(ctx, otherWorkspaceLatestBuild.ID)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, buildParameters, 3)
|
||||
require.Contains(t, buildParameters, codersdk.WorkspaceBuildParameter{Name: firstParameterName, Value: firstParameterValue})
|
||||
require.Contains(t, buildParameters, codersdk.WorkspaceBuildParameter{Name: secondParameterName, Value: secondParameterValue})
|
||||
require.Contains(t, buildParameters, codersdk.WorkspaceBuildParameter{Name: immutableParameterName, Value: immutableParameterValue})
|
||||
})
|
||||
}
|
||||
|
||||
func TestCreateValidateRichParameters(t *testing.T) {
|
||||
|
||||
+12
-5
@@ -22,6 +22,7 @@ import (
|
||||
func (r *RootCmd) dotfiles() *clibase.Cmd {
|
||||
var symlinkDir string
|
||||
var gitbranch string
|
||||
var dotfilesRepoDir string
|
||||
|
||||
cmd := &clibase.Cmd{
|
||||
Use: "dotfiles <git_repo_url>",
|
||||
@@ -35,11 +36,10 @@ func (r *RootCmd) dotfiles() *clibase.Cmd {
|
||||
),
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
var (
|
||||
dotfilesRepoDir = "dotfiles"
|
||||
gitRepo = inv.Args[0]
|
||||
cfg = r.createConfig()
|
||||
cfgDir = string(cfg)
|
||||
dotfilesDir = filepath.Join(cfgDir, dotfilesRepoDir)
|
||||
gitRepo = inv.Args[0]
|
||||
cfg = r.createConfig()
|
||||
cfgDir = string(cfg)
|
||||
dotfilesDir = filepath.Join(cfgDir, dotfilesRepoDir)
|
||||
// This follows the same pattern outlined by others in the market:
|
||||
// https://github.com/coder/coder/pull/1696#issue-1245742312
|
||||
installScriptSet = []string{
|
||||
@@ -290,6 +290,13 @@ func (r *RootCmd) dotfiles() *clibase.Cmd {
|
||||
"If empty, will default to cloning the default branch or using the existing branch in the cloned repo on disk.",
|
||||
Value: clibase.StringOf(&gitbranch),
|
||||
},
|
||||
{
|
||||
Flag: "repo-dir",
|
||||
Default: "dotfiles",
|
||||
Env: "CODER_DOTFILES_REPO_DIR",
|
||||
Description: "Specifies the directory for the dotfiles repository, relative to global config directory.",
|
||||
Value: clibase.StringOf(&dotfilesRepoDir),
|
||||
},
|
||||
cliui.SkipPromptOption(),
|
||||
}
|
||||
return cmd
|
||||
|
||||
@@ -50,6 +50,68 @@ func TestDotfiles(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(b), "wow")
|
||||
})
|
||||
t.Run("SwitchRepoDir", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
_, root := clitest.New(t)
|
||||
testRepo := testGitRepo(t, root)
|
||||
|
||||
// nolint:gosec
|
||||
err := os.WriteFile(filepath.Join(testRepo, ".bashrc"), []byte("wow"), 0o750)
|
||||
require.NoError(t, err)
|
||||
|
||||
c := exec.Command("git", "add", ".bashrc")
|
||||
c.Dir = testRepo
|
||||
err = c.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
c = exec.Command("git", "commit", "-m", `"add .bashrc"`)
|
||||
c.Dir = testRepo
|
||||
out, err := c.CombinedOutput()
|
||||
require.NoError(t, err, string(out))
|
||||
|
||||
inv, _ := clitest.New(t, "dotfiles", "--global-config", string(root), "--symlink-dir", string(root), "--repo-dir", "testrepo", "-y", testRepo)
|
||||
err = inv.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
b, err := os.ReadFile(filepath.Join(string(root), ".bashrc"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(b), "wow")
|
||||
|
||||
stat, staterr := os.Stat(filepath.Join(string(root), "testrepo"))
|
||||
require.NoError(t, staterr)
|
||||
require.True(t, stat.IsDir())
|
||||
})
|
||||
t.Run("SwitchRepoDirRelative", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
_, root := clitest.New(t)
|
||||
testRepo := testGitRepo(t, root)
|
||||
|
||||
// nolint:gosec
|
||||
err := os.WriteFile(filepath.Join(testRepo, ".bashrc"), []byte("wow"), 0o750)
|
||||
require.NoError(t, err)
|
||||
|
||||
c := exec.Command("git", "add", ".bashrc")
|
||||
c.Dir = testRepo
|
||||
err = c.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
c = exec.Command("git", "commit", "-m", `"add .bashrc"`)
|
||||
c.Dir = testRepo
|
||||
out, err := c.CombinedOutput()
|
||||
require.NoError(t, err, string(out))
|
||||
|
||||
inv, _ := clitest.New(t, "dotfiles", "--global-config", string(root), "--symlink-dir", string(root), "--repo-dir", "./relrepo", "-y", testRepo)
|
||||
err = inv.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
b, err := os.ReadFile(filepath.Join(string(root), ".bashrc"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(b), "wow")
|
||||
|
||||
stat, staterr := os.Stat(filepath.Join(string(root), "relrepo"))
|
||||
require.NoError(t, staterr)
|
||||
require.True(t, stat.IsDir())
|
||||
})
|
||||
t.Run("InstallScript", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
if runtime.GOOS == "windows" {
|
||||
|
||||
+113
-51
@@ -10,6 +10,7 @@ import (
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -173,11 +174,12 @@ func (s *scaletestStrategyFlags) attach(opts *clibase.OptionSet) {
|
||||
|
||||
func (s *scaletestStrategyFlags) toStrategy() harness.ExecutionStrategy {
|
||||
var strategy harness.ExecutionStrategy
|
||||
if s.concurrency == 1 {
|
||||
switch s.concurrency {
|
||||
case 1:
|
||||
strategy = harness.LinearExecutionStrategy{}
|
||||
} else if s.concurrency == 0 {
|
||||
case 0:
|
||||
strategy = harness.ConcurrentExecutionStrategy{}
|
||||
} else {
|
||||
default:
|
||||
strategy = harness.ParallelExecutionStrategy{
|
||||
Limit: int(s.concurrency),
|
||||
}
|
||||
@@ -244,7 +246,9 @@ func (o *scaleTestOutput) write(res harness.Results, stdout io.Writer) error {
|
||||
err := s.Sync()
|
||||
// On Linux, EINVAL is returned when calling fsync on /dev/stdout. We
|
||||
// can safely ignore this error.
|
||||
if err != nil && !xerrors.Is(err, syscall.EINVAL) {
|
||||
// On macOS, ENOTTY is returned when calling sync on /dev/stdout. We
|
||||
// can safely ignore this error.
|
||||
if err != nil && !xerrors.Is(err, syscall.EINVAL) && !xerrors.Is(err, syscall.ENOTTY) {
|
||||
return xerrors.Errorf("flush output file: %w", err)
|
||||
}
|
||||
}
|
||||
@@ -394,6 +398,8 @@ func (r *userCleanupRunner) Run(ctx context.Context, _ string, _ io.Writer) erro
|
||||
}
|
||||
|
||||
func (r *RootCmd) scaletestCleanup() *clibase.Cmd {
|
||||
var template string
|
||||
|
||||
cleanupStrategy := &scaletestStrategyFlags{cleanup: true}
|
||||
client := new(codersdk.Client)
|
||||
|
||||
@@ -407,22 +413,29 @@ func (r *RootCmd) scaletestCleanup() *clibase.Cmd {
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
ctx := inv.Context()
|
||||
|
||||
_, err := requireAdmin(ctx, client)
|
||||
me, err := requireAdmin(ctx, client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client.HTTPClient = &http.Client{
|
||||
Transport: &headerTransport{
|
||||
transport: http.DefaultTransport,
|
||||
header: map[string][]string{
|
||||
Transport: &codersdk.HeaderTransport{
|
||||
Transport: http.DefaultTransport,
|
||||
Header: map[string][]string{
|
||||
codersdk.BypassRatelimitHeader: {"true"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if template != "" {
|
||||
_, err := parseTemplate(ctx, client, me.OrganizationIDs, template)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parse template: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
cliui.Infof(inv.Stdout, "Fetching scaletest workspaces...")
|
||||
workspaces, err := getScaletestWorkspaces(ctx, client)
|
||||
workspaces, err := getScaletestWorkspaces(ctx, client, template)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -494,6 +507,15 @@ func (r *RootCmd) scaletestCleanup() *clibase.Cmd {
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Options = clibase.OptionSet{
|
||||
{
|
||||
Flag: "template",
|
||||
Env: "CODER_SCALETEST_CLEANUP_TEMPLATE",
|
||||
Description: "Name or ID of the template. Only delete workspaces created from the given template.",
|
||||
Value: clibase.StringOf(&template),
|
||||
},
|
||||
}
|
||||
|
||||
cleanupStrategy.attach(&cmd.Options)
|
||||
return cmd
|
||||
}
|
||||
@@ -548,9 +570,9 @@ func (r *RootCmd) scaletestCreateWorkspaces() *clibase.Cmd {
|
||||
}
|
||||
|
||||
client.HTTPClient = &http.Client{
|
||||
Transport: &headerTransport{
|
||||
transport: http.DefaultTransport,
|
||||
header: map[string][]string{
|
||||
Transport: &codersdk.HeaderTransport{
|
||||
Transport: http.DefaultTransport,
|
||||
Header: map[string][]string{
|
||||
codersdk.BypassRatelimitHeader: {"true"},
|
||||
},
|
||||
},
|
||||
@@ -564,34 +586,12 @@ func (r *RootCmd) scaletestCreateWorkspaces() *clibase.Cmd {
|
||||
return xerrors.Errorf("could not parse --output flags")
|
||||
}
|
||||
|
||||
var tpl codersdk.Template
|
||||
if template == "" {
|
||||
return xerrors.Errorf("--template is required")
|
||||
}
|
||||
if id, err := uuid.Parse(template); err == nil && id != uuid.Nil {
|
||||
tpl, err = client.Template(ctx, id)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get template by ID %q: %w", template, err)
|
||||
}
|
||||
} else {
|
||||
// List templates in all orgs until we find a match.
|
||||
orgLoop:
|
||||
for _, orgID := range me.OrganizationIDs {
|
||||
tpls, err := client.TemplatesByOrganization(ctx, orgID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("list templates in org %q: %w", orgID, err)
|
||||
}
|
||||
|
||||
for _, t := range tpls {
|
||||
if t.Name == template {
|
||||
tpl = t
|
||||
break orgLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if tpl.ID == uuid.Nil {
|
||||
return xerrors.Errorf("could not find template %q in any organization", template)
|
||||
tpl, err := parseTemplate(ctx, client, me.OrganizationIDs, template)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parse template: %w", err)
|
||||
}
|
||||
|
||||
cliRichParameters, err := asWorkspaceBuildParameters(parameterFlags.richParameters)
|
||||
@@ -600,9 +600,9 @@ func (r *RootCmd) scaletestCreateWorkspaces() *clibase.Cmd {
|
||||
}
|
||||
|
||||
richParameters, err := prepWorkspaceBuild(inv, client, prepWorkspaceBuildArgs{
|
||||
Action: WorkspaceCreate,
|
||||
Template: tpl,
|
||||
NewWorkspaceName: "scaletest-N", // TODO: the scaletest runner will pass in a different name here. Does this matter?
|
||||
Action: WorkspaceCreate,
|
||||
TemplateVersionID: tpl.ActiveVersionID,
|
||||
NewWorkspaceName: "scaletest-N", // TODO: the scaletest runner will pass in a different name here. Does this matter?
|
||||
|
||||
RichParameterFile: parameterFlags.richParameterFile,
|
||||
RichParameters: cliRichParameters,
|
||||
@@ -859,6 +859,7 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
|
||||
tickInterval time.Duration
|
||||
bytesPerTick int64
|
||||
ssh bool
|
||||
template string
|
||||
|
||||
client = &codersdk.Client{}
|
||||
tracingFlags = &scaletestTracingFlags{}
|
||||
@@ -874,26 +875,43 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
|
||||
Middleware: clibase.Chain(
|
||||
r.InitClient(client),
|
||||
),
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
Handler: func(inv *clibase.Invocation) (err error) {
|
||||
ctx := inv.Context()
|
||||
|
||||
notifyCtx, stop := signal.NotifyContext(ctx, InterruptSignals...) // Checked later.
|
||||
defer stop()
|
||||
ctx = notifyCtx
|
||||
|
||||
me, err := requireAdmin(ctx, client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := workspacetraffic.NewMetrics(reg, "username", "workspace_name", "agent_name")
|
||||
|
||||
logger := slog.Make(sloghuman.Sink(io.Discard))
|
||||
logger := inv.Logger
|
||||
prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus")
|
||||
defer prometheusSrvClose()
|
||||
|
||||
// Bypass rate limiting
|
||||
client.HTTPClient = &http.Client{
|
||||
Transport: &headerTransport{
|
||||
transport: http.DefaultTransport,
|
||||
header: map[string][]string{
|
||||
Transport: &codersdk.HeaderTransport{
|
||||
Transport: http.DefaultTransport,
|
||||
Header: map[string][]string{
|
||||
codersdk.BypassRatelimitHeader: {"true"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
workspaces, err := getScaletestWorkspaces(inv.Context(), client)
|
||||
if template != "" {
|
||||
_, err := parseTemplate(ctx, client, me.OrganizationIDs, template)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parse template: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
workspaces, err := getScaletestWorkspaces(inv.Context(), client, template)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -955,6 +973,7 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
|
||||
ReadMetrics: metrics.ReadMetrics(ws.OwnerName, ws.Name, agentName),
|
||||
WriteMetrics: metrics.WriteMetrics(ws.OwnerName, ws.Name, agentName),
|
||||
SSH: ssh,
|
||||
Echo: ssh,
|
||||
}
|
||||
|
||||
if err := config.Validate(); err != nil {
|
||||
@@ -980,6 +999,11 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
|
||||
return xerrors.Errorf("run test harness (harness failure, not a test failure): %w", err)
|
||||
}
|
||||
|
||||
// If the command was interrupted, skip stats.
|
||||
if notifyCtx.Err() != nil {
|
||||
return notifyCtx.Err()
|
||||
}
|
||||
|
||||
res := th.Results()
|
||||
for _, o := range outputs {
|
||||
err = o.write(res, inv.Stdout)
|
||||
@@ -997,6 +1021,13 @@ func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
|
||||
}
|
||||
|
||||
cmd.Options = []clibase.Option{
|
||||
{
|
||||
Flag: "template",
|
||||
FlagShorthand: "t",
|
||||
Env: "CODER_SCALETEST_TEMPLATE",
|
||||
Description: "Name or ID of the template. Traffic generation will be limited to workspaces created from this template.",
|
||||
Value: clibase.StringOf(&template),
|
||||
},
|
||||
{
|
||||
Flag: "bytes-per-tick",
|
||||
Env: "CODER_SCALETEST_WORKSPACE_TRAFFIC_BYTES_PER_TICK",
|
||||
@@ -1058,7 +1089,7 @@ func (r *RootCmd) scaletestDashboard() *clibase.Cmd {
|
||||
return xerrors.Errorf("--jitter must be less than --interval")
|
||||
}
|
||||
ctx := inv.Context()
|
||||
logger := slog.Make(sloghuman.Sink(inv.Stdout)).Leveled(slog.LevelInfo)
|
||||
logger := inv.Logger.AppendSinks(sloghuman.Sink(inv.Stdout))
|
||||
if r.verbose {
|
||||
logger = logger.Leveled(slog.LevelDebug)
|
||||
}
|
||||
@@ -1281,7 +1312,7 @@ func isScaleTestWorkspace(workspace codersdk.Workspace) bool {
|
||||
strings.HasPrefix(workspace.Name, "scaletest-")
|
||||
}
|
||||
|
||||
func getScaletestWorkspaces(ctx context.Context, client *codersdk.Client) ([]codersdk.Workspace, error) {
|
||||
func getScaletestWorkspaces(ctx context.Context, client *codersdk.Client, template string) ([]codersdk.Workspace, error) {
|
||||
var (
|
||||
pageNumber = 0
|
||||
limit = 100
|
||||
@@ -1290,9 +1321,10 @@ func getScaletestWorkspaces(ctx context.Context, client *codersdk.Client) ([]cod
|
||||
|
||||
for {
|
||||
page, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{
|
||||
Name: "scaletest-",
|
||||
Offset: pageNumber * limit,
|
||||
Limit: limit,
|
||||
Name: "scaletest-",
|
||||
Template: template,
|
||||
Offset: pageNumber * limit,
|
||||
Limit: limit,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("fetch scaletest workspaces page %d: %w", pageNumber, err)
|
||||
@@ -1349,3 +1381,33 @@ func getScaletestUsers(ctx context.Context, client *codersdk.Client) ([]codersdk
|
||||
|
||||
return users, nil
|
||||
}
|
||||
|
||||
func parseTemplate(ctx context.Context, client *codersdk.Client, organizationIDs []uuid.UUID, template string) (tpl codersdk.Template, err error) {
|
||||
if id, err := uuid.Parse(template); err == nil && id != uuid.Nil {
|
||||
tpl, err = client.Template(ctx, id)
|
||||
if err != nil {
|
||||
return tpl, xerrors.Errorf("get template by ID %q: %w", template, err)
|
||||
}
|
||||
} else {
|
||||
// List templates in all orgs until we find a match.
|
||||
orgLoop:
|
||||
for _, orgID := range organizationIDs {
|
||||
tpls, err := client.TemplatesByOrganization(ctx, orgID)
|
||||
if err != nil {
|
||||
return tpl, xerrors.Errorf("list templates in org %q: %w", orgID, err)
|
||||
}
|
||||
|
||||
for _, t := range tpls {
|
||||
if t.Name == template {
|
||||
tpl = t
|
||||
break orgLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if tpl.ID == uuid.Nil {
|
||||
return tpl, xerrors.Errorf("could not find template %q in any organization", template)
|
||||
}
|
||||
|
||||
return tpl, nil
|
||||
}
|
||||
|
||||
@@ -91,6 +91,56 @@ func TestScaleTestWorkspaceTraffic(t *testing.T) {
|
||||
require.ErrorContains(t, err, "no scaletest workspaces exist")
|
||||
}
|
||||
|
||||
// This test just validates that the CLI command accepts its known arguments.
|
||||
func TestScaleTestWorkspaceTraffic_Template(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitMedium)
|
||||
defer cancelFunc()
|
||||
|
||||
log := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true})
|
||||
client := coderdtest.New(t, &coderdtest.Options{
|
||||
Logger: &log,
|
||||
})
|
||||
_ = coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
inv, root := clitest.New(t, "exp", "scaletest", "workspace-traffic",
|
||||
"--template", "doesnotexist",
|
||||
)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
pty := ptytest.New(t)
|
||||
inv.Stdout = pty.Output()
|
||||
inv.Stderr = pty.Output()
|
||||
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.ErrorContains(t, err, "could not find template \"doesnotexist\" in any organization")
|
||||
}
|
||||
|
||||
// This test just validates that the CLI command accepts its known arguments.
|
||||
func TestScaleTestCleanup_Template(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitMedium)
|
||||
defer cancelFunc()
|
||||
|
||||
log := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true})
|
||||
client := coderdtest.New(t, &coderdtest.Options{
|
||||
Logger: &log,
|
||||
})
|
||||
_ = coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
inv, root := clitest.New(t, "exp", "scaletest", "cleanup",
|
||||
"--template", "doesnotexist",
|
||||
)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
pty := ptytest.New(t)
|
||||
inv.Stdout = pty.Output()
|
||||
inv.Stderr = pty.Output()
|
||||
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.ErrorContains(t, err, "could not find template \"doesnotexist\" in any organization")
|
||||
}
|
||||
|
||||
// This test just validates that the CLI command accepts its known arguments.
|
||||
func TestScaleTestDashboard(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
+1
-2
@@ -2,7 +2,6 @@ package cli
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os/signal"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
@@ -63,7 +62,7 @@ fi
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
ctx := inv.Context()
|
||||
|
||||
ctx, stop := signal.NotifyContext(ctx, InterruptSignals...)
|
||||
ctx, stop := inv.SignalNotifyContext(ctx, InterruptSignals...)
|
||||
defer stop()
|
||||
|
||||
client, err := r.createAgentClient()
|
||||
|
||||
+1
-2
@@ -4,7 +4,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os/signal"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
@@ -26,7 +25,7 @@ func (r *RootCmd) gitAskpass() *clibase.Cmd {
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
ctx := inv.Context()
|
||||
|
||||
ctx, stop := signal.NotifyContext(ctx, InterruptSignals...)
|
||||
ctx, stop := inv.SignalNotifyContext(ctx, InterruptSignals...)
|
||||
defer stop()
|
||||
|
||||
user, host, err := gitauth.ParseAskpass(inv.Args[0])
|
||||
|
||||
+1
-2
@@ -8,7 +8,6 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
@@ -30,7 +29,7 @@ func (r *RootCmd) gitssh() *clibase.Cmd {
|
||||
|
||||
// Catch interrupt signals to ensure the temporary private
|
||||
// key file is cleaned up on most cases.
|
||||
ctx, stop := signal.NotifyContext(ctx, InterruptSignals...)
|
||||
ctx, stop := inv.SignalNotifyContext(ctx, InterruptSignals...)
|
||||
defer stop()
|
||||
|
||||
// Early check so errors are reported immediately.
|
||||
|
||||
+11
-17
@@ -16,7 +16,6 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/gliderlabs/ssh"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
gossh "golang.org/x/crypto/ssh"
|
||||
|
||||
@@ -24,9 +23,10 @@ import (
|
||||
"github.com/coder/coder/v2/agent/agenttest"
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/codersdk/agentsdk"
|
||||
"github.com/coder/coder/v2/provisioner/echo"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
@@ -34,7 +34,7 @@ import (
|
||||
func prepareTestGitSSH(ctx context.Context, t *testing.T) (*agentsdk.Client, string, gossh.PublicKey) {
|
||||
t.Helper()
|
||||
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
client, db := coderdtest.NewWithDatabase(t, nil)
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
@@ -48,25 +48,19 @@ func prepareTestGitSSH(ctx context.Context, t *testing.T) (*agentsdk.Client, str
|
||||
require.NoError(t, err)
|
||||
|
||||
// setup template
|
||||
agentToken := uuid.NewString()
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionPlan: echo.PlanComplete,
|
||||
ProvisionApply: echo.ProvisionApplyWithAgent(agentToken),
|
||||
})
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
r := dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
OrganizationID: user.OrganizationID,
|
||||
OwnerID: user.UserID,
|
||||
}).WithAgent().Do()
|
||||
|
||||
// start workspace agent
|
||||
agentClient := agentsdk.New(client.URL)
|
||||
agentClient.SetSessionToken(agentToken)
|
||||
_ = agenttest.New(t, client.URL, agentToken, func(o *agent.Options) {
|
||||
agentClient.SetSessionToken(r.AgentToken)
|
||||
_ = agenttest.New(t, client.URL, r.AgentToken, func(o *agent.Options) {
|
||||
o.Client = agentClient
|
||||
})
|
||||
_ = coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
return agentClient, agentToken, pubkey
|
||||
_ = coderdtest.AwaitWorkspaceAgents(t, client, r.Workspace.ID)
|
||||
return agentClient, r.AgentToken, pubkey
|
||||
}
|
||||
|
||||
func serveSSHForGitSSH(t *testing.T, handler func(ssh.Session), pubkeys ...gossh.PublicKey) *net.TCPAddr {
|
||||
|
||||
+37
-73
@@ -1,19 +1,17 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
"github.com/coder/pretty"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/clibase"
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/coderd/schedule/cron"
|
||||
"github.com/coder/coder/v2/coderd/util/ptr"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/pretty"
|
||||
)
|
||||
|
||||
// workspaceListRow is the type provided to the OutputFormatter. This is a bit
|
||||
@@ -31,57 +29,42 @@ type workspaceListRow struct {
|
||||
LastBuilt string `json:"-" table:"last built"`
|
||||
Outdated bool `json:"-" table:"outdated"`
|
||||
StartsAt string `json:"-" table:"starts at"`
|
||||
StartsNext string `json:"-" table:"starts next"`
|
||||
StopsAfter string `json:"-" table:"stops after"`
|
||||
StopsNext string `json:"-" table:"stops next"`
|
||||
DailyCost string `json:"-" table:"daily cost"`
|
||||
}
|
||||
|
||||
func workspaceListRowFromWorkspace(now time.Time, usersByID map[uuid.UUID]codersdk.User, workspace codersdk.Workspace) workspaceListRow {
|
||||
func workspaceListRowFromWorkspace(now time.Time, workspace codersdk.Workspace) workspaceListRow {
|
||||
status := codersdk.WorkspaceDisplayStatus(workspace.LatestBuild.Job.Status, workspace.LatestBuild.Transition)
|
||||
|
||||
lastBuilt := now.UTC().Sub(workspace.LatestBuild.Job.CreatedAt).Truncate(time.Second)
|
||||
autostartDisplay := "-"
|
||||
if !ptr.NilOrEmpty(workspace.AutostartSchedule) {
|
||||
if sched, err := cron.Weekly(*workspace.AutostartSchedule); err == nil {
|
||||
autostartDisplay = fmt.Sprintf("%s %s (%s)", sched.Time(), sched.DaysOfWeek(), sched.Location())
|
||||
}
|
||||
}
|
||||
|
||||
autostopDisplay := "-"
|
||||
if !ptr.NilOrZero(workspace.TTLMillis) {
|
||||
dur := time.Duration(*workspace.TTLMillis) * time.Millisecond
|
||||
autostopDisplay = durationDisplay(dur)
|
||||
if !workspace.LatestBuild.Deadline.IsZero() && workspace.LatestBuild.Deadline.Time.After(now) && status == "Running" {
|
||||
remaining := time.Until(workspace.LatestBuild.Deadline.Time)
|
||||
autostopDisplay = fmt.Sprintf("%s (%s)", autostopDisplay, relative(remaining))
|
||||
}
|
||||
}
|
||||
schedRow := scheduleListRowFromWorkspace(now, workspace)
|
||||
|
||||
healthy := ""
|
||||
if status == "Starting" || status == "Started" {
|
||||
healthy = strconv.FormatBool(workspace.Health.Healthy)
|
||||
}
|
||||
user := usersByID[workspace.OwnerID]
|
||||
return workspaceListRow{
|
||||
Workspace: workspace,
|
||||
WorkspaceName: user.Username + "/" + workspace.Name,
|
||||
WorkspaceName: workspace.OwnerName + "/" + workspace.Name,
|
||||
Template: workspace.TemplateName,
|
||||
Status: status,
|
||||
Healthy: healthy,
|
||||
LastBuilt: durationDisplay(lastBuilt),
|
||||
Outdated: workspace.Outdated,
|
||||
StartsAt: autostartDisplay,
|
||||
StopsAfter: autostopDisplay,
|
||||
StartsAt: schedRow.StartsAt,
|
||||
StartsNext: schedRow.StartsNext,
|
||||
StopsAfter: schedRow.StopsAfter,
|
||||
StopsNext: schedRow.StopsNext,
|
||||
DailyCost: strconv.Itoa(int(workspace.LatestBuild.DailyCost)),
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RootCmd) list() *clibase.Cmd {
|
||||
var (
|
||||
all bool
|
||||
defaultQuery = "owner:me"
|
||||
searchQuery string
|
||||
displayWorkspaces []workspaceListRow
|
||||
formatter = cliui.NewOutputFormatter(
|
||||
filter cliui.WorkspaceFilter
|
||||
formatter = cliui.NewOutputFormatter(
|
||||
cliui.TableFormat(
|
||||
[]workspaceListRow{},
|
||||
[]string{
|
||||
@@ -109,18 +92,12 @@ func (r *RootCmd) list() *clibase.Cmd {
|
||||
r.InitClient(client),
|
||||
),
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
filter := codersdk.WorkspaceFilter{
|
||||
FilterQuery: searchQuery,
|
||||
}
|
||||
if all && searchQuery == defaultQuery {
|
||||
filter.FilterQuery = ""
|
||||
}
|
||||
|
||||
res, err := client.Workspaces(inv.Context(), filter)
|
||||
res, err := queryConvertWorkspaces(inv.Context(), client, filter.Filter(), workspaceListRowFromWorkspace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(res.Workspaces) == 0 {
|
||||
|
||||
if len(res) == 0 {
|
||||
pretty.Fprintf(inv.Stderr, cliui.DefaultStyles.Prompt, "No workspaces found! Create one:\n")
|
||||
_, _ = fmt.Fprintln(inv.Stderr)
|
||||
_, _ = fmt.Fprintln(inv.Stderr, " "+pretty.Sprint(cliui.DefaultStyles.Code, "coder create <name>"))
|
||||
@@ -128,23 +105,7 @@ func (r *RootCmd) list() *clibase.Cmd {
|
||||
return nil
|
||||
}
|
||||
|
||||
userRes, err := client.Users(inv.Context(), codersdk.UsersRequest{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
usersByID := map[uuid.UUID]codersdk.User{}
|
||||
for _, user := range userRes.Users {
|
||||
usersByID[user.ID] = user
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
displayWorkspaces = make([]workspaceListRow, len(res.Workspaces))
|
||||
for i, workspace := range res.Workspaces {
|
||||
displayWorkspaces[i] = workspaceListRowFromWorkspace(now, usersByID, workspace)
|
||||
}
|
||||
|
||||
out, err := formatter.Format(inv.Context(), displayWorkspaces)
|
||||
out, err := formatter.Format(inv.Context(), res)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -153,22 +114,25 @@ func (r *RootCmd) list() *clibase.Cmd {
|
||||
return err
|
||||
},
|
||||
}
|
||||
cmd.Options = clibase.OptionSet{
|
||||
{
|
||||
Flag: "all",
|
||||
FlagShorthand: "a",
|
||||
Description: "Specifies whether all workspaces will be listed or not.",
|
||||
|
||||
Value: clibase.BoolOf(&all),
|
||||
},
|
||||
{
|
||||
Flag: "search",
|
||||
Description: "Search for a workspace with a query.",
|
||||
Default: defaultQuery,
|
||||
Value: clibase.StringOf(&searchQuery),
|
||||
},
|
||||
}
|
||||
|
||||
filter.AttachOptions(&cmd.Options)
|
||||
formatter.AttachOptions(&cmd.Options)
|
||||
return cmd
|
||||
}
|
||||
|
||||
// queryConvertWorkspaces is a helper function for converting
|
||||
// codersdk.Workspaces to a different type.
|
||||
// It's used by the list command to convert workspaces to
|
||||
// workspaceListRow, and by the schedule command to
|
||||
// convert workspaces to scheduleListRow.
|
||||
func queryConvertWorkspaces[T any](ctx context.Context, client *codersdk.Client, filter codersdk.WorkspaceFilter, convertF func(time.Time, codersdk.Workspace) T) ([]T, error) {
|
||||
var empty []T
|
||||
workspaces, err := client.Workspaces(ctx, filter)
|
||||
if err != nil {
|
||||
return empty, xerrors.Errorf("query workspaces: %w", err)
|
||||
}
|
||||
converted := make([]T, len(workspaces.Workspaces))
|
||||
for i, workspace := range workspaces.Workspaces {
|
||||
converted[i] = convertF(time.Now(), workspace)
|
||||
}
|
||||
return converted, nil
|
||||
}
|
||||
|
||||
+20
-18
@@ -11,6 +11,8 @@ import (
|
||||
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
@@ -20,14 +22,15 @@ func TestList(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Run("Single", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
client, db := coderdtest.NewWithDatabase(t, nil)
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, member, owner.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
member, memberUser := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
// setup template
|
||||
r := dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
OrganizationID: owner.OrganizationID,
|
||||
OwnerID: memberUser.ID,
|
||||
}).WithAgent().Do()
|
||||
|
||||
inv, root := clitest.New(t, "ls")
|
||||
clitest.SetupConfig(t, member, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
@@ -40,7 +43,7 @@ func TestList(t *testing.T) {
|
||||
assert.NoError(t, errC)
|
||||
close(done)
|
||||
}()
|
||||
pty.ExpectMatch(workspace.Name)
|
||||
pty.ExpectMatch(r.Workspace.Name)
|
||||
pty.ExpectMatch("Started")
|
||||
cancelFunc()
|
||||
<-done
|
||||
@@ -48,14 +51,13 @@ func TestList(t *testing.T) {
|
||||
|
||||
t.Run("JSON", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
client, db := coderdtest.NewWithDatabase(t, nil)
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, member, owner.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
member, memberUser := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
_ = dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
OrganizationID: owner.OrganizationID,
|
||||
OwnerID: memberUser.ID,
|
||||
}).WithAgent().Do()
|
||||
|
||||
inv, root := clitest.New(t, "list", "--output=json")
|
||||
clitest.SetupConfig(t, member, root)
|
||||
@@ -68,8 +70,8 @@ func TestList(t *testing.T) {
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
var templates []codersdk.Workspace
|
||||
require.NoError(t, json.Unmarshal(out.Bytes(), &templates))
|
||||
require.Len(t, templates, 1)
|
||||
var workspaces []codersdk.Workspace
|
||||
require.NoError(t, json.Unmarshal(out.Bytes(), &workspaces))
|
||||
require.Len(t, workspaces, 1)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -147,6 +147,10 @@ func (r *RootCmd) login() *clibase.Cmd {
|
||||
rawURL = inv.Args[0]
|
||||
}
|
||||
|
||||
if rawURL == "" {
|
||||
return xerrors.Errorf("no url argument provided")
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(rawURL, "http://") && !strings.HasPrefix(rawURL, "https://") {
|
||||
scheme := "https"
|
||||
if strings.HasPrefix(rawURL, "localhost") {
|
||||
|
||||
@@ -3,6 +3,8 @@ package cli_test
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
@@ -36,6 +38,39 @@ func TestLogin(t *testing.T) {
|
||||
require.ErrorContains(t, err, errMsg)
|
||||
})
|
||||
|
||||
t.Run("InitialUserNonCoderURLFail", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
w.Write([]byte("Not Found"))
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
badLoginURL := ts.URL
|
||||
root, _ := clitest.New(t, "login", badLoginURL)
|
||||
err := root.Run()
|
||||
errMsg := fmt.Sprintf("Failed to check server %q for first user, is the URL correct and is coder accessible from your browser?", badLoginURL)
|
||||
require.ErrorContains(t, err, errMsg)
|
||||
})
|
||||
|
||||
t.Run("InitialUserNonCoderURLSuccess", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("X-Coder-Build-Version", "something")
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
w.Write([]byte("Not Found"))
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
badLoginURL := ts.URL
|
||||
root, _ := clitest.New(t, "login", badLoginURL)
|
||||
err := root.Run()
|
||||
// this means we passed the check for a valid coder server
|
||||
require.ErrorContains(t, err, "the initial user cannot be created in non-interactive mode")
|
||||
})
|
||||
|
||||
t.Run("InitialUserTTY", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, nil)
|
||||
|
||||
@@ -20,6 +20,13 @@ type workspaceParameterFlags struct {
|
||||
|
||||
richParameterFile string
|
||||
richParameters []string
|
||||
|
||||
promptRichParameters bool
|
||||
}
|
||||
|
||||
func (wpf *workspaceParameterFlags) allOptions() []clibase.Option {
|
||||
options := append(wpf.cliBuildOptions(), wpf.cliParameters()...)
|
||||
return append(options, wpf.alwaysPrompt())
|
||||
}
|
||||
|
||||
func (wpf *workspaceParameterFlags) cliBuildOptions() []clibase.Option {
|
||||
@@ -55,6 +62,14 @@ func (wpf *workspaceParameterFlags) cliParameters() []clibase.Option {
|
||||
}
|
||||
}
|
||||
|
||||
func (wpf *workspaceParameterFlags) alwaysPrompt() clibase.Option {
|
||||
return clibase.Option{
|
||||
Flag: "always-prompt",
|
||||
Description: "Always prompt all parameters. Does not pull parameter values from existing workspace.",
|
||||
Value: clibase.BoolOf(&wpf.promptRichParameters),
|
||||
}
|
||||
}
|
||||
|
||||
func asWorkspaceBuildParameters(nameValuePairs []string) ([]codersdk.WorkspaceBuildParameter, error) {
|
||||
var params []codersdk.WorkspaceBuildParameter
|
||||
for _, nameValue := range nameValuePairs {
|
||||
|
||||
@@ -2,14 +2,15 @@ package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/pretty"
|
||||
|
||||
"github.com/coder/coder/v2/cli/clibase"
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/cli/cliutil/levenshtein"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/pretty"
|
||||
)
|
||||
|
||||
type WorkspaceCLIAction int
|
||||
@@ -22,7 +23,8 @@ const (
|
||||
)
|
||||
|
||||
type ParameterResolver struct {
|
||||
lastBuildParameters []codersdk.WorkspaceBuildParameter
|
||||
lastBuildParameters []codersdk.WorkspaceBuildParameter
|
||||
sourceWorkspaceParameters []codersdk.WorkspaceBuildParameter
|
||||
|
||||
richParameters []codersdk.WorkspaceBuildParameter
|
||||
richParametersFile map[string]string
|
||||
@@ -37,6 +39,11 @@ func (pr *ParameterResolver) WithLastBuildParameters(params []codersdk.Workspace
|
||||
return pr
|
||||
}
|
||||
|
||||
func (pr *ParameterResolver) WithSourceWorkspaceParameters(params []codersdk.WorkspaceBuildParameter) *ParameterResolver {
|
||||
pr.sourceWorkspaceParameters = params
|
||||
return pr
|
||||
}
|
||||
|
||||
func (pr *ParameterResolver) WithRichParameters(params []codersdk.WorkspaceBuildParameter) *ParameterResolver {
|
||||
pr.richParameters = params
|
||||
return pr
|
||||
@@ -68,6 +75,7 @@ func (pr *ParameterResolver) Resolve(inv *clibase.Invocation, action WorkspaceCL
|
||||
|
||||
staged = pr.resolveWithParametersMapFile(staged)
|
||||
staged = pr.resolveWithCommandLineOrEnv(staged)
|
||||
staged = pr.resolveWithSourceBuildParameters(staged, templateVersionParameters)
|
||||
staged = pr.resolveWithLastBuildParameters(staged, templateVersionParameters)
|
||||
if err = pr.verifyConstraints(staged, action, templateVersionParameters); err != nil {
|
||||
return nil, err
|
||||
@@ -159,11 +167,35 @@ next:
|
||||
return resolved
|
||||
}
|
||||
|
||||
func (pr *ParameterResolver) resolveWithSourceBuildParameters(resolved []codersdk.WorkspaceBuildParameter, templateVersionParameters []codersdk.TemplateVersionParameter) []codersdk.WorkspaceBuildParameter {
|
||||
next:
|
||||
for _, buildParameter := range pr.sourceWorkspaceParameters {
|
||||
tvp := findTemplateVersionParameter(buildParameter, templateVersionParameters)
|
||||
if tvp == nil {
|
||||
continue // it looks like this parameter is not present anymore
|
||||
}
|
||||
|
||||
if tvp.Ephemeral {
|
||||
continue // ephemeral parameters should not be passed to consecutive builds
|
||||
}
|
||||
|
||||
for i, r := range resolved {
|
||||
if r.Name == buildParameter.Name {
|
||||
resolved[i].Value = buildParameter.Value
|
||||
continue next
|
||||
}
|
||||
}
|
||||
|
||||
resolved = append(resolved, buildParameter)
|
||||
}
|
||||
return resolved
|
||||
}
|
||||
|
||||
func (pr *ParameterResolver) verifyConstraints(resolved []codersdk.WorkspaceBuildParameter, action WorkspaceCLIAction, templateVersionParameters []codersdk.TemplateVersionParameter) error {
|
||||
for _, r := range resolved {
|
||||
tvp := findTemplateVersionParameter(r, templateVersionParameters)
|
||||
if tvp == nil {
|
||||
return xerrors.Errorf("parameter %q is not present in the template", r.Name)
|
||||
return templateVersionParametersNotFound(r.Name, templateVersionParameters)
|
||||
}
|
||||
|
||||
if tvp.Ephemeral && !pr.promptBuildOptions && findWorkspaceBuildParameter(tvp.Name, pr.buildOptions) == nil {
|
||||
@@ -194,7 +226,7 @@ func (pr *ParameterResolver) resolveWithInput(resolved []codersdk.WorkspaceBuild
|
||||
(action == WorkspaceUpdate && promptParameterOption) ||
|
||||
(action == WorkspaceUpdate && tvp.Mutable && tvp.Required) ||
|
||||
(action == WorkspaceUpdate && !tvp.Mutable && firstTimeUse) ||
|
||||
(action == WorkspaceUpdate && tvp.Mutable && !tvp.Ephemeral && pr.promptRichParameters) {
|
||||
(tvp.Mutable && !tvp.Ephemeral && pr.promptRichParameters) {
|
||||
parameterValue, err := cliui.RichParameter(inv, tvp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -254,3 +286,19 @@ func isValidTemplateParameterOption(buildParameter codersdk.WorkspaceBuildParame
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func templateVersionParametersNotFound(unknown string, params []codersdk.TemplateVersionParameter) error {
|
||||
var sb strings.Builder
|
||||
_, _ = sb.WriteString(fmt.Sprintf("parameter %q is not present in the template.", unknown))
|
||||
// Going with a fairly generous edit distance
|
||||
maxDist := len(unknown) / 2
|
||||
var paramNames []string
|
||||
for _, p := range params {
|
||||
paramNames = append(paramNames, p.Name)
|
||||
}
|
||||
matches := levenshtein.Matches(unknown, maxDist, paramNames...)
|
||||
if len(matches) > 0 {
|
||||
_, _ = sb.WriteString(fmt.Sprintf("\nDid you mean: %s", strings.Join(matches, ", ")))
|
||||
}
|
||||
return xerrors.Errorf(sb.String())
|
||||
}
|
||||
|
||||
+3
-2
@@ -40,15 +40,16 @@ func (r *RootCmd) ping() *clibase.Cmd {
|
||||
workspaceName := inv.Args[0]
|
||||
_, workspaceAgent, err := getWorkspaceAndAgent(
|
||||
ctx, inv, client,
|
||||
false, // Do not autostart for a ping.
|
||||
codersdk.Me, workspaceName,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var logger slog.Logger
|
||||
logger := inv.Logger
|
||||
if r.verbose {
|
||||
logger = slog.Make(sloghuman.Sink(inv.Stdout)).Leveled(slog.LevelDebug)
|
||||
logger = logger.AppendSinks(sloghuman.Sink(inv.Stdout)).Leveled(slog.LevelDebug)
|
||||
}
|
||||
|
||||
if r.disableDirect {
|
||||
|
||||
+1
-1
@@ -19,7 +19,7 @@ func TestPing(t *testing.T) {
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, workspace, agentToken := setupWorkspaceForAgent(t, nil)
|
||||
client, workspace, agentToken := setupWorkspaceForAgent(t)
|
||||
inv, root := clitest.New(t, "ping", workspace.Name)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
pty := ptytest.New(t)
|
||||
|
||||
+29
-36
@@ -12,7 +12,6 @@ import (
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/pion/udp"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
@@ -26,8 +25,9 @@ import (
|
||||
|
||||
func (r *RootCmd) portForward() *clibase.Cmd {
|
||||
var (
|
||||
tcpForwards []string // <port>:<port>
|
||||
udpForwards []string // <port>:<port>
|
||||
tcpForwards []string // <port>:<port>
|
||||
udpForwards []string // <port>:<port>
|
||||
disableAutostart bool
|
||||
)
|
||||
client := new(codersdk.Client)
|
||||
cmd := &clibase.Cmd{
|
||||
@@ -76,7 +76,7 @@ func (r *RootCmd) portForward() *clibase.Cmd {
|
||||
return xerrors.New("no port-forwards requested")
|
||||
}
|
||||
|
||||
workspace, workspaceAgent, err := getWorkspaceAndAgent(ctx, inv, client, codersdk.Me, inv.Args[0])
|
||||
workspace, workspaceAgent, err := getWorkspaceAndAgent(ctx, inv, client, !disableAutostart, codersdk.Me, inv.Args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -98,9 +98,9 @@ func (r *RootCmd) portForward() *clibase.Cmd {
|
||||
return xerrors.Errorf("await agent: %w", err)
|
||||
}
|
||||
|
||||
var logger slog.Logger
|
||||
logger := inv.Logger
|
||||
if r.verbose {
|
||||
logger = slog.Make(sloghuman.Sink(inv.Stdout)).Leveled(slog.LevelDebug)
|
||||
logger = logger.AppendSinks(sloghuman.Sink(inv.Stdout)).Leveled(slog.LevelDebug)
|
||||
}
|
||||
|
||||
if r.disableDirect {
|
||||
@@ -120,6 +120,7 @@ func (r *RootCmd) portForward() *clibase.Cmd {
|
||||
wg = new(sync.WaitGroup)
|
||||
listeners = make([]net.Listener, len(specs))
|
||||
closeAllListeners = func() {
|
||||
logger.Debug(ctx, "closing all listeners")
|
||||
for _, l := range listeners {
|
||||
if l == nil {
|
||||
continue
|
||||
@@ -131,8 +132,9 @@ func (r *RootCmd) portForward() *clibase.Cmd {
|
||||
defer closeAllListeners()
|
||||
|
||||
for i, spec := range specs {
|
||||
l, err := listenAndPortForward(ctx, inv, conn, wg, spec)
|
||||
l, err := listenAndPortForward(ctx, inv, conn, wg, spec, logger)
|
||||
if err != nil {
|
||||
logger.Error(ctx, "failed to listen", slog.F("spec", spec), slog.Error(err))
|
||||
return err
|
||||
}
|
||||
listeners[i] = l
|
||||
@@ -150,8 +152,10 @@ func (r *RootCmd) portForward() *clibase.Cmd {
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
logger.Debug(ctx, "command context expired waiting for signal", slog.Error(ctx.Err()))
|
||||
closeErr = ctx.Err()
|
||||
case <-sigs:
|
||||
case sig := <-sigs:
|
||||
logger.Debug(ctx, "received signal", slog.F("signal", sig))
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "\nReceived signal, closing all listeners and active connections")
|
||||
}
|
||||
|
||||
@@ -160,6 +164,7 @@ func (r *RootCmd) portForward() *clibase.Cmd {
|
||||
}()
|
||||
|
||||
conn.AwaitReachable(ctx)
|
||||
logger.Debug(ctx, "read to accept connections to forward")
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "Ready!")
|
||||
wg.Wait()
|
||||
return closeErr
|
||||
@@ -180,44 +185,28 @@ func (r *RootCmd) portForward() *clibase.Cmd {
|
||||
Description: "Forward UDP port(s) from the workspace to the local machine. The UDP connection has TCP-like semantics to support stateful UDP protocols.",
|
||||
Value: clibase.StringArrayOf(&udpForwards),
|
||||
},
|
||||
sshDisableAutostartOption(clibase.BoolOf(&disableAutostart)),
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func listenAndPortForward(ctx context.Context, inv *clibase.Invocation, conn *codersdk.WorkspaceAgentConn, wg *sync.WaitGroup, spec portForwardSpec) (net.Listener, error) {
|
||||
func listenAndPortForward(
|
||||
ctx context.Context,
|
||||
inv *clibase.Invocation,
|
||||
conn *codersdk.WorkspaceAgentConn,
|
||||
wg *sync.WaitGroup,
|
||||
spec portForwardSpec,
|
||||
logger slog.Logger,
|
||||
) (net.Listener, error) {
|
||||
logger = logger.With(slog.F("network", spec.listenNetwork), slog.F("address", spec.listenAddress))
|
||||
_, _ = fmt.Fprintf(inv.Stderr, "Forwarding '%v://%v' locally to '%v://%v' in the workspace\n", spec.listenNetwork, spec.listenAddress, spec.dialNetwork, spec.dialAddress)
|
||||
|
||||
var (
|
||||
l net.Listener
|
||||
err error
|
||||
)
|
||||
switch spec.listenNetwork {
|
||||
case "tcp":
|
||||
l, err = net.Listen(spec.listenNetwork, spec.listenAddress)
|
||||
case "udp":
|
||||
var host, port string
|
||||
host, port, err = net.SplitHostPort(spec.listenAddress)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("split %q: %w", spec.listenAddress, err)
|
||||
}
|
||||
|
||||
var portInt int
|
||||
portInt, err = strconv.Atoi(port)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse port %v from %q as int: %w", port, spec.listenAddress, err)
|
||||
}
|
||||
|
||||
l, err = udp.Listen(spec.listenNetwork, &net.UDPAddr{
|
||||
IP: net.ParseIP(host),
|
||||
Port: portInt,
|
||||
})
|
||||
default:
|
||||
return nil, xerrors.Errorf("unknown listen network %q", spec.listenNetwork)
|
||||
}
|
||||
l, err := inv.Net.Listen(spec.listenNetwork, spec.listenAddress)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("listen '%v://%v': %w", spec.listenNetwork, spec.listenAddress, err)
|
||||
}
|
||||
logger.Debug(ctx, "listening")
|
||||
|
||||
wg.Add(1)
|
||||
go func(spec portForwardSpec) {
|
||||
@@ -227,12 +216,14 @@ func listenAndPortForward(ctx context.Context, inv *clibase.Invocation, conn *co
|
||||
if err != nil {
|
||||
// Silently ignore net.ErrClosed errors.
|
||||
if xerrors.Is(err, net.ErrClosed) {
|
||||
logger.Debug(ctx, "listener closed")
|
||||
return
|
||||
}
|
||||
_, _ = fmt.Fprintf(inv.Stderr, "Error accepting connection from '%v://%v': %v\n", spec.listenNetwork, spec.listenAddress, err)
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "Killing listener")
|
||||
return
|
||||
}
|
||||
logger.Debug(ctx, "accepted connection", slog.F("remote_addr", netConn.RemoteAddr()))
|
||||
|
||||
go func(netConn net.Conn) {
|
||||
defer netConn.Close()
|
||||
@@ -242,8 +233,10 @@ func listenAndPortForward(ctx context.Context, inv *clibase.Invocation, conn *co
|
||||
return
|
||||
}
|
||||
defer remoteConn.Close()
|
||||
logger.Debug(ctx, "dialed remote", slog.F("remote_addr", netConn.RemoteAddr()))
|
||||
|
||||
agentssh.Bicopy(ctx, netConn, remoteConn)
|
||||
logger.Debug(ctx, "connection closing", slog.F("remote_addr", netConn.RemoteAddr()))
|
||||
}(netConn)
|
||||
}
|
||||
}(spec)
|
||||
|
||||
+147
-100
@@ -13,13 +13,15 @@ import (
|
||||
"github.com/pion/udp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/agent"
|
||||
"github.com/coder/coder/v2/agent/agenttest"
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/provisioner/echo"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
@@ -44,47 +46,35 @@ func TestPortForward_None(t *testing.T) {
|
||||
pty.ExpectMatch("port-forward <workspace>")
|
||||
}
|
||||
|
||||
//nolint:tparallel,paralleltest // Subtests require setup that must not be done in parallel.
|
||||
func TestPortForward(t *testing.T) {
|
||||
t.Parallel()
|
||||
cases := []struct {
|
||||
name string
|
||||
network string
|
||||
// The flag to pass to `coder port-forward X` to port-forward this type
|
||||
// of connection. Has two format args (both strings), the first is the
|
||||
// local address and the second is the remote address.
|
||||
flag string
|
||||
// The flag(s) to pass to `coder port-forward X` to port-forward this type
|
||||
// of connection. Has one format arg (string) for the remote address.
|
||||
flag []string
|
||||
// setupRemote creates a "remote" listener to emulate a service in the
|
||||
// workspace.
|
||||
setupRemote func(t *testing.T) net.Listener
|
||||
// setupLocal returns an available port that the
|
||||
// port-forward command will listen on "locally". Returns the address
|
||||
// you pass to net.Dial, and the port/path you pass to `coder
|
||||
// port-forward`.
|
||||
setupLocal func(t *testing.T) (string, string)
|
||||
// the local address(es) to "dial"
|
||||
localAddress []string
|
||||
}{
|
||||
{
|
||||
name: "TCP",
|
||||
network: "tcp",
|
||||
flag: "--tcp=%v:%v",
|
||||
flag: []string{"--tcp=5555:%v", "--tcp=6666:%v"},
|
||||
setupRemote: func(t *testing.T) net.Listener {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err, "create TCP listener")
|
||||
return l
|
||||
},
|
||||
setupLocal: func(t *testing.T) (string, string) {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err, "create TCP listener to generate random port")
|
||||
defer l.Close()
|
||||
|
||||
_, port, err := net.SplitHostPort(l.Addr().String())
|
||||
require.NoErrorf(t, err, "split TCP address %q", l.Addr().String())
|
||||
return l.Addr().String(), port
|
||||
},
|
||||
localAddress: []string{"127.0.0.1:5555", "127.0.0.1:6666"},
|
||||
},
|
||||
{
|
||||
name: "UDP",
|
||||
network: "udp",
|
||||
flag: "--udp=%v:%v",
|
||||
flag: []string{"--udp=7777:%v", "--udp=8888:%v"},
|
||||
setupRemote: func(t *testing.T) net.Listener {
|
||||
addr := net.UDPAddr{
|
||||
IP: net.ParseIP("127.0.0.1"),
|
||||
@@ -94,61 +84,37 @@ func TestPortForward(t *testing.T) {
|
||||
require.NoError(t, err, "create UDP listener")
|
||||
return l
|
||||
},
|
||||
setupLocal: func(t *testing.T) (string, string) {
|
||||
addr := net.UDPAddr{
|
||||
IP: net.ParseIP("127.0.0.1"),
|
||||
Port: 0,
|
||||
}
|
||||
l, err := udp.Listen("udp", &addr)
|
||||
require.NoError(t, err, "create UDP listener to generate random port")
|
||||
defer l.Close()
|
||||
|
||||
_, port, err := net.SplitHostPort(l.Addr().String())
|
||||
require.NoErrorf(t, err, "split UDP address %q", l.Addr().String())
|
||||
return l.Addr().String(), port
|
||||
},
|
||||
localAddress: []string{"127.0.0.1:7777", "127.0.0.1:8888"},
|
||||
},
|
||||
{
|
||||
name: "TCPWithAddress",
|
||||
network: "tcp",
|
||||
flag: "--tcp=%v:%v",
|
||||
network: "tcp", flag: []string{"--tcp=10.10.10.99:9999:%v", "--tcp=10.10.10.10:1010:%v"},
|
||||
setupRemote: func(t *testing.T) net.Listener {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err, "create TCP listener")
|
||||
return l
|
||||
},
|
||||
setupLocal: func(t *testing.T) (string, string) {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err, "create TCP listener to generate random port")
|
||||
defer l.Close()
|
||||
|
||||
_, port, err := net.SplitHostPort(l.Addr().String())
|
||||
require.NoErrorf(t, err, "split TCP address %q", l.Addr().String())
|
||||
return l.Addr().String(), fmt.Sprint("0.0.0.0:", port)
|
||||
},
|
||||
localAddress: []string{"10.10.10.99:9999", "10.10.10.10:1010"},
|
||||
},
|
||||
}
|
||||
|
||||
// Setup agent once to be shared between test-cases (avoid expensive
|
||||
// non-parallel setup).
|
||||
var (
|
||||
client = coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
admin = coderdtest.CreateFirstUser(t, client)
|
||||
member, _ = coderdtest.CreateAnotherUser(t, client, admin.OrganizationID)
|
||||
workspace = runAgent(t, client, member)
|
||||
client, db = coderdtest.NewWithDatabase(t, nil)
|
||||
admin = coderdtest.CreateFirstUser(t, client)
|
||||
member, memberUser = coderdtest.CreateAnotherUser(t, client, admin.OrganizationID)
|
||||
workspace = runAgent(t, client, memberUser.ID, db)
|
||||
)
|
||||
|
||||
for _, c := range cases {
|
||||
c := c
|
||||
// Delay parallel tests here because setupLocal reserves
|
||||
// a free open port which is not guaranteed to be free
|
||||
// between the listener closing and port-forward ready.
|
||||
t.Run(c.name+"_OnePort", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
p1 := setupTestListener(t, c.setupRemote(t))
|
||||
|
||||
// Create a flag that forwards from local to listener 1.
|
||||
localAddress, localFlag := c.setupLocal(t)
|
||||
flag := fmt.Sprintf(c.flag, localFlag, p1)
|
||||
flag := fmt.Sprintf(c.flag[0], p1)
|
||||
|
||||
// Launch port-forward in a goroutine so we can start dialing
|
||||
// the "local" listener.
|
||||
@@ -158,23 +124,27 @@ func TestPortForward(t *testing.T) {
|
||||
inv.Stdin = pty.Input()
|
||||
inv.Stdout = pty.Output()
|
||||
inv.Stderr = pty.Output()
|
||||
|
||||
iNet := newInProcNet()
|
||||
inv.Net = iNet
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
errC := make(chan error)
|
||||
go func() {
|
||||
errC <- inv.WithContext(ctx).Run()
|
||||
err := inv.WithContext(ctx).Run()
|
||||
t.Logf("command complete; err=%s", err.Error())
|
||||
errC <- err
|
||||
}()
|
||||
pty.ExpectMatchContext(ctx, "Ready!")
|
||||
|
||||
t.Parallel() // Port is reserved, enable parallel execution.
|
||||
|
||||
// Open two connections simultaneously and test them out of
|
||||
// sync.
|
||||
d := net.Dialer{Timeout: testutil.WaitShort}
|
||||
c1, err := d.DialContext(ctx, c.network, localAddress)
|
||||
dialCtx, dialCtxCancel := context.WithTimeout(ctx, testutil.WaitShort)
|
||||
defer dialCtxCancel()
|
||||
c1, err := iNet.dial(dialCtx, addr{c.network, c.localAddress[0]})
|
||||
require.NoError(t, err, "open connection 1 to 'local' listener")
|
||||
defer c1.Close()
|
||||
c2, err := d.DialContext(ctx, c.network, localAddress)
|
||||
c2, err := iNet.dial(dialCtx, addr{c.network, c.localAddress[0]})
|
||||
require.NoError(t, err, "open connection 2 to 'local' listener")
|
||||
defer c2.Close()
|
||||
testDial(t, c2)
|
||||
@@ -186,16 +156,15 @@ func TestPortForward(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run(c.name+"_TwoPorts", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
var (
|
||||
p1 = setupTestListener(t, c.setupRemote(t))
|
||||
p2 = setupTestListener(t, c.setupRemote(t))
|
||||
)
|
||||
|
||||
// Create a flags for listener 1 and listener 2.
|
||||
localAddress1, localFlag1 := c.setupLocal(t)
|
||||
localAddress2, localFlag2 := c.setupLocal(t)
|
||||
flag1 := fmt.Sprintf(c.flag, localFlag1, p1)
|
||||
flag2 := fmt.Sprintf(c.flag, localFlag2, p2)
|
||||
flag1 := fmt.Sprintf(c.flag[0], p1)
|
||||
flag2 := fmt.Sprintf(c.flag[1], p2)
|
||||
|
||||
// Launch port-forward in a goroutine so we can start dialing
|
||||
// the "local" listeners.
|
||||
@@ -205,6 +174,9 @@ func TestPortForward(t *testing.T) {
|
||||
inv.Stdin = pty.Input()
|
||||
inv.Stdout = pty.Output()
|
||||
inv.Stderr = pty.Output()
|
||||
|
||||
iNet := newInProcNet()
|
||||
inv.Net = iNet
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
errC := make(chan error)
|
||||
@@ -213,15 +185,14 @@ func TestPortForward(t *testing.T) {
|
||||
}()
|
||||
pty.ExpectMatchContext(ctx, "Ready!")
|
||||
|
||||
t.Parallel() // Port is reserved, enable parallel execution.
|
||||
|
||||
// Open a connection to both listener 1 and 2 simultaneously and
|
||||
// then test them out of order.
|
||||
d := net.Dialer{Timeout: testutil.WaitShort}
|
||||
c1, err := d.DialContext(ctx, c.network, localAddress1)
|
||||
dialCtx, dialCtxCancel := context.WithTimeout(ctx, testutil.WaitShort)
|
||||
defer dialCtxCancel()
|
||||
c1, err := iNet.dial(dialCtx, addr{c.network, c.localAddress[0]})
|
||||
require.NoError(t, err, "open connection 1 to 'local' listener 1")
|
||||
defer c1.Close()
|
||||
c2, err := d.DialContext(ctx, c.network, localAddress2)
|
||||
c2, err := iNet.dial(dialCtx, addr{c.network, c.localAddress[1]})
|
||||
require.NoError(t, err, "open connection 2 to 'local' listener 2")
|
||||
defer c2.Close()
|
||||
testDial(t, c2)
|
||||
@@ -233,8 +204,8 @@ func TestPortForward(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
// Test doing TCP and UDP at the same time.
|
||||
t.Run("All", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
var (
|
||||
dials = []addr{}
|
||||
flags = []string{}
|
||||
@@ -244,12 +215,11 @@ func TestPortForward(t *testing.T) {
|
||||
for _, c := range cases {
|
||||
p := setupTestListener(t, c.setupRemote(t))
|
||||
|
||||
localAddress, localFlag := c.setupLocal(t)
|
||||
dials = append(dials, addr{
|
||||
network: c.network,
|
||||
addr: localAddress,
|
||||
addr: c.localAddress[0],
|
||||
})
|
||||
flags = append(flags, fmt.Sprintf(c.flag, localFlag, p))
|
||||
flags = append(flags, fmt.Sprintf(c.flag[0], p))
|
||||
}
|
||||
|
||||
// Launch port-forward in a goroutine so we can start dialing
|
||||
@@ -258,6 +228,9 @@ func TestPortForward(t *testing.T) {
|
||||
clitest.SetupConfig(t, member, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
inv.Stderr = pty.Output()
|
||||
|
||||
iNet := newInProcNet()
|
||||
inv.Net = iNet
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
|
||||
defer cancel()
|
||||
errC := make(chan error)
|
||||
@@ -266,15 +239,14 @@ func TestPortForward(t *testing.T) {
|
||||
}()
|
||||
pty.ExpectMatchContext(ctx, "Ready!")
|
||||
|
||||
t.Parallel() // Port is reserved, enable parallel execution.
|
||||
|
||||
// Open connections to all items in the "dial" array.
|
||||
var (
|
||||
d = net.Dialer{Timeout: testutil.WaitShort}
|
||||
conns = make([]net.Conn, len(dials))
|
||||
dialCtx, dialCtxCancel = context.WithTimeout(ctx, testutil.WaitShort)
|
||||
conns = make([]net.Conn, len(dials))
|
||||
)
|
||||
defer dialCtxCancel()
|
||||
for i, a := range dials {
|
||||
c, err := d.DialContext(ctx, a.network, a.addr)
|
||||
c, err := iNet.dial(dialCtx, a)
|
||||
require.NoErrorf(t, err, "open connection %v to 'local' listener %v", i+1, i+1)
|
||||
t.Cleanup(func() {
|
||||
_ = c.Close()
|
||||
@@ -296,35 +268,23 @@ func TestPortForward(t *testing.T) {
|
||||
// runAgent creates a fake workspace and starts an agent locally for that
|
||||
// workspace. The agent will be cleaned up on test completion.
|
||||
// nolint:unused
|
||||
func runAgent(t *testing.T, adminClient, userClient *codersdk.Client) codersdk.Workspace {
|
||||
ctx := context.Background()
|
||||
user, err := userClient.User(ctx, codersdk.Me)
|
||||
func runAgent(t *testing.T, client *codersdk.Client, owner uuid.UUID, db database.Store) database.Workspace {
|
||||
user, err := client.User(context.Background(), codersdk.Me)
|
||||
require.NoError(t, err, "specified user does not exist")
|
||||
require.Greater(t, len(user.OrganizationIDs), 0, "user has no organizations")
|
||||
orgID := user.OrganizationIDs[0]
|
||||
r := dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
OrganizationID: orgID,
|
||||
OwnerID: owner,
|
||||
}).WithAgent().Do()
|
||||
|
||||
// Setup template
|
||||
agentToken := uuid.NewString()
|
||||
version := coderdtest.CreateTemplateVersion(t, adminClient, orgID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionPlan: echo.PlanComplete,
|
||||
ProvisionApply: echo.ProvisionApplyWithAgent(agentToken),
|
||||
})
|
||||
|
||||
// Create template and workspace
|
||||
template := coderdtest.CreateTemplate(t, adminClient, orgID, version.ID)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, adminClient, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, userClient, orgID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, adminClient, workspace.LatestBuild.ID)
|
||||
|
||||
_ = agenttest.New(t, adminClient.URL, agentToken,
|
||||
_ = agenttest.New(t, client.URL, r.AgentToken,
|
||||
func(o *agent.Options) {
|
||||
o.SSHMaxTimeout = 60 * time.Second
|
||||
},
|
||||
)
|
||||
coderdtest.AwaitWorkspaceAgents(t, adminClient, workspace.ID)
|
||||
|
||||
return workspace
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, r.Workspace.ID)
|
||||
return r.Workspace
|
||||
}
|
||||
|
||||
// setupTestListener starts accepting connections and echoing a single packet.
|
||||
@@ -404,3 +364,90 @@ type addr struct {
|
||||
network string
|
||||
addr string
|
||||
}
|
||||
|
||||
func (a addr) Network() string {
|
||||
return a.network
|
||||
}
|
||||
|
||||
func (a addr) Address() string {
|
||||
return a.addr
|
||||
}
|
||||
|
||||
func (a addr) String() string {
|
||||
return a.network + "|" + a.addr
|
||||
}
|
||||
|
||||
type inProcNet struct {
|
||||
sync.Mutex
|
||||
|
||||
listeners map[addr]*inProcListener
|
||||
}
|
||||
|
||||
type inProcListener struct {
|
||||
c chan net.Conn
|
||||
n *inProcNet
|
||||
a addr
|
||||
o sync.Once
|
||||
}
|
||||
|
||||
func newInProcNet() *inProcNet {
|
||||
return &inProcNet{listeners: make(map[addr]*inProcListener)}
|
||||
}
|
||||
|
||||
func (n *inProcNet) Listen(network, address string) (net.Listener, error) {
|
||||
a := addr{network, address}
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
if _, ok := n.listeners[a]; ok {
|
||||
return nil, xerrors.New("busy")
|
||||
}
|
||||
l := newInProcListener(n, a)
|
||||
n.listeners[a] = l
|
||||
return l, nil
|
||||
}
|
||||
|
||||
func (n *inProcNet) dial(ctx context.Context, a addr) (net.Conn, error) {
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
l, ok := n.listeners[a]
|
||||
if !ok {
|
||||
return nil, xerrors.Errorf("nothing listening on %s", a)
|
||||
}
|
||||
x, y := net.Pipe()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case l.c <- x:
|
||||
return y, nil
|
||||
}
|
||||
}
|
||||
|
||||
func newInProcListener(n *inProcNet, a addr) *inProcListener {
|
||||
return &inProcListener{
|
||||
c: make(chan net.Conn),
|
||||
n: n,
|
||||
a: a,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *inProcListener) Accept() (net.Conn, error) {
|
||||
c, ok := <-l.c
|
||||
if !ok {
|
||||
return nil, net.ErrClosed
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (l *inProcListener) Close() error {
|
||||
l.o.Do(func() {
|
||||
l.n.Lock()
|
||||
defer l.n.Unlock()
|
||||
delete(l.n.listeners, l.a)
|
||||
close(l.c)
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *inProcListener) Addr() net.Addr {
|
||||
return l.a
|
||||
}
|
||||
|
||||
+1
-1
@@ -15,7 +15,7 @@ import (
|
||||
func TestRename(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true, AllowWorkspaceRenames: true})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, nil)
|
||||
|
||||
+19
-32
@@ -2,15 +2,15 @@ package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/pretty"
|
||||
|
||||
"github.com/coder/coder/v2/cli/clibase"
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/pretty"
|
||||
)
|
||||
|
||||
func (r *RootCmd) restart() *clibase.Cmd {
|
||||
@@ -25,7 +25,7 @@ func (r *RootCmd) restart() *clibase.Cmd {
|
||||
clibase.RequireNArgs(1),
|
||||
r.InitClient(client),
|
||||
),
|
||||
Options: append(parameterFlags.cliBuildOptions(), cliui.SkipPromptOption()),
|
||||
Options: clibase.OptionSet{cliui.SkipPromptOption()},
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
ctx := inv.Context()
|
||||
out := inv.Stdout
|
||||
@@ -35,30 +35,7 @@ func (r *RootCmd) restart() *clibase.Cmd {
|
||||
return err
|
||||
}
|
||||
|
||||
lastBuildParameters, err := client.WorkspaceBuildParameters(inv.Context(), workspace.LatestBuild.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
template, err := client.Template(inv.Context(), workspace.TemplateID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buildOptions, err := asWorkspaceBuildParameters(parameterFlags.buildOptions)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("can't parse build options: %w", err)
|
||||
}
|
||||
|
||||
buildParameters, err := prepStartWorkspace(inv, client, prepStartWorkspaceArgs{
|
||||
Action: WorkspaceRestart,
|
||||
Template: template,
|
||||
|
||||
LastBuildParameters: lastBuildParameters,
|
||||
|
||||
PromptBuildOptions: parameterFlags.promptBuildOptions,
|
||||
BuildOptions: buildOptions,
|
||||
})
|
||||
startReq, err := buildWorkspaceStartRequest(inv, client, workspace, parameterFlags, WorkspaceRestart)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -77,18 +54,25 @@ func (r *RootCmd) restart() *clibase.Cmd {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = cliui.WorkspaceBuild(ctx, out, client, build.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
build, err = client.CreateWorkspaceBuild(ctx, workspace.ID, codersdk.CreateWorkspaceBuildRequest{
|
||||
Transition: codersdk.WorkspaceTransitionStart,
|
||||
RichParameterValues: buildParameters,
|
||||
})
|
||||
if err != nil {
|
||||
build, err = client.CreateWorkspaceBuild(ctx, workspace.ID, startReq)
|
||||
// It's possible for a workspace build to fail due to the template requiring starting
|
||||
// workspaces with the active version.
|
||||
if cerr, ok := codersdk.AsError(err); ok && cerr.StatusCode() == http.StatusForbidden {
|
||||
_, _ = fmt.Fprintln(inv.Stdout, "Failed to restart with the template version from your last build. Policy may require you to restart with the current active template version.")
|
||||
build, err = startWorkspace(inv, client, workspace, parameterFlags, WorkspaceUpdate)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("start workspace with active template version: %w", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = cliui.WorkspaceBuild(ctx, out, client, build.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -101,5 +85,8 @@ func (r *RootCmd) restart() *clibase.Cmd {
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Options = append(cmd.Options, parameterFlags.allOptions()...)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -239,4 +239,55 @@ func TestRestartWithParameters(t *testing.T) {
|
||||
Value: immutableParameterValue,
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("AlwaysPrompt", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create the workspace
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
member, _ := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, mutableParamsResponse)
|
||||
coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, member, owner.OrganizationID, template.ID, func(cwr *codersdk.CreateWorkspaceRequest) {
|
||||
cwr.RichParameterValues = []codersdk.WorkspaceBuildParameter{
|
||||
{
|
||||
Name: mutableParameterName,
|
||||
Value: mutableParameterValue,
|
||||
},
|
||||
}
|
||||
})
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
inv, root := clitest.New(t, "restart", workspace.Name, "-y", "--always-prompt")
|
||||
clitest.SetupConfig(t, member, root)
|
||||
doneChan := make(chan struct{})
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
// We should be prompted for the parameters again.
|
||||
newValue := "xyz"
|
||||
pty.ExpectMatch(mutableParameterName)
|
||||
pty.WriteLine(newValue)
|
||||
pty.ExpectMatch("workspace has been restarted")
|
||||
<-doneChan
|
||||
|
||||
// Verify that the updated values are persisted.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
workspace, err := client.WorkspaceByOwnerAndName(ctx, workspace.OwnerName, workspace.Name, codersdk.WorkspaceOptions{})
|
||||
require.NoError(t, err)
|
||||
actualParameters, err := client.WorkspaceBuildParameters(ctx, workspace.LatestBuild.ID)
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, actualParameters, codersdk.WorkspaceBuildParameter{
|
||||
Name: mutableParameterName,
|
||||
Value: newValue,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
+78
-53
@@ -30,7 +30,6 @@ import (
|
||||
|
||||
"github.com/coder/pretty"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"github.com/coder/coder/v2/buildinfo"
|
||||
"github.com/coder/coder/v2/cli/clibase"
|
||||
"github.com/coder/coder/v2/cli/cliui"
|
||||
@@ -97,6 +96,7 @@ func (r *RootCmd) Core() []*clibase.Cmd {
|
||||
r.version(defaultVersionInfo),
|
||||
|
||||
// Workspace Commands
|
||||
r.autoupdate(),
|
||||
r.configSSH(),
|
||||
r.create(),
|
||||
r.deleteWorkspace(),
|
||||
@@ -136,14 +136,22 @@ func (r *RootCmd) RunMain(subcommands []*clibase.Cmd) {
|
||||
}
|
||||
err = cmd.Invoke().WithOS().Run()
|
||||
if err != nil {
|
||||
code := 1
|
||||
var exitErr *exitError
|
||||
if errors.As(err, &exitErr) {
|
||||
code = exitErr.code
|
||||
err = exitErr.err
|
||||
}
|
||||
if errors.Is(err, cliui.Canceled) {
|
||||
//nolint:revive
|
||||
os.Exit(1)
|
||||
os.Exit(code)
|
||||
}
|
||||
f := prettyErrorFormatter{w: os.Stderr, verbose: r.verbose}
|
||||
f.format(err)
|
||||
if err != nil {
|
||||
f.format(err)
|
||||
}
|
||||
//nolint:revive
|
||||
os.Exit(1)
|
||||
os.Exit(code)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -441,21 +449,6 @@ func (r *RootCmd) Command(subcommands []*clibase.Cmd) (*clibase.Cmd, error) {
|
||||
return cmd, nil
|
||||
}
|
||||
|
||||
type contextKey int
|
||||
|
||||
const (
|
||||
contextKeyLogger contextKey = iota
|
||||
)
|
||||
|
||||
func ContextWithLogger(ctx context.Context, l slog.Logger) context.Context {
|
||||
return context.WithValue(ctx, contextKeyLogger, l)
|
||||
}
|
||||
|
||||
func LoggerFromContext(ctx context.Context) (slog.Logger, bool) {
|
||||
l, ok := ctx.Value(contextKeyLogger).(slog.Logger)
|
||||
return l, ok
|
||||
}
|
||||
|
||||
// RootCmd contains parameters and helpers useful to all commands.
|
||||
type RootCmd struct {
|
||||
clientURL *url.URL
|
||||
@@ -478,11 +471,11 @@ type RootCmd struct {
|
||||
}
|
||||
|
||||
func addTelemetryHeader(client *codersdk.Client, inv *clibase.Invocation) {
|
||||
transport, ok := client.HTTPClient.Transport.(*headerTransport)
|
||||
transport, ok := client.HTTPClient.Transport.(*codersdk.HeaderTransport)
|
||||
if !ok {
|
||||
transport = &headerTransport{
|
||||
transport: client.HTTPClient.Transport,
|
||||
header: http.Header{},
|
||||
transport = &codersdk.HeaderTransport{
|
||||
Transport: client.HTTPClient.Transport,
|
||||
Header: http.Header{},
|
||||
}
|
||||
client.HTTPClient.Transport = transport
|
||||
}
|
||||
@@ -516,13 +509,17 @@ func addTelemetryHeader(client *codersdk.Client, inv *clibase.Invocation) {
|
||||
return
|
||||
}
|
||||
|
||||
transport.header.Add(codersdk.CLITelemetryHeader, s)
|
||||
transport.Header.Add(codersdk.CLITelemetryHeader, s)
|
||||
}
|
||||
|
||||
// InitClient sets client to a new client.
|
||||
// It reads from global configuration files if flags are not set.
|
||||
func (r *RootCmd) InitClient(client *codersdk.Client) clibase.MiddlewareFunc {
|
||||
return r.initClientInternal(client, false)
|
||||
return clibase.Chain(
|
||||
r.initClientInternal(client, false),
|
||||
// By default, we should print warnings in addition to initializing the client
|
||||
r.PrintWarnings(client),
|
||||
)
|
||||
}
|
||||
|
||||
func (r *RootCmd) InitClientMissingTokenOK(client *codersdk.Client) clibase.MiddlewareFunc {
|
||||
@@ -582,7 +579,20 @@ func (r *RootCmd) initClientInternal(client *codersdk.Client, allowTokenMissing
|
||||
client.SetLogBodies(true)
|
||||
}
|
||||
client.DisableDirectConnections = r.disableDirect
|
||||
return next(inv)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RootCmd) PrintWarnings(client *codersdk.Client) clibase.MiddlewareFunc {
|
||||
if client == nil {
|
||||
panic("client is nil")
|
||||
}
|
||||
if r == nil {
|
||||
panic("root is nil")
|
||||
}
|
||||
return func(next clibase.HandlerFunc) clibase.HandlerFunc {
|
||||
return func(inv *clibase.Invocation) error {
|
||||
// We send these requests in parallel to minimize latency.
|
||||
var (
|
||||
versionErr = make(chan error)
|
||||
@@ -598,14 +608,14 @@ func (r *RootCmd) initClientInternal(client *codersdk.Client, allowTokenMissing
|
||||
close(warningErr)
|
||||
}()
|
||||
|
||||
if err = <-versionErr; err != nil {
|
||||
if err := <-versionErr; err != nil {
|
||||
// Just log the error here. We never want to fail a command
|
||||
// due to a pre-run.
|
||||
pretty.Fprintf(inv.Stderr, cliui.DefaultStyles.Warn, "check versions error: %s", err)
|
||||
_, _ = fmt.Fprintln(inv.Stderr)
|
||||
}
|
||||
|
||||
if err = <-warningErr; err != nil {
|
||||
if err := <-warningErr; err != nil {
|
||||
// Same as above
|
||||
pretty.Fprintf(inv.Stderr, cliui.DefaultStyles.Warn, "check entitlement warnings error: %s", err)
|
||||
_, _ = fmt.Fprintln(inv.Stderr)
|
||||
@@ -616,10 +626,10 @@ func (r *RootCmd) initClientInternal(client *codersdk.Client, allowTokenMissing
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RootCmd) setClient(ctx context.Context, client *codersdk.Client, serverURL *url.URL) error {
|
||||
transport := &headerTransport{
|
||||
transport: http.DefaultTransport,
|
||||
header: http.Header{},
|
||||
func (r *RootCmd) HeaderTransport(ctx context.Context, serverURL *url.URL) (*codersdk.HeaderTransport, error) {
|
||||
transport := &codersdk.HeaderTransport{
|
||||
Transport: http.DefaultTransport,
|
||||
Header: http.Header{},
|
||||
}
|
||||
headers := r.header
|
||||
if r.headerCommand != "" {
|
||||
@@ -637,23 +647,32 @@ func (r *RootCmd) setClient(ctx context.Context, client *codersdk.Client, server
|
||||
cmd.Stderr = io.Discard
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to run %v: %w", cmd.Args, err)
|
||||
return nil, xerrors.Errorf("failed to run %v: %w", cmd.Args, err)
|
||||
}
|
||||
scanner := bufio.NewScanner(&outBuf)
|
||||
for scanner.Scan() {
|
||||
headers = append(headers, scanner.Text())
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
return xerrors.Errorf("scan %v: %w", cmd.Args, err)
|
||||
return nil, xerrors.Errorf("scan %v: %w", cmd.Args, err)
|
||||
}
|
||||
}
|
||||
for _, header := range headers {
|
||||
parts := strings.SplitN(header, "=", 2)
|
||||
if len(parts) < 2 {
|
||||
return xerrors.Errorf("split header %q had less than two parts", header)
|
||||
return nil, xerrors.Errorf("split header %q had less than two parts", header)
|
||||
}
|
||||
transport.header.Add(parts[0], parts[1])
|
||||
transport.Header.Add(parts[0], parts[1])
|
||||
}
|
||||
return transport, nil
|
||||
}
|
||||
|
||||
func (r *RootCmd) setClient(ctx context.Context, client *codersdk.Client, serverURL *url.URL) error {
|
||||
transport, err := r.HeaderTransport(ctx, serverURL)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create header transport: %w", err)
|
||||
}
|
||||
|
||||
client.URL = serverURL
|
||||
client.HTTPClient = &http.Client{
|
||||
Transport: transport,
|
||||
@@ -860,24 +879,6 @@ func (r *RootCmd) Verbosef(inv *clibase.Invocation, fmtStr string, args ...inter
|
||||
}
|
||||
}
|
||||
|
||||
type headerTransport struct {
|
||||
transport http.RoundTripper
|
||||
header http.Header
|
||||
}
|
||||
|
||||
func (h *headerTransport) Header() http.Header {
|
||||
return h.header.Clone()
|
||||
}
|
||||
|
||||
func (h *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
for k, v := range h.header {
|
||||
for _, vv := range v {
|
||||
req.Header.Add(k, vv)
|
||||
}
|
||||
}
|
||||
return h.transport.RoundTrip(req)
|
||||
}
|
||||
|
||||
// DumpHandler provides a custom SIGQUIT and SIGTRAP handler that dumps the
|
||||
// stacktrace of all goroutines to stderr and a well-known file in the home
|
||||
// directory. This is useful for debugging deadlock issues that may occur in
|
||||
@@ -968,6 +969,30 @@ func DumpHandler(ctx context.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
type exitError struct {
|
||||
code int
|
||||
err error
|
||||
}
|
||||
|
||||
var _ error = (*exitError)(nil)
|
||||
|
||||
func (e *exitError) Error() string {
|
||||
if e.err != nil {
|
||||
return fmt.Sprintf("exit code %d: %v", e.code, e.err)
|
||||
}
|
||||
return fmt.Sprintf("exit code %d", e.code)
|
||||
}
|
||||
|
||||
func (e *exitError) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
// ExitError returns an error that will cause the CLI to exit with the given
|
||||
// exit code. If err is non-nil, it will be wrapped by the returned error.
|
||||
func ExitError(code int, err error) error {
|
||||
return &exitError{code: code, err: err}
|
||||
}
|
||||
|
||||
// IiConnectionErr is a convenience function for checking if the source of an
|
||||
// error is due to a 'connection refused', 'no such host', etc.
|
||||
func isConnectionError(err error) bool {
|
||||
|
||||
+3
-3
@@ -136,9 +136,9 @@ func TestDERPHeaders(t *testing.T) {
|
||||
})
|
||||
|
||||
var (
|
||||
admin = coderdtest.CreateFirstUser(t, client)
|
||||
member, _ = coderdtest.CreateAnotherUser(t, client, admin.OrganizationID)
|
||||
workspace = runAgent(t, client, member)
|
||||
admin = coderdtest.CreateFirstUser(t, client)
|
||||
member, memberUser = coderdtest.CreateAnotherUser(t, client, admin.OrganizationID)
|
||||
workspace = runAgent(t, client, memberUser.ID, newOptions.Database)
|
||||
)
|
||||
|
||||
// Inject custom /derp handler so we can inspect the headers.
|
||||
|
||||
+95
-51
@@ -3,9 +3,9 @@ package cli
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/jedib0t/go-pretty/v6/table"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/v2/cli/clibase"
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
scheduleShowDescriptionLong = `Shows the following information for the given workspace:
|
||||
scheduleShowDescriptionLong = `Shows the following information for the given workspace(s):
|
||||
* The automatic start schedule
|
||||
* The next scheduled start time
|
||||
* The duration after which it will stop
|
||||
@@ -72,25 +72,67 @@ func (r *RootCmd) schedules() *clibase.Cmd {
|
||||
return scheduleCmd
|
||||
}
|
||||
|
||||
// scheduleShow() is just a wrapper for list() with some different defaults.
|
||||
func (r *RootCmd) scheduleShow() *clibase.Cmd {
|
||||
var (
|
||||
filter cliui.WorkspaceFilter
|
||||
formatter = cliui.NewOutputFormatter(
|
||||
cliui.TableFormat(
|
||||
[]scheduleListRow{},
|
||||
[]string{
|
||||
"workspace",
|
||||
"starts at",
|
||||
"starts next",
|
||||
"stops after",
|
||||
"stops next",
|
||||
},
|
||||
),
|
||||
cliui.JSONFormat(),
|
||||
)
|
||||
)
|
||||
client := new(codersdk.Client)
|
||||
showCmd := &clibase.Cmd{
|
||||
Use: "show <workspace-name>",
|
||||
Short: "Show workspace schedule",
|
||||
Use: "show <workspace | --search <query> | --all>",
|
||||
Short: "Show workspace schedules",
|
||||
Long: scheduleShowDescriptionLong,
|
||||
Middleware: clibase.Chain(
|
||||
clibase.RequireNArgs(1),
|
||||
clibase.RequireRangeArgs(0, 1),
|
||||
r.InitClient(client),
|
||||
),
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
workspace, err := namedWorkspace(inv.Context(), client, inv.Args[0])
|
||||
// To preserve existing behavior, if an argument is passed we will
|
||||
// only show the schedule for that workspace.
|
||||
// This will clobber the search query if one is passed.
|
||||
f := filter.Filter()
|
||||
if len(inv.Args) == 1 {
|
||||
// If the argument contains a slash, we assume it's a full owner/name reference
|
||||
if strings.Contains(inv.Args[0], "/") {
|
||||
_, workspaceName, err := splitNamedWorkspace(inv.Args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.FilterQuery = fmt.Sprintf("name:%s", workspaceName)
|
||||
} else {
|
||||
// Otherwise, we assume it's a workspace name owned by the current user
|
||||
f.FilterQuery = fmt.Sprintf("owner:me name:%s", inv.Args[0])
|
||||
}
|
||||
}
|
||||
res, err := queryConvertWorkspaces(inv.Context(), client, f, scheduleListRowFromWorkspace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return displaySchedule(workspace, inv.Stdout)
|
||||
out, err := formatter.Format(inv.Context(), res)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintln(inv.Stdout, out)
|
||||
return err
|
||||
},
|
||||
}
|
||||
filter.AttachOptions(&showCmd.Options)
|
||||
formatter.AttachOptions(&showCmd.Options)
|
||||
return showCmd
|
||||
}
|
||||
|
||||
@@ -242,50 +284,52 @@ func (r *RootCmd) scheduleOverride() *clibase.Cmd {
|
||||
return overrideCmd
|
||||
}
|
||||
|
||||
func displaySchedule(workspace codersdk.Workspace, out io.Writer) error {
|
||||
loc, err := tz.TimezoneIANA()
|
||||
func displaySchedule(ws codersdk.Workspace, out io.Writer) error {
|
||||
rows := []workspaceListRow{workspaceListRowFromWorkspace(time.Now(), ws)}
|
||||
rendered, err := cliui.DisplayTable(rows, "workspace", []string{
|
||||
"workspace", "starts at", "starts next", "stops after", "stops next",
|
||||
})
|
||||
if err != nil {
|
||||
loc = time.UTC // best effort
|
||||
return err
|
||||
}
|
||||
_, err = fmt.Fprintln(out, rendered)
|
||||
return err
|
||||
}
|
||||
|
||||
// scheduleListRow is a row in the schedule list.
|
||||
// this is required for proper JSON output.
|
||||
type scheduleListRow struct {
|
||||
WorkspaceName string `json:"workspace" table:"workspace,default_sort"`
|
||||
StartsAt string `json:"starts_at" table:"starts at"`
|
||||
StartsNext string `json:"starts_next" table:"starts next"`
|
||||
StopsAfter string `json:"stops_after" table:"stops after"`
|
||||
StopsNext string `json:"stops_next" table:"stops next"`
|
||||
}
|
||||
|
||||
func scheduleListRowFromWorkspace(now time.Time, workspace codersdk.Workspace) scheduleListRow {
|
||||
autostartDisplay := ""
|
||||
nextStartDisplay := ""
|
||||
if !ptr.NilOrEmpty(workspace.AutostartSchedule) {
|
||||
if sched, err := cron.Weekly(*workspace.AutostartSchedule); err == nil {
|
||||
autostartDisplay = sched.Humanize()
|
||||
nextStartDisplay = timeDisplay(sched.Next(now))
|
||||
}
|
||||
}
|
||||
|
||||
autostopDisplay := ""
|
||||
nextStopDisplay := ""
|
||||
if !ptr.NilOrZero(workspace.TTLMillis) {
|
||||
dur := time.Duration(*workspace.TTLMillis) * time.Millisecond
|
||||
autostopDisplay = durationDisplay(dur)
|
||||
if !workspace.LatestBuild.Deadline.IsZero() && workspace.LatestBuild.Transition == codersdk.WorkspaceTransitionStart {
|
||||
nextStopDisplay = timeDisplay(workspace.LatestBuild.Deadline.Time)
|
||||
}
|
||||
}
|
||||
return scheduleListRow{
|
||||
WorkspaceName: workspace.OwnerName + "/" + workspace.Name,
|
||||
StartsAt: autostartDisplay,
|
||||
StartsNext: nextStartDisplay,
|
||||
StopsAfter: autostopDisplay,
|
||||
StopsNext: nextStopDisplay,
|
||||
}
|
||||
|
||||
var (
|
||||
schedStart = "manual"
|
||||
schedStop = "manual"
|
||||
schedNextStart = "-"
|
||||
schedNextStop = "-"
|
||||
)
|
||||
if !ptr.NilOrEmpty(workspace.AutostartSchedule) {
|
||||
sched, err := cron.Weekly(ptr.NilToEmpty(workspace.AutostartSchedule))
|
||||
if err != nil {
|
||||
// This should never happen.
|
||||
_, _ = fmt.Fprintf(out, "Invalid autostart schedule %q for workspace %s: %s\n", *workspace.AutostartSchedule, workspace.Name, err.Error())
|
||||
return nil
|
||||
}
|
||||
schedNext := sched.Next(time.Now()).In(sched.Location())
|
||||
schedStart = fmt.Sprintf("%s %s (%s)", sched.Time(), sched.DaysOfWeek(), sched.Location())
|
||||
schedNextStart = schedNext.Format(timeFormat + " on " + dateFormat)
|
||||
}
|
||||
|
||||
if !ptr.NilOrZero(workspace.TTLMillis) {
|
||||
d := time.Duration(*workspace.TTLMillis) * time.Millisecond
|
||||
schedStop = durationDisplay(d) + " after start"
|
||||
}
|
||||
|
||||
if !workspace.LatestBuild.Deadline.IsZero() {
|
||||
if workspace.LatestBuild.Transition != "start" {
|
||||
schedNextStop = "-"
|
||||
} else {
|
||||
schedNextStop = workspace.LatestBuild.Deadline.Time.In(loc).Format(timeFormat + " on " + dateFormat)
|
||||
schedNextStop = fmt.Sprintf("%s (in %s)", schedNextStop, durationDisplay(time.Until(workspace.LatestBuild.Deadline.Time)))
|
||||
}
|
||||
}
|
||||
|
||||
tw := cliui.Table()
|
||||
tw.AppendRow(table.Row{"Starts at", schedStart})
|
||||
tw.AppendRow(table.Row{"Starts next", schedNextStart})
|
||||
tw.AppendRow(table.Row{"Stops at", schedStop})
|
||||
tw.AppendRow(table.Row{"Stops next", schedNextStop})
|
||||
|
||||
_, _ = fmt.Fprintln(out, tw.Render())
|
||||
return nil
|
||||
}
|
||||
|
||||
+326
-348
@@ -3,8 +3,9 @@ package cli_test
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -14,372 +15,349 @@ import (
|
||||
"github.com/coder/coder/v2/cli/clitest"
|
||||
"github.com/coder/coder/v2/coderd/coderdtest"
|
||||
"github.com/coder/coder/v2/coderd/database"
|
||||
"github.com/coder/coder/v2/coderd/util/ptr"
|
||||
"github.com/coder/coder/v2/coderd/database/dbfake"
|
||||
"github.com/coder/coder/v2/coderd/schedule/cron"
|
||||
"github.com/coder/coder/v2/coderd/util/tz"
|
||||
"github.com/coder/coder/v2/codersdk"
|
||||
"github.com/coder/coder/v2/pty/ptytest"
|
||||
"github.com/coder/coder/v2/testutil"
|
||||
)
|
||||
|
||||
// setupTestSchedule creates 4 workspaces:
|
||||
// 1. a-owner-ws1: owned by owner, has both autostart and autostop enabled.
|
||||
// 2. b-owner-ws2: owned by owner, has only autostart enabled.
|
||||
// 3. c-member-ws3: owned by member, has only autostop enabled.
|
||||
// 4. d-member-ws4: owned by member, has neither autostart nor autostop enabled.
|
||||
// It returns the owner and member clients, the database, and the workspaces.
|
||||
// The workspaces are returned in the same order as they are created.
|
||||
func setupTestSchedule(t *testing.T, sched *cron.Schedule) (ownerClient, memberClient *codersdk.Client, db database.Store, ws []codersdk.Workspace) {
|
||||
t.Helper()
|
||||
|
||||
ownerClient, db = coderdtest.NewWithDatabase(t, nil)
|
||||
owner := coderdtest.CreateFirstUser(t, ownerClient)
|
||||
memberClient, memberUser := coderdtest.CreateAnotherUserMutators(t, ownerClient, owner.OrganizationID, nil, func(r *codersdk.CreateUserRequest) {
|
||||
r.Username = "testuser2" // ensure deterministic ordering
|
||||
})
|
||||
_ = dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
Name: "a-owner",
|
||||
OwnerID: owner.UserID,
|
||||
OrganizationID: owner.OrganizationID,
|
||||
AutostartSchedule: sql.NullString{String: sched.String(), Valid: true},
|
||||
Ttl: sql.NullInt64{Int64: 8 * time.Hour.Nanoseconds(), Valid: true},
|
||||
}).WithAgent().Do()
|
||||
|
||||
_ = dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
Name: "b-owner",
|
||||
OwnerID: owner.UserID,
|
||||
OrganizationID: owner.OrganizationID,
|
||||
AutostartSchedule: sql.NullString{String: sched.String(), Valid: true},
|
||||
}).WithAgent().Do()
|
||||
_ = dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
Name: "c-member",
|
||||
OwnerID: memberUser.ID,
|
||||
OrganizationID: owner.OrganizationID,
|
||||
Ttl: sql.NullInt64{Int64: 8 * time.Hour.Nanoseconds(), Valid: true},
|
||||
}).WithAgent().Do()
|
||||
_ = dbfake.WorkspaceBuild(t, db, database.Workspace{
|
||||
Name: "d-member",
|
||||
OwnerID: memberUser.ID,
|
||||
OrganizationID: owner.OrganizationID,
|
||||
}).WithAgent().Do()
|
||||
|
||||
// Need this for LatestBuild.Deadline
|
||||
resp, err := ownerClient.Workspaces(context.Background(), codersdk.WorkspaceFilter{})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, resp.Workspaces, 4)
|
||||
// Ensure same order as in CLI output
|
||||
ws = resp.Workspaces
|
||||
sort.Slice(ws, func(i, j int) bool {
|
||||
a := ws[i].OwnerName + "/" + ws[i].Name
|
||||
b := ws[j].OwnerName + "/" + ws[j].Name
|
||||
return a < b
|
||||
})
|
||||
|
||||
return ownerClient, memberClient, db, ws
|
||||
}
|
||||
|
||||
//nolint:paralleltest // t.Setenv
|
||||
func TestScheduleShow(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Run("Enabled", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Given
|
||||
// Set timezone to Asia/Kolkata to surface any timezone-related bugs.
|
||||
t.Setenv("TZ", "Asia/Kolkata")
|
||||
loc, err := tz.TimezoneIANA()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "Asia/Kolkata", loc.String())
|
||||
sched, err := cron.Weekly("CRON_TZ=Europe/Dublin 30 7 * * Mon-Fri")
|
||||
require.NoError(t, err, "invalid schedule")
|
||||
ownerClient, memberClient, _, ws := setupTestSchedule(t, sched)
|
||||
now := time.Now()
|
||||
|
||||
var (
|
||||
tz = "Europe/Dublin"
|
||||
sched = "30 7 * * 1-5"
|
||||
schedCron = fmt.Sprintf("CRON_TZ=%s %s", tz, sched)
|
||||
ttl = 8 * time.Hour
|
||||
client = coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user = coderdtest.CreateFirstUser(t, client)
|
||||
version = coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
_ = coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
project = coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
workspace = coderdtest.CreateWorkspace(t, client, user.OrganizationID, project.ID, func(cwr *codersdk.CreateWorkspaceRequest) {
|
||||
cwr.AutostartSchedule = ptr.Ref(schedCron)
|
||||
cwr.TTLMillis = ptr.Ref(ttl.Milliseconds())
|
||||
})
|
||||
_ = coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
cmdArgs = []string{"schedule", "show", workspace.Name}
|
||||
stdoutBuf = &bytes.Buffer{}
|
||||
)
|
||||
t.Run("OwnerNoArgs", func(t *testing.T) {
|
||||
// When: owner specifies no args
|
||||
inv, root := clitest.New(t, "schedule", "show")
|
||||
//nolint:gocritic // Testing that owner user sees all
|
||||
clitest.SetupConfig(t, ownerClient, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
require.NoError(t, inv.Run())
|
||||
|
||||
inv, root := clitest.New(t, cmdArgs...)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
inv.Stdout = stdoutBuf
|
||||
|
||||
err := inv.Run()
|
||||
require.NoError(t, err, "unexpected error")
|
||||
lines := strings.Split(strings.TrimSpace(stdoutBuf.String()), "\n")
|
||||
if assert.Len(t, lines, 4) {
|
||||
assert.Contains(t, lines[0], "Starts at 7:30AM Mon-Fri (Europe/Dublin)")
|
||||
assert.Contains(t, lines[1], "Starts next 7:30AM")
|
||||
// it should have either IST or GMT
|
||||
if !strings.Contains(lines[1], "IST") && !strings.Contains(lines[1], "GMT") {
|
||||
t.Error("expected either IST or GMT")
|
||||
}
|
||||
assert.Contains(t, lines[2], "Stops at 8h after start")
|
||||
assert.NotContains(t, lines[3], "Stops next -")
|
||||
}
|
||||
// Then: they should see their own workspaces.
|
||||
// 1st workspace: a-owner-ws1 has both autostart and autostop enabled.
|
||||
pty.ExpectMatch(ws[0].OwnerName + "/" + ws[0].Name)
|
||||
pty.ExpectMatch(sched.Humanize())
|
||||
pty.ExpectMatch(sched.Next(now).In(loc).Format(time.RFC3339))
|
||||
pty.ExpectMatch("8h")
|
||||
pty.ExpectMatch(ws[0].LatestBuild.Deadline.Time.In(loc).Format(time.RFC3339))
|
||||
// 2nd workspace: b-owner-ws2 has only autostart enabled.
|
||||
pty.ExpectMatch(ws[1].OwnerName + "/" + ws[1].Name)
|
||||
pty.ExpectMatch(sched.Humanize())
|
||||
pty.ExpectMatch(sched.Next(now).In(loc).Format(time.RFC3339))
|
||||
})
|
||||
|
||||
t.Run("Manual", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Run("OwnerAll", func(t *testing.T) {
|
||||
// When: owner lists all workspaces
|
||||
inv, root := clitest.New(t, "schedule", "show", "--all")
|
||||
//nolint:gocritic // Testing that owner user sees all
|
||||
clitest.SetupConfig(t, ownerClient, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
require.NoError(t, inv.Run())
|
||||
|
||||
var (
|
||||
client = coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user = coderdtest.CreateFirstUser(t, client)
|
||||
version = coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
_ = coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
project = coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
workspace = coderdtest.CreateWorkspace(t, client, user.OrganizationID, project.ID, func(cwr *codersdk.CreateWorkspaceRequest) {
|
||||
cwr.AutostartSchedule = nil
|
||||
cwr.TTLMillis = nil
|
||||
})
|
||||
_ = coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
cmdArgs = []string{"schedule", "show", workspace.Name}
|
||||
stdoutBuf = &bytes.Buffer{}
|
||||
)
|
||||
|
||||
inv, root := clitest.New(t, cmdArgs...)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
inv.Stdout = stdoutBuf
|
||||
|
||||
err := inv.Run()
|
||||
require.NoError(t, err, "unexpected error")
|
||||
lines := strings.Split(strings.TrimSpace(stdoutBuf.String()), "\n")
|
||||
if assert.Len(t, lines, 4) {
|
||||
assert.Contains(t, lines[0], "Starts at manual")
|
||||
assert.Contains(t, lines[1], "Starts next -")
|
||||
assert.Contains(t, lines[2], "Stops at manual")
|
||||
assert.Contains(t, lines[3], "Stops next -")
|
||||
}
|
||||
// Then: they should see all workspaces
|
||||
// 1st workspace: a-owner-ws1 has both autostart and autostop enabled.
|
||||
pty.ExpectMatch(ws[0].OwnerName + "/" + ws[0].Name)
|
||||
pty.ExpectMatch(sched.Humanize())
|
||||
pty.ExpectMatch(sched.Next(now).In(loc).Format(time.RFC3339))
|
||||
pty.ExpectMatch("8h")
|
||||
pty.ExpectMatch(ws[0].LatestBuild.Deadline.Time.In(loc).Format(time.RFC3339))
|
||||
// 2nd workspace: b-owner-ws2 has only autostart enabled.
|
||||
pty.ExpectMatch(ws[1].OwnerName + "/" + ws[1].Name)
|
||||
pty.ExpectMatch(sched.Humanize())
|
||||
pty.ExpectMatch(sched.Next(now).In(loc).Format(time.RFC3339))
|
||||
// 3rd workspace: c-member-ws3 has only autostop enabled.
|
||||
pty.ExpectMatch(ws[2].OwnerName + "/" + ws[2].Name)
|
||||
pty.ExpectMatch("8h")
|
||||
pty.ExpectMatch(ws[2].LatestBuild.Deadline.Time.In(loc).Format(time.RFC3339))
|
||||
// 4th workspace: d-member-ws4 has neither autostart nor autostop enabled.
|
||||
pty.ExpectMatch(ws[3].OwnerName + "/" + ws[3].Name)
|
||||
})
|
||||
|
||||
t.Run("NotFound", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Run("OwnerSearchByName", func(t *testing.T) {
|
||||
// When: owner specifies a search query
|
||||
inv, root := clitest.New(t, "schedule", "show", "--search", "name:"+ws[1].Name)
|
||||
//nolint:gocritic // Testing that owner user sees all
|
||||
clitest.SetupConfig(t, ownerClient, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
require.NoError(t, inv.Run())
|
||||
|
||||
var (
|
||||
client = coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user = coderdtest.CreateFirstUser(t, client)
|
||||
version = coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
_ = coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
)
|
||||
|
||||
inv, root := clitest.New(t, "schedule", "show", "doesnotexist")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
err := inv.Run()
|
||||
require.ErrorContains(t, err, "status code 404", "unexpected error")
|
||||
// Then: they should see workspaces matching that query
|
||||
// 2nd workspace: b-owner-ws2 has only autostart enabled.
|
||||
pty.ExpectMatch(ws[1].OwnerName + "/" + ws[1].Name)
|
||||
pty.ExpectMatch(sched.Humanize())
|
||||
pty.ExpectMatch(sched.Next(now).In(loc).Format(time.RFC3339))
|
||||
})
|
||||
}
|
||||
|
||||
func TestScheduleStart(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Run("OwnerOneArg", func(t *testing.T) {
|
||||
// When: owner asks for a specific workspace by name
|
||||
inv, root := clitest.New(t, "schedule", "show", ws[2].OwnerName+"/"+ws[2].Name)
|
||||
//nolint:gocritic // Testing that owner user sees all
|
||||
clitest.SetupConfig(t, ownerClient, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
require.NoError(t, inv.Run())
|
||||
|
||||
var (
|
||||
ctx = context.Background()
|
||||
client = coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user = coderdtest.CreateFirstUser(t, client)
|
||||
version = coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
_ = coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
project = coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
workspace = coderdtest.CreateWorkspace(t, client, user.OrganizationID, project.ID, func(cwr *codersdk.CreateWorkspaceRequest) {
|
||||
cwr.AutostartSchedule = nil
|
||||
// Then: they should see that workspace
|
||||
// 3rd workspace: c-member-ws3 has only autostop enabled.
|
||||
pty.ExpectMatch(ws[2].OwnerName + "/" + ws[2].Name)
|
||||
pty.ExpectMatch("8h")
|
||||
pty.ExpectMatch(ws[2].LatestBuild.Deadline.Time.In(loc).Format(time.RFC3339))
|
||||
})
|
||||
|
||||
t.Run("MemberNoArgs", func(t *testing.T) {
|
||||
// When: a member specifies no args
|
||||
inv, root := clitest.New(t, "schedule", "show")
|
||||
clitest.SetupConfig(t, memberClient, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
require.NoError(t, inv.Run())
|
||||
|
||||
// Then: they should see their own workspaces
|
||||
// 1st workspace: c-member-ws3 has only autostop enabled.
|
||||
pty.ExpectMatch(ws[2].OwnerName + "/" + ws[2].Name)
|
||||
pty.ExpectMatch("8h")
|
||||
pty.ExpectMatch(ws[2].LatestBuild.Deadline.Time.In(loc).Format(time.RFC3339))
|
||||
// 2nd workspace: d-member-ws4 has neither autostart nor autostop enabled.
|
||||
pty.ExpectMatch(ws[3].OwnerName + "/" + ws[3].Name)
|
||||
})
|
||||
|
||||
t.Run("MemberAll", func(t *testing.T) {
|
||||
// When: a member lists all workspaces
|
||||
inv, root := clitest.New(t, "schedule", "show", "--all")
|
||||
clitest.SetupConfig(t, memberClient, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
errC := make(chan error)
|
||||
go func() {
|
||||
errC <- inv.WithContext(ctx).Run()
|
||||
}()
|
||||
require.NoError(t, <-errC)
|
||||
|
||||
// Then: they should only see their own
|
||||
// 1st workspace: c-member-ws3 has only autostop enabled.
|
||||
pty.ExpectMatch(ws[2].OwnerName + "/" + ws[2].Name)
|
||||
pty.ExpectMatch("8h")
|
||||
pty.ExpectMatch(ws[2].LatestBuild.Deadline.Time.In(loc).Format(time.RFC3339))
|
||||
// 2nd workspace: d-member-ws4 has neither autostart nor autostop enabled.
|
||||
pty.ExpectMatch(ws[3].OwnerName + "/" + ws[3].Name)
|
||||
})
|
||||
|
||||
t.Run("JSON", func(t *testing.T) {
|
||||
// When: owner lists all workspaces in JSON format
|
||||
inv, root := clitest.New(t, "schedule", "show", "--all", "--output", "json")
|
||||
var buf bytes.Buffer
|
||||
inv.Stdout = &buf
|
||||
clitest.SetupConfig(t, ownerClient, root)
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
errC := make(chan error)
|
||||
go func() {
|
||||
errC <- inv.WithContext(ctx).Run()
|
||||
}()
|
||||
assert.NoError(t, <-errC)
|
||||
|
||||
// Then: they should see all workspace schedules in JSON format
|
||||
var parsed []map[string]string
|
||||
require.NoError(t, json.Unmarshal(buf.Bytes(), &parsed))
|
||||
require.Len(t, parsed, 4)
|
||||
// Ensure same order as in CLI output
|
||||
sort.Slice(parsed, func(i, j int) bool {
|
||||
a := parsed[i]["workspace"]
|
||||
b := parsed[j]["workspace"]
|
||||
return a < b
|
||||
})
|
||||
_ = coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
tz = "Europe/Dublin"
|
||||
sched = "CRON_TZ=Europe/Dublin 30 9 * * Mon-Fri"
|
||||
stdoutBuf = &bytes.Buffer{}
|
||||
)
|
||||
|
||||
// Set a well-specified autostart schedule
|
||||
inv, root := clitest.New(t, "schedule", "start", workspace.Name, "9:30AM", "Mon-Fri", tz)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
inv.Stdout = stdoutBuf
|
||||
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err, "unexpected error")
|
||||
lines := strings.Split(strings.TrimSpace(stdoutBuf.String()), "\n")
|
||||
if assert.Len(t, lines, 4) {
|
||||
assert.Contains(t, lines[0], "Starts at 9:30AM Mon-Fri (Europe/Dublin)")
|
||||
assert.Contains(t, lines[1], "Starts next 9:30AM")
|
||||
// it should have either IST or GMT
|
||||
if !strings.Contains(lines[1], "IST") && !strings.Contains(lines[1], "GMT") {
|
||||
t.Error("expected either IST or GMT")
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure autostart schedule updated
|
||||
updated, err := client.Workspace(ctx, workspace.ID)
|
||||
require.NoError(t, err, "fetch updated workspace")
|
||||
require.Equal(t, sched, *updated.AutostartSchedule, "expected autostart schedule to be set")
|
||||
|
||||
// Reset stdout
|
||||
stdoutBuf = &bytes.Buffer{}
|
||||
|
||||
// unset schedule
|
||||
inv, root = clitest.New(t, "schedule", "start", workspace.Name, "manual")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
inv.Stdout = stdoutBuf
|
||||
|
||||
err = inv.Run()
|
||||
assert.NoError(t, err, "unexpected error")
|
||||
lines = strings.Split(strings.TrimSpace(stdoutBuf.String()), "\n")
|
||||
if assert.Len(t, lines, 4) {
|
||||
assert.Contains(t, lines[0], "Starts at manual")
|
||||
assert.Contains(t, lines[1], "Starts next -")
|
||||
}
|
||||
}
|
||||
|
||||
func TestScheduleStop(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
client = coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user = coderdtest.CreateFirstUser(t, client)
|
||||
version = coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
_ = coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
project = coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
ttl = 8*time.Hour + 30*time.Minute
|
||||
workspace = coderdtest.CreateWorkspace(t, client, user.OrganizationID, project.ID)
|
||||
_ = coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
stdoutBuf = &bytes.Buffer{}
|
||||
)
|
||||
|
||||
// Set the workspace TTL
|
||||
inv, root := clitest.New(t, "schedule", "stop", workspace.Name, ttl.String())
|
||||
clitest.SetupConfig(t, client, root)
|
||||
inv.Stdout = stdoutBuf
|
||||
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err, "unexpected error")
|
||||
lines := strings.Split(strings.TrimSpace(stdoutBuf.String()), "\n")
|
||||
if assert.Len(t, lines, 4) {
|
||||
assert.Contains(t, lines[2], "Stops at 8h30m after start")
|
||||
// Should not be manual
|
||||
assert.NotContains(t, lines[3], "Stops next -")
|
||||
}
|
||||
|
||||
// Reset stdout
|
||||
stdoutBuf = &bytes.Buffer{}
|
||||
|
||||
// Unset the workspace TTL
|
||||
inv, root = clitest.New(t, "schedule", "stop", workspace.Name, "manual")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
inv.Stdout = stdoutBuf
|
||||
|
||||
err = inv.Run()
|
||||
assert.NoError(t, err, "unexpected error")
|
||||
lines = strings.Split(strings.TrimSpace(stdoutBuf.String()), "\n")
|
||||
if assert.Len(t, lines, 4) {
|
||||
assert.Contains(t, lines[2], "Stops at manual")
|
||||
// Deadline of a running workspace is not updated.
|
||||
assert.NotContains(t, lines[3], "Stops next -")
|
||||
}
|
||||
}
|
||||
|
||||
func TestScheduleOverride(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Given: we have a workspace
|
||||
var (
|
||||
err error
|
||||
ctx = context.Background()
|
||||
client = coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user = coderdtest.CreateFirstUser(t, client)
|
||||
version = coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
_ = coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
project = coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
workspace = coderdtest.CreateWorkspace(t, client, user.OrganizationID, project.ID)
|
||||
cmdArgs = []string{"schedule", "override-stop", workspace.Name, "10h"}
|
||||
stdoutBuf = &bytes.Buffer{}
|
||||
)
|
||||
|
||||
// Given: we wait for the workspace to be built
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
workspace, err = client.Workspace(ctx, workspace.ID)
|
||||
require.NoError(t, err)
|
||||
expectedDeadline := time.Now().Add(10 * time.Hour)
|
||||
|
||||
// Assert test invariant: workspace build has a deadline set equal to now plus ttl
|
||||
initDeadline := time.Now().Add(time.Duration(*workspace.TTLMillis) * time.Millisecond)
|
||||
require.WithinDuration(t, initDeadline, workspace.LatestBuild.Deadline.Time, time.Minute)
|
||||
|
||||
inv, root := clitest.New(t, cmdArgs...)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
inv.Stdout = stdoutBuf
|
||||
|
||||
// When: we execute `coder schedule override workspace <number without units>`
|
||||
err = inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Then: the deadline of the latest build is updated assuming the units are minutes
|
||||
updated, err := client.Workspace(ctx, workspace.ID)
|
||||
require.NoError(t, err)
|
||||
require.WithinDuration(t, expectedDeadline, updated.LatestBuild.Deadline.Time, time.Minute)
|
||||
})
|
||||
|
||||
t.Run("InvalidDuration", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Given: we have a workspace
|
||||
var (
|
||||
err error
|
||||
ctx = context.Background()
|
||||
client = coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user = coderdtest.CreateFirstUser(t, client)
|
||||
version = coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
_ = coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
project = coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
workspace = coderdtest.CreateWorkspace(t, client, user.OrganizationID, project.ID)
|
||||
cmdArgs = []string{"schedule", "override-stop", workspace.Name, "kwyjibo"}
|
||||
stdoutBuf = &bytes.Buffer{}
|
||||
)
|
||||
|
||||
// Given: we wait for the workspace to be built
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
workspace, err = client.Workspace(ctx, workspace.ID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert test invariant: workspace build has a deadline set equal to now plus ttl
|
||||
initDeadline := time.Now().Add(time.Duration(*workspace.TTLMillis) * time.Millisecond)
|
||||
require.WithinDuration(t, initDeadline, workspace.LatestBuild.Deadline.Time, time.Minute)
|
||||
|
||||
inv, root := clitest.New(t, cmdArgs...)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
inv.Stdout = stdoutBuf
|
||||
|
||||
// When: we execute `coder bump workspace <not a number>`
|
||||
err = inv.WithContext(ctx).Run()
|
||||
// Then: the command fails
|
||||
require.ErrorContains(t, err, "invalid duration")
|
||||
})
|
||||
|
||||
t.Run("NoDeadline", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Given: we have a workspace with no deadline set
|
||||
var (
|
||||
err error
|
||||
ctx = context.Background()
|
||||
client = coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user = coderdtest.CreateFirstUser(t, client)
|
||||
version = coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
_ = coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
template = coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
workspace = coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID, func(cwr *codersdk.CreateWorkspaceRequest) {
|
||||
cwr.TTLMillis = nil
|
||||
})
|
||||
cmdArgs = []string{"schedule", "override-stop", workspace.Name, "1h"}
|
||||
stdoutBuf = &bytes.Buffer{}
|
||||
)
|
||||
require.Zero(t, template.DefaultTTLMillis)
|
||||
require.Empty(t, template.AutostopRequirement.DaysOfWeek)
|
||||
require.EqualValues(t, 1, template.AutostopRequirement.Weeks)
|
||||
|
||||
// Unset the workspace TTL
|
||||
err = client.UpdateWorkspaceTTL(ctx, workspace.ID, codersdk.UpdateWorkspaceTTLRequest{TTLMillis: nil})
|
||||
require.NoError(t, err)
|
||||
workspace, err = client.Workspace(ctx, workspace.ID)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, workspace.TTLMillis)
|
||||
|
||||
// Given: we wait for the workspace to build
|
||||
coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)
|
||||
workspace, err = client.Workspace(ctx, workspace.ID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// NOTE(cian): need to stop and start the workspace as we do not update the deadline
|
||||
// see: https://github.com/coder/coder/issues/2224
|
||||
coderdtest.MustTransitionWorkspace(t, client, workspace.ID, database.WorkspaceTransitionStart, database.WorkspaceTransitionStop)
|
||||
coderdtest.MustTransitionWorkspace(t, client, workspace.ID, database.WorkspaceTransitionStop, database.WorkspaceTransitionStart)
|
||||
|
||||
// Assert test invariant: workspace has no TTL set
|
||||
require.Zero(t, workspace.LatestBuild.Deadline)
|
||||
require.NoError(t, err)
|
||||
|
||||
inv, root := clitest.New(t, cmdArgs...)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
inv.Stdout = stdoutBuf
|
||||
|
||||
// When: we execute `coder bump workspace``
|
||||
err = inv.WithContext(ctx).Run()
|
||||
require.Error(t, err)
|
||||
|
||||
// Then: nothing happens and the deadline remains unset
|
||||
updated, err := client.Workspace(ctx, workspace.ID)
|
||||
require.NoError(t, err)
|
||||
require.Zero(t, updated.LatestBuild.Deadline)
|
||||
// 1st workspace: a-owner-ws1 has both autostart and autostop enabled.
|
||||
assert.Equal(t, ws[0].OwnerName+"/"+ws[0].Name, parsed[0]["workspace"])
|
||||
assert.Equal(t, sched.Humanize(), parsed[0]["starts_at"])
|
||||
assert.Equal(t, sched.Next(now).In(loc).Format(time.RFC3339), parsed[0]["starts_next"])
|
||||
assert.Equal(t, "8h", parsed[0]["stops_after"])
|
||||
assert.Equal(t, ws[0].LatestBuild.Deadline.Time.In(loc).Format(time.RFC3339), parsed[0]["stops_next"])
|
||||
// 2nd workspace: b-owner-ws2 has only autostart enabled.
|
||||
assert.Equal(t, ws[1].OwnerName+"/"+ws[1].Name, parsed[1]["workspace"])
|
||||
assert.Equal(t, sched.Humanize(), parsed[1]["starts_at"])
|
||||
assert.Equal(t, sched.Next(now).In(loc).Format(time.RFC3339), parsed[1]["starts_next"])
|
||||
assert.Empty(t, parsed[1]["stops_after"])
|
||||
assert.Empty(t, parsed[1]["stops_next"])
|
||||
// 3rd workspace: c-member-ws3 has only autostop enabled.
|
||||
assert.Equal(t, ws[2].OwnerName+"/"+ws[2].Name, parsed[2]["workspace"])
|
||||
assert.Empty(t, parsed[2]["starts_at"])
|
||||
assert.Empty(t, parsed[2]["starts_next"])
|
||||
assert.Equal(t, "8h", parsed[2]["stops_after"])
|
||||
assert.Equal(t, ws[2].LatestBuild.Deadline.Time.In(loc).Format(time.RFC3339), parsed[2]["stops_next"])
|
||||
// 4th workspace: d-member-ws4 has neither autostart nor autostop enabled.
|
||||
assert.Equal(t, ws[3].OwnerName+"/"+ws[3].Name, parsed[3]["workspace"])
|
||||
assert.Empty(t, parsed[3]["starts_at"])
|
||||
assert.Empty(t, parsed[3]["starts_next"])
|
||||
assert.Empty(t, parsed[3]["stops_after"])
|
||||
})
|
||||
}
|
||||
|
||||
//nolint:paralleltest // t.Setenv
|
||||
func TestScheduleStartDefaults(t *testing.T) {
|
||||
t.Setenv("TZ", "Pacific/Tongatapu")
|
||||
var (
|
||||
client = coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user = coderdtest.CreateFirstUser(t, client)
|
||||
version = coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
_ = coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
|
||||
project = coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
workspace = coderdtest.CreateWorkspace(t, client, user.OrganizationID, project.ID, func(cwr *codersdk.CreateWorkspaceRequest) {
|
||||
cwr.AutostartSchedule = nil
|
||||
})
|
||||
stdoutBuf = &bytes.Buffer{}
|
||||
func TestScheduleModify(t *testing.T) {
|
||||
// Given
|
||||
// Set timezone to Asia/Kolkata to surface any timezone-related bugs.
|
||||
t.Setenv("TZ", "Asia/Kolkata")
|
||||
loc, err := tz.TimezoneIANA()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "Asia/Kolkata", loc.String())
|
||||
sched, err := cron.Weekly("CRON_TZ=Europe/Dublin 30 7 * * Mon-Fri")
|
||||
require.NoError(t, err, "invalid schedule")
|
||||
ownerClient, _, _, ws := setupTestSchedule(t, sched)
|
||||
now := time.Now()
|
||||
|
||||
t.Run("SetStart", func(t *testing.T) {
|
||||
// When: we set the start schedule
|
||||
inv, root := clitest.New(t,
|
||||
"schedule", "start", ws[3].OwnerName+"/"+ws[3].Name, "7:30AM", "Mon-Fri", "Europe/Dublin",
|
||||
)
|
||||
//nolint:gocritic // this workspace is not owned by the same user
|
||||
clitest.SetupConfig(t, ownerClient, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
require.NoError(t, inv.Run())
|
||||
|
||||
// Then: the updated schedule should be shown
|
||||
pty.ExpectMatch(ws[3].OwnerName + "/" + ws[3].Name)
|
||||
pty.ExpectMatch(sched.Humanize())
|
||||
pty.ExpectMatch(sched.Next(now).In(loc).Format(time.RFC3339))
|
||||
})
|
||||
|
||||
t.Run("SetStop", func(t *testing.T) {
|
||||
// When: we set the stop schedule
|
||||
inv, root := clitest.New(t,
|
||||
"schedule", "stop", ws[2].OwnerName+"/"+ws[2].Name, "8h30m",
|
||||
)
|
||||
//nolint:gocritic // this workspace is not owned by the same user
|
||||
clitest.SetupConfig(t, ownerClient, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
require.NoError(t, inv.Run())
|
||||
|
||||
// Then: the updated schedule should be shown
|
||||
pty.ExpectMatch(ws[2].OwnerName + "/" + ws[2].Name)
|
||||
pty.ExpectMatch("8h30m")
|
||||
pty.ExpectMatch(ws[2].LatestBuild.Deadline.Time.In(loc).Format(time.RFC3339))
|
||||
})
|
||||
|
||||
t.Run("UnsetStart", func(t *testing.T) {
|
||||
// When: we unset the start schedule
|
||||
inv, root := clitest.New(t,
|
||||
"schedule", "start", ws[1].OwnerName+"/"+ws[1].Name, "manual",
|
||||
)
|
||||
//nolint:gocritic // this workspace is owned by owner
|
||||
clitest.SetupConfig(t, ownerClient, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
require.NoError(t, inv.Run())
|
||||
|
||||
// Then: the updated schedule should be shown
|
||||
pty.ExpectMatch(ws[1].OwnerName + "/" + ws[1].Name)
|
||||
})
|
||||
|
||||
t.Run("UnsetStop", func(t *testing.T) {
|
||||
// When: we unset the stop schedule
|
||||
inv, root := clitest.New(t,
|
||||
"schedule", "stop", ws[0].OwnerName+"/"+ws[0].Name, "manual",
|
||||
)
|
||||
//nolint:gocritic // this workspace is owned by owner
|
||||
clitest.SetupConfig(t, ownerClient, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
require.NoError(t, inv.Run())
|
||||
|
||||
// Then: the updated schedule should be shown
|
||||
pty.ExpectMatch(ws[0].OwnerName + "/" + ws[0].Name)
|
||||
})
|
||||
}
|
||||
|
||||
//nolint:paralleltest // t.Setenv
|
||||
func TestScheduleOverride(t *testing.T) {
|
||||
// Given
|
||||
// Set timezone to Asia/Kolkata to surface any timezone-related bugs.
|
||||
t.Setenv("TZ", "Asia/Kolkata")
|
||||
loc, err := tz.TimezoneIANA()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "Asia/Kolkata", loc.String())
|
||||
sched, err := cron.Weekly("CRON_TZ=Europe/Dublin 30 7 * * Mon-Fri")
|
||||
require.NoError(t, err, "invalid schedule")
|
||||
ownerClient, _, _, ws := setupTestSchedule(t, sched)
|
||||
now := time.Now()
|
||||
// To avoid the likelihood of time-related flakes, only matching up to the hour.
|
||||
expectedDeadline := time.Now().In(loc).Add(10 * time.Hour).Format("2006-01-02T15:")
|
||||
|
||||
// When: we override the stop schedule
|
||||
inv, root := clitest.New(t,
|
||||
"schedule", "override-stop", ws[0].OwnerName+"/"+ws[0].Name, "10h",
|
||||
)
|
||||
|
||||
// Set an underspecified schedule
|
||||
inv, root := clitest.New(t, "schedule", "start", workspace.Name, "9:30AM")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
inv.Stdout = stdoutBuf
|
||||
err := inv.Run()
|
||||
require.NoError(t, err, "unexpected error")
|
||||
lines := strings.Split(strings.TrimSpace(stdoutBuf.String()), "\n")
|
||||
if assert.Len(t, lines, 4) {
|
||||
assert.Contains(t, lines[0], "Starts at 9:30AM daily (Pacific/Tongatapu)")
|
||||
assert.Contains(t, lines[1], "Starts next 9:30AM +13 on")
|
||||
assert.Contains(t, lines[2], "Stops at 8h after start")
|
||||
}
|
||||
clitest.SetupConfig(t, ownerClient, root)
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
require.NoError(t, inv.Run())
|
||||
|
||||
// Then: the updated schedule should be shown
|
||||
pty.ExpectMatch(ws[0].OwnerName + "/" + ws[0].Name)
|
||||
pty.ExpectMatch(sched.Humanize())
|
||||
pty.ExpectMatch(sched.Next(now).In(loc).Format(time.RFC3339))
|
||||
pty.ExpectMatch("8h")
|
||||
pty.ExpectMatch(expectedDeadline)
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff. Show More
Reference in New Issue
Block a user