Compare commits
1179 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 3ab1f6845c | |||
| ad513fa8b9 | |||
| 7ad944da5f | |||
| 8ee500c59d | |||
| 72780c8031 | |||
| 1314cd8fcb | |||
| 10da570916 | |||
| b47bd7ccb5 | |||
| d3991fac26 | |||
| 2ed453035e | |||
| 7493b28155 | |||
| e68d508469 | |||
| b6018198b7 | |||
| f7327dd2c6 | |||
| 4b81cfaa0d | |||
| cf35c0dfc5 | |||
| 44f9b0228a | |||
| 00cbb211b4 | |||
| eddd4f8888 | |||
| 4c1e63aae8 | |||
| 6ea32e4e80 | |||
| aa7b267170 | |||
| e482d39052 | |||
| cd1e088f7c | |||
| d6e9870209 | |||
| d2c7c8e1d8 | |||
| 57c7d7d4d2 | |||
| 496ec6cfc5 | |||
| 75fcc24b60 | |||
| b955c5fefc | |||
| f48e8dcf88 | |||
| 1472cce6c3 | |||
| 6b69970d7c | |||
| 805eaca4b7 | |||
| 9b6750a4db | |||
| 4456d0bfea | |||
| 924bccbfcf | |||
| 66649f97a8 | |||
| c575292ba6 | |||
| 929f2d585a | |||
| b7eafe1635 | |||
| 437256d352 | |||
| e23e5262b4 | |||
| b104bb7005 | |||
| dcc63aec90 | |||
| 27c2fb8a56 | |||
| 6a12912d6e | |||
| 336e663180 | |||
| a96c4a3f29 | |||
| 8d0e8f45e0 | |||
| 280fc9d5ef | |||
| b84bba8fd1 | |||
| cb36783e8c | |||
| ed82b864d0 | |||
| af036b440b | |||
| ddabe9cc7f | |||
| 4cc270b12b | |||
| 98d9a65cf5 | |||
| 0b4f333a6f | |||
| a75346dd32 | |||
| bd944e0d21 | |||
| 78b06397a6 | |||
| ea2ae10bc3 | |||
| e029df61ff | |||
| e8f82538e8 | |||
| 91a0c7abe0 | |||
| 471dc6126c | |||
| 3114a60e5c | |||
| c236a29307 | |||
| 4d36b859d2 | |||
| 5c9167df36 | |||
| ca710e52b6 | |||
| de60bff88e | |||
| edd9628aa6 | |||
| ff794e4b23 | |||
| 34dfbfa9d3 | |||
| 975424b3a2 | |||
| 92798e957f | |||
| f6c36d903e | |||
| 3761205bae | |||
| e85b88ca90 | |||
| f54d385266 | |||
| bc55ffdf0d | |||
| 32829080ac | |||
| c3aface285 | |||
| 74c4553a3f | |||
| 7b08be8000 | |||
| 2811ab62d0 | |||
| 5cfa7082d1 | |||
| 6af1f6a9d9 | |||
| dd2f79995b | |||
| 02550a9062 | |||
| 131babfb00 | |||
| 25e30c6f41 | |||
| 4e9e480cc6 | |||
| f28bac1b05 | |||
| 6b92abebb9 | |||
| b5dec61cd5 | |||
| 9ffbdc6696 | |||
| 2f0a9996e7 | |||
| 70692c2e4e | |||
| 2089006fbc | |||
| 8649a10441 | |||
| da7e1eb75d | |||
| 0b5a01aa1b | |||
| 26264bd3ef | |||
| 2c7d8ee06a | |||
| f07d2ff68d | |||
| 0feb855b8f | |||
| 733ae5038a | |||
| 86d4c4ff0e | |||
| 844ef76d39 | |||
| 67f1123808 | |||
| ee652d525d | |||
| 7447bfa320 | |||
| dfb1a1df3a | |||
| 2a01747804 | |||
| 30e16052d6 | |||
| de1a7a9210 | |||
| fbb2a6a434 | |||
| 7004013537 | |||
| f6f61dfa26 | |||
| b8e792bf8a | |||
| 5eb825155e | |||
| a0f478c02f | |||
| 8686b7a499 | |||
| 29963433ee | |||
| 353bfb618c | |||
| adbabe4e09 | |||
| ac559f101e | |||
| ac973a4b2c | |||
| c32085ca10 | |||
| 6929792a58 | |||
| b7ced94f0f | |||
| f827829afe | |||
| 94541d201f | |||
| 75265ecf68 | |||
| 0f915e1e75 | |||
| 625eef93c5 | |||
| c136fa9e52 | |||
| d54bc60842 | |||
| 757ea68d4a | |||
| 87f07b9f2f | |||
| 196b29983b | |||
| c3b8898f0a | |||
| c9ade6f6c5 | |||
| d56bf52141 | |||
| 9833cd32b3 | |||
| 6855735085 | |||
| 43cc5449a1 | |||
| 30e86e0058 | |||
| e7330436c9 | |||
| ec880f26b4 | |||
| a28883d9f0 | |||
| 1cadfa4e4f | |||
| 9a7729d60f | |||
| 82310896ad | |||
| aae7175a83 | |||
| e056d0243a | |||
| a9036ed02b | |||
| b7db4ee69f | |||
| e0c1aacac1 | |||
| 65583eca47 | |||
| 7f67000b90 | |||
| 30fe153296 | |||
| 539fcf9e6b | |||
| 001670cc13 | |||
| 1a915f5b30 | |||
| 12b51ca804 | |||
| c8772fba82 | |||
| fd372f6735 | |||
| 87d5cdaf58 | |||
| 2901147ae3 | |||
| b47d076756 | |||
| 0c73164f15 | |||
| 2d3d78b5b1 | |||
| cc659163ef | |||
| 830020df15 | |||
| 1cb39fc65d | |||
| 71d4e4e6e8 | |||
| c8d65de4b7 | |||
| deb9261232 | |||
| fd5d26d385 | |||
| 6de95f4f32 | |||
| dc8b73168e | |||
| 4821e2e6d8 | |||
| 68a46198d3 | |||
| 9689bca5d2 | |||
| c68e80970d | |||
| bd5eb9dc1b | |||
| b16901485e | |||
| 39889179ea | |||
| 80b940c556 | |||
| 00b9a3ce58 | |||
| aceedefce3 | |||
| cdbae29a83 | |||
| d96a6575af | |||
| 16cd1a675c | |||
| 0e52461624 | |||
| f8a2bd0f4d | |||
| b4bb77f2f3 | |||
| acc12d3426 | |||
| 164672e5d3 | |||
| 517fb19474 | |||
| cc8d0af027 | |||
| 0965a2de70 | |||
| 6318c4c09f | |||
| 4232a2eb96 | |||
| 164b816679 | |||
| 611fbd8215 | |||
| d12221c782 | |||
| 2fae9b0a69 | |||
| 98164f687e | |||
| 58265881af | |||
| 9e170a7332 | |||
| 278527cff4 | |||
| ab54008c94 | |||
| 7d92537af5 | |||
| b012b4a810 | |||
| d467160581 | |||
| 6b978bef71 | |||
| fd7c019fff | |||
| a823ce7337 | |||
| 0bf1b01a9d | |||
| 56889d6cd4 | |||
| 854643236a | |||
| bf5e0675fe | |||
| 0a56ea6916 | |||
| db8ba6cb68 | |||
| c83975cee0 | |||
| 2218160e35 | |||
| 67494a3012 | |||
| dfac0745f3 | |||
| 616e1d7e9a | |||
| 3ea9699908 | |||
| dbc422f29e | |||
| ce114a7f9f | |||
| 7a62534359 | |||
| 7ed17b2605 | |||
| 970a829939 | |||
| 52b4563106 | |||
| 4c4d966c7b | |||
| 5fd77ad7cf | |||
| b7806fd216 | |||
| 9aae983821 | |||
| be1013899f | |||
| 132f1d0eb5 | |||
| dc52df940b | |||
| ecee3ceeec | |||
| c2acc35d45 | |||
| 47d5806e2c | |||
| 3b433181be | |||
| b650ab40f0 | |||
| 9249309d8e | |||
| b833861960 | |||
| 44e25185ff | |||
| ebdc510f12 | |||
| 24ec05b5c5 | |||
| 3727e02bbf | |||
| 5432c3f5ea | |||
| cdf9b9045f | |||
| 70047ffd15 | |||
| f21541c0e4 | |||
| 837f88d38e | |||
| d7cbdbd9c6 | |||
| c47b78c44b | |||
| 0a37dd20d6 | |||
| ecda87060a | |||
| 1bd188db66 | |||
| 79772f2f3f | |||
| 7fc5cce356 | |||
| b5c17048c6 | |||
| a0fedebd62 | |||
| d502de8ca8 | |||
| f8f4ae8e2c | |||
| 29e4205abf | |||
| ffc28e6b93 | |||
| 1c3bfacca3 | |||
| 14caa9b7c1 | |||
| b62798303c | |||
| d7eeb4118e | |||
| 49081e0383 | |||
| 861efe7b50 | |||
| 765fd29336 | |||
| 280112a366 | |||
| 19f58350f2 | |||
| bff73ade6a | |||
| 86f89892c8 | |||
| f6a8a5f7be | |||
| e9d7a230fa | |||
| 928091aa05 | |||
| 2c2dd0eb83 | |||
| 7a210d941e | |||
| b4057bd74a | |||
| 2e9f3e0a6b | |||
| 13b9ed3240 | |||
| 2c0f653aa8 | |||
| bc835dbf69 | |||
| d8d8eb2c8d | |||
| 21597e2d69 | |||
| dd4aafb350 | |||
| 398e8fdf89 | |||
| e508d9aa6e | |||
| de1d04d7bb | |||
| 704212e271 | |||
| 0a911d58f9 | |||
| f67ccc9bc0 | |||
| 700ec966e3 | |||
| bc102d6bd7 | |||
| 3f6a158016 | |||
| 75f62dc39d | |||
| b4a7fe3221 | |||
| 9df80530d7 | |||
| d896b74fa2 | |||
| b7641b219e | |||
| ff62f70f55 | |||
| 000d538ace | |||
| ecb99717ef | |||
| 3c2ce4f52a | |||
| ef836de330 | |||
| cee8fde93d | |||
| bf8af90d5a | |||
| 2ee406d7b6 | |||
| 90a3debe3f | |||
| 9a3d9053a0 | |||
| 19ec5789a4 | |||
| 9a72ddf7d4 | |||
| 9f2a931eb8 | |||
| 8f4157c28d | |||
| b73f9d8e86 | |||
| eabf929676 | |||
| 0115adf9d7 | |||
| d19e679c20 | |||
| a297a014d6 | |||
| c1ab5cf0ba | |||
| f75d497c41 | |||
| 396e5e9a60 | |||
| 2baa34364a | |||
| e088303382 | |||
| 4e42c33769 | |||
| 34c3f919dc | |||
| 259453007e | |||
| 64687631aa | |||
| 435c67ab75 | |||
| ccea595b39 | |||
| 6079cda851 | |||
| 9a0ba1bdc3 | |||
| 2ebd0ec6c5 | |||
| 9f5bc7c10b | |||
| 5bb6bc5571 | |||
| a33a255885 | |||
| 33bdc23e1d | |||
| 4a13c58077 | |||
| 814534d6b7 | |||
| 7fcf319e01 | |||
| 1d746b901b | |||
| 3f058f28e7 | |||
| c6fcd7ee93 | |||
| 45eca671fe | |||
| d843735c02 | |||
| c0835c443d | |||
| 418c9b8743 | |||
| 4a008a8f34 | |||
| d70e2d97de | |||
| 0f5a1ad480 | |||
| 9a7705c656 | |||
| 88c35d3f04 | |||
| 59246e054f | |||
| 65112ecc04 | |||
| 647fd7699b | |||
| 865f49af35 | |||
| 2fc8a275fe | |||
| 82a6405bfa | |||
| 4a9c8f407a | |||
| 818c4a7f23 | |||
| 2af41a439b | |||
| 2899b9431d | |||
| c19541e28f | |||
| 681e2db9ec | |||
| 83bee35c36 | |||
| dad4876e90 | |||
| 6015319e9d | |||
| eb0497ff82 | |||
| f0bd258ff1 | |||
| 1e8cc2ca8d | |||
| a6bd85df38 | |||
| c569528fb7 | |||
| 9f76dab348 | |||
| 8b2d4b87fc | |||
| b5f26d9bdf | |||
| 357f3b38f7 | |||
| 6639c69fad | |||
| 72a48bee52 | |||
| 34467a3289 | |||
| 7f82eca64a | |||
| 7072b8eff5 | |||
| 83fee4b192 | |||
| 7ca624eceb | |||
| 749307ef08 | |||
| 72e83df578 | |||
| 3dbc96d597 | |||
| d3c39b60c9 | |||
| c0a01ec81c | |||
| df95cf7ab2 | |||
| b8a143566b | |||
| e2e07b01e9 | |||
| b4751c72d8 | |||
| 6d176aee5d | |||
| 25decc152a | |||
| 70cd87140d | |||
| 9cefd46bcf | |||
| b83cdfe428 | |||
| 7943a5b85e | |||
| 5d26637686 | |||
| 96435ee2ce | |||
| c969da469d | |||
| b4ca285582 | |||
| 140683813d | |||
| 31076ad665 | |||
| 56395410bd | |||
| 82415a6541 | |||
| 80ef147060 | |||
| 1558ef52f1 | |||
| 98457e993b | |||
| f40865bc2f | |||
| 4539ce7db6 | |||
| b8437ce453 | |||
| cfbe96a379 | |||
| b4f14cc6a9 | |||
| 0a6e6442b8 | |||
| bdecf53413 | |||
| e27cfe6e6a | |||
| 98a5ae7f48 | |||
| 3671846b1b | |||
| 46df6c58bc | |||
| 5d48122f12 | |||
| ba9d038d42 | |||
| c594f02948 | |||
| 797e91d4c6 | |||
| 5d45218a5d | |||
| 3b9b06fe5a | |||
| f56db1b41b | |||
| 97945ae272 | |||
| 41e7216df7 | |||
| 8e2422d42c | |||
| 1417c12229 | |||
| 2c046ef475 | |||
| f4a712b271 | |||
| ea4b9a4354 | |||
| eee4f835ec | |||
| 1ffc8058c2 | |||
| e856491476 | |||
| e738123a9c | |||
| 1b0124ecdb | |||
| d434181941 | |||
| 2a492b7008 | |||
| a28d422c35 | |||
| 96f9e61ca1 | |||
| 24b95e16c4 | |||
| bc739bdfce | |||
| 7703bb77a7 | |||
| a0d0e0e72d | |||
| 1faed639c4 | |||
| bce8a983fe | |||
| cc17d2feea | |||
| 4fb4c9b270 | |||
| 1c8f564fdb | |||
| 4a431b6eb8 | |||
| 15cba05dfc | |||
| b398814ab0 | |||
| 7d43f4ceee | |||
| 69f911dfd5 | |||
| bbb0fab1de | |||
| f3b2009499 | |||
| 467b0a1004 | |||
| 823127e761 | |||
| 2db4488337 | |||
| ea4b7d60d7 | |||
| adf14f1917 | |||
| a47a9b1cfe | |||
| b8ba287128 | |||
| c3781d95b4 | |||
| eb0783ff24 | |||
| 0f754f0bb9 | |||
| 06a5e24f5b | |||
| 8dac0356ed | |||
| b1d1b63113 | |||
| d6f8bd7847 | |||
| c3aef9363b | |||
| 1ecc371ade | |||
| af45e64b1d | |||
| f444100aa6 | |||
| e083cbca6a | |||
| 020c6fb71e | |||
| 456ad85ce3 | |||
| 5b9c3784bc | |||
| 9df9ad4503 | |||
| 2a10c9127f | |||
| ca6b9e9368 | |||
| c8e67833f5 | |||
| 0c5077464b | |||
| 247f8a973f | |||
| 751c0505bf | |||
| 3ec2e96ff4 | |||
| 137b51e2c7 | |||
| e4d23ff5fe | |||
| f61001d049 | |||
| 33c0674d76 | |||
| 6e598234b6 | |||
| 3f81f38fcc | |||
| b1588fa596 | |||
| 977e9ef505 | |||
| 8ac68c95a4 | |||
| af06e80949 | |||
| 6c4c3d6ce5 | |||
| cbd49abfcd | |||
| 3619a3a6dd | |||
| e4b6f5695b | |||
| 2c843f4011 | |||
| df842b31e8 | |||
| 1da2570e14 | |||
| 7a7ee63225 | |||
| af50ef76f9 | |||
| 299260361a | |||
| 8c4b7c01e2 | |||
| 4f512fb230 | |||
| 7d54fd5e0d | |||
| a658c94a67 | |||
| c12c9f1f4e | |||
| 74fdcb1ace | |||
| 794a551176 | |||
| f2758fcb33 | |||
| 51226c55ab | |||
| 2e7e99b135 | |||
| 24953869a6 | |||
| 75078fafa2 | |||
| 9cf3c582cb | |||
| c916a9e67f | |||
| 9440b3da66 | |||
| 1d0fae83a2 | |||
| 518300a26c | |||
| aba5cb8377 | |||
| 31bd2b0bdc | |||
| ac605bad3d | |||
| 28f35393e7 | |||
| 685abfc6d7 | |||
| 4a0ac13bb7 | |||
| 6ac7d86c93 | |||
| b70d2b1767 | |||
| f13632cea8 | |||
| 4068f70d2b | |||
| aa9dbf2eb3 | |||
| 0d71314ae1 | |||
| a540e629e5 | |||
| 4bc4e63637 | |||
| 68ebf93fdb | |||
| 5de1084639 | |||
| ec7b117878 | |||
| 1ec463d74d | |||
| 634e146032 | |||
| 71c908974c | |||
| 2bbe650eb0 | |||
| 175561bf36 | |||
| 065206345e | |||
| 4f9d3155c9 | |||
| 16ebe10ff2 | |||
| 53969415ec | |||
| c60ea38691 | |||
| e4744686ec | |||
| 30a635aa5f | |||
| a4cc883be1 | |||
| fa8153a0fd | |||
| 5e647ba07a | |||
| 26f575e284 | |||
| 976d23e3dc | |||
| 03034dc508 | |||
| fa91e11105 | |||
| a1c32954d9 | |||
| 94aa9be33a | |||
| b2324325fa | |||
| 479467473e | |||
| efbb55803b | |||
| 9ec1fcf1a7 | |||
| fcca639d38 | |||
| f6db95e71c | |||
| 52ead3d933 | |||
| f0c5201617 | |||
| 125e9ef00e | |||
| fbdbc8a6c5 | |||
| 5be2b77830 | |||
| df82f29945 | |||
| 2a5025c84c | |||
| eb11b50bd1 | |||
| d793564937 | |||
| ab3a649520 | |||
| 1376ba3724 | |||
| 74ffd2756a | |||
| 91dd3fbfab | |||
| a77b48a5e3 | |||
| 4f29f9abe3 | |||
| 8c4b6b02e7 | |||
| 5eaf809851 | |||
| 1288a83e42 | |||
| 0665a6c2f2 | |||
| 4954052ff6 | |||
| 003120882f | |||
| 72f59950f2 | |||
| 944c42dcb6 | |||
| 3121ccc7ff | |||
| 6d3dc30207 | |||
| baa9922a82 | |||
| ee45b3df77 | |||
| 660bbb8d38 | |||
| 93378daeb3 | |||
| 2ad1308450 | |||
| 2be7ef88e8 | |||
| 7332acc935 | |||
| 8e655b42b1 | |||
| fa8f50a169 | |||
| b9e3226612 | |||
| 7ec16cf779 | |||
| 0413ed0178 | |||
| a2e129083e | |||
| 3a7c92a56f | |||
| b79bfb2874 | |||
| 1ab2450250 | |||
| 5a3d6b589a | |||
| 466c530525 | |||
| 3fa7ee969d | |||
| 82c709b1ba | |||
| cb42396596 | |||
| 88dc66ae47 | |||
| 6fde86d1ac | |||
| cdba0744a2 | |||
| bbecff28ae | |||
| e016c307ae | |||
| f14f011abf | |||
| 53e5625292 | |||
| 4d5f723740 | |||
| 2bf3cae3cb | |||
| e10644f86a | |||
| e2613c91a7 | |||
| 1f8e817da7 | |||
| 38d0f6fb74 | |||
| b62e22766f | |||
| b26dd4b802 | |||
| 14a63d7b4a | |||
| c6cd042e80 | |||
| a4424205c7 | |||
| 887bbea72a | |||
| f37e007b8e | |||
| ea03c7276b | |||
| 6715cb6a95 | |||
| edccd2de73 | |||
| 97fb5020cf | |||
| d082ab59b4 | |||
| 2631a8436c | |||
| 6d8caf6f14 | |||
| 902265bdd7 | |||
| 6fdebd8a07 | |||
| 8f736fe5f5 | |||
| 9f810b1964 | |||
| 078926f612 | |||
| 004ad1755f | |||
| 10036ab5fb | |||
| a7366a8b76 | |||
| 2b63492649 | |||
| 9b8e5c2d8a | |||
| 9dbbe82cf8 | |||
| 9704945428 | |||
| 613f90cf2b | |||
| 083449c438 | |||
| 49ae49095c | |||
| 4acf36b84a | |||
| 62b2560bd0 | |||
| 1a615778e6 | |||
| 0bed8b57bc | |||
| 33f1e468cb | |||
| 93f4a5008e | |||
| 0f439d5374 | |||
| ba3e6cea51 | |||
| 332362cf4b | |||
| 4de4e8ee21 | |||
| 80b60e158d | |||
| e80e88e019 | |||
| 1952c042e1 | |||
| b32ed2d97e | |||
| cf8d2bc096 | |||
| 555ea64669 | |||
| 80269ba1b9 | |||
| a46e8f2e49 | |||
| 9810339fd6 | |||
| 36f4e0dc35 | |||
| f1d27ba42d | |||
| 828f33ac7b | |||
| b80756e4f5 | |||
| 784696dfa5 | |||
| 00a30775bc | |||
| b34894977d | |||
| be2067bb0d | |||
| ce6b698048 | |||
| 7c3dbbbe93 | |||
| 583b777251 | |||
| ec3ef167d2 | |||
| ae82a5985d | |||
| 43eee35ae8 | |||
| 867996aa18 | |||
| 43e0968dac | |||
| 73d795f426 | |||
| 022372dd73 | |||
| 77b0ca0b53 | |||
| c795a0e500 | |||
| 68658b5197 | |||
| 2aa48d4143 | |||
| 115f80737a | |||
| c2d2751897 | |||
| e8eb28adb4 | |||
| 702c9081e0 | |||
| 4eb0baa849 | |||
| 61dc875454 | |||
| 66f5d39ff5 | |||
| 05efd64320 | |||
| 795050bba3 | |||
| 0fd2ea4044 | |||
| e5add7292f | |||
| 79ab72fbb1 | |||
| 4d4ffa2d99 | |||
| 0c665235ff | |||
| 5d2b2c577b | |||
| d9299caa12 | |||
| 96a2e63809 | |||
| 14efdadd3c | |||
| 05da1e94a2 | |||
| 67cc196c92 | |||
| 6a1e7ee1d0 | |||
| a903d7c249 | |||
| dd69cc9148 | |||
| 5eb41e8794 | |||
| d413b261d8 | |||
| ec117e841a | |||
| b6604e8845 | |||
| 71c52ea93c | |||
| b6c8e5be48 | |||
| f47ecc29f0 | |||
| 0c074742a5 | |||
| f72d8e95a4 | |||
| 02cc325047 | |||
| 8f13c145c4 | |||
| c8ea76c7fa | |||
| ba02dc8f3a | |||
| d203f5259d | |||
| cd416c86dd | |||
| 622456faf8 | |||
| 4a320617b4 | |||
| 5d711fc95a | |||
| 1f4f0efed6 | |||
| 808e83f87d | |||
| 7d1ca7c12f | |||
| 9c923d71ca | |||
| b8c07ff014 | |||
| f9a97c25dc | |||
| b38fb90e06 | |||
| 1bd3ed9ecd | |||
| 51130540af | |||
| ef2411ef17 | |||
| a29c4c543d | |||
| e375169ac0 | |||
| ef635f12ea | |||
| dd5b0b2721 | |||
| 0b15b1bcd1 | |||
| 8e31ed4072 | |||
| 63a9e34381 | |||
| ec5ef51b49 | |||
| 00a2413c03 | |||
| 52bb84a26f | |||
| 12f87cb98d | |||
| a7f14f89e3 | |||
| 119098a1eb | |||
| 6e7175b589 | |||
| d1b1122690 | |||
| bfa136276c | |||
| ce91009d6b | |||
| e951778147 | |||
| 75b0643623 | |||
| e3b324a1a1 | |||
| 04f68e8f15 | |||
| ad9a29dfa6 | |||
| dca77ba487 | |||
| 97b4743a47 | |||
| c6055c64be | |||
| a1853f2fa2 | |||
| 4997b86548 | |||
| 78f862eedc | |||
| 11b7732720 | |||
| d9fc94f9a9 | |||
| 0b156420de | |||
| 424f954b91 | |||
| 2aea5b76aa | |||
| 6f62204d38 | |||
| 049e557675 | |||
| 224d25d4e1 | |||
| 854e974bb4 | |||
| dab1d1fe20 | |||
| 87784493e8 | |||
| a08e8a16ab | |||
| 70d2203b9e | |||
| 9bb0253290 | |||
| 26490aecca | |||
| 778cb7494a | |||
| 8e1da5e628 | |||
| f05f12231d | |||
| 0f4717e471 | |||
| 50f2d0c7e9 | |||
| fcde77b35f | |||
| 6bb4a4c3c5 | |||
| 8d4a8fde66 | |||
| cd38cb8290 | |||
| 8f768f8276 | |||
| 640fcf450c | |||
| 0e6361383c | |||
| ae3473dc1b | |||
| fe0e94ece9 | |||
| 892bddfb7b | |||
| b7f4f3a771 | |||
| a42a36a474 | |||
| 816c37dd0d | |||
| fb86ac2608 | |||
| 3f9af6f5e7 | |||
| d17ea84b4a | |||
| d35a458767 | |||
| b5ad628460 | |||
| fc1bc374cb | |||
| 2d62cbc83a | |||
| 503e94a5c2 | |||
| 0755ff32ef | |||
| 1aac8200fa | |||
| 3931a6c7bc | |||
| c3e60dcdfb | |||
| 8f9a3ea1a7 | |||
| 8899dd89ca | |||
| 643a9efea9 | |||
| 52d2bc930b | |||
| 07608fc3fb | |||
| 2624ee8f12 | |||
| ec11405208 | |||
| dc3d39baf8 | |||
| 5ffa6dae50 | |||
| 3632ac8c01 | |||
| 7f02fa696a | |||
| 08fb9a6f1b | |||
| a172e073e3 | |||
| a6a44896bf | |||
| 667d9a7557 | |||
| 164146c462 | |||
| 6d24f7c894 | |||
| 8909110f58 | |||
| 614bdfbf3c | |||
| 11ac3be4d8 | |||
| 9908c84b2a | |||
| b4d913e24f | |||
| b3689c8f64 | |||
| 4e52112112 | |||
| 8bd9f9c351 | |||
| 3368b8b65f | |||
| 2e9310b203 | |||
| 5961cf900d | |||
| 2ea438cf4f | |||
| 434c4be9f1 | |||
| 90c57a538c | |||
| 9c030a8888 | |||
| e6931d6920 | |||
| dd67283323 | |||
| 730039f35f | |||
| 75ad72de56 | |||
| bd630113b2 | |||
| 0e78d0a502 | |||
| 6dfce5a2c9 | |||
| f5ce911b8d | |||
| a1db82582f | |||
| 465fe8658d | |||
| 398d08a0cf | |||
| 41726a785e | |||
| 140637448c | |||
| 3b15234660 | |||
| 4b9621f9ae | |||
| 55824986bc | |||
| 04f9ca824f | |||
| 6030847c94 | |||
| 4dec828c88 | |||
| 38fd4c0820 | |||
| 0bf00d6122 | |||
| a3f3d7e682 | |||
| 97c8bb5c1d | |||
| 079d2821f5 | |||
| d3a9d7c497 | |||
| 4a9d1c16c7 | |||
| c00f5e499a | |||
| a2ff674158 | |||
| 3078cd3d98 | |||
| 8d1f163cae | |||
| 88c362dfdc | |||
| e747aad2b6 | |||
| 59efa4a528 | |||
| 77d9937dc4 | |||
| ed1b33acd6 | |||
| fe323a159e | |||
| bb0a38b161 | |||
| b6666cf1cf | |||
| 7aa07cfc8d | |||
| f1763f2aa5 | |||
| 87b7537878 | |||
| c3fe2515a7 | |||
| 3eb7f06bf1 | |||
| 272573e9f0 | |||
| 0e469031b8 | |||
| 218d6a92d4 | |||
| 29cbc5404a | |||
| f1dfeb03db | |||
| 35b3ed255c | |||
| 1134e78b7b | |||
| 9afad8241b | |||
| b62b6af0eb | |||
| a98341612c | |||
| 6e8ff2d95c | |||
| e2d8bda246 | |||
| 8fc8559076 | |||
| ad82a60806 | |||
| 96a12d17ef | |||
| 3129741e08 | |||
| 63e68c11d1 | |||
| daee91c6dc | |||
| c000f2ec28 | |||
| ab077d1f15 | |||
| 712098fa2b | |||
| 501dfeedf7 | |||
| 1fc32b1c3d | |||
| fd84df769d | |||
| f9da2631e9 | |||
| f39e6a79de | |||
| 6f06f8dadb | |||
| ea78ca5dff | |||
| 3680e158d9 | |||
| 4353ad7940 | |||
| 68667323f3 | |||
| ac3c530283 | |||
| 745868fd8a | |||
| 300ae4a6bf | |||
| 38a6d546ab | |||
| 34c6ad671c | |||
| ad0070354f | |||
| d8eda97dbe | |||
| a86830a283 | |||
| e2a7448cc8 | |||
| ea27129348 | |||
| e90a076fad | |||
| 7fa1112958 | |||
| 9abfe97dcc | |||
| a5a5c4d400 | |||
| 8926c10b7d | |||
| 7d9a7636e9 | |||
| 93d0956465 | |||
| 8d27978760 | |||
| 528a0686c0 | |||
| 5f5edb18b0 | |||
| f60b5579a7 | |||
| c2871e12aa | |||
| fff2b1dc90 | |||
| 2b9d12828a | |||
| f94ac55f02 | |||
| fbf329fbb7 | |||
| 57c4de4647 | |||
| fa5387ce07 | |||
| 888eb238ec | |||
| 7f041fecd8 | |||
| b26826ee3f | |||
| 693e5d94bc | |||
| 56bf9cfdbe | |||
| b44e6e6711 | |||
| 658246d5f2 | |||
| dc5e16ae22 | |||
| 76b5deea78 | |||
| 80bf042528 | |||
| b3b26a62f2 | |||
| bb3b8bab78 | |||
| bf0fed4f3f | |||
| c6b2861493 | |||
| 25c1e45930 | |||
| 26cfc18b74 | |||
| 51841e9e11 | |||
| 53f521aaf9 | |||
| 4061a254a5 | |||
| 8b2aead7f4 | |||
| d639e6b898 | |||
| c39c0dcc7c | |||
| 285646bf24 | |||
| d3356fa48e | |||
| c37f22ba78 | |||
| 706326715d | |||
| 9afbd152fc | |||
| 2137db0445 | |||
| c87ec484ff | |||
| 7f12020229 | |||
| f36a4a0b07 | |||
| 0e50afe4cf | |||
| 9ec16d4454 | |||
| 7bbbb91df5 | |||
| 9c0f59e3e5 | |||
| 69797add49 | |||
| 0bbe1562f3 | |||
| bb43713d38 | |||
| 942aba3a66 | |||
| e0689456cb | |||
| dafb7663c5 | |||
| ad665cef91 | |||
| 17bb1a1ff1 | |||
| 085330ad96 | |||
| f5a8a27714 | |||
| 38e5b9679b | |||
| fa64c58e56 | |||
| 87fe16cde9 | |||
| 17f692a89a | |||
| f517724caa | |||
| 114d4aac23 | |||
| f6c89a2615 | |||
| d1d459cb79 | |||
| 264093a874 | |||
| af24aea04f | |||
| 770712e1f5 | |||
| 531fd23fd2 | |||
| a44070e2ec | |||
| 2585249014 | |||
| e1db64a1e8 | |||
| b86ed11bc0 | |||
| b92e7d4fab | |||
| 4dd5d79412 | |||
| 3b10e89047 | |||
| 9d39371ee0 | |||
| ad2353c3d8 | |||
| 63af4c6ed1 | |||
| 45776e57f6 | |||
| fb44dc653e | |||
| 81e2b2500a | |||
| aa2468b16e | |||
| e5c6ebdc96 | |||
| 4b99e2d07e | |||
| a3c6cb1768 | |||
| 592b84984f | |||
| 7e530b0652 | |||
| 3e250c6609 | |||
| 0347231bb8 | |||
| dd85ea8977 | |||
| 5398150c25 | |||
| b2892c3d17 | |||
| 24d8644c0b | |||
| e1149992d8 | |||
| 2637f5501b | |||
| c68ab7d9a8 | |||
| aa660e0631 | |||
| f4f40d0ed2 | |||
| 0224426e5b | |||
| 3ff2ae1b1a | |||
| 5bd02917ec | |||
| 121c2bcde8 | |||
| 2da0702009 | |||
| a32951c46a | |||
| 63f9ef2480 | |||
| c18ed6197c | |||
| 0167d76a7c | |||
| e0f7cf5ec6 | |||
| 391738cc25 | |||
| 865c8b7c09 | |||
| eb66cc9f35 | |||
| 0069831e8d | |||
| bf1a0791f7 | |||
| 72c84c5b0a | |||
| 407f80a1ef | |||
| a361673ad8 | |||
| 00d468b964 | |||
| 9c4ccd76a0 | |||
| 929589ddfa | |||
| b4afbe7720 | |||
| 385a4262e2 | |||
| 29e9b9e663 | |||
| c12bc39821 | |||
| 46f42ea9ac | |||
| e33941b7c2 | |||
| 083fc89f93 | |||
| e84061e2be | |||
| a96376e02e | |||
| 3cca30c770 | |||
| 34593e3944 | |||
| e0f7f011d8 | |||
| a1371dbd60 | |||
| 95e578ba10 | |||
| 861d4afdd8 | |||
| bc18f6c113 | |||
| 4ee01dc95c | |||
| 55c0b26977 | |||
| 8e69f02695 | |||
| ded931f0f6 | |||
| 2d051094e7 | |||
| aa43f998d4 | |||
| fab8da633b | |||
| 333718d1fa | |||
| 633dfbdb2e | |||
| d9d44c1188 | |||
| 7738274b3e | |||
| f4d16a1ae5 | |||
| 47afafa4d4 | |||
| a29fc7dd6f | |||
| f6f927e44f | |||
| 8683169e71 | |||
| d2bfa2b9a0 | |||
| 390f29cf8c | |||
| caec0b8aae | |||
| 31690c4b3d | |||
| 512fdbf634 | |||
| e40b0778e9 | |||
| e60460b120 | |||
| 5200591264 | |||
| 87d64baf7a | |||
| 34debbf837 | |||
| ccadd0f286 | |||
| 84956c16cc | |||
| ca4fa81570 | |||
| c191692751 | |||
| c2a96bdc7c | |||
| 599699b3a9 | |||
| 96ff400587 | |||
| ce51435507 | |||
| 27e17ff2c3 | |||
| cd807bc0c8 | |||
| 334d9820fa | |||
| 90e2bab078 | |||
| 901045a95f | |||
| a364318462 | |||
| 56f00a82e1 | |||
| 2612e32bac | |||
| 3b52d4f336 | |||
| b5f5740d0b | |||
| e496bdb687 | |||
| a63c97b8de | |||
| 5780006adb | |||
| afcc179244 | |||
| 8f55f5c28b | |||
| 5953a46785 | |||
| a6b7e8c43a | |||
| 04e404e448 | |||
| 5686fc9983 | |||
| 401b9276ae | |||
| 5c1dc1b7fe | |||
| e470162305 | |||
| 1f600fc526 | |||
| b26f30688f | |||
| 6378294071 | |||
| 0ba200c2a1 | |||
| 665b84de0d | |||
| a07209efa1 | |||
| 7d7aa789b3 | |||
| d8762c676f | |||
| b120247213 | |||
| 563c3ade06 | |||
| 6981f89cd8 | |||
| 58d650c2bb | |||
| 1c7adc0ebd | |||
| 3d91fe8895 | |||
| 90da09bc2c | |||
| 872037bf85 | |||
| 175dde1c52 | |||
| 90d18dd2e5 | |||
| 349bfad2e9 | |||
| 311327cb11 | |||
| a8346bd8ea | |||
| 1176256a44 | |||
| b38d1ed4a5 | |||
| 891bbda995 | |||
| 164528176a | |||
| 773580c7c9 | |||
| 42b3d90221 |
@@ -1,83 +0,0 @@
|
||||
FROM ubuntu
|
||||
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
|
||||
|
||||
ENV EDITOR=vim
|
||||
|
||||
RUN apt-get update && apt-get upgrade --yes
|
||||
|
||||
RUN apt-get install --yes \
|
||||
ca-certificates \
|
||||
bash-completion \
|
||||
build-essential \
|
||||
curl \
|
||||
cmake \
|
||||
direnv \
|
||||
emacs-nox \
|
||||
gnupg \
|
||||
htop \
|
||||
jq \
|
||||
less \
|
||||
lsb-release \
|
||||
lsof \
|
||||
man-db \
|
||||
nano \
|
||||
neovim \
|
||||
ssl-cert \
|
||||
sudo \
|
||||
unzip \
|
||||
xz-utils \
|
||||
zip
|
||||
|
||||
# configure locales to UTF8
|
||||
RUN apt-get install locales && locale-gen en_US.UTF-8
|
||||
ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' LC_ALL='en_US.UTF-8'
|
||||
|
||||
# configure direnv
|
||||
RUN direnv hook bash >> $HOME/.bashrc
|
||||
|
||||
# install nix
|
||||
RUN sh <(curl -L https://nixos.org/nix/install) --daemon
|
||||
|
||||
RUN mkdir -p $HOME/.config/nix $HOME/.config/nixpkgs \
|
||||
&& echo 'sandbox = false' >> $HOME/.config/nix/nix.conf \
|
||||
&& echo '{ allowUnfree = true; }' >> $HOME/.config/nixpkgs/config.nix \
|
||||
&& echo '. $HOME/.nix-profile/etc/profile.d/nix.sh' >> $HOME/.bashrc
|
||||
|
||||
|
||||
# install docker and configure daemon to use vfs as GitHub codespaces requires vfs
|
||||
# https://github.com/moby/moby/issues/13742#issuecomment-725197223
|
||||
RUN mkdir -p /etc/apt/keyrings \
|
||||
&& curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg \
|
||||
&& echo \
|
||||
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
|
||||
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null \
|
||||
&& apt-get update \
|
||||
&& apt-get install --yes docker-ce docker-ce-cli containerd.io docker-compose-plugin \
|
||||
&& mkdir -p /etc/docker \
|
||||
&& echo '{"cgroup-parent":"/actions_job","storage-driver":"vfs"}' >> /etc/docker/daemon.json
|
||||
|
||||
# install golang and language tooling
|
||||
ENV GO_VERSION=1.20
|
||||
ENV GOPATH=$HOME/go-packages
|
||||
ENV GOROOT=$HOME/go
|
||||
ENV PATH=$GOROOT/bin:$GOPATH/bin:$PATH
|
||||
RUN curl -fsSL https://dl.google.com/go/go$GO_VERSION.linux-amd64.tar.gz | tar xzs
|
||||
RUN echo 'export PATH=$GOPATH/bin:$PATH' >> $HOME/.bashrc
|
||||
|
||||
RUN bash -c ". $HOME/.bashrc \
|
||||
go install -v golang.org/x/tools/gopls@latest \
|
||||
&& go install -v mvdan.cc/sh/v3/cmd/shfmt@latest \
|
||||
&& go install -v github.com/mikefarah/yq/v4@v4.30.6 \
|
||||
"
|
||||
|
||||
# install nodejs
|
||||
RUN bash -c "$(curl -fsSL https://deb.nodesource.com/setup_14.x)" \
|
||||
&& apt-get install -y nodejs
|
||||
|
||||
# install zstd
|
||||
RUN bash -c "$(curl -fsSL https://raw.githubusercontent.com/horta/zstd.install/main/install)"
|
||||
|
||||
# install nfpm
|
||||
RUN echo 'deb [trusted=yes] https://repo.goreleaser.com/apt/ /' | sudo tee /etc/apt/sources.list.d/goreleaser.list \
|
||||
&& apt update \
|
||||
&& apt install nfpm
|
||||
@@ -1,24 +1,12 @@
|
||||
// For format details, see https://aka.ms/devcontainer.json
|
||||
{
|
||||
"name": "Development environments on your infrastructure",
|
||||
"image": "codercom/oss-dogfood:latest",
|
||||
|
||||
// Sets the run context to one level up instead of the .devcontainer folder.
|
||||
"context": ".",
|
||||
|
||||
// Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename.
|
||||
"dockerFile": "Dockerfile",
|
||||
|
||||
// Use 'forwardPorts' to make a list of ports inside the container available locally.
|
||||
// "forwardPorts": [],
|
||||
|
||||
"postStartCommand": "dockerd",
|
||||
|
||||
// privileged is required by GitHub codespaces - https://github.com/microsoft/vscode-dev-containers/issues/727
|
||||
"runArgs": [
|
||||
"--cap-add=SYS_PTRACE",
|
||||
"--security-opt",
|
||||
"seccomp=unconfined",
|
||||
"--privileged",
|
||||
"--init"
|
||||
]
|
||||
"features": {
|
||||
// See all possible options here https://github.com/devcontainers/features/tree/main/src/docker-in-docker
|
||||
"ghcr.io/devcontainers/features/docker-in-docker:2": {}
|
||||
},
|
||||
// SYS_PTRACE to enable go debugging
|
||||
// without --priviliged the Github Codespace build fails (not required otherwise)
|
||||
"runArgs": ["--cap-add=SYS_PTRACE", "--privileged"]
|
||||
}
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
# Generated files
|
||||
coderd/apidoc/docs.go linguist-generated=true
|
||||
docs/api/*.md linguist-generated=true
|
||||
docs/cli/*.md linguist-generated=true
|
||||
coderd/apidoc/swagger.json linguist-generated=true
|
||||
coderd/database/dump.sql linguist-generated=true
|
||||
peerbroker/proto/*.go linguist-generated=true
|
||||
@@ -9,3 +11,4 @@ provisionersdk/proto/*.go linguist-generated=true
|
||||
*.tfstate.json linguist-generated=true
|
||||
*.tfstate.dot linguist-generated=true
|
||||
*.tfplan.dot linguist-generated=true
|
||||
site/src/api/typesGenerated.ts linguist-generated=true
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
docs/ @coder/docs
|
||||
README.md @coder/docs
|
||||
ADOPTERS.md @coder/docs
|
||||
@@ -0,0 +1,70 @@
|
||||
name: "Setup Go"
|
||||
description: |
|
||||
Sets up the Go environment for tests, builds, etc.
|
||||
inputs:
|
||||
version:
|
||||
description: "The Go version to use."
|
||||
default: "1.20.6"
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Cache go toolchain
|
||||
uses: buildjet/cache@v3
|
||||
with:
|
||||
path: |
|
||||
${{ runner.tool_cache }}/go/${{ inputs.version }}
|
||||
key: gotoolchain-${{ runner.os }}-${{ inputs.version }}
|
||||
restore-keys: |
|
||||
gotoolchain-${{ runner.os }}-
|
||||
|
||||
- name: Setup Go
|
||||
uses: buildjet/setup-go@v4
|
||||
with:
|
||||
# We do our own caching for implementation clarity.
|
||||
cache: false
|
||||
go-version: ${{ inputs.version }}
|
||||
|
||||
- name: Get cache dirs
|
||||
shell: bash
|
||||
run: |
|
||||
set -x
|
||||
echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_ENV
|
||||
echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_ENV
|
||||
|
||||
# We split up GOMODCACHE from GOCACHE because the latter must be invalidated
|
||||
# on code change, but the former can be kept.
|
||||
- name: Cache $GOMODCACHE
|
||||
uses: buildjet/cache@v3
|
||||
with:
|
||||
path: |
|
||||
${{ env.GOMODCACHE }}
|
||||
key: gomodcache-${{ runner.os }}-${{ hashFiles('**/go.sum') }}-${{ github.job }}
|
||||
# restore-keys aren't used because it causes the cache to grow
|
||||
# infinitely. go.sum changes very infrequently, so rebuilding from
|
||||
# scratch every now and then isn't terrible.
|
||||
|
||||
- name: Cache $GOCACHE
|
||||
uses: buildjet/cache@v3
|
||||
with:
|
||||
path: |
|
||||
${{ env.GOCACHE }}
|
||||
# Job name must be included in the key for effective test cache reuse.
|
||||
# The key format is intentionally different than GOMODCACHE, because any
|
||||
# time a Go file changes we invalidate this cache, whereas GOMODCACHE is
|
||||
# only invalidated when go.sum changes.
|
||||
# The number in the key is incremented when the cache gets too large,
|
||||
# since this technically grows without bound.
|
||||
key: gocache2-${{ runner.os }}-${{ github.job }}-${{ hashFiles('**/*.go', 'go.**') }}
|
||||
restore-keys: |
|
||||
gocache2-${{ runner.os }}-${{ github.job }}-
|
||||
gocache2-${{ runner.os }}-
|
||||
|
||||
- name: Install gotestsum
|
||||
shell: bash
|
||||
run: go install gotest.tools/gotestsum@latest
|
||||
|
||||
# It isn't necessary that we ever do this, but it helps
|
||||
# separate the "setup" from the "run" times.
|
||||
- name: go mod download
|
||||
shell: bash
|
||||
run: go mod download -x
|
||||
@@ -0,0 +1,31 @@
|
||||
name: "Setup Node"
|
||||
description: |
|
||||
Sets up the node environment for tests, builds, etc.
|
||||
inputs:
|
||||
directory:
|
||||
description: |
|
||||
The directory to run the setup in.
|
||||
required: false
|
||||
default: "site"
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@v2
|
||||
with:
|
||||
version: 8
|
||||
- name: Setup Node
|
||||
uses: buildjet/setup-node@v3
|
||||
with:
|
||||
node-version: 18.17.0
|
||||
# See https://github.com/actions/setup-node#caching-global-packages-data
|
||||
cache: "pnpm"
|
||||
cache-dependency-path: ${{ inputs.directory }}/pnpm-lock.yaml
|
||||
- name: Install root node_modules
|
||||
shell: bash
|
||||
run: ./scripts/pnpm_install.sh
|
||||
|
||||
- name: Install node_modules
|
||||
shell: bash
|
||||
run: ../scripts/pnpm_install.sh
|
||||
working-directory: ${{ inputs.directory }}
|
||||
@@ -0,0 +1,10 @@
|
||||
name: Setup sqlc
|
||||
description: |
|
||||
Sets up the sqlc environment for tests, builds, etc.
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Setup sqlc
|
||||
uses: sqlc-dev/setup-sqlc@v3
|
||||
with:
|
||||
sqlc-version: "1.19.1"
|
||||
@@ -0,0 +1,11 @@
|
||||
name: "Setup Terraform"
|
||||
description: |
|
||||
Sets up Terraform for tests, builds, etc.
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Install Terraform
|
||||
uses: hashicorp/setup-terraform@v2
|
||||
with:
|
||||
terraform_version: ~1.5
|
||||
terraform_wrapper: false
|
||||
@@ -0,0 +1,27 @@
|
||||
name: Upload tests to datadog
|
||||
if: always()
|
||||
inputs:
|
||||
api-key:
|
||||
description: "Datadog API key"
|
||||
required: true
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- shell: bash
|
||||
run: |
|
||||
owner=${{ github.repository_owner }}
|
||||
echo "owner: $owner"
|
||||
if [[ $owner != "coder" ]]; then
|
||||
echo "Not a pull request from the main repo, skipping..."
|
||||
exit 0
|
||||
fi
|
||||
if [[ -z "${{ inputs.api-key }}" ]]; then
|
||||
# This can happen for dependabot.
|
||||
echo "No API key provided, skipping..."
|
||||
exit 0
|
||||
fi
|
||||
npm install -g @datadog/datadog-ci@2.10.0
|
||||
datadog-ci junit upload --service coder ./gotests.xml \
|
||||
--tags os:${{runner.os}} --tags runner_name:${{runner.name}}
|
||||
env:
|
||||
DATADOG_API_KEY: ${{ inputs.api-key }}
|
||||
+105
-5
@@ -3,7 +3,7 @@ updates:
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "monthly"
|
||||
interval: "weekly"
|
||||
time: "06:00"
|
||||
timezone: "America/Chicago"
|
||||
labels: []
|
||||
@@ -24,11 +24,15 @@ updates:
|
||||
update-types:
|
||||
- version-update:semver-minor
|
||||
- version-update:semver-patch
|
||||
groups:
|
||||
github-actions:
|
||||
patterns:
|
||||
- "*"
|
||||
|
||||
- package-ecosystem: "gomod"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "monthly"
|
||||
interval: "weekly"
|
||||
time: "06:00"
|
||||
timezone: "America/Chicago"
|
||||
commit-message:
|
||||
@@ -39,6 +43,33 @@ updates:
|
||||
- dependency-name: "*"
|
||||
update-types:
|
||||
- version-update:semver-patch
|
||||
groups:
|
||||
otel:
|
||||
patterns:
|
||||
- "go.nhat.io/otelsql"
|
||||
- "go.opentelemetry.io/otel*"
|
||||
golang-x:
|
||||
patterns:
|
||||
- "golang.org/x/*"
|
||||
|
||||
# Update our Dockerfile.
|
||||
- package-ecosystem: "docker"
|
||||
directory: "/scripts/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
time: "06:00"
|
||||
timezone: "America/Chicago"
|
||||
commit-message:
|
||||
prefix: "chore"
|
||||
labels: []
|
||||
ignore:
|
||||
# We need to coordinate terraform updates with the version hardcoded in
|
||||
# our Go code.
|
||||
- dependency-name: "terraform"
|
||||
groups:
|
||||
scripts-docker:
|
||||
patterns:
|
||||
- "*"
|
||||
|
||||
- package-ecosystem: "npm"
|
||||
directory: "/site/"
|
||||
@@ -46,6 +77,60 @@ updates:
|
||||
interval: "monthly"
|
||||
time: "06:00"
|
||||
timezone: "America/Chicago"
|
||||
reviewers:
|
||||
- "coder/ts"
|
||||
commit-message:
|
||||
prefix: "chore"
|
||||
labels: []
|
||||
ignore:
|
||||
# Ignore patch updates for all dependencies
|
||||
- dependency-name: "*"
|
||||
update-types:
|
||||
- version-update:semver-patch
|
||||
# Ignore major updates to Node.js types, because they need to
|
||||
# correspond to the Node.js engine version
|
||||
- dependency-name: "@types/node"
|
||||
update-types:
|
||||
- version-update:semver-major
|
||||
groups:
|
||||
react:
|
||||
patterns:
|
||||
- "react*"
|
||||
- "@types/react*"
|
||||
xterm:
|
||||
patterns:
|
||||
- "xterm*"
|
||||
xstate:
|
||||
patterns:
|
||||
- "xstate"
|
||||
- "@xstate*"
|
||||
mui:
|
||||
patterns:
|
||||
- "@mui*"
|
||||
storybook:
|
||||
patterns:
|
||||
- "@storybook*"
|
||||
- "storybook*"
|
||||
eslint:
|
||||
patterns:
|
||||
- "eslint*"
|
||||
- "@eslint*"
|
||||
- "@typescript-eslint/eslint-plugin"
|
||||
- "@typescript-eslint/parser"
|
||||
jest:
|
||||
patterns:
|
||||
- "jest*"
|
||||
- "@swc/jest"
|
||||
- "@types/jest"
|
||||
|
||||
- package-ecosystem: "npm"
|
||||
directory: "/offlinedocs/"
|
||||
schedule:
|
||||
interval: "monthly"
|
||||
time: "06:00"
|
||||
timezone: "America/Chicago"
|
||||
reviewers:
|
||||
- "coder/ts"
|
||||
commit-message:
|
||||
prefix: "chore"
|
||||
labels: []
|
||||
@@ -60,10 +145,25 @@ updates:
|
||||
update-types:
|
||||
- version-update:semver-major
|
||||
|
||||
- package-ecosystem: "terraform"
|
||||
directory: "/examples/templates"
|
||||
# Update dogfood.
|
||||
- package-ecosystem: "docker"
|
||||
directory: "/dogfood/"
|
||||
schedule:
|
||||
interval: "monthly"
|
||||
interval: "weekly"
|
||||
time: "06:00"
|
||||
timezone: "America/Chicago"
|
||||
commit-message:
|
||||
prefix: "chore"
|
||||
labels: []
|
||||
groups:
|
||||
dogfood-docker:
|
||||
patterns:
|
||||
- "*"
|
||||
|
||||
- package-ecosystem: "terraform"
|
||||
directory: "/dogfood/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
time: "06:00"
|
||||
timezone: "America/Chicago"
|
||||
commit-message:
|
||||
|
||||
+433
-340
@@ -6,7 +6,6 @@ on:
|
||||
- main
|
||||
|
||||
pull_request:
|
||||
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
@@ -15,7 +14,7 @@ permissions:
|
||||
contents: read
|
||||
deployments: none
|
||||
issues: none
|
||||
packages: none
|
||||
packages: write
|
||||
pull-requests: none
|
||||
repository-projects: none
|
||||
security-events: none
|
||||
@@ -28,22 +27,120 @@ concurrency:
|
||||
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'ubuntu-latest-8-cores' || 'ubuntu-latest' }}
|
||||
changes:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
docs-only: ${{ steps.filter.outputs.docs_count == steps.filter.outputs.all_count }}
|
||||
go: ${{ steps.filter.outputs.go }}
|
||||
ts: ${{ steps.filter.outputs.ts }}
|
||||
k8s: ${{ steps.filter.outputs.k8s }}
|
||||
ci: ${{ steps.filter.outputs.ci }}
|
||||
offlinedocs-only: ${{ steps.filter.outputs.offlinedocs_count == steps.filter.outputs.all_count }}
|
||||
offlinedocs: ${{ steps.filter.outputs.offlinedocs }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
# Install Go!
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "~1.20"
|
||||
fetch-depth: 1
|
||||
# For pull requests it's not necessary to checkout the code
|
||||
- name: check changed files
|
||||
uses: dorny/paths-filter@v2
|
||||
id: filter
|
||||
with:
|
||||
filters: |
|
||||
all:
|
||||
- "**"
|
||||
docs:
|
||||
- "docs/**"
|
||||
- "README.md"
|
||||
- "examples/templates/**"
|
||||
- "examples/web-server/**"
|
||||
- "examples/monitoring/**"
|
||||
- "examples/lima/**"
|
||||
go:
|
||||
- "**.sql"
|
||||
- "**.go"
|
||||
- "**.golden"
|
||||
- "go.mod"
|
||||
- "go.sum"
|
||||
# Other non-Go files that may affect Go code:
|
||||
- "**.rego"
|
||||
- "**.sh"
|
||||
- "**.tpl"
|
||||
- "**.gotmpl"
|
||||
- "**.gotpl"
|
||||
- "Makefile"
|
||||
- "site/static/error.html"
|
||||
# Main repo directories for completeness in case other files are
|
||||
# touched:
|
||||
- "agent/**"
|
||||
- "cli/**"
|
||||
- "cmd/**"
|
||||
- "coderd/**"
|
||||
- "enterprise/**"
|
||||
- "examples/*"
|
||||
- "provisioner/**"
|
||||
- "provisionerd/**"
|
||||
- "provisionersdk/**"
|
||||
- "pty/**"
|
||||
- "scaletest/**"
|
||||
- "tailnet/**"
|
||||
- "testutil/**"
|
||||
ts:
|
||||
- "site/**"
|
||||
- "Makefile"
|
||||
k8s:
|
||||
- "helm/**"
|
||||
- "scripts/Dockerfile"
|
||||
- "scripts/Dockerfile.base"
|
||||
- "scripts/helm.sh"
|
||||
ci:
|
||||
- ".github/actions/**"
|
||||
- ".github/workflows/ci.yaml"
|
||||
offlinedocs:
|
||||
- "offlinedocs/**"
|
||||
|
||||
# Check for any typos!
|
||||
- id: debug
|
||||
run: |
|
||||
echo "${{ toJSON(steps.filter )}}"
|
||||
|
||||
lint:
|
||||
needs: changes
|
||||
if: needs.changes.outputs.offlinedocs-only == 'false' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Get golangci-lint cache dir
|
||||
run: |
|
||||
go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.53.2
|
||||
dir=$(golangci-lint cache status | awk '/Dir/ { print $2 }')
|
||||
echo "LINT_CACHE_DIR=$dir" >> $GITHUB_ENV
|
||||
|
||||
- name: golangci-lint cache
|
||||
uses: buildjet/cache@v3
|
||||
with:
|
||||
path: |
|
||||
${{ env.LINT_CACHE_DIR }}
|
||||
key: golangci-lint-${{ runner.os }}-${{ hashFiles('**/*.go') }}
|
||||
restore-keys: |
|
||||
golangci-lint-${{ runner.os }}-
|
||||
|
||||
# Check for any typos
|
||||
- name: Check for typos
|
||||
uses: crate-ci/typos@v1.13.14
|
||||
uses: crate-ci/typos@v1.16.1
|
||||
with:
|
||||
config: .github/workflows/typos.toml
|
||||
|
||||
- name: Fix the typos
|
||||
if: ${{ failure() }}
|
||||
run: |
|
||||
@@ -51,138 +148,43 @@ jobs:
|
||||
cargo install typos-cli
|
||||
typos -c .github/workflows/typos.toml -w"
|
||||
|
||||
# Check for Go linting errors!
|
||||
- name: Lint Go
|
||||
uses: golangci/golangci-lint-action@v3.3.1
|
||||
with:
|
||||
version: v1.51.0
|
||||
|
||||
- name: Lint shell scripts
|
||||
uses: ludeeus/action-shellcheck@2.0.0
|
||||
env:
|
||||
SHELLCHECK_OPTS: --external-sources
|
||||
with:
|
||||
ignore: node_modules
|
||||
|
||||
# Lint our dashboard!
|
||||
- name: Cache node_modules
|
||||
id: cache-node
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
**/node_modules
|
||||
.eslintcache
|
||||
key: js-${{ runner.os }}-test-${{ hashFiles('**/yarn.lock') }}
|
||||
restore-keys: |
|
||||
js-${{ runner.os }}-
|
||||
- name: Install node_modules
|
||||
run: ./scripts/yarn_install.sh
|
||||
- name: Lint TypeScript
|
||||
run: yarn lint
|
||||
working-directory: site
|
||||
|
||||
# Make sure the Helm chart is linted!
|
||||
# Needed for helm chart linting
|
||||
- name: Install helm
|
||||
uses: azure/setup-helm@v3
|
||||
with:
|
||||
version: v3.9.2
|
||||
- name: Lint Helm chart
|
||||
run: |
|
||||
cd helm
|
||||
make lint
|
||||
|
||||
# Ensure AGPL and Enterprise are separated!
|
||||
- name: Check for AGPL code importing Enterprise...
|
||||
run: ./scripts/check_enterprise_imports.sh
|
||||
|
||||
changes:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
docs-only: ${{ steps.filter.outputs.docs_count == steps.filter.outputs.all_count }}
|
||||
sh: ${{ steps.filter.outputs.sh }}
|
||||
ts: ${{ steps.filter.outputs.ts }}
|
||||
k8s: ${{ steps.filter.outputs.k8s }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
# For pull requests it's not necessary to checkout the code
|
||||
- uses: dorny/paths-filter@v2
|
||||
id: filter
|
||||
with:
|
||||
filters: |
|
||||
all:
|
||||
- '**'
|
||||
docs:
|
||||
- 'docs/**'
|
||||
# For testing:
|
||||
# - '.github/**'
|
||||
sh:
|
||||
- "**.sh"
|
||||
ts:
|
||||
- 'site/**'
|
||||
k8s:
|
||||
- 'helm/**'
|
||||
- scripts/Dockerfile
|
||||
- scripts/Dockerfile.base
|
||||
- scripts/helm.sh
|
||||
- id: debug
|
||||
- name: make lint
|
||||
run: |
|
||||
echo "${{ toJSON(steps.filter )}}"
|
||||
make --output-sync=line -j lint
|
||||
|
||||
gen:
|
||||
timeout-minutes: 8
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'ubuntu-latest-8-cores' || 'ubuntu-latest' }}
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
needs: changes
|
||||
if: needs.changes.outputs.docs-only == 'false'
|
||||
if: needs.changes.outputs.docs-only == 'false' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Cache Node
|
||||
id: cache-node
|
||||
uses: actions/cache@v3
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
path: |
|
||||
**/node_modules
|
||||
.eslintcache
|
||||
key: js-${{ runner.os }}-test-${{ hashFiles('**/yarn.lock') }}
|
||||
restore-keys: |
|
||||
js-${{ runner.os }}-
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Install node_modules
|
||||
run: ./scripts/yarn_install.sh
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "~1.20"
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Echo Go Cache Paths
|
||||
id: go-cache-paths
|
||||
- name: Setup sqlc
|
||||
uses: ./.github/actions/setup-sqlc
|
||||
|
||||
- name: go install tools
|
||||
run: |
|
||||
echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_OUTPUT
|
||||
echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Go Build Cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ steps.go-cache-paths.outputs.GOCACHE }}
|
||||
key: ${{ github.job }}-go-build-${{ hashFiles('**/go.sum', '**/**.go') }}
|
||||
|
||||
- name: Go Mod Cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ steps.go-cache-paths.outputs.GOMODCACHE }}
|
||||
key: ${{ github.job }}-go-mod-${{ hashFiles('**/go.sum') }}
|
||||
|
||||
- name: Install sqlc
|
||||
run: |
|
||||
curl -sSL https://github.com/kyleconroy/sqlc/releases/download/v1.16.0/sqlc_1.16.0_linux_amd64.tar.gz | sudo tar -C /usr/bin -xz sqlc
|
||||
- name: Install protoc-gen-go
|
||||
run: go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.26
|
||||
- name: Install protoc-gen-go-drpc
|
||||
run: go install storj.io/drpc/cmd/protoc-gen-go-drpc@v0.0.26
|
||||
- name: Install goimports
|
||||
run: go install golang.org/x/tools/cmd/goimports@latest
|
||||
- name: Install yq
|
||||
run: go run github.com/mikefarah/yq/v4@v4.30.6
|
||||
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.30
|
||||
go install storj.io/drpc/cmd/protoc-gen-go-drpc@v0.0.33
|
||||
go install golang.org/x/tools/cmd/goimports@latest
|
||||
go install github.com/mikefarah/yq/v4@v4.30.6
|
||||
go install github.com/golang/mock/mockgen@v1.6.0
|
||||
|
||||
- name: Install Protoc
|
||||
run: |
|
||||
@@ -204,28 +206,25 @@ jobs:
|
||||
run: ./scripts/check_unstaged.sh
|
||||
|
||||
fmt:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
needs: changes
|
||||
if: needs.changes.outputs.offlinedocs-only == 'false' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
timeout-minutes: 7
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
submodules: true
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Cache Node
|
||||
id: cache-node
|
||||
uses: actions/cache@v3
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- name: Setup Go
|
||||
uses: buildjet/setup-go@v4
|
||||
with:
|
||||
path: |
|
||||
**/node_modules
|
||||
.eslintcache
|
||||
key: js-${{ runner.os }}-test-${{ hashFiles('**/yarn.lock') }}
|
||||
restore-keys: |
|
||||
js-${{ runner.os }}-
|
||||
|
||||
- name: Install node_modules
|
||||
run: ./scripts/yarn_install.sh
|
||||
# This doesn't need caching. It's super fast anyways!
|
||||
cache: false
|
||||
go-version: 1.20.6
|
||||
|
||||
- name: Install shfmt
|
||||
run: go install mvdan.cc/sh/v3/cmd/shfmt@v3.5.0
|
||||
@@ -239,54 +238,28 @@ jobs:
|
||||
run: ./scripts/check_unstaged.sh
|
||||
|
||||
test-go:
|
||||
runs-on: ${{ matrix.os == 'ubuntu-latest' && github.repository_owner == 'coder' && 'ubuntu-latest-8-cores' || matrix.os == 'windows-2022' && github.repository_owner == 'coder' && 'windows-latest-8-cores'|| matrix.os }}
|
||||
runs-on: ${{ matrix.os == 'ubuntu-latest' && github.repository_owner == 'coder' && 'buildjet-4vcpu-ubuntu-2204' || matrix.os == 'macos-latest' && github.repository_owner == 'coder' && 'macos-latest-xl' || matrix.os == 'windows-2019' && github.repository_owner == 'coder' && 'windows-latest-8-cores' || matrix.os }}
|
||||
needs: changes
|
||||
if: needs.changes.outputs.go == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
timeout-minutes: 20
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-latest
|
||||
- macos-latest
|
||||
- windows-2022
|
||||
- windows-2019
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
go-version: "~1.20"
|
||||
fetch-depth: 1
|
||||
|
||||
# Sadly the new "set output" syntax (of writing env vars to
|
||||
# $GITHUB_OUTPUT) does not work on both powershell and bash so we use the
|
||||
# deprecated syntax here.
|
||||
- name: Echo Go Cache Paths
|
||||
id: go-cache-paths
|
||||
run: |
|
||||
echo "::set-output name=GOCACHE::$(go env GOCACHE)"
|
||||
echo "::set-output name=GOMODCACHE::$(go env GOMODCACHE)"
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Go Build Cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ steps.go-cache-paths.outputs.GOCACHE }}
|
||||
key: ${{ runner.os }}-go-build-${{ hashFiles('**/go.**', '**.go') }}
|
||||
|
||||
- name: Go Mod Cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ steps.go-cache-paths.outputs.GOMODCACHE }}
|
||||
key: ${{ runner.os }}-go-mod-${{ hashFiles('**/go.sum') }}
|
||||
|
||||
- name: Install gotestsum
|
||||
uses: jaxxstorm/action-install-gh-release@v1.9.0
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
repo: gotestyourself/gotestsum
|
||||
tag: v1.9.0
|
||||
|
||||
- uses: hashicorp/setup-terraform@v2
|
||||
with:
|
||||
terraform_version: 1.1.9
|
||||
terraform_wrapper: false
|
||||
- name: Setup Terraform
|
||||
uses: ./.github/actions/setup-tf
|
||||
|
||||
- name: Test with Mock Database
|
||||
id: test
|
||||
@@ -302,16 +275,29 @@ jobs:
|
||||
echo "cover=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
gotestsum --junitfile="gotests.xml" --packages="./..." -- -parallel=8 -timeout=7m -short -failfast $COVERAGE_FLAGS
|
||||
# By default Go will use the number of logical CPUs, which
|
||||
# is a fine default.
|
||||
PARALLEL_FLAG=""
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
export TS_DEBUG_DISCO=true
|
||||
gotestsum --junitfile="gotests.xml" --jsonfile="gotests.json" \
|
||||
--packages="./..." -- $PARALLEL_FLAG -short -failfast $COVERAGE_FLAGS
|
||||
|
||||
- name: Print test stats
|
||||
if: success() || failure()
|
||||
run: |
|
||||
# Artifacts are not available after rerunning a job,
|
||||
# so we need to print the test stats to the log.
|
||||
go run ./scripts/ci-report/main.go gotests.json | tee gotests_stats.json
|
||||
|
||||
- name: Upload test stats to Datadog
|
||||
uses: ./.github/actions/upload-datadog
|
||||
if: success() || failure()
|
||||
with:
|
||||
name: gotests-${{ matrix.os }}.xml
|
||||
path: ./gotests.xml
|
||||
retention-days: 30
|
||||
api-key: ${{ secrets.DATADOG_API_KEY }}
|
||||
|
||||
- uses: codecov/codecov-action@v3
|
||||
- name: Check code coverage
|
||||
uses: codecov/codecov-action@v3
|
||||
# This action has a tendency to error out unexpectedly, it has
|
||||
# the `fail_ci_if_error` option that defaults to `false`, but
|
||||
# that is no guarantee, see:
|
||||
@@ -323,63 +309,47 @@ jobs:
|
||||
files: ./gotests.coverage
|
||||
flags: unittest-go-${{ matrix.os }}
|
||||
|
||||
test-go-psql:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'ubuntu-latest-8-cores' || 'ubuntu-latest' }}
|
||||
test-go-pg:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
needs: changes
|
||||
if: needs.changes.outputs.go == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
# This timeout must be greater than the timeout set by `go test` in
|
||||
# `make test-postgres` to ensure we receive a trace of running
|
||||
# goroutines. Setting this to the timeout +5m should work quite well
|
||||
# even if some of the preceding steps are slow.
|
||||
timeout-minutes: 25
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
go-version: "~1.20"
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Echo Go Cache Paths
|
||||
id: go-cache-paths
|
||||
run: |
|
||||
echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_OUTPUT
|
||||
echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Go Build Cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ steps.go-cache-paths.outputs.GOCACHE }}
|
||||
key: ${{ runner.os }}-go-build-${{ hashFiles('**/go.sum', '**/**.go') }}
|
||||
|
||||
- name: Go Mod Cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ steps.go-cache-paths.outputs.GOMODCACHE }}
|
||||
key: ${{ runner.os }}-go-mod-${{ hashFiles('**/go.sum') }}
|
||||
|
||||
- name: Install gotestsum
|
||||
uses: jaxxstorm/action-install-gh-release@v1.9.0
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
repo: gotestyourself/gotestsum
|
||||
tag: v1.9.0
|
||||
|
||||
- uses: hashicorp/setup-terraform@v2
|
||||
with:
|
||||
terraform_version: 1.1.9
|
||||
terraform_wrapper: false
|
||||
- name: Setup Terraform
|
||||
uses: ./.github/actions/setup-tf
|
||||
|
||||
- name: Test with PostgreSQL Database
|
||||
run: |
|
||||
export TS_DEBUG_DISCO=true
|
||||
make test-postgres
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
- name: Print test stats
|
||||
if: success() || failure()
|
||||
run: |
|
||||
# Artifacts are not available after rerunning a job,
|
||||
# so we need to print the test stats to the log.
|
||||
go run ./scripts/ci-report/main.go gotests.json | tee gotests_stats.json
|
||||
|
||||
- name: Upload test stats to Datadog
|
||||
uses: ./.github/actions/upload-datadog
|
||||
if: success() || failure()
|
||||
with:
|
||||
name: gotests-postgres.xml
|
||||
path: ./gotests.xml
|
||||
retention-days: 30
|
||||
api-key: ${{ secrets.DATADOG_API_KEY }}
|
||||
|
||||
- uses: codecov/codecov-action@v3
|
||||
- name: Check code coverage
|
||||
uses: codecov/codecov-action@v3
|
||||
# This action has a tendency to error out unexpectedly, it has
|
||||
# the `fail_ci_if_error` option that defaults to `false`, but
|
||||
# that is no guarantee, see:
|
||||
@@ -391,9 +361,36 @@ jobs:
|
||||
files: ./gotests.coverage
|
||||
flags: unittest-go-postgres-linux
|
||||
|
||||
test-go-race:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
needs: changes
|
||||
if: needs.changes.outputs.go == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
timeout-minutes: 25
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Setup Terraform
|
||||
uses: ./.github/actions/setup-tf
|
||||
|
||||
- name: Run Tests
|
||||
run: |
|
||||
gotestsum --junitfile="gotests.xml" -- -race ./...
|
||||
|
||||
- name: Upload test stats to Datadog
|
||||
uses: ./.github/actions/upload-datadog
|
||||
if: always()
|
||||
with:
|
||||
api-key: ${{ secrets.DATADOG_API_KEY }}
|
||||
|
||||
deploy:
|
||||
name: "deploy"
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'ubuntu-latest-8-cores' || 'ubuntu-latest' }}
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
timeout-minutes: 30
|
||||
needs: changes
|
||||
if: |
|
||||
@@ -403,7 +400,8 @@ jobs:
|
||||
contents: read
|
||||
id-token: write
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -416,38 +414,11 @@ jobs:
|
||||
- name: Set up Google Cloud SDK
|
||||
uses: google-github-actions/setup-gcloud@v1
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "~1.20"
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- name: Echo Go Cache Paths
|
||||
id: go-cache-paths
|
||||
run: |
|
||||
echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_OUTPUT
|
||||
echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Go Build Cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ steps.go-cache-paths.outputs.GOCACHE }}
|
||||
key: ${{ runner.os }}-release-go-build-${{ hashFiles('**/go.sum') }}
|
||||
|
||||
- name: Go Mod Cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ steps.go-cache-paths.outputs.GOMODCACHE }}
|
||||
key: ${{ runner.os }}-release-go-mod-${{ hashFiles('**/go.sum') }}
|
||||
|
||||
- name: Cache Node
|
||||
id: cache-node
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
**/node_modules
|
||||
.eslintcache
|
||||
key: js-${{ runner.os }}-release-node-${{ hashFiles('**/yarn.lock') }}
|
||||
restore-keys: |
|
||||
js-${{ runner.os }}-
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Install goimports
|
||||
run: go install golang.org/x/tools/cmd/goimports@latest
|
||||
@@ -470,16 +441,39 @@ jobs:
|
||||
|
||||
- name: Install Release
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
regions=(
|
||||
# gcp-region-id instance-name systemd-service-name
|
||||
"us-central1-a coder coder"
|
||||
"australia-southeast1-b coder-sydney coder-workspace-proxy"
|
||||
"europe-west3-c coder-europe coder-workspace-proxy"
|
||||
"southamerica-east1-b coder-brazil coder-workspace-proxy"
|
||||
)
|
||||
|
||||
deb_pkg="./build/coder_$(./scripts/version.sh)_linux_amd64.deb"
|
||||
if [ ! -f "$deb_pkg" ]; then
|
||||
echo "deb package not found: $deb_pkg"
|
||||
ls -l ./build
|
||||
exit 1
|
||||
fi
|
||||
|
||||
gcloud config set project coder-dogfood
|
||||
gcloud config set compute/zone us-central1-a
|
||||
gcloud compute scp ./build/coder_*_linux_amd64.deb coder:/tmp/coder.deb
|
||||
gcloud compute ssh coder -- sudo dpkg -i --force-confdef /tmp/coder.deb
|
||||
gcloud compute ssh coder -- sudo systemctl daemon-reload
|
||||
for region in "${regions[@]}"; do
|
||||
echo "::group::$region"
|
||||
set -- $region
|
||||
|
||||
- name: Start
|
||||
run: gcloud compute ssh coder -- sudo service coder restart
|
||||
set -x
|
||||
gcloud config set compute/zone "$1"
|
||||
gcloud compute scp "$deb_pkg" "${2}:/tmp/coder.deb"
|
||||
gcloud compute ssh "$2" -- /bin/sh -c "set -eux; sudo dpkg -i --force-confdef /tmp/coder.deb; sudo systemctl daemon-reload; sudo service '$3' restart"
|
||||
set +x
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
echo "::endgroup::"
|
||||
done
|
||||
|
||||
- name: Upload build artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: coder
|
||||
path: |
|
||||
@@ -489,33 +483,24 @@ jobs:
|
||||
retention-days: 7
|
||||
|
||||
test-js:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'ubuntu-latest-8-cores' || 'ubuntu-latest' }}
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
needs: changes
|
||||
if: needs.changes.outputs.ts == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Cache Node
|
||||
id: cache-node
|
||||
uses: actions/cache@v3
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
path: |
|
||||
**/node_modules
|
||||
.eslintcache
|
||||
key: js-${{ runner.os }}-test-${{ hashFiles('**/yarn.lock') }}
|
||||
restore-keys: |
|
||||
js-${{ runner.os }}-
|
||||
fetch-depth: 1
|
||||
|
||||
- uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: "16.16.0"
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- name: Install node_modules
|
||||
run: ./scripts/yarn_install.sh
|
||||
|
||||
- run: yarn test:ci --max-workers ${{ steps.cpu-cores.outputs.count }}
|
||||
- run: pnpm test:ci --max-workers $(nproc)
|
||||
working-directory: site
|
||||
|
||||
- uses: codecov/codecov-action@v3
|
||||
- name: Check code coverage
|
||||
uses: codecov/codecov-action@v3
|
||||
# This action has a tendency to error out unexpectedly, it has
|
||||
# the `fail_ci_if_error` option that defaults to `false`, but
|
||||
# that is no guarantee, see:
|
||||
@@ -528,63 +513,33 @@ jobs:
|
||||
flags: unittest-js
|
||||
|
||||
test-e2e:
|
||||
needs:
|
||||
- changes
|
||||
if: needs.changes.outputs.docs-only == 'false'
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'ubuntu-latest-8-cores' || 'ubuntu-latest' }}
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
needs: changes
|
||||
if: needs.changes.outputs.go == 'true' || needs.changes.outputs.ts == 'true' || needs.changes.outputs.ci == 'true' || github.ref == 'refs/heads/main'
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Cache Node
|
||||
id: cache-node
|
||||
uses: actions/cache@v3
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
path: |
|
||||
**/node_modules
|
||||
.eslintcache
|
||||
key: js-${{ runner.os }}-e2e-${{ hashFiles('**/yarn.lock') }}
|
||||
fetch-depth: 1
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "~1.20"
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- uses: hashicorp/setup-terraform@v2
|
||||
with:
|
||||
terraform_version: 1.1.9
|
||||
terraform_wrapper: false
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: "16.16.0"
|
||||
|
||||
- name: Echo Go Cache Paths
|
||||
id: go-cache-paths
|
||||
run: |
|
||||
echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_OUTPUT
|
||||
echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Go Build Cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ steps.go-cache-paths.outputs.GOCACHE }}
|
||||
key: ${{ runner.os }}-go-build-${{ hashFiles('**/go.sum') }}
|
||||
|
||||
- name: Go Mod Cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ steps.go-cache-paths.outputs.GOMODCACHE }}
|
||||
key: ${{ runner.os }}-go-mod-${{ hashFiles('**/go.sum') }}
|
||||
- name: Setup Terraform
|
||||
uses: ./.github/actions/setup-tf
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
sudo npm install -g prettier
|
||||
make -B site/out/index.html
|
||||
|
||||
- run: yarn playwright:install
|
||||
- run: pnpm playwright:install
|
||||
working-directory: site
|
||||
|
||||
- run: yarn playwright:test
|
||||
- run: pnpm playwright:test
|
||||
env:
|
||||
DEBUG: pw:api
|
||||
working-directory: site
|
||||
@@ -600,22 +555,18 @@ jobs:
|
||||
chromatic:
|
||||
# REMARK: this is only used to build storybook and deploy it to Chromatic.
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- changes
|
||||
if: needs.changes.outputs.ts == 'true'
|
||||
needs: changes
|
||||
if: needs.changes.outputs.ts == 'true' || needs.changes.outputs.ci == 'true'
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
# Required by Chromatic for build-over-build history, otherwise we
|
||||
# only get 1 commit on shallow checkout.
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: "16.16.0"
|
||||
|
||||
- name: Install dependencies
|
||||
run: cd site && yarn
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
# This step is not meant for mainline because any detected changes to
|
||||
# storybook snapshots will require manual approval/review in order for
|
||||
@@ -623,13 +574,23 @@ jobs:
|
||||
- name: Publish to Chromatic (non-mainline)
|
||||
if: github.ref != 'refs/heads/main' && github.repository_owner == 'coder'
|
||||
uses: chromaui/action@v1
|
||||
env:
|
||||
NODE_OPTIONS: "--max_old_space_size=4096"
|
||||
STORYBOOK: true
|
||||
with:
|
||||
buildScriptName: "storybook:build"
|
||||
exitOnceUploaded: true
|
||||
# This will prevent CI from failing when Chromatic detects visual changes
|
||||
exitZeroOnChanges: true
|
||||
# Chromatic states its fine to make this token public. See:
|
||||
# https://www.chromatic.com/docs/github-actions#forked-repositories
|
||||
projectToken: 695c25b6cb65
|
||||
workingDir: "./site"
|
||||
# Prevent excessive build runs on minor version changes
|
||||
skip: "@(renovate/**|dependabot/**)"
|
||||
# Run TurboSnap to trace file dependencies to related stories
|
||||
# and tell chromatic to only take snapshots of relevent stories
|
||||
onlyChanged: true
|
||||
|
||||
# This is a separate step for mainline only that auto accepts and changes
|
||||
# instead of holding CI up. Since we squash/merge, this is defensive to
|
||||
@@ -640,8 +601,140 @@ jobs:
|
||||
- name: Publish to Chromatic (mainline)
|
||||
if: github.ref == 'refs/heads/main' && github.repository_owner == 'coder'
|
||||
uses: chromaui/action@v1
|
||||
env:
|
||||
NODE_OPTIONS: "--max_old_space_size=4096"
|
||||
STORYBOOK: true
|
||||
with:
|
||||
autoAcceptChanges: true
|
||||
# This will prevent CI from failing when Chromatic detects visual changes
|
||||
exitZeroOnChanges: true
|
||||
buildScriptName: "storybook:build"
|
||||
projectToken: 695c25b6cb65
|
||||
workingDir: "./site"
|
||||
# Run TurboSnap to trace file dependencies to related stories
|
||||
# and tell chromatic to only take snapshots of relevent stories
|
||||
onlyChanged: true
|
||||
|
||||
offlinedocs:
|
||||
name: offlinedocs
|
||||
needs: changes
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
if: needs.changes.outputs.offlinedocs == 'true' || needs.changes.outputs.ci == 'true'
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
# 0 is required here for version.sh to work.
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
with:
|
||||
directory: offlinedocs
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Install go tools
|
||||
run: |
|
||||
go install github.com/golang/mock/mockgen@v1.6.0
|
||||
|
||||
- name: Setup sqlc
|
||||
uses: sqlc-dev/setup-sqlc@v3
|
||||
with:
|
||||
sqlc-version: "1.19.1"
|
||||
|
||||
- name: Format
|
||||
run: |
|
||||
cd offlinedocs
|
||||
pnpm format:check
|
||||
|
||||
- name: Lint
|
||||
run: |
|
||||
cd offlinedocs
|
||||
pnpm lint
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
make -j build/coder_docs_"$(./scripts/version.sh)".tgz
|
||||
|
||||
required:
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- fmt
|
||||
- lint
|
||||
- gen
|
||||
- test-go
|
||||
- test-go-pg
|
||||
- test-go-race
|
||||
- test-js
|
||||
- offlinedocs
|
||||
# Allow this job to run even if the needed jobs fail, are skipped or
|
||||
# cancelled.
|
||||
if: always()
|
||||
steps:
|
||||
- name: Ensure required checks
|
||||
run: |
|
||||
echo "Checking required checks"
|
||||
echo "- fmt: ${{ needs.fmt.result }}"
|
||||
echo "- lint: ${{ needs.lint.result }}"
|
||||
echo "- gen: ${{ needs.gen.result }}"
|
||||
echo "- test-go: ${{ needs.test-go.result }}"
|
||||
echo "- test-go-pg: ${{ needs.test-go-pg.result }}"
|
||||
echo "- test-go-race: ${{ needs.test-go-race.result }}"
|
||||
echo "- test-js: ${{ needs.test-js.result }}"
|
||||
echo
|
||||
|
||||
# We allow skipped jobs to pass, but not failed or cancelled jobs.
|
||||
if [[ "${{ contains(needs.*.result, 'failure') }}" == "true" || "${{ contains(needs.*.result, 'cancelled') }}" == "true" ]]; then
|
||||
echo "One of the required checks has failed or has been cancelled"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Required checks have passed"
|
||||
|
||||
build-main-image:
|
||||
# This build and publihes ghcr.io/coder/coder-preview:main for each merge commit to main branch.
|
||||
# We are only building this for amd64 plateform. (>95% pulls are for amd64)
|
||||
needs: changes
|
||||
if: github.ref == 'refs/heads/main' && needs.changes.outputs.docs-only == 'false'
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
env:
|
||||
DOCKER_CLI_EXPERIMENTAL: "enabled"
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Setup sqlc
|
||||
uses: ./.github/actions/setup-sqlc
|
||||
|
||||
- name: GHCR Login
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build and push Linux amd64 Docker image
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
go mod download
|
||||
make gen/mark-fresh
|
||||
export DOCKER_IMAGE_NO_PREREQUISITES=true
|
||||
version="$(./scripts/version.sh)"
|
||||
export CODER_IMAGE_BUILD_BASE_TAG="$(CODER_IMAGE_BASE=coder-base ./scripts/image_tag.sh --version "$version")"
|
||||
make -j build/coder_linux_amd64
|
||||
./scripts/build_docker.sh \
|
||||
--arch amd64 \
|
||||
--target ghcr.io/coder/coder-preview:main \
|
||||
--version $version \
|
||||
--push \
|
||||
build/coder_linux_amd64
|
||||
|
||||
@@ -25,7 +25,8 @@ jobs:
|
||||
permissions:
|
||||
pull-requests: write
|
||||
steps:
|
||||
- uses: hmarr/auto-approve-action@v3
|
||||
- name: auto-approve dependabot
|
||||
uses: hmarr/auto-approve-action@v3
|
||||
if: github.actor == 'dependabot[bot]'
|
||||
|
||||
cla:
|
||||
@@ -47,25 +48,13 @@ jobs:
|
||||
branch: "main"
|
||||
allowlist: dependabot*
|
||||
|
||||
title:
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event_name == 'pull_request_target'
|
||||
steps:
|
||||
- name: Validate PR title
|
||||
uses: amannn/action-semantic-pull-request@v5
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
requireScope: false
|
||||
|
||||
release-labels:
|
||||
runs-on: ubuntu-latest
|
||||
# Depend on lint so that title is Conventional Commits-compatible.
|
||||
needs: [title]
|
||||
# Skip tagging for draft PRs.
|
||||
if: ${{ github.event_name == 'pull_request_target' && success() && !github.event.pull_request.draft }}
|
||||
steps:
|
||||
- uses: actions/github-script@v6
|
||||
- name: release-labels
|
||||
uses: actions/github-script@v6
|
||||
with:
|
||||
# This script ensures PR title and labels are in sync:
|
||||
#
|
||||
|
||||
@@ -31,7 +31,8 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository_owner == 'coder'
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Docker login
|
||||
uses: docker/login-action@v2
|
||||
|
||||
@@ -6,18 +6,21 @@ on:
|
||||
- main
|
||||
paths:
|
||||
- "dogfood/**"
|
||||
pull_request:
|
||||
paths:
|
||||
- "dogfood/**"
|
||||
- ".github/workflows/dogfood.yaml"
|
||||
# Uncomment these lines when testing with CI.
|
||||
# pull_request:
|
||||
# paths:
|
||||
# - "dogfood/**"
|
||||
# - ".github/workflows/dogfood.yaml"
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
deploy_image:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: buildjet-4vcpu-ubuntu-2204
|
||||
steps:
|
||||
- name: Get branch name
|
||||
id: branch-name
|
||||
uses: tj-actions/branch-names@v6.4
|
||||
uses: tj-actions/branch-names@v6.5
|
||||
|
||||
- name: "Branch name to Docker tag name"
|
||||
id: docker-tag-name
|
||||
@@ -43,26 +46,35 @@ jobs:
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: "{{defaultContext}}:dogfood"
|
||||
pull: true
|
||||
push: true
|
||||
tags: "codercom/oss-dogfood:${{ steps.docker-tag-name.outputs.tag }},codercom/oss-dogfood:latest"
|
||||
cache-from: type=registry,ref=codercom/oss-dogfood:latest
|
||||
cache-to: type=inline
|
||||
|
||||
deploy_template:
|
||||
needs: deploy_image
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Get short commit SHA
|
||||
id: vars
|
||||
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
||||
- name: "Install latest Coder"
|
||||
|
||||
- name: Get latest commit title
|
||||
id: message
|
||||
run: echo "pr_title=$(git log --format=%s -n 1 ${{ github.sha }})" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: "Get latest Coder binary from the server"
|
||||
run: |
|
||||
curl -L https://coder.com/install.sh | sh
|
||||
# env:
|
||||
# VERSION: 0.x
|
||||
curl -fsSL "https://dev.coder.com/bin/coder-linux-amd64" -o "./coder"
|
||||
chmod +x "./coder"
|
||||
|
||||
- name: "Push template"
|
||||
run: |
|
||||
coder templates push $CODER_TEMPLATE_NAME --directory $CODER_TEMPLATE_DIR --yes --name=$CODER_TEMPLATE_VERSION
|
||||
./coder templates push $CODER_TEMPLATE_NAME --directory $CODER_TEMPLATE_DIR --yes --name=$CODER_TEMPLATE_VERSION --message="$CODER_TEMPLATE_MESSAGE"
|
||||
env:
|
||||
# Consumed by Coder CLI
|
||||
CODER_URL: https://dev.coder.com
|
||||
@@ -71,3 +83,4 @@ jobs:
|
||||
CODER_TEMPLATE_NAME: ${{ secrets.CODER_TEMPLATE_NAME }}
|
||||
CODER_TEMPLATE_VERSION: ${{ steps.vars.outputs.sha_short }}
|
||||
CODER_TEMPLATE_DIR: ./dogfood
|
||||
CODER_TEMPLATE_MESSAGE: ${{ steps.message.outputs.pr_title }}
|
||||
|
||||
@@ -18,5 +18,6 @@
|
||||
{
|
||||
"pattern": "tailscale.com"
|
||||
}
|
||||
]
|
||||
],
|
||||
"aliveStatusCodes": [200, 0]
|
||||
}
|
||||
|
||||
@@ -0,0 +1,60 @@
|
||||
# The nightly-gauntlet runs tests that are either too flaky or too slow to block
|
||||
# every PR.
|
||||
name: nightly-gauntlet
|
||||
on:
|
||||
schedule:
|
||||
# Every day at midnight
|
||||
- cron: "0 0 * * *"
|
||||
workflow_dispatch:
|
||||
jobs:
|
||||
go-race:
|
||||
# While GitHub's toaster runners are likelier to flake, we want consistency
|
||||
# between this environment and the regular test environment for DataDog
|
||||
# statistics and to only show real workflow threats.
|
||||
runs-on: "buildjet-8vcpu-ubuntu-2204"
|
||||
# This runner costs 0.016 USD per minute,
|
||||
# so 0.016 * 240 = 3.84 USD per run.
|
||||
timeout-minutes: 240
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Setup Terraform
|
||||
uses: ./.github/actions/setup-tf
|
||||
|
||||
- name: Run Tests
|
||||
run: |
|
||||
# -race is likeliest to catch flaky tests
|
||||
# due to correctness detection and its performance
|
||||
# impact.
|
||||
gotestsum --junitfile="gotests.xml" -- -timeout=240m -count=10 -race ./...
|
||||
|
||||
- name: Upload test results to DataDog
|
||||
uses: ./.github/actions/upload-datadog
|
||||
if: always()
|
||||
with:
|
||||
api-key: ${{ secrets.DATADOG_API_KEY }}
|
||||
|
||||
go-timing:
|
||||
# We run these tests with p=1 so we don't need a lot of compute.
|
||||
runs-on: "buildjet-2vcpu-ubuntu-2204"
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Run Tests
|
||||
run: |
|
||||
gotestsum --junitfile="gotests.xml" -- --tags="timing" -p=1 -run='_Timing/' ./...
|
||||
|
||||
- name: Upload test results to DataDog
|
||||
uses: ./.github/actions/upload-datadog
|
||||
if: always()
|
||||
with:
|
||||
api-key: ${{ secrets.DATADOG_API_KEY }}
|
||||
@@ -13,4 +13,5 @@ jobs:
|
||||
assign-author:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: toshimaru/auto-author-assign@v1.6.2
|
||||
- name: Assign author
|
||||
uses: toshimaru/auto-author-assign@v1.6.2
|
||||
|
||||
@@ -0,0 +1,73 @@
|
||||
name: Cleanup PR deployment and image
|
||||
on:
|
||||
pull_request:
|
||||
types: closed
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
pr_number:
|
||||
description: "PR number"
|
||||
required: true
|
||||
|
||||
permissions:
|
||||
packages: write
|
||||
|
||||
jobs:
|
||||
cleanup:
|
||||
runs-on: "ubuntu-latest"
|
||||
steps:
|
||||
- name: Get PR number
|
||||
id: pr_number
|
||||
run: |
|
||||
if [ -n "${{ github.event.pull_request.number }}" ]; then
|
||||
echo "PR_NUMBER=${{ github.event.pull_request.number }}" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "PR_NUMBER=${{ github.event.inputs.pr_number }}" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Delete image
|
||||
continue-on-error: true
|
||||
uses: bots-house/ghcr-delete-image-action@v1.1.0
|
||||
with:
|
||||
owner: coder
|
||||
name: coder-preview
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
tag: pr${{ steps.pr_number.outputs.PR_NUMBER }}
|
||||
|
||||
- name: Set up kubeconfig
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
mkdir -p ~/.kube
|
||||
echo "${{ secrets.PR_DEPLOYMENTS_KUBECONFIG }}" > ~/.kube/config
|
||||
export KUBECONFIG=~/.kube/config
|
||||
|
||||
- name: Delete helm release
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
helm delete --namespace "pr${{ steps.pr_number.outputs.PR_NUMBER }}" "pr${{ steps.pr_number.outputs.PR_NUMBER }}" || echo "helm release not found"
|
||||
|
||||
- name: "Remove PR namespace"
|
||||
run: |
|
||||
kubectl delete namespace "pr${{ steps.pr_number.outputs.PR_NUMBER }}" || echo "namespace not found"
|
||||
|
||||
- name: "Remove DNS records"
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
# Get identifier for the record
|
||||
record_id=$(curl -X GET "https://api.cloudflare.com/client/v4/zones/${{ secrets.PR_DEPLOYMENTS_ZONE_ID }}/dns_records?name=%2A.pr${{ steps.pr_number.outputs.PR_NUMBER }}.${{ secrets.PR_DEPLOYMENTS_DOMAIN }}" \
|
||||
-H "Authorization: Bearer ${{ secrets.PR_DEPLOYMENTS_CLOUDFLARE_API_TOKEN }}" \
|
||||
-H "Content-Type:application/json" | jq -r '.result[0].id') || echo "DNS record not found"
|
||||
|
||||
echo "::add-mask::$record_id"
|
||||
|
||||
# Delete the record
|
||||
(
|
||||
curl -X DELETE "https://api.cloudflare.com/client/v4/zones/${{ secrets.PR_DEPLOYMENTS_ZONE_ID }}/dns_records/$record_id" \
|
||||
-H "Authorization: Bearer ${{ secrets.PR_DEPLOYMENTS_CLOUDFLARE_API_TOKEN }}" \
|
||||
-H "Content-Type:application/json" | jq -r '.success'
|
||||
) || echo "DNS record not found"
|
||||
|
||||
- name: "Delete certificate"
|
||||
if: ${{ github.event.pull_request.merged == true }}
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
kubectl delete certificate "pr${{ steps.pr_number.outputs.PR_NUMBER }}-tls" -n pr-deployment-certs || echo "certificate not found"
|
||||
@@ -0,0 +1,494 @@
|
||||
# This action will trigger when
|
||||
# 1. when the workflow is manually triggered
|
||||
# 2. ./scripts/deploy_pr.sh is run locally
|
||||
# 3. when a PR is updated
|
||||
name: Deploy PR
|
||||
on:
|
||||
pull_request:
|
||||
types: synchronize
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
pr_number:
|
||||
description: "PR number"
|
||||
type: number
|
||||
required: true
|
||||
skip_build:
|
||||
description: "Skip build job"
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
experiments:
|
||||
description: "Experiments to enable"
|
||||
required: false
|
||||
type: string
|
||||
default: "*"
|
||||
|
||||
env:
|
||||
REPO: ghcr.io/coder/coder-preview
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
pull-requests: write
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-PR-${{ github.event.pull_request.number || github.event.inputs.pr_number }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
get_info:
|
||||
if: github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request'
|
||||
outputs:
|
||||
PR_NUMBER: ${{ steps.pr_info.outputs.PR_NUMBER }}
|
||||
PR_TITLE: ${{ steps.pr_info.outputs.PR_TITLE }}
|
||||
PR_URL: ${{ steps.pr_info.outputs.PR_URL }}
|
||||
PR_BRANCH: ${{ steps.pr_info.outputs.PR_BRANCH }}
|
||||
CODER_BASE_IMAGE_TAG: ${{ steps.set_tags.outputs.CODER_BASE_IMAGE_TAG }}
|
||||
CODER_IMAGE_TAG: ${{ steps.set_tags.outputs.CODER_IMAGE_TAG }}
|
||||
NEW: ${{ steps.check_deployment.outputs.new }}
|
||||
BUILD: ${{ steps.filter.outputs.all_count > steps.filter.outputs.ignored_count || steps.check_deployment.outputs.new }}
|
||||
|
||||
runs-on: "ubuntu-latest"
|
||||
steps:
|
||||
- name: Get PR number, title, and branch name
|
||||
id: pr_info
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
PR_NUMBER=${{ github.event.inputs.pr_number || github.event.pull_request.number }}
|
||||
PR_TITLE=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" https://api.github.com/repos/coder/coder/pulls/$PR_NUMBER | jq -r '.title')
|
||||
PR_BRANCH=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" https://api.github.com/repos/coder/coder/pulls/$PR_NUMBER | jq -r '.head.ref')
|
||||
echo "PR_URL=https://github.com/coder/coder/pull/$PR_NUMBER" >> $GITHUB_OUTPUT
|
||||
echo "PR_NUMBER=$PR_NUMBER" >> $GITHUB_OUTPUT
|
||||
echo "PR_TITLE=$PR_TITLE" >> $GITHUB_OUTPUT
|
||||
echo "PR_BRANCH=$PR_BRANCH" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set required tags
|
||||
id: set_tags
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
echo "CODER_BASE_IMAGE_TAG=$CODER_BASE_IMAGE_TAG" >> $GITHUB_OUTPUT
|
||||
echo "CODER_IMAGE_TAG=$CODER_IMAGE_TAG" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
CODER_BASE_IMAGE_TAG: ghcr.io/coder/coder-preview-base:pr${{ steps.pr_info.outputs.PR_NUMBER }}
|
||||
CODER_IMAGE_TAG: ghcr.io/coder/coder-preview:pr${{ steps.pr_info.outputs.PR_NUMBER }}
|
||||
|
||||
- name: Set up kubeconfig
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
mkdir -p ~/.kube
|
||||
echo "${{ secrets.PR_DEPLOYMENTS_KUBECONFIG }}" > ~/.kube/config
|
||||
export KUBECONFIG=~/.kube/config
|
||||
|
||||
- name: Check if the helm deployment already exists
|
||||
id: check_deployment
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
if helm status "pr${{ steps.pr_info.outputs.PR_NUMBER }}" --namespace "pr${{ steps.pr_info.outputs.PR_NUMBER }}" > /dev/null 2>&1; then
|
||||
echo "Deployment already exists. Skipping deployment."
|
||||
new=false
|
||||
else
|
||||
echo "Deployment doesn't exist."
|
||||
new=true
|
||||
fi
|
||||
echo "new=$new" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Find Comment
|
||||
uses: peter-evans/find-comment@v2
|
||||
if: github.event_name == 'workflow_dispatch' || steps.check_deployment.outputs.NEW == 'false'
|
||||
id: fc
|
||||
with:
|
||||
issue-number: ${{ steps.pr_info.outputs.PR_NUMBER }}
|
||||
comment-author: "github-actions[bot]"
|
||||
body-includes: ":rocket:"
|
||||
direction: last
|
||||
|
||||
- name: Comment on PR
|
||||
id: comment_id
|
||||
if: github.event_name == 'workflow_dispatch' || steps.check_deployment.outputs.NEW == 'false'
|
||||
uses: peter-evans/create-or-update-comment@v3
|
||||
with:
|
||||
comment-id: ${{ steps.fc.outputs.comment-id }}
|
||||
issue-number: ${{ steps.pr_info.outputs.PR_NUMBER }}
|
||||
edit-mode: replace
|
||||
body: |
|
||||
---
|
||||
:rocket: Deploying PR ${{ steps.pr_info.outputs.PR_NUMBER }} ...
|
||||
---
|
||||
reactions: eyes
|
||||
reactions-edit-mode: replace
|
||||
|
||||
- name: Checkout
|
||||
if: github.event_name == 'workflow_dispatch' || steps.check_deployment.outputs.NEW == 'false'
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{ steps.pr_info.outputs.PR_BRANCH }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Check changed files
|
||||
if: github.event_name == 'workflow_dispatch' || steps.check_deployment.outputs.NEW == 'false'
|
||||
uses: dorny/paths-filter@v2
|
||||
id: filter
|
||||
with:
|
||||
filters: |
|
||||
all:
|
||||
- "**"
|
||||
ignored:
|
||||
- "docs/**"
|
||||
- "README.md"
|
||||
- "examples/web-server/**"
|
||||
- "examples/monitoring/**"
|
||||
- "examples/lima/**"
|
||||
- ".github/**"
|
||||
- "offlinedocs/**"
|
||||
- ".devcontainer/**"
|
||||
- "helm/**"
|
||||
- "*[^g][^o][^.][^s][^u][^m]*"
|
||||
- "*[^g][^o][^.][^m][^o][^d]*"
|
||||
- "*[^M][^a][^k][^e][^f][^i][^l][^e]*"
|
||||
- "scripts/**/*[^D][^o][^c][^k][^e][^r][^f][^i][^l][^e]*"
|
||||
- "scripts/**/*[^D][^o][^c][^k][^e][^r][^f][^i][^l][^e][.][b][^a][^s][^e]*"
|
||||
|
||||
- name: Print number of changed files
|
||||
if: github.event_name == 'workflow_dispatch' || steps.check_deployment.outputs.NEW == 'false'
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
echo "Total number of changed files: ${{ steps.filter.outputs.all_count }}"
|
||||
echo "Number of ignored files: ${{ steps.filter.outputs.ignored_count }}"
|
||||
|
||||
build:
|
||||
needs: get_info
|
||||
# Skips the build job if the workflow was triggered by a workflow_dispatch event and the skip_build input is set to true
|
||||
# or if the workflow was triggered by an issue_comment event and the comment body contains --skip-build
|
||||
# always run the build job if a pull_request event triggered the workflow
|
||||
if: |
|
||||
(github.event_name == 'workflow_dispatch' && github.event.inputs.skip_build == 'false') ||
|
||||
(github.event_name == 'pull_request' && needs.get_info.result == 'success' && needs.get_info.outputs.NEW == 'false')
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
env:
|
||||
DOCKER_CLI_EXPERIMENTAL: "enabled"
|
||||
CODER_IMAGE_TAG: ${{ needs.get_info.outputs.CODER_IMAGE_TAG }}
|
||||
PR_NUMBER: ${{ needs.get_info.outputs.PR_NUMBER }}
|
||||
PR_BRANCH: ${{ needs.get_info.outputs.PR_BRANCH }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{ env.PR_BRANCH }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Setup Node
|
||||
if: needs.get_info.outputs.BUILD == 'true'
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- name: Setup Go
|
||||
if: needs.get_info.outputs.BUILD == 'true'
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Setup sqlc
|
||||
if: needs.get_info.outputs.BUILD == 'true'
|
||||
uses: ./.github/actions/setup-sqlc
|
||||
|
||||
- name: GHCR Login
|
||||
if: needs.get_info.outputs.BUILD == 'true'
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build and push Linux amd64 Docker image
|
||||
if: needs.get_info.outputs.BUILD == 'true'
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
go mod download
|
||||
make gen/mark-fresh
|
||||
export DOCKER_IMAGE_NO_PREREQUISITES=true
|
||||
version="$(./scripts/version.sh)"
|
||||
export CODER_IMAGE_BUILD_BASE_TAG="$(CODER_IMAGE_BASE=coder-base ./scripts/image_tag.sh --version "$version")"
|
||||
make -j build/coder_linux_amd64
|
||||
./scripts/build_docker.sh \
|
||||
--arch amd64 \
|
||||
--target ${{ env.CODER_IMAGE_TAG }} \
|
||||
--version $version \
|
||||
--push \
|
||||
build/coder_linux_amd64
|
||||
|
||||
deploy:
|
||||
needs: [build, get_info]
|
||||
# Run deploy job only if build job was successful or skipped
|
||||
if: |
|
||||
always() && (needs.build.result == 'success' || needs.build.result == 'skipped') &&
|
||||
(github.event_name == 'workflow_dispatch' || needs.get_info.outputs.NEW == 'false')
|
||||
runs-on: "ubuntu-latest"
|
||||
env:
|
||||
CODER_IMAGE_TAG: ${{ needs.get_info.outputs.CODER_IMAGE_TAG }}
|
||||
PR_NUMBER: ${{ needs.get_info.outputs.PR_NUMBER }}
|
||||
PR_TITLE: ${{ needs.get_info.outputs.PR_TITLE }}
|
||||
PR_URL: ${{ needs.get_info.outputs.PR_URL }}
|
||||
PR_BRANCH: ${{ needs.get_info.outputs.PR_BRANCH }}
|
||||
PR_DEPLOYMENT_ACCESS_URL: "pr${{ needs.get_info.outputs.PR_NUMBER }}.${{ secrets.PR_DEPLOYMENTS_DOMAIN }}"
|
||||
steps:
|
||||
- name: Set up kubeconfig
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
mkdir -p ~/.kube
|
||||
echo "${{ secrets.PR_DEPLOYMENTS_KUBECONFIG }}" > ~/.kube/config
|
||||
export KUBECONFIG=~/.kube/config
|
||||
|
||||
- name: Check if image exists
|
||||
if: needs.get_info.outputs.NEW == 'true'
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
foundTag=$(curl -fsSL https://github.com/coder/coder/pkgs/container/coder-preview | grep -o ${{ env.CODER_IMAGE_TAG }} | head -n 1)
|
||||
if [ -z "$foundTag" ]; then
|
||||
echo "Image not found"
|
||||
echo "${{ env.CODER_IMAGE_TAG }} not found in ghcr.io/coder/coder-preview"
|
||||
echo "Please remove --skip-build from the comment and try again"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Add DNS record to Cloudflare
|
||||
if: needs.get_info.outputs.NEW == 'true'
|
||||
run: |
|
||||
curl -X POST "https://api.cloudflare.com/client/v4/zones/${{ secrets.PR_DEPLOYMENTS_ZONE_ID }}/dns_records" \
|
||||
-H "Authorization: Bearer ${{ secrets.PR_DEPLOYMENTS_CLOUDFLARE_API_TOKEN }}" \
|
||||
-H "Content-Type:application/json" \
|
||||
--data '{"type":"CNAME","name":"*.${{ env.PR_DEPLOYMENT_ACCESS_URL }}","content":"${{ env.PR_DEPLOYMENT_ACCESS_URL }}","ttl":1,"proxied":false}'
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{ env.PR_BRANCH }}
|
||||
|
||||
- name: Create PR namespace
|
||||
if: needs.get_info.outputs.NEW == 'true'
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
# try to delete the namespace, but don't fail if it doesn't exist
|
||||
kubectl delete namespace "pr${{ env.PR_NUMBER }}" || true
|
||||
kubectl create namespace "pr${{ env.PR_NUMBER }}"
|
||||
|
||||
- name: Check and Create Certificate
|
||||
if: needs.get_info.outputs.NEW == 'true'
|
||||
run: |
|
||||
# Using kubectl to check if a Certificate resource already exists
|
||||
# we are doing this to avoid letsenrypt rate limits
|
||||
if ! kubectl get certificate pr${{ env.PR_NUMBER }}-tls -n pr-deployment-certs > /dev/null 2>&1; then
|
||||
echo "Certificate doesn't exist. Creating a new one."
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: pr${{ env.PR_NUMBER }}-tls
|
||||
namespace: pr-deployment-certs
|
||||
spec:
|
||||
secretName: pr${{ env.PR_NUMBER }}-tls
|
||||
issuerRef:
|
||||
name: letsencrypt
|
||||
kind: ClusterIssuer
|
||||
dnsNames:
|
||||
- "${{ env.PR_DEPLOYMENT_ACCESS_URL }}"
|
||||
- "*.${{ env.PR_DEPLOYMENT_ACCESS_URL }}"
|
||||
EOF
|
||||
else
|
||||
echo "Certificate exists. Skipping certificate creation."
|
||||
fi
|
||||
echo "Copy certificate from pr-deployment-certs to pr${{ env.PR_NUMBER }} namespace"
|
||||
until kubectl get secret pr${{ env.PR_NUMBER }}-tls -n pr-deployment-certs &> /dev/null
|
||||
do
|
||||
echo "Waiting for secret pr${{ env.PR_NUMBER }}-tls to be created..."
|
||||
sleep 5
|
||||
done
|
||||
(
|
||||
kubectl get secret pr${{ env.PR_NUMBER }}-tls -n pr-deployment-certs -o json |
|
||||
jq 'del(.metadata.namespace,.metadata.creationTimestamp,.metadata.resourceVersion,.metadata.selfLink,.metadata.uid,.metadata.managedFields)' |
|
||||
kubectl -n pr${{ env.PR_NUMBER }} apply -f -
|
||||
)
|
||||
|
||||
- name: Set up PostgreSQL database
|
||||
if: needs.get_info.outputs.NEW == 'true'
|
||||
run: |
|
||||
helm repo add bitnami https://charts.bitnami.com/bitnami
|
||||
helm install coder-db bitnami/postgresql \
|
||||
--namespace pr${{ env.PR_NUMBER }} \
|
||||
--set auth.username=coder \
|
||||
--set auth.password=coder \
|
||||
--set auth.database=coder \
|
||||
--set persistence.size=10Gi
|
||||
kubectl create secret generic coder-db-url -n pr${{ env.PR_NUMBER }} \
|
||||
--from-literal=url="postgres://coder:coder@coder-db-postgresql.pr${{ env.PR_NUMBER }}.svc.cluster.local:5432/coder?sslmode=disable"
|
||||
|
||||
- name: Create values.yaml
|
||||
if: github.event_name == 'workflow_dispatch'
|
||||
run: |
|
||||
cat <<EOF > pr-deploy-values.yaml
|
||||
coder:
|
||||
image:
|
||||
repo: ${{ env.REPO }}
|
||||
tag: pr${{ env.PR_NUMBER }}
|
||||
pullPolicy: Always
|
||||
service:
|
||||
type: ClusterIP
|
||||
ingress:
|
||||
enable: true
|
||||
className: traefik
|
||||
host: ${{ env.PR_DEPLOYMENT_ACCESS_URL }}
|
||||
wildcardHost: "*.${{ env.PR_DEPLOYMENT_ACCESS_URL }}"
|
||||
tls:
|
||||
enable: true
|
||||
secretName: pr${{ env.PR_NUMBER }}-tls
|
||||
wildcardSecretName: pr${{ env.PR_NUMBER }}-tls
|
||||
env:
|
||||
- name: "CODER_ACCESS_URL"
|
||||
value: "https://${{ env.PR_DEPLOYMENT_ACCESS_URL }}"
|
||||
- name: "CODER_WILDCARD_ACCESS_URL"
|
||||
value: "*.${{ env.PR_DEPLOYMENT_ACCESS_URL }}"
|
||||
- name: "CODER_EXPERIMENTS"
|
||||
value: "${{ github.event.inputs.experiments }}"
|
||||
- name: CODER_PG_CONNECTION_URL
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: coder-db-url
|
||||
key: url
|
||||
- name: "CODER_OAUTH2_GITHUB_ALLOW_SIGNUPS"
|
||||
value: "true"
|
||||
- name: "CODER_OAUTH2_GITHUB_CLIENT_ID"
|
||||
value: "${{ secrets.PR_DEPLOYMENTS_GITHUB_OAUTH_CLIENT_ID }}"
|
||||
- name: "CODER_OAUTH2_GITHUB_CLIENT_SECRET"
|
||||
value: "${{ secrets.PR_DEPLOYMENTS_GITHUB_OAUTH_CLIENT_SECRET }}"
|
||||
- name: "CODER_OAUTH2_GITHUB_ALLOWED_ORGS"
|
||||
value: "coder"
|
||||
EOF
|
||||
|
||||
- name: Install/Upgrade Helm chart
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
if [[ ${{ github.event_name }} == "workflow_dispatch" ]]; then
|
||||
helm upgrade --install "pr${{ env.PR_NUMBER }}" ./helm \
|
||||
--namespace "pr${{ env.PR_NUMBER }}" \
|
||||
--values ./pr-deploy-values.yaml \
|
||||
--force
|
||||
else
|
||||
if [[ ${{ needs.get_info.outputs.BUILD }} == "true" ]]; then
|
||||
helm upgrade --install "pr${{ env.PR_NUMBER }}" ./helm \
|
||||
--namespace "pr${{ env.PR_NUMBER }}" \
|
||||
--reuse-values \
|
||||
--force
|
||||
else
|
||||
echo "Skipping helm upgrade, as there is no new image to deploy"
|
||||
fi
|
||||
fi
|
||||
|
||||
- name: Install coder-logstream-kube
|
||||
if: needs.get_info.outputs.NEW == 'true'
|
||||
run: |
|
||||
helm repo add coder-logstream-kube https://helm.coder.com/logstream-kube
|
||||
helm upgrade --install coder-logstream-kube coder-logstream-kube/coder-logstream-kube \
|
||||
--namespace "pr${{ env.PR_NUMBER }}" \
|
||||
--set url="https://pr${{ env.PR_NUMBER }}.${{ secrets.PR_DEPLOYMENTS_DOMAIN }}"
|
||||
|
||||
- name: Get Coder binary
|
||||
if: needs.get_info.outputs.NEW == 'true'
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
|
||||
DEST="${HOME}/coder"
|
||||
URL="https://${{ env.PR_DEPLOYMENT_ACCESS_URL }}/bin/coder-linux-amd64"
|
||||
|
||||
mkdir -p "$(dirname ${DEST})"
|
||||
|
||||
COUNT=0
|
||||
until $(curl --output /dev/null --silent --head --fail "$URL"); do
|
||||
printf '.'
|
||||
sleep 5
|
||||
COUNT=$((COUNT+1))
|
||||
if [ $COUNT -ge 60 ]; then
|
||||
echo "Timed out waiting for URL to be available"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
curl -fsSL "$URL" -o "${DEST}"
|
||||
chmod +x "${DEST}"
|
||||
"${DEST}" version
|
||||
mv "${DEST}" /usr/local/bin/coder
|
||||
|
||||
- name: Create first user, template and workspace
|
||||
if: needs.get_info.outputs.NEW == 'true'
|
||||
id: setup_deployment
|
||||
run: |
|
||||
set -euxo pipefail
|
||||
|
||||
# Create first user
|
||||
|
||||
# create a masked random password 12 characters long
|
||||
password=$(openssl rand -base64 16 | tr -d "=+/" | cut -c1-12)
|
||||
|
||||
# add mask so that the password is not printed to the logs
|
||||
echo "::add-mask::$password"
|
||||
echo "password=$password" >> $GITHUB_OUTPUT
|
||||
|
||||
coder login \
|
||||
--first-user-username test \
|
||||
--first-user-email pr${{ env.PR_NUMBER }}@coder.com \
|
||||
--first-user-password $password \
|
||||
--first-user-trial \
|
||||
--use-token-as-session \
|
||||
https://${{ env.PR_DEPLOYMENT_ACCESS_URL }}
|
||||
|
||||
# Create template
|
||||
coder templates init --id kubernetes && cd ./kubernetes/ && coder templates create -y --variable namespace=pr${{ env.PR_NUMBER }}
|
||||
|
||||
# Create workspace
|
||||
cat <<EOF > workspace.yaml
|
||||
cpu: "2"
|
||||
memory: "4"
|
||||
home_disk_size: "2"
|
||||
EOF
|
||||
|
||||
coder create --template="kubernetes" test --rich-parameter-file ./workspace.yaml -y
|
||||
coder stop test -y
|
||||
|
||||
- name: Send Slack notification
|
||||
if: needs.get_info.outputs.NEW == 'true'
|
||||
run: |
|
||||
curl -s -o /dev/null -X POST -H 'Content-type: application/json' \
|
||||
-d \
|
||||
'{
|
||||
"pr_number": "'"${{ env.PR_NUMBER }}"'",
|
||||
"pr_url": "'"${{ env.PR_URL }}"'",
|
||||
"pr_title": "'"${{ env.PR_TITLE }}"'",
|
||||
"pr_access_url": "'"https://${{ env.PR_DEPLOYMENT_ACCESS_URL }}"'",
|
||||
"pr_username": "'"test"'",
|
||||
"pr_email": "'"pr${{ env.PR_NUMBER }}@coder.com"'",
|
||||
"pr_password": "'"${{ steps.setup_deployment.outputs.password }}"'",
|
||||
"pr_actor": "'"${{ github.actor }}"'"
|
||||
}' \
|
||||
${{ secrets.PR_DEPLOYMENTS_SLACK_WEBHOOK }}
|
||||
echo "Slack notification sent"
|
||||
|
||||
- name: Find Comment
|
||||
uses: peter-evans/find-comment@v2
|
||||
id: fc
|
||||
with:
|
||||
issue-number: ${{ env.PR_NUMBER }}
|
||||
comment-author: "github-actions[bot]"
|
||||
body-includes: ":rocket:"
|
||||
direction: last
|
||||
|
||||
- name: Comment on PR
|
||||
uses: peter-evans/create-or-update-comment@v3
|
||||
env:
|
||||
STATUS: ${{ needs.get_info.outputs.NEW == 'true' && 'Created' || 'Updated' }}
|
||||
with:
|
||||
issue-number: ${{ env.PR_NUMBER }}
|
||||
edit-mode: replace
|
||||
comment-id: ${{ steps.fc.outputs.comment-id }}
|
||||
body: |
|
||||
---
|
||||
:heavy_check_mark: PR ${{ env.PR_NUMBER }} ${{ env.STATUS }} successfully.
|
||||
:rocket: Access the credentials [here](${{ secrets.PR_DEPLOYMENTS_SLACK_CHANNEL_URL }}).
|
||||
---
|
||||
cc: @${{ github.actor }}
|
||||
reactions: rocket
|
||||
reactions-edit-mode: replace
|
||||
@@ -28,18 +28,23 @@ env:
|
||||
# https://github.blog/changelog/2022-06-10-github-actions-inputs-unified-across-manual-and-reusable-workflows/
|
||||
CODER_RELEASE: ${{ !inputs.dry_run }}
|
||||
CODER_DRY_RUN: ${{ inputs.dry_run }}
|
||||
# For some reason, setup-go won't actually pick up a new patch version if
|
||||
# it has an old one cached. We need to manually specify the versions so we
|
||||
# can get the latest release. Never use "~1.xx" here!
|
||||
CODER_GO_VERSION: "1.20.6"
|
||||
|
||||
jobs:
|
||||
release:
|
||||
name: Build and publish
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'ubuntu-latest-8-cores' || 'ubuntu-latest' }}
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
env:
|
||||
# Necessary for Docker manifest
|
||||
DOCKER_CLI_EXPERIMENTAL: "enabled"
|
||||
outputs:
|
||||
version: ${{ steps.version.outputs.version }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -71,11 +76,11 @@ jobs:
|
||||
set -euo pipefail
|
||||
ref=HEAD
|
||||
old_version="$(git describe --abbrev=0 "$ref^1")"
|
||||
version="$(./scripts/version.sh)"
|
||||
version="v$(./scripts/version.sh)"
|
||||
|
||||
# Generate notes.
|
||||
release_notes_file="$(mktemp -t release_notes.XXXXXX)"
|
||||
./scripts/release/generate_release_notes.sh --old-version "$old_version" --new-version "$version" --ref "$ref" >> "$release_notes_file"
|
||||
./scripts/release/generate_release_notes.sh --check-for-changelog --old-version "$old_version" --new-version "$version" --ref "$ref" >> "$release_notes_file"
|
||||
echo CODER_RELEASE_NOTES_FILE="$release_notes_file" >> $GITHUB_ENV
|
||||
|
||||
- name: Show release notes
|
||||
@@ -90,20 +95,11 @@ jobs:
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "~1.20"
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Cache Node
|
||||
id: cache-node
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
**/node_modules
|
||||
.eslintcache
|
||||
key: js-${{ runner.os }}-test-${{ hashFiles('**/yarn.lock') }}
|
||||
restore-keys: |
|
||||
js-${{ runner.os }}-
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- name: Install nsis and zstd
|
||||
run: sudo apt-get install -y nsis zstd
|
||||
@@ -255,6 +251,11 @@ jobs:
|
||||
env:
|
||||
CODER_BASE_IMAGE_TAG: ${{ steps.image-base-tag.outputs.tag }}
|
||||
|
||||
- name: Generate offline docs
|
||||
run: |
|
||||
version="$(./scripts/version.sh)"
|
||||
make -j build/coder_docs_"$version".tgz
|
||||
|
||||
- name: ls build
|
||||
run: ls -lh build
|
||||
|
||||
@@ -333,7 +334,8 @@ jobs:
|
||||
runs-on: windows-latest
|
||||
needs: release
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -397,6 +399,8 @@ jobs:
|
||||
- name: Comment on PR
|
||||
if: ${{ !inputs.dry_run }}
|
||||
run: |
|
||||
# wait 30 seconds
|
||||
Start-Sleep -Seconds 30.0
|
||||
# Find the PR that wingetcreate just made.
|
||||
$version = "${{ needs.release.outputs.version }}".Trim('v')
|
||||
$pr_list = gh pr list --repo microsoft/winget-pkgs --search "author:cdrci Coder.Coder version ${version}" --limit 1 --json number | `
|
||||
|
||||
@@ -8,9 +8,12 @@ permissions:
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
# Uncomment when testing.
|
||||
# pull_request:
|
||||
|
||||
schedule:
|
||||
# Run every 6 hours Monday-Friday!
|
||||
- cron: "0 0,6,12,18 * * 1-5"
|
||||
- cron: "0 0/6 * * 1-5"
|
||||
|
||||
# Cancel in-progress runs for pull requests when developers push
|
||||
# additional changes
|
||||
@@ -18,11 +21,15 @@ concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}-security
|
||||
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
|
||||
|
||||
env:
|
||||
CODER_GO_VERSION: "1.20.6"
|
||||
|
||||
jobs:
|
||||
codeql:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'ubuntu-latest-8-cores' || 'ubuntu-latest' }}
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v2
|
||||
@@ -30,20 +37,7 @@ jobs:
|
||||
languages: go, javascript
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "~1.20"
|
||||
|
||||
- name: Go Cache Paths
|
||||
id: go-cache-paths
|
||||
run: |
|
||||
echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Go Mod Cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ steps.go-cache-paths.outputs.GOMODCACHE }}
|
||||
key: ${{ runner.os }}-release-go-mod-${{ hashFiles('**/go.sum') }}
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
# Workaround to prevent CodeQL from building the dashboard.
|
||||
- name: Remove Makefile
|
||||
@@ -65,44 +59,30 @@ jobs:
|
||||
"${{ secrets.SLACK_SECURITY_FAILURE_WEBHOOK_URL }}"
|
||||
|
||||
trivy:
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'ubuntu-latest-8-cores' || 'ubuntu-latest' }}
|
||||
runs-on: ${{ github.repository_owner == 'coder' && 'buildjet-8vcpu-ubuntu-2204' || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "~1.20"
|
||||
- name: Setup Go
|
||||
uses: ./.github/actions/setup-go
|
||||
|
||||
- name: Go Cache Paths
|
||||
id: go-cache-paths
|
||||
run: |
|
||||
echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
|
||||
- name: Setup Node
|
||||
uses: ./.github/actions/setup-node
|
||||
|
||||
- name: Go Mod Cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ steps.go-cache-paths.outputs.GOMODCACHE }}
|
||||
key: ${{ runner.os }}-release-go-mod-${{ hashFiles('**/go.sum') }}
|
||||
|
||||
- name: Cache Node
|
||||
id: cache-node
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
**/node_modules
|
||||
.eslintcache
|
||||
key: js-${{ runner.os }}-test-${{ hashFiles('**/yarn.lock') }}
|
||||
restore-keys: |
|
||||
js-${{ runner.os }}-
|
||||
- name: Setup sqlc
|
||||
uses: ./.github/actions/setup-sqlc
|
||||
|
||||
- name: Install yq
|
||||
run: go run github.com/mikefarah/yq/v4@v4.30.6
|
||||
- name: Install mockgen
|
||||
run: go install github.com/golang/mock/mockgen@v1.6.0
|
||||
- name: Install protoc-gen-go
|
||||
run: go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.26
|
||||
run: go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.30
|
||||
- name: Install protoc-gen-go-drpc
|
||||
run: go install storj.io/drpc/cmd/protoc-gen-go-drpc@v0.0.26
|
||||
run: go install storj.io/drpc/cmd/protoc-gen-go-drpc@v0.0.33
|
||||
- name: Install Protoc
|
||||
run: |
|
||||
# protoc must be in lockstep with our dogfood Dockerfile or the
|
||||
@@ -136,8 +116,16 @@ jobs:
|
||||
make -j "$image_job"
|
||||
echo "image=$(cat "$image_job")" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Run Prisma Cloud image scan
|
||||
uses: PaloAltoNetworks/prisma-cloud-scan@v1
|
||||
with:
|
||||
pcc_console_url: ${{ secrets.PRISMA_CLOUD_URL }}
|
||||
pcc_user: ${{ secrets.PRISMA_CLOUD_ACCESS_KEY }}
|
||||
pcc_pass: ${{ secrets.PRISMA_CLOUD_SECRET_KEY }}
|
||||
image_name: ${{ steps.build.outputs.image }}
|
||||
|
||||
- name: Run Trivy vulnerability scanner
|
||||
uses: aquasecurity/trivy-action@8bd2f9fbda2109502356ff8a6a89da55b1ead252
|
||||
uses: aquasecurity/trivy-action@41f05d9ecffa2ed3f1580af306000f734b733e54
|
||||
with:
|
||||
image-ref: ${{ steps.build.outputs.image }}
|
||||
format: sarif
|
||||
@@ -160,7 +148,7 @@ jobs:
|
||||
- name: Send Slack notification on failure
|
||||
if: ${{ failure() }}
|
||||
run: |
|
||||
msg="❌ CodeQL Failed\n\nhttps://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
|
||||
msg="❌ Trivy Failed\n\nhttps://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
|
||||
curl \
|
||||
-qfsSL \
|
||||
-X POST \
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
name: Stale Issue and Branch Cleanup
|
||||
name: Stale Issue, Banch and Old Workflows Cleanup
|
||||
on:
|
||||
schedule:
|
||||
# Every day at midnight
|
||||
@@ -10,24 +10,22 @@ jobs:
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
actions: write
|
||||
steps:
|
||||
- uses: actions/stale@v7.0.0
|
||||
- name: stale
|
||||
uses: actions/stale@v8.0.0
|
||||
with:
|
||||
stale-issue-label: "stale"
|
||||
stale-pr-label: "stale"
|
||||
days-before-stale: 90
|
||||
days-before-stale: 180
|
||||
# Pull Requests become stale more quickly due to merge conflicts.
|
||||
# Also, we promote minimizing WIP.
|
||||
days-before-pr-stale: 7
|
||||
days-before-pr-close: 3
|
||||
stale-pr-message: >
|
||||
This Pull Request is becoming stale. In order to minimize WIP,
|
||||
prevent merge conflicts and keep the tracker readable, I'm going
|
||||
close to this PR in 3 days if there isn't more activity.
|
||||
stale-issue-message: >
|
||||
This issue is becoming stale. In order to keep the tracker readable
|
||||
and actionable, I'm going close to this issue in 7 days if there
|
||||
isn't more activity.
|
||||
# We rarely take action in response to the message, so avoid
|
||||
# cluttering the issue and just close the oldies.
|
||||
stale-pr-message: ""
|
||||
stale-issue-message: ""
|
||||
# Upped from 30 since we have a big tracker and was hitting the limit.
|
||||
operations-per-run: 60
|
||||
# Start with the oldest issues, always.
|
||||
@@ -36,9 +34,9 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
- name: Run delete-old-branches-action
|
||||
uses: beatlabs/delete-old-branches-action@v0.0.9
|
||||
uses: beatlabs/delete-old-branches-action@v0.0.10
|
||||
with:
|
||||
repo_token: ${{ github.token }}
|
||||
date: "6 months ago"
|
||||
@@ -46,3 +44,24 @@ jobs:
|
||||
delete_tags: false
|
||||
# extra_protected_branch_regex: ^(foo|bar)$
|
||||
exclude_open_pr_branches: true
|
||||
del_runs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Delete PR Cleanup workflow runs
|
||||
uses: Mattraks/delete-workflow-runs@v2
|
||||
with:
|
||||
token: ${{ github.token }}
|
||||
repository: ${{ github.repository }}
|
||||
retain_days: 1
|
||||
keep_minimum_runs: 1
|
||||
delete_workflow_pattern: pr-cleanup.yaml
|
||||
|
||||
- name: Delete PR Deploy workflow skipped runs
|
||||
uses: Mattraks/delete-workflow-runs@v2
|
||||
with:
|
||||
token: ${{ github.token }}
|
||||
repository: ${{ github.repository }}
|
||||
retain_days: 0
|
||||
keep_minimum_runs: 0
|
||||
delete_run_by_conclusion_pattern: skipped
|
||||
delete_workflow_pattern: pr-deploy.yaml
|
||||
|
||||
@@ -4,6 +4,7 @@ Jetbrains = "JetBrains"
|
||||
IST = "IST"
|
||||
MacOS = "macOS"
|
||||
AKS = "AKS"
|
||||
O_WRONLY = "O_WRONLY"
|
||||
|
||||
[default.extend-words]
|
||||
AKS = "AKS"
|
||||
@@ -16,12 +17,16 @@ encrypter = "encrypter"
|
||||
|
||||
[files]
|
||||
extend-exclude = [
|
||||
"**.svg",
|
||||
"**.png",
|
||||
"**.lock",
|
||||
"go.sum",
|
||||
"go.mod",
|
||||
# These files contain base64 strings that confuse the detector
|
||||
"**XService**.ts",
|
||||
"**identity.go",
|
||||
"**.svg",
|
||||
"**.png",
|
||||
"**.lock",
|
||||
"go.sum",
|
||||
"go.mod",
|
||||
# These files contain base64 strings that confuse the detector
|
||||
"**XService**.ts",
|
||||
"**identity.go",
|
||||
"scripts/ci-report/testdata/**",
|
||||
"**/*_test.go",
|
||||
"**/*.test.tsx",
|
||||
"**/pnpm-lock.yaml",
|
||||
]
|
||||
|
||||
@@ -0,0 +1,32 @@
|
||||
name: weekly-docs
|
||||
# runs every monday at 9 am
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 9 * * 1"
|
||||
workflow_dispatch: # allows to run manually for testing
|
||||
|
||||
jobs:
|
||||
check-docs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@master
|
||||
|
||||
- name: Check Markdown links
|
||||
uses: gaurav-nelson/github-action-markdown-link-check@v1
|
||||
id: markdown-link-check
|
||||
# checks all markdown files from /docs including all subfolders
|
||||
with:
|
||||
use-quiet-mode: "yes"
|
||||
use-verbose-mode: "yes"
|
||||
config-file: ".github/workflows/mlc_config.json"
|
||||
folder-path: "docs/"
|
||||
file-path: "./README.md"
|
||||
|
||||
- name: Send Slack notification
|
||||
if: failure()
|
||||
run: |
|
||||
curl -X POST -H 'Content-type: application/json' -d '{"msg":"Broken links found in the documentation. Please check the logs at ${{ env.LOGS_URL }}"}' ${{ secrets.DOCS_LINK_SLACK_WEBHOOK }}
|
||||
echo "Sent Slack notification"
|
||||
env:
|
||||
LOGS_URL: https://github.com/coder/coder/actions/runs/${{ github.run_id }}
|
||||
+14
-5
@@ -6,7 +6,8 @@
|
||||
**/*.swp
|
||||
gotests.coverage
|
||||
gotests.xml
|
||||
gotestsum.json
|
||||
gotests_stats.json
|
||||
gotests.json
|
||||
node_modules/
|
||||
vendor/
|
||||
yarn-error.log
|
||||
@@ -26,12 +27,13 @@ site/storybook-static/
|
||||
site/test-results/*
|
||||
site/e2e/test-results/*
|
||||
site/e2e/states/*.json
|
||||
site/e2e/.auth.json
|
||||
site/playwright-report/*
|
||||
site/.swc
|
||||
site/dist/
|
||||
|
||||
# Make target for updating golden files.
|
||||
cli/testdata/.gen-golden
|
||||
helm/tests/testdata/.gen-golden
|
||||
# Make target for updating golden files (any dir).
|
||||
.gen-golden
|
||||
|
||||
# Build
|
||||
/build/
|
||||
@@ -47,8 +49,15 @@ site/stats/
|
||||
*.lock.hcl
|
||||
.terraform/
|
||||
|
||||
/.coderv2/*
|
||||
**/.coderv2/*
|
||||
**/__debug_bin
|
||||
|
||||
# direnv
|
||||
.envrc
|
||||
*.test
|
||||
|
||||
# Loadtesting
|
||||
./scaletest/terraform/.terraform
|
||||
./scaletest/terraform/.terraform.lock.hcl
|
||||
scaletest/terraform/secrets.tfvars
|
||||
.terraform.tfstate.*
|
||||
|
||||
+13
-4
@@ -2,6 +2,10 @@
|
||||
# Over time we should try tightening some of these.
|
||||
|
||||
linters-settings:
|
||||
exhaustruct:
|
||||
include:
|
||||
# Gradually extend to cover more of the codebase.
|
||||
- 'httpmw\.\w+'
|
||||
gocognit:
|
||||
min-complexity: 46 # Min code complexity (def 30).
|
||||
|
||||
@@ -54,7 +58,6 @@ linters-settings:
|
||||
# - importShadow
|
||||
- indexAlloc
|
||||
- initClause
|
||||
- ioutilDeprecated
|
||||
- mapKey
|
||||
- methodExprCall
|
||||
# - nestingReduce
|
||||
@@ -116,7 +119,8 @@ linters-settings:
|
||||
local-prefixes: coder.com,cdr.dev,go.coder.com,github.com/cdr,github.com/coder
|
||||
|
||||
gocyclo:
|
||||
min-complexity: 50
|
||||
# goal: 30
|
||||
min-complexity: 47
|
||||
|
||||
importas:
|
||||
no-unaliased: true
|
||||
@@ -194,18 +198,22 @@ issues:
|
||||
linters:
|
||||
# We use assertions rather than explicitly checking errors in tests
|
||||
- errcheck
|
||||
- forcetypeassert
|
||||
- exhaustruct # This is unhelpful in tests.
|
||||
- path: scripts/*
|
||||
linters:
|
||||
- exhaustruct
|
||||
|
||||
fix: true
|
||||
max-issues-per-linter: 0
|
||||
max-same-issues: 0
|
||||
|
||||
run:
|
||||
concurrency: 4
|
||||
skip-dirs:
|
||||
- node_modules
|
||||
skip-files:
|
||||
- scripts/rules.go
|
||||
timeout: 5m
|
||||
timeout: 10m
|
||||
|
||||
# Over time, add more and more linters from
|
||||
# https://golangci-lint.run/usage/linters/ as the code improves.
|
||||
@@ -219,6 +227,7 @@ linters:
|
||||
- errcheck
|
||||
- errname
|
||||
- errorlint
|
||||
- exhaustruct
|
||||
- exportloopref
|
||||
- forcetypeassert
|
||||
- gocritic
|
||||
|
||||
+19
-5
@@ -9,7 +9,8 @@
|
||||
**/*.swp
|
||||
gotests.coverage
|
||||
gotests.xml
|
||||
gotestsum.json
|
||||
gotests_stats.json
|
||||
gotests.json
|
||||
node_modules/
|
||||
vendor/
|
||||
yarn-error.log
|
||||
@@ -29,12 +30,13 @@ site/storybook-static/
|
||||
site/test-results/*
|
||||
site/e2e/test-results/*
|
||||
site/e2e/states/*.json
|
||||
site/e2e/.auth.json
|
||||
site/playwright-report/*
|
||||
site/.swc
|
||||
site/dist/
|
||||
|
||||
# Make target for updating golden files.
|
||||
cli/testdata/.gen-golden
|
||||
helm/tests/testdata/.gen-golden
|
||||
# Make target for updating golden files (any dir).
|
||||
.gen-golden
|
||||
|
||||
# Build
|
||||
/build/
|
||||
@@ -50,11 +52,18 @@ site/stats/
|
||||
*.lock.hcl
|
||||
.terraform/
|
||||
|
||||
/.coderv2/*
|
||||
**/.coderv2/*
|
||||
**/__debug_bin
|
||||
|
||||
# direnv
|
||||
.envrc
|
||||
*.test
|
||||
|
||||
# Loadtesting
|
||||
./scaletest/terraform/.terraform
|
||||
./scaletest/terraform/.terraform.lock.hcl
|
||||
scaletest/terraform/secrets.tfvars
|
||||
.terraform.tfstate.*
|
||||
# .prettierignore.include:
|
||||
# Helm templates contain variables that are invalid YAML and can't be formatted
|
||||
# by Prettier.
|
||||
@@ -66,3 +75,8 @@ helm/templates/*.yaml
|
||||
|
||||
# Testdata shouldn't be formatted.
|
||||
scripts/apitypings/testdata/**/*.ts
|
||||
|
||||
# Generated files shouldn't be formatted.
|
||||
site/e2e/provisionerGenerated.ts
|
||||
|
||||
**/pnpm-lock.yaml
|
||||
|
||||
@@ -8,3 +8,8 @@ helm/templates/*.yaml
|
||||
|
||||
# Testdata shouldn't be formatted.
|
||||
scripts/apitypings/testdata/**/*.ts
|
||||
|
||||
# Generated files shouldn't be formatted.
|
||||
site/e2e/provisionerGenerated.ts
|
||||
|
||||
**/pnpm-lock.yaml
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
printWidth: 80
|
||||
semi: false
|
||||
trailingComma: all
|
||||
useTabs: false
|
||||
tabWidth: 2
|
||||
overrides:
|
||||
- files:
|
||||
- README.md
|
||||
|
||||
Vendored
+4
-9
@@ -20,6 +20,8 @@
|
||||
"codersdk",
|
||||
"cronstrue",
|
||||
"databasefake",
|
||||
"dbfake",
|
||||
"dbgen",
|
||||
"dbtype",
|
||||
"DERP",
|
||||
"derphttp",
|
||||
@@ -34,6 +36,7 @@
|
||||
"Dsts",
|
||||
"embeddedpostgres",
|
||||
"enablements",
|
||||
"enterprisemeta",
|
||||
"errgroup",
|
||||
"eventsourcemock",
|
||||
"Failf",
|
||||
@@ -90,7 +93,6 @@
|
||||
"pqtype",
|
||||
"prometheusmetrics",
|
||||
"promhttp",
|
||||
"promptui",
|
||||
"protobuf",
|
||||
"provisionerd",
|
||||
"provisionerdserver",
|
||||
@@ -209,12 +211,5 @@
|
||||
"go.testFlags": ["-short", "-coverpkg=./..."],
|
||||
// We often use a version of TypeScript that's ahead of the version shipped
|
||||
// with VS Code.
|
||||
"typescript.tsdk": "./site/node_modules/typescript/lib",
|
||||
"grammarly.selectors": [
|
||||
{
|
||||
"language": "markdown",
|
||||
"scheme": "file",
|
||||
"pattern": "docs/contributing/frontend.md"
|
||||
}
|
||||
]
|
||||
"typescript.tsdk": "./site/node_modules/typescript/lib"
|
||||
}
|
||||
|
||||
@@ -50,11 +50,11 @@ endif
|
||||
# Note, all find statements should be written with `.` or `./path` as
|
||||
# the search path so that these exclusions match.
|
||||
FIND_EXCLUSIONS= \
|
||||
-not \( \( -path '*/.git/*' -o -path './build/*' -o -path './vendor/*' -o -path './.coderv2/*' -o -path '*/node_modules/*' -o -path './site/out/*' \) -prune \)
|
||||
-not \( \( -path '*/.git/*' -o -path './build/*' -o -path './vendor/*' -o -path './.coderv2/*' -o -path '*/node_modules/*' -o -path '*/out/*' -o -path './coderd/apidoc/*' -o -path '*/.next/*' \) -prune \)
|
||||
# Source files used for make targets, evaluated on use.
|
||||
GO_SRC_FILES = $(shell find . $(FIND_EXCLUSIONS) -type f -name '*.go')
|
||||
GO_SRC_FILES := $(shell find . $(FIND_EXCLUSIONS) -type f -name '*.go' -not -name '*_test.go')
|
||||
# All the shell files in the repo, excluding ignored files.
|
||||
SHELL_SRC_FILES = $(shell find . $(FIND_EXCLUSIONS) -type f -name '*.sh')
|
||||
SHELL_SRC_FILES := $(shell find . $(FIND_EXCLUSIONS) -type f -name '*.sh')
|
||||
|
||||
# All ${OS}_${ARCH} combos we build for. Windows binaries have the .exe suffix.
|
||||
OS_ARCHES := \
|
||||
@@ -356,9 +356,17 @@ build/coder_helm_$(VERSION).tgz:
|
||||
--output "$@"
|
||||
|
||||
site/out/index.html: site/package.json $(shell find ./site $(FIND_EXCLUSIONS) -type f \( -name '*.ts' -o -name '*.tsx' \))
|
||||
./scripts/yarn_install.sh
|
||||
cd site
|
||||
yarn build
|
||||
../scripts/pnpm_install.sh
|
||||
pnpm build
|
||||
|
||||
offlinedocs/out/index.html: $(shell find ./offlinedocs $(FIND_EXCLUSIONS) -type f) $(shell find ./docs $(FIND_EXCLUSIONS) -type f | sed 's: :\\ :g')
|
||||
cd offlinedocs
|
||||
../scripts/pnpm_install.sh
|
||||
pnpm export
|
||||
|
||||
build/coder_docs_$(VERSION).tgz: offlinedocs/out/index.html
|
||||
tar -czf "$@" -C offlinedocs/out .
|
||||
|
||||
install: build/coder_$(VERSION)_$(GOOS)_$(GOARCH)$(GOOS_BIN_EXT)
|
||||
install_dir="$$(go env GOPATH)/bin"
|
||||
@@ -382,9 +390,9 @@ fmt/prettier:
|
||||
cd site
|
||||
# Avoid writing files in CI to reduce file write activity
|
||||
ifdef CI
|
||||
yarn run format:check
|
||||
pnpm run format:check
|
||||
else
|
||||
yarn run format:write
|
||||
pnpm run format:write
|
||||
endif
|
||||
.PHONY: fmt/prettier
|
||||
|
||||
@@ -402,11 +410,22 @@ else
|
||||
endif
|
||||
.PHONY: fmt/shfmt
|
||||
|
||||
lint: lint/shellcheck lint/go
|
||||
lint: lint/shellcheck lint/go lint/ts lint/helm lint/site-icons
|
||||
.PHONY: lint
|
||||
|
||||
lint/site-icons:
|
||||
./scripts/check_site_icons.sh
|
||||
|
||||
.PHONY: lint/site-icons
|
||||
|
||||
lint/ts:
|
||||
cd site
|
||||
pnpm i && pnpm lint
|
||||
.PHONY: lint/ts
|
||||
|
||||
lint/go:
|
||||
./scripts/check_enterprise_imports.sh
|
||||
go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.53.2
|
||||
golangci-lint run
|
||||
.PHONY: lint/go
|
||||
|
||||
@@ -416,13 +435,29 @@ lint/shellcheck: $(SHELL_SRC_FILES)
|
||||
shellcheck --external-sources $(SHELL_SRC_FILES)
|
||||
.PHONY: lint/shellcheck
|
||||
|
||||
lint/helm:
|
||||
cd helm
|
||||
make lint
|
||||
.PHONY: lint/helm
|
||||
|
||||
# All files generated by the database should be added here, and this can be used
|
||||
# as a target for jobs that need to run after the database is generated.
|
||||
DB_GEN_FILES := \
|
||||
coderd/database/querier.go \
|
||||
coderd/database/unique_constraint.go \
|
||||
coderd/database/dbfake/dbfake.go \
|
||||
coderd/database/dbmetrics/dbmetrics.go \
|
||||
coderd/database/dbauthz/dbauthz.go \
|
||||
coderd/database/dbmock/dbmock.go
|
||||
|
||||
# all gen targets should be added here and to gen/mark-fresh
|
||||
gen: \
|
||||
coderd/database/dump.sql \
|
||||
coderd/database/querier.go \
|
||||
$(DB_GEN_FILES) \
|
||||
provisionersdk/proto/provisioner.pb.go \
|
||||
provisionerd/proto/provisionerd.pb.go \
|
||||
site/src/api/typesGenerated.ts \
|
||||
coderd/rbac/object_gen.go \
|
||||
docs/admin/prometheus.md \
|
||||
docs/cli.md \
|
||||
docs/admin/audit-logs.md \
|
||||
@@ -439,10 +474,11 @@ gen: \
|
||||
gen/mark-fresh:
|
||||
files="\
|
||||
coderd/database/dump.sql \
|
||||
coderd/database/querier.go \
|
||||
$(DB_GEN_FILES) \
|
||||
provisionersdk/proto/provisioner.pb.go \
|
||||
provisionerd/proto/provisionerd.pb.go \
|
||||
site/src/api/typesGenerated.ts \
|
||||
coderd/rbac/object_gen.go \
|
||||
docs/admin/prometheus.md \
|
||||
docs/cli.md \
|
||||
docs/admin/audit-logs.md \
|
||||
@@ -471,9 +507,12 @@ coderd/database/dump.sql: coderd/database/gen/dump/main.go $(wildcard coderd/dat
|
||||
go run ./coderd/database/gen/dump/main.go
|
||||
|
||||
# Generates Go code for querying the database.
|
||||
coderd/database/querier.go: coderd/database/sqlc.yaml coderd/database/dump.sql $(wildcard coderd/database/queries/*.sql) coderd/database/gen/enum/main.go
|
||||
coderd/database/querier.go: coderd/database/sqlc.yaml coderd/database/dump.sql $(wildcard coderd/database/queries/*.sql)
|
||||
./coderd/database/generate.sh
|
||||
|
||||
coderd/database/dbmock/dbmock.go: coderd/database/db.go coderd/database/querier.go
|
||||
go generate ./coderd/database/dbmock/
|
||||
|
||||
provisionersdk/proto/provisioner.pb.go: provisionersdk/proto/provisioner.proto
|
||||
protoc \
|
||||
--go_out=. \
|
||||
@@ -493,38 +532,46 @@ provisionerd/proto/provisionerd.pb.go: provisionerd/proto/provisionerd.proto
|
||||
site/src/api/typesGenerated.ts: scripts/apitypings/main.go $(shell find ./codersdk $(FIND_EXCLUSIONS) -type f -name '*.go')
|
||||
go run scripts/apitypings/main.go > site/src/api/typesGenerated.ts
|
||||
cd site
|
||||
yarn run format:types
|
||||
pnpm run format:types
|
||||
|
||||
coderd/rbac/object_gen.go: scripts/rbacgen/main.go coderd/rbac/object.go
|
||||
go run scripts/rbacgen/main.go ./coderd/rbac > coderd/rbac/object_gen.go
|
||||
|
||||
docs/admin/prometheus.md: scripts/metricsdocgen/main.go scripts/metricsdocgen/metrics
|
||||
go run scripts/metricsdocgen/main.go
|
||||
cd site
|
||||
yarn run format:write:only ../docs/admin/prometheus.md
|
||||
pnpm run format:write:only ./docs/admin/prometheus.md
|
||||
|
||||
docs/cli.md: scripts/clidocgen/main.go $(GO_SRC_FILES) docs/manifest.json
|
||||
docs/cli.md: scripts/clidocgen/main.go $(GO_SRC_FILES)
|
||||
BASE_PATH="." go run ./scripts/clidocgen
|
||||
cd site
|
||||
yarn run format:write:only ../docs/cli.md ../docs/cli/*.md ../docs/manifest.json
|
||||
pnpm run format:write:only ./docs/cli.md ./docs/cli/*.md ./docs/manifest.json
|
||||
|
||||
docs/admin/audit-logs.md: scripts/auditdocgen/main.go enterprise/audit/table.go
|
||||
docs/admin/audit-logs.md: scripts/auditdocgen/main.go enterprise/audit/table.go coderd/rbac/object_gen.go
|
||||
go run scripts/auditdocgen/main.go
|
||||
cd site
|
||||
yarn run format:write:only ../docs/admin/audit-logs.md
|
||||
pnpm run format:write:only ./docs/admin/audit-logs.md
|
||||
|
||||
coderd/apidoc/swagger.json: $(shell find ./scripts/apidocgen $(FIND_EXCLUSIONS) -type f) $(wildcard coderd/*.go) $(wildcard enterprise/coderd/*.go) $(wildcard codersdk/*.go) .swaggo docs/manifest.json
|
||||
coderd/apidoc/swagger.json: $(shell find ./scripts/apidocgen $(FIND_EXCLUSIONS) -type f) $(wildcard coderd/*.go) $(wildcard enterprise/coderd/*.go) $(wildcard codersdk/*.go) $(wildcard enterprise/wsproxy/wsproxysdk/*.go) $(DB_GEN_FILES) .swaggo docs/manifest.json coderd/rbac/object_gen.go
|
||||
./scripts/apidocgen/generate.sh
|
||||
yarn run --cwd=site format:write:only ../docs/api ../docs/manifest.json ../coderd/apidoc/swagger.json
|
||||
pnpm run format:write:only ./docs/api ./docs/manifest.json ./coderd/apidoc/swagger.json
|
||||
|
||||
update-golden-files: cli/testdata/.gen-golden helm/tests/testdata/.gen-golden
|
||||
update-golden-files: cli/testdata/.gen-golden helm/tests/testdata/.gen-golden scripts/ci-report/testdata/.gen-golden enterprise/cli/testdata/.gen-golden
|
||||
.PHONY: update-golden-files
|
||||
|
||||
cli/testdata/.gen-golden: $(wildcard cli/testdata/*.golden) $(wildcard cli/*.tpl) $(GO_SRC_FILES)
|
||||
go test ./cli -run=TestCommandHelp -update
|
||||
cli/testdata/.gen-golden: $(wildcard cli/testdata/*.golden) $(wildcard cli/*.tpl) $(GO_SRC_FILES) $(wildcard cli/*_test.go)
|
||||
go test ./cli -run="Test(CommandHelp|ServerYAML)" -update
|
||||
touch "$@"
|
||||
|
||||
helm/tests/testdata/.gen-golden: $(wildcard helm/tests/testdata/*.golden) $(GO_SRC_FILES)
|
||||
enterprise/cli/testdata/.gen-golden: $(wildcard enterprise/cli/testdata/*.golden) $(wildcard cli/*.tpl) $(GO_SRC_FILES) $(wildcard enterprise/cli/*_test.go)
|
||||
go test ./enterprise/cli -run="TestEnterpriseCommandHelp" -update
|
||||
touch "$@"
|
||||
|
||||
helm/tests/testdata/.gen-golden: $(wildcard helm/tests/testdata/*.yaml) $(wildcard helm/tests/testdata/*.golden) $(GO_SRC_FILES) $(wildcard helm/tests/*_test.go)
|
||||
go test ./helm/tests -run=TestUpdateGoldenFiles -update
|
||||
touch "$@"
|
||||
|
||||
scripts/ci-report/testdata/.gen-golden: $(wildcard scripts/ci-report/testdata/*) $(wildcard scripts/ci-report/*.go)
|
||||
go test ./scripts/ci-report -run=TestOutputMatchesGoldenFile -update
|
||||
touch "$@"
|
||||
|
||||
# Generate a prettierrc for the site package that uses relative paths for
|
||||
# overrides. This allows us to share the same prettier config between the
|
||||
# site and the root of the repo.
|
||||
@@ -585,22 +632,23 @@ site/.eslintignore site/.prettierignore: .prettierignore Makefile
|
||||
echo "$${ignore}$${rule}" >> "$@"
|
||||
done < "$<"
|
||||
|
||||
test: test-clean
|
||||
gotestsum -- -v -short ./...
|
||||
test:
|
||||
gotestsum --format standard-quiet -- -v -short -count=1 ./...
|
||||
.PHONY: test
|
||||
|
||||
# When updating -timeout for this test, keep in sync with
|
||||
# test-go-postgres (.github/workflows/coder.yaml).
|
||||
test-postgres: test-clean test-postgres-docker
|
||||
# Do add coverage flags so that test caching works.
|
||||
test-postgres: test-postgres-docker
|
||||
# The postgres test is prone to failure, so we limit parallelism for
|
||||
# more consistent execution.
|
||||
DB=ci DB_FROM=$(shell go run scripts/migrate-ci/main.go) gotestsum \
|
||||
--junitfile="gotests.xml" \
|
||||
--jsonfile="gotests.json" \
|
||||
--packages="./..." -- \
|
||||
-covermode=atomic -coverprofile="gotests.coverage" -timeout=20m \
|
||||
-parallel=4 \
|
||||
-coverpkg=./... \
|
||||
-count=1 -race -failfast
|
||||
-timeout=20m \
|
||||
-failfast \
|
||||
-count=1
|
||||
.PHONY: test-postgres
|
||||
|
||||
test-postgres-docker:
|
||||
@@ -615,8 +663,10 @@ test-postgres-docker:
|
||||
--name test-postgres-docker \
|
||||
--restart no \
|
||||
--detach \
|
||||
postgres:13 \
|
||||
gcr.io/coder-dev-1/postgres:13 \
|
||||
-c shared_buffers=1GB \
|
||||
-c work_mem=1GB \
|
||||
-c effective_cache_size=1GB \
|
||||
-c max_connections=1000 \
|
||||
-c fsync=off \
|
||||
-c synchronous_commit=off \
|
||||
@@ -629,6 +679,14 @@ test-postgres-docker:
|
||||
done
|
||||
.PHONY: test-postgres-docker
|
||||
|
||||
# Make sure to keep this in sync with test-go-race from .github/workflows/ci.yaml.
|
||||
test-race:
|
||||
gotestsum --junitfile="gotests.xml" -- -race -count=1 ./...
|
||||
.PHONY: test-race
|
||||
|
||||
# Note: we used to add this to the test target, but it's not necessary and we can
|
||||
# achieve the desired result by specifying -count=1 in the go test invocation
|
||||
# instead. Keeping it here for convenience.
|
||||
test-clean:
|
||||
go clean -testcache
|
||||
.PHONY: test-clean
|
||||
|
||||
@@ -84,7 +84,7 @@ coder server --postgres-url <url> --access-url <url>
|
||||
|
||||
> <sup>1</sup> For production deployments, set up an external PostgreSQL instance for reliability.
|
||||
|
||||
Use `coder --help` to get a list of flags and environment variables. Use our [quickstart guide](https://coder.com/docs/v2/latest/quickstart) for a full walkthrough.
|
||||
Use `coder --help` to get a list of flags and environment variables. Use our [install guides](https://coder.com/docs/v2/latest/install) for a full walkthrough.
|
||||
|
||||
## Documentation
|
||||
|
||||
|
||||
+673
-790
File diff suppressed because it is too large
Load Diff
+1005
-450
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,848 @@
|
||||
package agentssh
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gliderlabs/ssh"
|
||||
"github.com/pkg/sftp"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/spf13/afero"
|
||||
"go.uber.org/atomic"
|
||||
gossh "golang.org/x/crypto/ssh"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
|
||||
"github.com/coder/coder/agent/usershell"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/codersdk/agentsdk"
|
||||
"github.com/coder/coder/pty"
|
||||
)
|
||||
|
||||
const (
	// MagicSessionErrorCode indicates that something went wrong with the session, rather than the
	// command just returning a nonzero exit code, and is chosen as an arbitrary, high number
	// unlikely to shadow other exit codes, which are typically 1, 2, 3, etc.
	MagicSessionErrorCode = 229

	// MagicSessionTypeEnvironmentVariable is used to track the purpose behind an SSH connection.
	// This is stripped from any commands being executed, and is counted towards connection stats.
	MagicSessionTypeEnvironmentVariable = "CODER_SSH_SESSION_TYPE"
	// MagicSessionTypeVSCode is set in the SSH config by the VS Code extension to identify itself.
	MagicSessionTypeVSCode = "vscode"
	// MagicSessionTypeJetBrains is set in the SSH config by the JetBrains extension to identify itself.
	// Any other non-empty value is logged as invalid by sessionStart.
	MagicSessionTypeJetBrains = "jetbrains"
)
|
||||
|
||||
// Server is the agent-side SSH server. It serves shell/exec sessions, the
// SFTP subsystem, TCP and Unix-socket port forwarding, and X11 forwarding,
// and tracks all listeners, connections, and sessions so Close can terminate
// them gracefully.
type Server struct {
	mu sync.RWMutex // Protects following.
	// fs is used for filesystem lookups such as .hushlogin and the MOTD
	// file; it is faked in tests.
	fs        afero.Fs
	listeners map[net.Listener]struct{}
	conns     map[net.Conn]struct{}
	sessions  map[ssh.Session]struct{}
	// closing is non-nil while a Close is in progress; it is closed and
	// reset to nil once shutdown completes (see Close and trackListener).
	closing chan struct{}
	// Wait for goroutines to exit, waited without
	// a lock on mu but protected by closing.
	wg sync.WaitGroup

	logger slog.Logger
	srv    *ssh.Server
	// x11SocketDir is the directory where X11 forwarding sockets live.
	x11SocketDir string

	// Env holds agent-level environment variables; CreateCommand applies
	// them last so they override all other variables.
	Env map[string]string
	// AgentToken returns the current agent token, exposed to commands as
	// CODER_AGENT_TOKEN.
	AgentToken func() string
	Manifest   *atomic.Pointer[agentsdk.Manifest]
	// ServiceBanner is shown to interactive login shells before the MOTD.
	ServiceBanner *atomic.Pointer[codersdk.ServiceBannerConfig]

	// Active-connection counters bucketed by magic session type; snapshot
	// via ConnStats.
	connCountVSCode     atomic.Int64
	connCountJetBrains  atomic.Int64
	connCountSSHSession atomic.Int64

	metrics *sshServerMetrics
}
|
||||
|
||||
func NewServer(ctx context.Context, logger slog.Logger, prometheusRegistry *prometheus.Registry, fs afero.Fs, maxTimeout time.Duration, x11SocketDir string) (*Server, error) {
|
||||
// Clients' should ignore the host key when connecting.
|
||||
// The agent needs to authenticate with coderd to SSH,
|
||||
// so SSH authentication doesn't improve security.
|
||||
randomHostKey, err := rsa.GenerateKey(rand.Reader, 2048)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
randomSigner, err := gossh.NewSignerFromKey(randomHostKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if x11SocketDir == "" {
|
||||
x11SocketDir = filepath.Join(os.TempDir(), ".X11-unix")
|
||||
}
|
||||
|
||||
forwardHandler := &ssh.ForwardedTCPHandler{}
|
||||
unixForwardHandler := &forwardedUnixHandler{log: logger}
|
||||
|
||||
metrics := newSSHServerMetrics(prometheusRegistry)
|
||||
s := &Server{
|
||||
listeners: make(map[net.Listener]struct{}),
|
||||
fs: fs,
|
||||
conns: make(map[net.Conn]struct{}),
|
||||
sessions: make(map[ssh.Session]struct{}),
|
||||
logger: logger,
|
||||
x11SocketDir: x11SocketDir,
|
||||
|
||||
metrics: metrics,
|
||||
}
|
||||
|
||||
srv := &ssh.Server{
|
||||
ChannelHandlers: map[string]ssh.ChannelHandler{
|
||||
"direct-tcpip": ssh.DirectTCPIPHandler,
|
||||
"direct-streamlocal@openssh.com": directStreamLocalHandler,
|
||||
"session": ssh.DefaultSessionHandler,
|
||||
},
|
||||
ConnectionFailedCallback: func(conn net.Conn, err error) {
|
||||
s.logger.Warn(ctx, "ssh connection failed",
|
||||
slog.F("remote_addr", conn.RemoteAddr()),
|
||||
slog.F("local_addr", conn.LocalAddr()),
|
||||
slog.Error(err))
|
||||
metrics.failedConnectionsTotal.Add(1)
|
||||
},
|
||||
ConnectionCompleteCallback: func(conn *gossh.ServerConn, err error) {
|
||||
s.logger.Info(ctx, "ssh connection complete",
|
||||
slog.F("remote_addr", conn.RemoteAddr()),
|
||||
slog.F("local_addr", conn.LocalAddr()),
|
||||
slog.Error(err))
|
||||
},
|
||||
Handler: s.sessionHandler,
|
||||
HostSigners: []ssh.Signer{randomSigner},
|
||||
LocalPortForwardingCallback: func(ctx ssh.Context, destinationHost string, destinationPort uint32) bool {
|
||||
// Allow local port forwarding all!
|
||||
s.logger.Debug(ctx, "local port forward",
|
||||
slog.F("destination_host", destinationHost),
|
||||
slog.F("destination_port", destinationPort))
|
||||
return true
|
||||
},
|
||||
PtyCallback: func(ctx ssh.Context, pty ssh.Pty) bool {
|
||||
return true
|
||||
},
|
||||
ReversePortForwardingCallback: func(ctx ssh.Context, bindHost string, bindPort uint32) bool {
|
||||
// Allow reverse port forwarding all!
|
||||
s.logger.Debug(ctx, "local port forward",
|
||||
slog.F("bind_host", bindHost),
|
||||
slog.F("bind_port", bindPort))
|
||||
return true
|
||||
},
|
||||
RequestHandlers: map[string]ssh.RequestHandler{
|
||||
"tcpip-forward": forwardHandler.HandleSSHRequest,
|
||||
"cancel-tcpip-forward": forwardHandler.HandleSSHRequest,
|
||||
"streamlocal-forward@openssh.com": unixForwardHandler.HandleSSHRequest,
|
||||
"cancel-streamlocal-forward@openssh.com": unixForwardHandler.HandleSSHRequest,
|
||||
},
|
||||
X11Callback: s.x11Callback,
|
||||
ServerConfigCallback: func(ctx ssh.Context) *gossh.ServerConfig {
|
||||
return &gossh.ServerConfig{
|
||||
NoClientAuth: true,
|
||||
}
|
||||
},
|
||||
SubsystemHandlers: map[string]ssh.SubsystemHandler{
|
||||
"sftp": s.sessionHandler,
|
||||
},
|
||||
}
|
||||
|
||||
// The MaxTimeout functionality has been substituted with the introduction of the KeepAlive feature.
|
||||
// In cases where very short timeouts are set, the SSH server will automatically switch to the connection timeout for both read and write operations.
|
||||
if maxTimeout >= 3*time.Second {
|
||||
srv.ClientAliveCountMax = 3
|
||||
srv.ClientAliveInterval = maxTimeout / time.Duration(srv.ClientAliveCountMax)
|
||||
srv.MaxTimeout = 0
|
||||
} else {
|
||||
srv.MaxTimeout = maxTimeout
|
||||
}
|
||||
|
||||
s.srv = srv
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// ConnStats is a point-in-time snapshot of the number of active connections,
// bucketed by the magic session type that opened them.
type ConnStats struct {
	// Sessions counts plain SSH sessions (no magic session type set).
	Sessions int64
	// VSCode counts sessions opened by the VS Code extension.
	VSCode int64
	// JetBrains counts sessions opened by the JetBrains extension.
	JetBrains int64
}
|
||||
|
||||
func (s *Server) ConnStats() ConnStats {
|
||||
return ConnStats{
|
||||
Sessions: s.connCountSSHSession.Load(),
|
||||
VSCode: s.connCountVSCode.Load(),
|
||||
JetBrains: s.connCountJetBrains.Load(),
|
||||
}
|
||||
}
|
||||
|
||||
// sessionHandler is the entry point for every SSH session and SFTP subsystem
// request. It registers the session for graceful shutdown, sets up X11
// forwarding when requested, dispatches subsystems, and runs the requested
// command, translating its outcome into an SSH exit status.
func (s *Server) sessionHandler(session ssh.Session) {
	logger := s.logger.With(slog.F("remote_addr", session.RemoteAddr()), slog.F("local_addr", session.LocalAddr()))
	logger.Info(session.Context(), "handling ssh session")
	ctx := session.Context()
	if !s.trackSession(session, true) {
		// See (*Server).Close() for why we call Close instead of Exit.
		_ = session.Close()
		logger.Info(ctx, "unable to accept new session, server is closing")
		return
	}
	defer s.trackSession(session, false)

	extraEnv := make([]string, 0)
	x11, hasX11 := session.X11()
	if hasX11 {
		handled := s.x11Handler(session.Context(), x11)
		if !handled {
			_ = session.Exit(1)
			logger.Error(ctx, "x11 handler failed")
			return
		}
		// Point the command at the forwarded X11 display.
		extraEnv = append(extraEnv, fmt.Sprintf("DISPLAY=:%d.0", x11.ScreenNumber))
	}

	// An empty subsystem means a regular shell/exec session; "sftp" is
	// served separately and everything else is rejected.
	switch ss := session.Subsystem(); ss {
	case "":
	case "sftp":
		s.sftpHandler(session)
		return
	default:
		logger.Warn(ctx, "unsupported subsystem", slog.F("subsystem", ss))
		_ = session.Exit(1)
		return
	}

	err := s.sessionStart(session, extraEnv)
	var exitError *exec.ExitError
	if xerrors.As(err, &exitError) {
		// The command ran and returned a nonzero status; forward it as-is.
		logger.Info(ctx, "ssh session returned", slog.Error(exitError))
		_ = session.Exit(exitError.ExitCode())
		return
	}
	if err != nil {
		logger.Warn(ctx, "ssh session failed", slog.Error(err))
		// This exit code is designed to be unlikely to be confused for a legit exit code
		// from the process.
		_ = session.Exit(MagicSessionErrorCode)
		return
	}
	logger.Info(ctx, "normal ssh session exit")
	_ = session.Exit(0)
}
|
||||
|
||||
func (s *Server) sessionStart(session ssh.Session, extraEnv []string) (retErr error) {
|
||||
ctx := session.Context()
|
||||
env := append(session.Environ(), extraEnv...)
|
||||
var magicType string
|
||||
for index, kv := range env {
|
||||
if !strings.HasPrefix(kv, MagicSessionTypeEnvironmentVariable) {
|
||||
continue
|
||||
}
|
||||
magicType = strings.TrimPrefix(kv, MagicSessionTypeEnvironmentVariable+"=")
|
||||
env = append(env[:index], env[index+1:]...)
|
||||
}
|
||||
switch magicType {
|
||||
case MagicSessionTypeVSCode:
|
||||
s.connCountVSCode.Add(1)
|
||||
defer s.connCountVSCode.Add(-1)
|
||||
case MagicSessionTypeJetBrains:
|
||||
s.connCountJetBrains.Add(1)
|
||||
defer s.connCountJetBrains.Add(-1)
|
||||
case "":
|
||||
s.connCountSSHSession.Add(1)
|
||||
defer s.connCountSSHSession.Add(-1)
|
||||
default:
|
||||
s.logger.Warn(ctx, "invalid magic ssh session type specified", slog.F("type", magicType))
|
||||
}
|
||||
|
||||
magicTypeLabel := magicTypeMetricLabel(magicType)
|
||||
sshPty, windowSize, isPty := session.Pty()
|
||||
|
||||
cmd, err := s.CreateCommand(ctx, session.RawCommand(), env)
|
||||
if err != nil {
|
||||
ptyLabel := "no"
|
||||
if isPty {
|
||||
ptyLabel = "yes"
|
||||
}
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, ptyLabel, "create_command").Add(1)
|
||||
return err
|
||||
}
|
||||
|
||||
if ssh.AgentRequested(session) {
|
||||
l, err := ssh.NewAgentListener()
|
||||
if err != nil {
|
||||
ptyLabel := "no"
|
||||
if isPty {
|
||||
ptyLabel = "yes"
|
||||
}
|
||||
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, ptyLabel, "listener").Add(1)
|
||||
return xerrors.Errorf("new agent listener: %w", err)
|
||||
}
|
||||
defer l.Close()
|
||||
go ssh.ForwardAgentConnections(l, session)
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", "SSH_AUTH_SOCK", l.Addr().String()))
|
||||
}
|
||||
|
||||
if isPty {
|
||||
return s.startPTYSession(session, magicTypeLabel, cmd, sshPty, windowSize)
|
||||
}
|
||||
return s.startNonPTYSession(session, magicTypeLabel, cmd.AsExec())
|
||||
}
|
||||
|
||||
func (s *Server) startNonPTYSession(session ssh.Session, magicTypeLabel string, cmd *exec.Cmd) error {
|
||||
s.metrics.sessionsTotal.WithLabelValues(magicTypeLabel, "no").Add(1)
|
||||
|
||||
cmd.Stdout = session
|
||||
cmd.Stderr = session.Stderr()
|
||||
// This blocks forever until stdin is received if we don't
|
||||
// use StdinPipe. It's unknown what causes this.
|
||||
stdinPipe, err := cmd.StdinPipe()
|
||||
if err != nil {
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "no", "stdin_pipe").Add(1)
|
||||
return xerrors.Errorf("create stdin pipe: %w", err)
|
||||
}
|
||||
go func() {
|
||||
_, err := io.Copy(stdinPipe, session)
|
||||
if err != nil {
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "no", "stdin_io_copy").Add(1)
|
||||
}
|
||||
_ = stdinPipe.Close()
|
||||
}()
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "no", "start_command").Add(1)
|
||||
return xerrors.Errorf("start: %w", err)
|
||||
}
|
||||
return cmd.Wait()
|
||||
}
|
||||
|
||||
// ptySession is the interface to the ssh.Session that startPTYSession uses
// we use an interface here so that we can fake it in tests.
type ptySession interface {
	io.ReadWriter
	// Context returns the session's context; it is done once the client
	// disconnects or the session is otherwise terminated.
	Context() ssh.Context
	// DisablePTYEmulation turns off gliderlabs/ssh's NL-to-CRNL output
	// translation, which would corrupt raw terminal streams.
	DisablePTYEmulation()
	// RawCommand returns the exact command string the client requested
	// (empty for a login shell).
	RawCommand() string
}
|
||||
|
||||
// startPTYSession attaches cmd to a pseudo-terminal sized from sshPty, shows
// the service banner and MOTD for (non-quiet) login shells, relays window
// resizes and I/O between the session and the PTY, and waits for the process
// to exit. A non-nil return indicates a session-level failure rather than a
// nonzero command exit.
func (s *Server) startPTYSession(session ptySession, magicTypeLabel string, cmd *pty.Cmd, sshPty ssh.Pty, windowSize <-chan ssh.Window) (retErr error) {
	s.metrics.sessionsTotal.WithLabelValues(magicTypeLabel, "yes").Add(1)

	ctx := session.Context()
	// Disable minimal PTY emulation set by gliderlabs/ssh (NL-to-CRNL).
	// See https://github.com/coder/coder/issues/3371.
	session.DisablePTYEmulation()

	if isLoginShell(session.RawCommand()) {
		serviceBanner := s.ServiceBanner.Load()
		if serviceBanner != nil {
			err := showServiceBanner(session, serviceBanner)
			if err != nil {
				// Banner failures are cosmetic; log and continue the session.
				s.logger.Error(ctx, "agent failed to show service banner", slog.Error(err))
				s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "service_banner").Add(1)
			}
		}
	}

	if !isQuietLogin(s.fs, session.RawCommand()) {
		manifest := s.Manifest.Load()
		if manifest != nil {
			err := showMOTD(s.fs, session, manifest.MOTDFile)
			if err != nil {
				// Like the banner, a failed MOTD does not abort the session.
				s.logger.Error(ctx, "agent failed to show MOTD", slog.Error(err))
				s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "motd").Add(1)
			}
		} else {
			s.logger.Warn(ctx, "metadata lookup failed, unable to show MOTD")
		}
	}

	cmd.Env = append(cmd.Env, fmt.Sprintf("TERM=%s", sshPty.Term))

	// The pty package sets `SSH_TTY` on supported platforms.
	ptty, process, err := pty.Start(cmd, pty.WithPTYOption(
		pty.WithSSHRequest(sshPty),
		pty.WithLogger(slog.Stdlib(ctx, s.logger, slog.LevelInfo)),
	))
	if err != nil {
		s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "start_command").Add(1)
		return xerrors.Errorf("start command: %w", err)
	}
	defer func() {
		// Surface a close failure as the function's error only when no
		// earlier error takes precedence.
		closeErr := ptty.Close()
		if closeErr != nil {
			s.logger.Warn(ctx, "failed to close tty", slog.Error(closeErr))
			s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "close").Add(1)
			if retErr == nil {
				retErr = closeErr
			}
		}
	}()
	// Relay client window-size changes to the PTY until the channel closes.
	go func() {
		for win := range windowSize {
			resizeErr := ptty.Resize(uint16(win.Height), uint16(win.Width))
			// If the pty is closed, then command has exited, no need to log.
			if resizeErr != nil && !errors.Is(resizeErr, pty.ErrClosed) {
				s.logger.Warn(ctx, "failed to resize tty", slog.Error(resizeErr))
				s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "resize").Add(1)
			}
		}
	}()

	// Pump client input into the PTY in the background.
	go func() {
		_, err := io.Copy(ptty.InputWriter(), session)
		if err != nil {
			s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "input_io_copy").Add(1)
		}
	}()

	// We need to wait for the command output to finish copying. It's safe to
	// just do this copy on the main handler goroutine because one of two things
	// will happen:
	//
	// 1. The command completes & closes the TTY, which then triggers an error
	// after we've Read() all the buffered data from the PTY.
	// 2. The client hangs up, which cancels the command's Context, and go will
	// kill the command's process. This then has the same effect as (1).
	n, err := io.Copy(session, ptty.OutputReader())
	s.logger.Debug(ctx, "copy output done", slog.F("bytes", n), slog.Error(err))
	if err != nil {
		s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "output_io_copy").Add(1)
		return xerrors.Errorf("copy error: %w", err)
	}
	// We've gotten all the output, but we need to wait for the process to
	// complete so that we can get the exit code. This returns
	// immediately if the TTY was closed as part of the command exiting.
	err = process.Wait()
	var exitErr *exec.ExitError
	// ExitErrors just mean the command we run returned a non-zero exit code, which is normal
	// and not something to be concerned about. But, if it's something else, we should log it.
	if err != nil && !xerrors.As(err, &exitErr) {
		s.logger.Warn(ctx, "process wait exited with error", slog.Error(err))
		s.metrics.sessionErrors.WithLabelValues(magicTypeLabel, "yes", "wait").Add(1)
	}
	if err != nil {
		return xerrors.Errorf("process wait: %w", err)
	}
	return nil
}
|
||||
|
||||
// sftpHandler serves the SFTP subsystem on the given session, rooting the
// file server in the user's home directory when it can be determined. The
// session is exited with status 0 on clean shutdown (EOF) and 1 otherwise.
func (s *Server) sftpHandler(session ssh.Session) {
	s.metrics.sftpConnectionsTotal.Add(1)

	ctx := session.Context()

	// Typically sftp sessions don't request a TTY, but if they do,
	// we must ensure the gliderlabs/ssh CRLF emulation is disabled.
	// Otherwise sftp will be broken. This can happen if a user sets
	// `RequestTTY force` in their SSH config.
	session.DisablePTYEmulation()

	var opts []sftp.ServerOption
	// Change current working directory to the users home
	// directory so that SFTP connections land there.
	homedir, err := userHomeDir()
	if err != nil {
		// Best effort: fall back to the server default working directory.
		s.logger.Warn(ctx, "get sftp working directory failed, unable to get home dir", slog.Error(err))
	} else {
		opts = append(opts, sftp.WithServerWorkingDirectory(homedir))
	}

	server, err := sftp.NewServer(session, opts...)
	if err != nil {
		s.logger.Debug(ctx, "initialize sftp server", slog.Error(err))
		return
	}
	defer server.Close()

	err = server.Serve()
	if errors.Is(err, io.EOF) {
		// Unless we call `session.Exit(0)` here, the client won't
		// receive `exit-status` because `(*sftp.Server).Close()`
		// calls `Close()` on the underlying connection (session),
		// which actually calls `channel.Close()` because it isn't
		// wrapped. This causes sftp clients to receive a non-zero
		// exit code. Typically sftp clients don't echo this exit
		// code but `scp` on macOS does (when using the default
		// SFTP backend).
		_ = session.Exit(0)
		return
	}
	s.logger.Warn(ctx, "sftp server closed with error", slog.Error(err))
	s.metrics.sftpServerErrors.Add(1)
	_ = session.Exit(1)
}
|
||||
|
||||
// CreateCommand processes raw command input with OpenSSH-like behavior.
// If the script provided is empty, it will default to the users shell.
// This injects environment variables specified by the user at launch too.
//
// The command is always executed via the user's shell (`sh -c` style, or
// `cmd /c` on Windows). Environment precedence, lowest to highest: the
// agent process environment, hard-coded Coder/SSH variables, manifest
// variables (with $-expansion), and finally s.Env.
func (s *Server) CreateCommand(ctx context.Context, script string, env []string) (*pty.Cmd, error) {
	currentUser, err := user.Current()
	if err != nil {
		return nil, xerrors.Errorf("get current user: %w", err)
	}
	username := currentUser.Username

	shell, err := usershell.Get(username)
	if err != nil {
		return nil, xerrors.Errorf("get user shell: %w", err)
	}

	manifest := s.Manifest.Load()
	if manifest == nil {
		return nil, xerrors.Errorf("no metadata was provided")
	}

	// OpenSSH executes all commands with the users current shell.
	// We replicate that behavior for IDE support.
	caller := "-c"
	if runtime.GOOS == "windows" {
		caller = "/c"
	}
	args := []string{caller, script}

	// gliderlabs/ssh returns a command slice of zero
	// when a shell is requested.
	if len(script) == 0 {
		args = []string{}
		if runtime.GOOS != "windows" {
			// On Linux and macOS, we should start a login
			// shell to consume juicy environment variables!
			args = append(args, "-l")
		}
	}

	cmd := pty.CommandContext(ctx, shell, args...)
	cmd.Dir = manifest.Directory

	// If the metadata directory doesn't exist, we run the command
	// in the users home directory.
	_, err = os.Stat(cmd.Dir)
	if cmd.Dir == "" || err != nil {
		// Default to user home if a directory is not set.
		homedir, err := userHomeDir()
		if err != nil {
			return nil, xerrors.Errorf("get home dir: %w", err)
		}
		cmd.Dir = homedir
	}
	cmd.Env = append(os.Environ(), env...)
	executablePath, err := os.Executable()
	if err != nil {
		return nil, xerrors.Errorf("getting os executable: %w", err)
	}
	// Set environment variables for reliable detection of being inside a
	// Coder workspace.
	cmd.Env = append(cmd.Env, "CODER=true")
	cmd.Env = append(cmd.Env, fmt.Sprintf("USER=%s", username))
	// Git on Windows resolves with UNIX-style paths.
	// If using backslashes, it's unable to find the executable.
	unixExecutablePath := strings.ReplaceAll(executablePath, "\\", "/")
	cmd.Env = append(cmd.Env, fmt.Sprintf(`GIT_SSH_COMMAND=%s gitssh --`, unixExecutablePath))

	// Specific Coder subcommands require the agent token exposed!
	cmd.Env = append(cmd.Env, fmt.Sprintf("CODER_AGENT_TOKEN=%s", s.AgentToken()))

	// Set SSH connection environment variables (these are also set by OpenSSH
	// and thus expected to be present by SSH clients). Since the agent does
	// networking in-memory, trying to provide accurate values here would be
	// nonsensical. For now, we hard code these values so that they're present.
	srcAddr, srcPort := "0.0.0.0", "0"
	dstAddr, dstPort := "0.0.0.0", "0"
	cmd.Env = append(cmd.Env, fmt.Sprintf("SSH_CLIENT=%s %s %s", srcAddr, srcPort, dstPort))
	cmd.Env = append(cmd.Env, fmt.Sprintf("SSH_CONNECTION=%s %s %s %s", srcAddr, srcPort, dstAddr, dstPort))

	// This adds the ports dialog to code-server that enables
	// proxying a port dynamically.
	cmd.Env = append(cmd.Env, fmt.Sprintf("VSCODE_PROXY_URI=%s", manifest.VSCodePortProxyURI))

	// Hide Coder message on code-server's "Getting Started" page
	cmd.Env = append(cmd.Env, "CS_DISABLE_GETTING_STARTED_OVERRIDE=true")

	// Load environment variables passed via the agent.
	// These should override all variables we manually specify.
	for envKey, value := range manifest.EnvironmentVariables {
		// Expanding environment variables allows for customization
		// of the $PATH, among other variables. Customers can prepend
		// or append to the $PATH, so allowing expand is required!
		cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", envKey, os.ExpandEnv(value)))
	}

	// Agent-level environment variables should take over all!
	// This is used for setting agent-specific variables like "CODER_AGENT_TOKEN".
	for envKey, value := range s.Env {
		cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", envKey, value))
	}

	return cmd, nil
}
|
||||
|
||||
func (s *Server) Serve(l net.Listener) (retErr error) {
|
||||
s.logger.Info(context.Background(), "started serving listener", slog.F("listen_addr", l.Addr()))
|
||||
defer func() {
|
||||
s.logger.Info(context.Background(), "stopped serving listener",
|
||||
slog.F("listen_addr", l.Addr()), slog.Error(retErr))
|
||||
}()
|
||||
defer l.Close()
|
||||
|
||||
s.trackListener(l, true)
|
||||
defer s.trackListener(l, false)
|
||||
for {
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go s.handleConn(l, conn)
|
||||
}
|
||||
}
|
||||
|
||||
// handleConn serves a single accepted network connection on the SSH server,
// registering it so Close can terminate it. The connection is always closed
// on return.
func (s *Server) handleConn(l net.Listener, c net.Conn) {
	logger := s.logger.With(
		slog.F("remote_addr", c.RemoteAddr()),
		slog.F("local_addr", c.LocalAddr()),
		slog.F("listen_addr", l.Addr()))
	defer c.Close()

	if !s.trackConn(l, c, true) {
		// Server is closed or we no longer want
		// connections from this listener.
		logger.Info(context.Background(), "received connection after server closed")
		return
	}
	defer s.trackConn(l, c, false)
	logger.Info(context.Background(), "started serving connection")
	// note: srv.ConnectionCompleteCallback logs completion of the connection
	s.srv.HandleConn(c)
}
|
||||
|
||||
// trackListener registers the listener with the server. If the server is
// closing, the function will block until the server is closed.
//
//nolint:revive
func (s *Server) trackListener(l net.Listener, add bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if add {
		// Loop (rather than a single check) because another Close may
		// have started between the channel firing and reacquiring mu.
		for s.closing != nil {
			closing := s.closing
			// Wait until close is complete before
			// serving a new listener.
			s.mu.Unlock()
			<-closing
			s.mu.Lock()
		}
		s.wg.Add(1)
		s.listeners[l] = struct{}{}
		return
	}
	// Removal path: unregister and release the goroutine's wg slot.
	s.wg.Done()
	delete(s.listeners, l)
}
|
||||
|
||||
// trackConn registers the connection with the server. If the server is
|
||||
// closed or the listener is closed, the connection is not registered
|
||||
// and should be closed.
|
||||
//
|
||||
//nolint:revive
|
||||
func (s *Server) trackConn(l net.Listener, c net.Conn, add bool) (ok bool) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if add {
|
||||
found := false
|
||||
for ll := range s.listeners {
|
||||
if l == ll {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if s.closing != nil || !found {
|
||||
// Server or listener closed.
|
||||
return false
|
||||
}
|
||||
s.wg.Add(1)
|
||||
s.conns[c] = struct{}{}
|
||||
return true
|
||||
}
|
||||
s.wg.Done()
|
||||
delete(s.conns, c)
|
||||
return true
|
||||
}
|
||||
|
||||
// trackSession registers the session with the server. If the server is
|
||||
// closing, the session is not registered and should be closed.
|
||||
//
|
||||
//nolint:revive
|
||||
func (s *Server) trackSession(ss ssh.Session, add bool) (ok bool) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if add {
|
||||
if s.closing != nil {
|
||||
// Server closed.
|
||||
return false
|
||||
}
|
||||
s.wg.Add(1)
|
||||
s.sessions[ss] = struct{}{}
|
||||
return true
|
||||
}
|
||||
s.wg.Done()
|
||||
delete(s.sessions, ss)
|
||||
return true
|
||||
}
|
||||
|
||||
// Close the server and all active connections. Server can be re-used
// after Close is done.
func (s *Server) Close() error {
	s.mu.Lock()

	// Guard against multiple calls to Close and
	// accepting new connections during close.
	if s.closing != nil {
		s.mu.Unlock()
		return xerrors.New("server is closing")
	}
	s.closing = make(chan struct{})

	// Close all active sessions to gracefully
	// terminate client connections.
	for ss := range s.sessions {
		// We call Close on the underlying channel here because we don't
		// want to send an exit status to the client (via Exit()).
		// Typically OpenSSH clients will return 255 as the exit status.
		_ = ss.Close()
	}

	// Close all active listeners and connections.
	for l := range s.listeners {
		_ = l.Close()
	}
	for c := range s.conns {
		_ = c.Close()
	}

	// Close the underlying SSH server.
	err := s.srv.Close()

	// Release mu while waiting: the tracked goroutines need the lock to
	// unregister themselves via trackListener/trackConn/trackSession.
	s.mu.Unlock()
	s.wg.Wait() // Wait for all goroutines to exit.

	// Wake any Serve calls blocked in trackListener, then reset closing
	// so the server can be reused.
	s.mu.Lock()
	close(s.closing)
	s.closing = nil
	s.mu.Unlock()

	return err
}
|
||||
|
||||
// Shutdown gracefully closes all active SSH connections and stops
// accepting new connections.
//
// Shutdown is not implemented; it currently returns nil immediately
// without touching any connections.
func (*Server) Shutdown(_ context.Context) error {
	// TODO(mafredri): Implement shutdown, SIGHUP running commands, etc.
	return nil
}
|
||||
|
||||
// isLoginShell reports whether the raw SSH command requests a login shell,
// which is the case exactly when no command string was supplied.
func isLoginShell(rawCommand string) bool {
	return rawCommand == ""
}
|
||||
|
||||
// isQuietLogin checks if the SSH server should perform a quiet login or not.
|
||||
//
|
||||
// https://github.com/openssh/openssh-portable/blob/25bd659cc72268f2858c5415740c442ee950049f/session.c#L816
|
||||
func isQuietLogin(fs afero.Fs, rawCommand string) bool {
|
||||
// We are always quiet unless this is a login shell.
|
||||
if !isLoginShell(rawCommand) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Best effort, if we can't get the home directory,
|
||||
// we can't lookup .hushlogin.
|
||||
homedir, err := userHomeDir()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
_, err = fs.Stat(filepath.Join(homedir, ".hushlogin"))
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// showServiceBanner will write the service banner if enabled and not blank
|
||||
// along with a blank line for spacing.
|
||||
func showServiceBanner(session io.Writer, banner *codersdk.ServiceBannerConfig) error {
|
||||
if banner.Enabled && banner.Message != "" {
|
||||
// The banner supports Markdown so we might want to parse it but Markdown is
|
||||
// still fairly readable in its raw form.
|
||||
message := strings.TrimSpace(banner.Message) + "\n\n"
|
||||
return writeWithCarriageReturn(strings.NewReader(message), session)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// showMOTD will output the message of the day from
|
||||
// the given filename to dest, if the file exists.
|
||||
//
|
||||
// https://github.com/openssh/openssh-portable/blob/25bd659cc72268f2858c5415740c442ee950049f/session.c#L784
|
||||
func showMOTD(fs afero.Fs, dest io.Writer, filename string) error {
|
||||
if filename == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
f, err := fs.Open(filename)
|
||||
if err != nil {
|
||||
if xerrors.Is(err, os.ErrNotExist) {
|
||||
// This is not an error, there simply isn't a MOTD to show.
|
||||
return nil
|
||||
}
|
||||
return xerrors.Errorf("open MOTD: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return writeWithCarriageReturn(f, dest)
|
||||
}
|
||||
|
||||
// writeWithCarriageReturn writes each line with a carriage return to ensure
|
||||
// that each line starts at the beginning of the terminal.
|
||||
func writeWithCarriageReturn(src io.Reader, dest io.Writer) error {
|
||||
s := bufio.NewScanner(src)
|
||||
for s.Scan() {
|
||||
_, err := fmt.Fprint(dest, s.Text()+"\r\n")
|
||||
if err != nil {
|
||||
return xerrors.Errorf("write line: %w", err)
|
||||
}
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return xerrors.Errorf("read line: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// userHomeDir returns the home directory of the current user, giving
|
||||
// priority to the $HOME environment variable.
|
||||
func userHomeDir() (string, error) {
|
||||
// First we check the environment.
|
||||
homedir, err := os.UserHomeDir()
|
||||
if err == nil {
|
||||
return homedir, nil
|
||||
}
|
||||
|
||||
// As a fallback, we try the user information.
|
||||
u, err := user.Current()
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("current user: %w", err)
|
||||
}
|
||||
return u.HomeDir, nil
|
||||
}
|
||||
@@ -0,0 +1,197 @@
|
||||
//go:build !windows
|
||||
|
||||
package agentssh
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"io"
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
gliderssh "github.com/gliderlabs/ssh"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/spf13/afero"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/pty"
|
||||
"github.com/coder/coder/testutil"
|
||||
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
)
|
||||
|
||||
// longScript sleeps far longer than the test timeout so that, if the server
// fails to terminate the command when the session context is canceled, the
// test detects the orphaned process by timing out instead of seeing "done".
const longScript = `
echo "started"
sleep 30
echo "done"
`
|
||||
|
||||
// Test_sessionStart_orphan tests running a command that takes a long time to
// exit normally, and terminate the SSH session context early to verify that we
// return quickly and don't leave the command running as an "orphan" with no
// active SSH session.
func Test_sessionStart_orphan(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitMedium)
	defer cancel()
	logger := slogtest.Make(t, nil)
	s, err := NewServer(ctx, logger, prometheus.NewRegistry(), afero.NewMemMapFs(), 0, "")
	require.NoError(t, err)
	defer s.Close()

	// Here we're going to call the handler directly with a faked SSH session
	// that just uses io.Pipes instead of a network socket. There is a large
	// variation in the time between closing the socket from the client side and
	// the SSH server canceling the session Context, which would lead to a flaky
	// test if we did it that way. So instead, we directly cancel the context
	// in this test.
	sessionCtx, sessionCancel := context.WithCancel(ctx)
	toClient, fromClient, sess := newTestSession(sessionCtx)
	ptyInfo := gliderssh.Pty{}
	// A closed window channel means no resize events will ever arrive.
	windowSize := make(chan gliderssh.Window)
	close(windowSize)
	// the command gets the session context so that Go will terminate it when
	// the session expires.
	cmd := pty.CommandContext(sessionCtx, "sh", "-c", longScript)

	done := make(chan struct{})
	go func() {
		defer close(done)

		// we don't really care what the error is here. In the larger scenario,
		// the client has disconnected, so we can't return any error information
		// to them.
		_ = s.startPTYSession(sess, "ssh", cmd, ptyInfo, windowSize)
	}()

	// Read the first line of output to confirm the command is running.
	readDone := make(chan struct{})
	go func() {
		defer close(readDone)
		s := bufio.NewScanner(toClient)
		assert.True(t, s.Scan())
		txt := s.Text()
		assert.Equal(t, "started", txt, "output corrupted")
	}()

	waitForChan(ctx, t, readDone, "read timeout")
	// process is started, and should be sleeping for ~30 seconds

	sessionCancel()

	// now, we wait for the handler to complete. If it does so before the
	// main test timeout, we consider this a pass. If not, it indicates
	// that the server isn't properly shutting down sessions when they are
	// disconnected client side, which could lead to processes hanging around
	// indefinitely.
	waitForChan(ctx, t, done, "handler timeout")

	err = fromClient.Close()
	require.NoError(t, err)
}
|
||||
|
||||
func waitForChan(ctx context.Context, t *testing.T, c <-chan struct{}, msg string) {
|
||||
t.Helper()
|
||||
select {
|
||||
case <-c:
|
||||
// OK!
|
||||
case <-ctx.Done():
|
||||
t.Fatal(msg)
|
||||
}
|
||||
}
|
||||
|
||||
type testSession struct {
|
||||
ctx testSSHContext
|
||||
|
||||
// c2p is the client -> pty buffer
|
||||
toPty *io.PipeReader
|
||||
// p2c is the pty -> client buffer
|
||||
fromPty *io.PipeWriter
|
||||
}
|
||||
|
||||
type testSSHContext struct {
|
||||
context.Context
|
||||
}
|
||||
|
||||
func newTestSession(ctx context.Context) (toClient *io.PipeReader, fromClient *io.PipeWriter, s ptySession) {
|
||||
toClient, fromPty := io.Pipe()
|
||||
toPty, fromClient := io.Pipe()
|
||||
|
||||
return toClient, fromClient, &testSession{
|
||||
ctx: testSSHContext{ctx},
|
||||
toPty: toPty,
|
||||
fromPty: fromPty,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *testSession) Context() gliderssh.Context {
|
||||
return s.ctx
|
||||
}
|
||||
|
||||
func (*testSession) DisablePTYEmulation() {}
|
||||
|
||||
// RawCommand returns "quiet logon" so that the PTY handler doesn't attempt to
|
||||
// write the message of the day, which will interfere with our tests. It writes
|
||||
// the message of the day if it's a shell login (zero length RawCommand()).
|
||||
func (*testSession) RawCommand() string { return "quiet logon" }
|
||||
|
||||
func (s *testSession) Read(p []byte) (n int, err error) {
|
||||
return s.toPty.Read(p)
|
||||
}
|
||||
|
||||
func (s *testSession) Write(p []byte) (n int, err error) {
|
||||
return s.fromPty.Write(p)
|
||||
}
|
||||
|
||||
func (testSSHContext) Lock() {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func (testSSHContext) Unlock() {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// User returns the username used when establishing the SSH connection.
|
||||
func (testSSHContext) User() string {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// SessionID returns the session hash.
|
||||
func (testSSHContext) SessionID() string {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// ClientVersion returns the version reported by the client.
|
||||
func (testSSHContext) ClientVersion() string {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// ServerVersion returns the version reported by the server.
|
||||
func (testSSHContext) ServerVersion() string {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// RemoteAddr returns the remote address for this connection.
|
||||
func (testSSHContext) RemoteAddr() net.Addr {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// LocalAddr returns the local address for this connection.
|
||||
func (testSSHContext) LocalAddr() net.Addr {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// Permissions returns the Permissions object used for this connection.
|
||||
func (testSSHContext) Permissions() *gliderssh.Permissions {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// SetValue allows you to easily write new values into the underlying context.
|
||||
func (testSSHContext) SetValue(_, _ interface{}) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func (testSSHContext) KeepAlive() *gliderssh.SessionKeepAlive {
|
||||
panic("not implemented")
|
||||
}
|
||||
@@ -0,0 +1,144 @@
|
||||
// Package agentssh_test provides tests for basic functinoality of the agentssh
|
||||
// package, more test coverage can be found in the `agent` and `cli` package(s).
|
||||
package agentssh_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/spf13/afero"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/atomic"
|
||||
"go.uber.org/goleak"
|
||||
"golang.org/x/crypto/ssh"
|
||||
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
|
||||
"github.com/coder/coder/agent/agentssh"
|
||||
"github.com/coder/coder/codersdk/agentsdk"
|
||||
"github.com/coder/coder/pty/ptytest"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
goleak.VerifyTestMain(m)
|
||||
}
|
||||
|
||||
func TestNewServer_ServeClient(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := context.Background()
|
||||
logger := slogtest.Make(t, nil)
|
||||
s, err := agentssh.NewServer(ctx, logger, prometheus.NewRegistry(), afero.NewMemMapFs(), 0, "")
|
||||
require.NoError(t, err)
|
||||
defer s.Close()
|
||||
|
||||
// The assumption is that these are set before serving SSH connections.
|
||||
s.AgentToken = func() string { return "" }
|
||||
s.Manifest = atomic.NewPointer(&agentsdk.Manifest{})
|
||||
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
err := s.Serve(ln)
|
||||
assert.Error(t, err) // Server is closed.
|
||||
}()
|
||||
|
||||
c := sshClient(t, ln.Addr().String())
|
||||
|
||||
var b bytes.Buffer
|
||||
sess, err := c.NewSession()
|
||||
sess.Stdout = &b
|
||||
require.NoError(t, err)
|
||||
err = sess.Start("echo hello")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = sess.Wait()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, "hello", strings.TrimSpace(b.String()))
|
||||
|
||||
err = s.Close()
|
||||
require.NoError(t, err)
|
||||
<-done
|
||||
}
|
||||
|
||||
func TestNewServer_CloseActiveConnections(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := context.Background()
|
||||
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true})
|
||||
s, err := agentssh.NewServer(ctx, logger, prometheus.NewRegistry(), afero.NewMemMapFs(), 0, "")
|
||||
require.NoError(t, err)
|
||||
defer s.Close()
|
||||
|
||||
// The assumption is that these are set before serving SSH connections.
|
||||
s.AgentToken = func() string { return "" }
|
||||
s.Manifest = atomic.NewPointer(&agentsdk.Manifest{})
|
||||
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
err := s.Serve(ln)
|
||||
assert.Error(t, err) // Server is closed.
|
||||
}()
|
||||
|
||||
pty := ptytest.New(t)
|
||||
|
||||
doClose := make(chan struct{})
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
c := sshClient(t, ln.Addr().String())
|
||||
sess, err := c.NewSession()
|
||||
sess.Stdin = pty.Input()
|
||||
sess.Stdout = pty.Output()
|
||||
sess.Stderr = pty.Output()
|
||||
|
||||
assert.NoError(t, err)
|
||||
err = sess.Start("")
|
||||
assert.NoError(t, err)
|
||||
|
||||
close(doClose)
|
||||
err = sess.Wait()
|
||||
assert.Error(t, err)
|
||||
}()
|
||||
|
||||
<-doClose
|
||||
err = s.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func sshClient(t *testing.T, addr string) *ssh.Client {
|
||||
conn, err := net.Dial("tcp", addr)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
_ = conn.Close()
|
||||
})
|
||||
|
||||
sshConn, channels, requests, err := ssh.NewClientConn(conn, "localhost:22", &ssh.ClientConfig{
|
||||
HostKeyCallback: ssh.InsecureIgnoreHostKey(), //nolint:gosec // This is a test.
|
||||
})
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
_ = sshConn.Close()
|
||||
})
|
||||
c := ssh.NewClient(sshConn, channels, requests)
|
||||
t.Cleanup(func() {
|
||||
_ = c.Close()
|
||||
})
|
||||
return c
|
||||
}
|
||||
@@ -0,0 +1,47 @@
|
||||
package agentssh
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Bicopy copies all of the data between the two connections and will close them
|
||||
// after one or both of them are done writing. If the context is canceled, both
|
||||
// of the connections will be closed.
|
||||
func Bicopy(ctx context.Context, c1, c2 io.ReadWriteCloser) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
defer func() {
|
||||
_ = c1.Close()
|
||||
_ = c2.Close()
|
||||
}()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
copyFunc := func(dst io.WriteCloser, src io.Reader) {
|
||||
defer func() {
|
||||
wg.Done()
|
||||
// If one side of the copy fails, ensure the other one exits as
|
||||
// well.
|
||||
cancel()
|
||||
}()
|
||||
_, _ = io.Copy(dst, src)
|
||||
}
|
||||
|
||||
wg.Add(2)
|
||||
go copyFunc(c1, c2)
|
||||
go copyFunc(c2, c1)
|
||||
|
||||
// Convert waitgroup to a channel so we can also wait on the context.
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
wg.Wait()
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-done:
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package agent
|
||||
package agentssh
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -0,0 +1,82 @@
|
||||
package agentssh
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
type sshServerMetrics struct {
|
||||
failedConnectionsTotal prometheus.Counter
|
||||
sftpConnectionsTotal prometheus.Counter
|
||||
sftpServerErrors prometheus.Counter
|
||||
x11HandlerErrors *prometheus.CounterVec
|
||||
sessionsTotal *prometheus.CounterVec
|
||||
sessionErrors *prometheus.CounterVec
|
||||
}
|
||||
|
||||
func newSSHServerMetrics(registerer prometheus.Registerer) *sshServerMetrics {
|
||||
failedConnectionsTotal := prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: "agent", Subsystem: "ssh_server", Name: "failed_connections_total",
|
||||
})
|
||||
registerer.MustRegister(failedConnectionsTotal)
|
||||
|
||||
sftpConnectionsTotal := prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: "agent", Subsystem: "ssh_server", Name: "sftp_connections_total",
|
||||
})
|
||||
registerer.MustRegister(sftpConnectionsTotal)
|
||||
|
||||
sftpServerErrors := prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: "agent", Subsystem: "ssh_server", Name: "sftp_server_errors_total",
|
||||
})
|
||||
registerer.MustRegister(sftpServerErrors)
|
||||
|
||||
x11HandlerErrors := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "agent",
|
||||
Subsystem: "x11_handler",
|
||||
Name: "errors_total",
|
||||
},
|
||||
[]string{"error_type"},
|
||||
)
|
||||
registerer.MustRegister(x11HandlerErrors)
|
||||
|
||||
sessionsTotal := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "agent",
|
||||
Subsystem: "sessions",
|
||||
Name: "total",
|
||||
},
|
||||
[]string{"magic_type", "pty"},
|
||||
)
|
||||
registerer.MustRegister(sessionsTotal)
|
||||
|
||||
sessionErrors := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "agent",
|
||||
Subsystem: "sessions",
|
||||
Name: "errors_total",
|
||||
},
|
||||
[]string{"magic_type", "pty", "error_type"},
|
||||
)
|
||||
registerer.MustRegister(sessionErrors)
|
||||
|
||||
return &sshServerMetrics{
|
||||
failedConnectionsTotal: failedConnectionsTotal,
|
||||
sftpConnectionsTotal: sftpConnectionsTotal,
|
||||
sftpServerErrors: sftpServerErrors,
|
||||
x11HandlerErrors: x11HandlerErrors,
|
||||
sessionsTotal: sessionsTotal,
|
||||
sessionErrors: sessionErrors,
|
||||
}
|
||||
}
|
||||
|
||||
func magicTypeMetricLabel(magicType string) string {
|
||||
switch magicType {
|
||||
case MagicSessionTypeVSCode:
|
||||
case MagicSessionTypeJetBrains:
|
||||
case "":
|
||||
magicType = "ssh"
|
||||
default:
|
||||
magicType = "unknown"
|
||||
}
|
||||
return magicType
|
||||
}
|
||||
@@ -0,0 +1,200 @@
|
||||
package agentssh
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/gliderlabs/ssh"
|
||||
"github.com/gofrs/flock"
|
||||
"github.com/spf13/afero"
|
||||
gossh "golang.org/x/crypto/ssh"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
)
|
||||
|
||||
// x11Callback is called when the client requests X11 forwarding.
|
||||
// It adds an Xauthority entry to the Xauthority file.
|
||||
func (s *Server) x11Callback(ctx ssh.Context, x11 ssh.X11) bool {
|
||||
hostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
s.logger.Warn(ctx, "failed to get hostname", slog.Error(err))
|
||||
s.metrics.x11HandlerErrors.WithLabelValues("hostname").Add(1)
|
||||
return false
|
||||
}
|
||||
|
||||
err = s.fs.MkdirAll(s.x11SocketDir, 0o700)
|
||||
if err != nil {
|
||||
s.logger.Warn(ctx, "failed to make the x11 socket dir", slog.F("dir", s.x11SocketDir), slog.Error(err))
|
||||
s.metrics.x11HandlerErrors.WithLabelValues("socker_dir").Add(1)
|
||||
return false
|
||||
}
|
||||
|
||||
err = addXauthEntry(ctx, s.fs, hostname, strconv.Itoa(int(x11.ScreenNumber)), x11.AuthProtocol, x11.AuthCookie)
|
||||
if err != nil {
|
||||
s.logger.Warn(ctx, "failed to add Xauthority entry", slog.Error(err))
|
||||
s.metrics.x11HandlerErrors.WithLabelValues("xauthority").Add(1)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// x11Handler is called when a session has requested X11 forwarding.
|
||||
// It listens for X11 connections and forwards them to the client.
|
||||
func (s *Server) x11Handler(ctx ssh.Context, x11 ssh.X11) bool {
|
||||
serverConn, valid := ctx.Value(ssh.ContextKeyConn).(*gossh.ServerConn)
|
||||
if !valid {
|
||||
s.logger.Warn(ctx, "failed to get server connection")
|
||||
return false
|
||||
}
|
||||
// We want to overwrite the socket so that subsequent connections will succeed.
|
||||
socketPath := filepath.Join(s.x11SocketDir, fmt.Sprintf("X%d", x11.ScreenNumber))
|
||||
err := os.Remove(socketPath)
|
||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
s.logger.Warn(ctx, "failed to remove existing X11 socket", slog.Error(err))
|
||||
return false
|
||||
}
|
||||
listener, err := net.Listen("unix", socketPath)
|
||||
if err != nil {
|
||||
s.logger.Warn(ctx, "failed to listen for X11", slog.Error(err))
|
||||
return false
|
||||
}
|
||||
s.trackListener(listener, true)
|
||||
|
||||
go func() {
|
||||
defer listener.Close()
|
||||
defer s.trackListener(listener, false)
|
||||
handledFirstConnection := false
|
||||
|
||||
for {
|
||||
conn, err := listener.Accept()
|
||||
if err != nil {
|
||||
if errors.Is(err, net.ErrClosed) {
|
||||
return
|
||||
}
|
||||
s.logger.Warn(ctx, "failed to accept X11 connection", slog.Error(err))
|
||||
return
|
||||
}
|
||||
if x11.SingleConnection && handledFirstConnection {
|
||||
s.logger.Warn(ctx, "X11 connection rejected because single connection is enabled")
|
||||
_ = conn.Close()
|
||||
continue
|
||||
}
|
||||
handledFirstConnection = true
|
||||
|
||||
unixConn, ok := conn.(*net.UnixConn)
|
||||
if !ok {
|
||||
s.logger.Warn(ctx, fmt.Sprintf("failed to cast connection to UnixConn. got: %T", conn))
|
||||
return
|
||||
}
|
||||
unixAddr, ok := unixConn.LocalAddr().(*net.UnixAddr)
|
||||
if !ok {
|
||||
s.logger.Warn(ctx, fmt.Sprintf("failed to cast local address to UnixAddr. got: %T", unixConn.LocalAddr()))
|
||||
return
|
||||
}
|
||||
|
||||
channel, reqs, err := serverConn.OpenChannel("x11", gossh.Marshal(struct {
|
||||
OriginatorAddress string
|
||||
OriginatorPort uint32
|
||||
}{
|
||||
OriginatorAddress: unixAddr.Name,
|
||||
OriginatorPort: 0,
|
||||
}))
|
||||
if err != nil {
|
||||
s.logger.Warn(ctx, "failed to open X11 channel", slog.Error(err))
|
||||
return
|
||||
}
|
||||
go gossh.DiscardRequests(reqs)
|
||||
go Bicopy(ctx, conn, channel)
|
||||
}
|
||||
}()
|
||||
return true
|
||||
}
|
||||
|
||||
// addXauthEntry adds an Xauthority entry to the Xauthority file.
|
||||
// The Xauthority file is located at ~/.Xauthority.
|
||||
func addXauthEntry(ctx context.Context, fs afero.Fs, host string, display string, authProtocol string, authCookie string) error {
|
||||
// Get the Xauthority file path
|
||||
homeDir, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to get user home directory: %w", err)
|
||||
}
|
||||
|
||||
xauthPath := filepath.Join(homeDir, ".Xauthority")
|
||||
|
||||
lock := flock.New(xauthPath)
|
||||
defer lock.Close()
|
||||
ok, err := lock.TryLockContext(ctx, 100*time.Millisecond)
|
||||
if !ok {
|
||||
return xerrors.Errorf("failed to lock Xauthority file: %w", err)
|
||||
}
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to lock Xauthority file: %w", err)
|
||||
}
|
||||
|
||||
// Open or create the Xauthority file
|
||||
file, err := fs.OpenFile(xauthPath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0o600)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to open Xauthority file: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Convert the authCookie from hex string to byte slice
|
||||
authCookieBytes, err := hex.DecodeString(authCookie)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to decode auth cookie: %w", err)
|
||||
}
|
||||
|
||||
// Write Xauthority entry
|
||||
family := uint16(0x0100) // FamilyLocal
|
||||
err = binary.Write(file, binary.BigEndian, family)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to write family: %w", err)
|
||||
}
|
||||
|
||||
err = binary.Write(file, binary.BigEndian, uint16(len(host)))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to write host length: %w", err)
|
||||
}
|
||||
_, err = file.WriteString(host)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to write host: %w", err)
|
||||
}
|
||||
|
||||
err = binary.Write(file, binary.BigEndian, uint16(len(display)))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to write display length: %w", err)
|
||||
}
|
||||
_, err = file.WriteString(display)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to write display: %w", err)
|
||||
}
|
||||
|
||||
err = binary.Write(file, binary.BigEndian, uint16(len(authProtocol)))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to write auth protocol length: %w", err)
|
||||
}
|
||||
_, err = file.WriteString(authProtocol)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to write auth protocol: %w", err)
|
||||
}
|
||||
|
||||
err = binary.Write(file, binary.BigEndian, uint16(len(authCookieBytes)))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to write auth cookie length: %w", err)
|
||||
}
|
||||
_, err = file.Write(authCookieBytes)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to write auth cookie: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,100 @@
|
||||
package agentssh_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/gliderlabs/ssh"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/spf13/afero"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/atomic"
|
||||
gossh "golang.org/x/crypto/ssh"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
"github.com/coder/coder/agent/agentssh"
|
||||
"github.com/coder/coder/codersdk/agentsdk"
|
||||
"github.com/coder/coder/testutil"
|
||||
)
|
||||
|
||||
func TestServer_X11(t *testing.T) {
|
||||
t.Parallel()
|
||||
if runtime.GOOS != "linux" {
|
||||
t.Skip("X11 forwarding is only supported on Linux")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
|
||||
fs := afero.NewOsFs()
|
||||
dir := t.TempDir()
|
||||
s, err := agentssh.NewServer(ctx, logger, prometheus.NewRegistry(), fs, 0, dir)
|
||||
require.NoError(t, err)
|
||||
defer s.Close()
|
||||
|
||||
// The assumption is that these are set before serving SSH connections.
|
||||
s.AgentToken = func() string { return "" }
|
||||
s.Manifest = atomic.NewPointer(&agentsdk.Manifest{})
|
||||
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
err := s.Serve(ln)
|
||||
assert.Error(t, err) // Server is closed.
|
||||
}()
|
||||
|
||||
c := sshClient(t, ln.Addr().String())
|
||||
|
||||
sess, err := c.NewSession()
|
||||
require.NoError(t, err)
|
||||
|
||||
reply, err := sess.SendRequest("x11-req", true, gossh.Marshal(ssh.X11{
|
||||
AuthProtocol: "MIT-MAGIC-COOKIE-1",
|
||||
AuthCookie: hex.EncodeToString([]byte("cookie")),
|
||||
ScreenNumber: 0,
|
||||
}))
|
||||
require.NoError(t, err)
|
||||
assert.True(t, reply)
|
||||
|
||||
err = sess.Shell()
|
||||
require.NoError(t, err)
|
||||
|
||||
x11Chans := c.HandleChannelOpen("x11")
|
||||
payload := "hello world"
|
||||
require.Eventually(t, func() bool {
|
||||
conn, err := net.Dial("unix", filepath.Join(dir, "X0"))
|
||||
if err == nil {
|
||||
_, err = conn.Write([]byte(payload))
|
||||
assert.NoError(t, err)
|
||||
_ = conn.Close()
|
||||
}
|
||||
return err == nil
|
||||
}, testutil.WaitShort, testutil.IntervalFast)
|
||||
|
||||
x11 := <-x11Chans
|
||||
ch, reqs, err := x11.Accept()
|
||||
require.NoError(t, err)
|
||||
go gossh.DiscardRequests(reqs)
|
||||
got := make([]byte, len(payload))
|
||||
_, err = ch.Read(got)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, payload, string(got))
|
||||
_ = ch.Close()
|
||||
_ = s.Close()
|
||||
<-done
|
||||
|
||||
// Ensure the Xauthority file was written!
|
||||
home, err := os.UserHomeDir()
|
||||
require.NoError(t, err)
|
||||
_, err = fs.Stat(filepath.Join(home, ".Xauthority"))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -0,0 +1,222 @@
|
||||
package agenttest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/codersdk/agentsdk"
|
||||
"github.com/coder/coder/tailnet"
|
||||
"github.com/coder/coder/testutil"
|
||||
)
|
||||
|
||||
func NewClient(t testing.TB,
|
||||
logger slog.Logger,
|
||||
agentID uuid.UUID,
|
||||
manifest agentsdk.Manifest,
|
||||
statsChan chan *agentsdk.Stats,
|
||||
coordinator tailnet.Coordinator,
|
||||
) *Client {
|
||||
if manifest.AgentID == uuid.Nil {
|
||||
manifest.AgentID = agentID
|
||||
}
|
||||
return &Client{
|
||||
t: t,
|
||||
logger: logger.Named("client"),
|
||||
agentID: agentID,
|
||||
manifest: manifest,
|
||||
statsChan: statsChan,
|
||||
coordinator: coordinator,
|
||||
derpMapUpdates: make(chan agentsdk.DERPMapUpdate),
|
||||
}
|
||||
}
|
||||
|
||||
type Client struct {
|
||||
t testing.TB
|
||||
logger slog.Logger
|
||||
agentID uuid.UUID
|
||||
manifest agentsdk.Manifest
|
||||
metadata map[string]agentsdk.PostMetadataRequest
|
||||
statsChan chan *agentsdk.Stats
|
||||
coordinator tailnet.Coordinator
|
||||
LastWorkspaceAgent func()
|
||||
PatchWorkspaceLogs func() error
|
||||
GetServiceBannerFunc func() (codersdk.ServiceBannerConfig, error)
|
||||
|
||||
mu sync.Mutex // Protects following.
|
||||
lifecycleStates []codersdk.WorkspaceAgentLifecycle
|
||||
startup agentsdk.PostStartupRequest
|
||||
logs []agentsdk.Log
|
||||
derpMapUpdates chan agentsdk.DERPMapUpdate
|
||||
}
|
||||
|
||||
func (c *Client) Manifest(_ context.Context) (agentsdk.Manifest, error) {
|
||||
return c.manifest, nil
|
||||
}
|
||||
|
||||
func (c *Client) Listen(_ context.Context) (net.Conn, error) {
|
||||
clientConn, serverConn := net.Pipe()
|
||||
closed := make(chan struct{})
|
||||
c.LastWorkspaceAgent = func() {
|
||||
_ = serverConn.Close()
|
||||
_ = clientConn.Close()
|
||||
<-closed
|
||||
}
|
||||
c.t.Cleanup(c.LastWorkspaceAgent)
|
||||
go func() {
|
||||
_ = c.coordinator.ServeAgent(serverConn, c.agentID, "")
|
||||
close(closed)
|
||||
}()
|
||||
return clientConn, nil
|
||||
}
|
||||
|
||||
func (c *Client) ReportStats(ctx context.Context, _ slog.Logger, statsChan <-chan *agentsdk.Stats, setInterval func(time.Duration)) (io.Closer, error) {
|
||||
doneCh := make(chan struct{})
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
go func() {
|
||||
defer close(doneCh)
|
||||
|
||||
setInterval(500 * time.Millisecond)
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case stat := <-statsChan:
|
||||
select {
|
||||
case c.statsChan <- stat:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
// We don't want to send old stats.
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
return closeFunc(func() error {
|
||||
cancel()
|
||||
<-doneCh
|
||||
close(c.statsChan)
|
||||
return nil
|
||||
}), nil
|
||||
}
|
||||
|
||||
func (c *Client) GetLifecycleStates() []codersdk.WorkspaceAgentLifecycle {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.lifecycleStates
|
||||
}
|
||||
|
||||
func (c *Client) PostLifecycle(ctx context.Context, req agentsdk.PostLifecycleRequest) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.lifecycleStates = append(c.lifecycleStates, req.State)
|
||||
c.logger.Debug(ctx, "post lifecycle", slog.F("req", req))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) PostAppHealth(ctx context.Context, req agentsdk.PostAppHealthsRequest) error {
|
||||
c.logger.Debug(ctx, "post app health", slog.F("req", req))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) GetStartup() agentsdk.PostStartupRequest {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.startup
|
||||
}
|
||||
|
||||
func (c *Client) GetMetadata() map[string]agentsdk.PostMetadataRequest {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return maps.Clone(c.metadata)
|
||||
}
|
||||
|
||||
func (c *Client) PostMetadata(ctx context.Context, key string, req agentsdk.PostMetadataRequest) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if c.metadata == nil {
|
||||
c.metadata = make(map[string]agentsdk.PostMetadataRequest)
|
||||
}
|
||||
c.metadata[key] = req
|
||||
c.logger.Debug(ctx, "post metadata", slog.F("key", key), slog.F("req", req))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) PostStartup(ctx context.Context, startup agentsdk.PostStartupRequest) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.startup = startup
|
||||
c.logger.Debug(ctx, "post startup", slog.F("req", startup))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) GetStartupLogs() []agentsdk.Log {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.logs
|
||||
}
|
||||
|
||||
func (c *Client) PatchLogs(ctx context.Context, logs agentsdk.PatchLogs) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if c.PatchWorkspaceLogs != nil {
|
||||
return c.PatchWorkspaceLogs()
|
||||
}
|
||||
c.logs = append(c.logs, logs.Logs...)
|
||||
c.logger.Debug(ctx, "patch startup logs", slog.F("req", logs))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) SetServiceBannerFunc(f func() (codersdk.ServiceBannerConfig, error)) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
c.GetServiceBannerFunc = f
|
||||
}
|
||||
|
||||
func (c *Client) GetServiceBanner(ctx context.Context) (codersdk.ServiceBannerConfig, error) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.logger.Debug(ctx, "get service banner")
|
||||
if c.GetServiceBannerFunc != nil {
|
||||
return c.GetServiceBannerFunc()
|
||||
}
|
||||
return codersdk.ServiceBannerConfig{}, nil
|
||||
}
|
||||
|
||||
func (c *Client) PushDERPMapUpdate(update agentsdk.DERPMapUpdate) error {
|
||||
timer := time.NewTimer(testutil.WaitShort)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case c.derpMapUpdates <- update:
|
||||
case <-timer.C:
|
||||
return xerrors.New("timeout waiting to push derp map update")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) DERPMapUpdates(_ context.Context) (<-chan agentsdk.DERPMapUpdate, io.Closer, error) {
|
||||
closed := make(chan struct{})
|
||||
return c.derpMapUpdates, closeFunc(func() error {
|
||||
close(closed)
|
||||
return nil
|
||||
}), nil
|
||||
}
|
||||
|
||||
type closeFunc func() error
|
||||
|
||||
func (c closeFunc) Close() error {
|
||||
return c()
|
||||
}
|
||||
@@ -0,0 +1,130 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
prompb "github.com/prometheus/client_model/go"
|
||||
"tailscale.com/util/clientmetric"
|
||||
|
||||
"cdr.dev/slog"
|
||||
|
||||
"github.com/coder/coder/codersdk/agentsdk"
|
||||
)
|
||||
|
||||
type agentMetrics struct {
|
||||
connectionsTotal prometheus.Counter
|
||||
reconnectingPTYErrors *prometheus.CounterVec
|
||||
}
|
||||
|
||||
func newAgentMetrics(registerer prometheus.Registerer) *agentMetrics {
|
||||
connectionsTotal := prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: "agent", Subsystem: "reconnecting_pty", Name: "connections_total",
|
||||
})
|
||||
registerer.MustRegister(connectionsTotal)
|
||||
|
||||
reconnectingPTYErrors := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "agent",
|
||||
Subsystem: "reconnecting_pty",
|
||||
Name: "errors_total",
|
||||
},
|
||||
[]string{"error_type"},
|
||||
)
|
||||
registerer.MustRegister(reconnectingPTYErrors)
|
||||
|
||||
return &agentMetrics{
|
||||
connectionsTotal: connectionsTotal,
|
||||
reconnectingPTYErrors: reconnectingPTYErrors,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *agent) collectMetrics(ctx context.Context) []agentsdk.AgentMetric {
|
||||
var collected []agentsdk.AgentMetric
|
||||
|
||||
// Tailscale internal metrics
|
||||
metrics := clientmetric.Metrics()
|
||||
for _, m := range metrics {
|
||||
if isIgnoredMetric(m.Name()) {
|
||||
continue
|
||||
}
|
||||
|
||||
collected = append(collected, agentsdk.AgentMetric{
|
||||
Name: m.Name(),
|
||||
Type: asMetricType(m.Type()),
|
||||
Value: float64(m.Value()),
|
||||
})
|
||||
}
|
||||
|
||||
metricFamilies, err := a.prometheusRegistry.Gather()
|
||||
if err != nil {
|
||||
a.logger.Error(ctx, "can't gather agent metrics", slog.Error(err))
|
||||
return collected
|
||||
}
|
||||
|
||||
for _, metricFamily := range metricFamilies {
|
||||
for _, metric := range metricFamily.GetMetric() {
|
||||
labels := toAgentMetricLabels(metric.Label)
|
||||
|
||||
if metric.Counter != nil {
|
||||
collected = append(collected, agentsdk.AgentMetric{
|
||||
Name: metricFamily.GetName(),
|
||||
Type: agentsdk.AgentMetricTypeCounter,
|
||||
Value: metric.Counter.GetValue(),
|
||||
Labels: labels,
|
||||
})
|
||||
} else if metric.Gauge != nil {
|
||||
collected = append(collected, agentsdk.AgentMetric{
|
||||
Name: metricFamily.GetName(),
|
||||
Type: agentsdk.AgentMetricTypeGauge,
|
||||
Value: metric.Gauge.GetValue(),
|
||||
Labels: labels,
|
||||
})
|
||||
} else {
|
||||
a.logger.Error(ctx, "unsupported metric type", slog.F("type", metricFamily.Type.String()))
|
||||
}
|
||||
}
|
||||
}
|
||||
return collected
|
||||
}
|
||||
|
||||
func toAgentMetricLabels(metricLabels []*prompb.LabelPair) []agentsdk.AgentMetricLabel {
|
||||
if len(metricLabels) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
labels := make([]agentsdk.AgentMetricLabel, 0, len(metricLabels))
|
||||
for _, metricLabel := range metricLabels {
|
||||
labels = append(labels, agentsdk.AgentMetricLabel{
|
||||
Name: metricLabel.GetName(),
|
||||
Value: metricLabel.GetValue(),
|
||||
})
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
// isIgnoredMetric checks if the metric should be ignored, as Coder agent doesn't use related features.
|
||||
// Expected metric families: magicsock_*, derp_*, tstun_*, netcheck_*, portmap_*, etc.
|
||||
func isIgnoredMetric(metricName string) bool {
|
||||
if strings.HasPrefix(metricName, "dns_") ||
|
||||
strings.HasPrefix(metricName, "controlclient_") ||
|
||||
strings.HasPrefix(metricName, "peerapi_") ||
|
||||
strings.HasPrefix(metricName, "profiles_") ||
|
||||
strings.HasPrefix(metricName, "tstun_") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func asMetricType(typ clientmetric.Type) agentsdk.AgentMetricType {
|
||||
switch typ {
|
||||
case clientmetric.TypeGauge:
|
||||
return agentsdk.AgentMetricTypeGauge
|
||||
case clientmetric.TypeCounter:
|
||||
return agentsdk.AgentMetricTypeCounter
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown metric type: %d", typ))
|
||||
}
|
||||
}
|
||||
+32
-35
@@ -18,54 +18,51 @@ import (
|
||||
"github.com/coder/coder/testutil"
|
||||
)
|
||||
|
||||
//nolint:paralleltest // Non-parallel subtest.
|
||||
// TestReap checks that's the reaper is successfully reaping
|
||||
// exited processes and passing the PIDs through the shared
|
||||
// channel.
|
||||
//
|
||||
//nolint:paralleltest
|
||||
func TestReap(t *testing.T) {
|
||||
// Don't run the reaper test in CI. It does weird
|
||||
// things like forkexecing which may have unintended
|
||||
// consequences in CI.
|
||||
if _, ok := os.LookupEnv("CI"); ok {
|
||||
if testutil.InCI() {
|
||||
t.Skip("Detected CI, skipping reaper tests")
|
||||
}
|
||||
|
||||
// OK checks that's the reaper is successfully reaping
|
||||
// exited processes and passing the PIDs through the shared
|
||||
// channel.
|
||||
pids := make(reap.PidCh, 1)
|
||||
err := reaper.ForkReap(
|
||||
reaper.WithPIDCallback(pids),
|
||||
// Provide some argument that immediately exits.
|
||||
reaper.WithExecArgs("/bin/sh", "-c", "exit 0"),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
//nolint:paralleltest // Signal handling.
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
pids := make(reap.PidCh, 1)
|
||||
err := reaper.ForkReap(
|
||||
reaper.WithPIDCallback(pids),
|
||||
// Provide some argument that immediately exits.
|
||||
reaper.WithExecArgs("/bin/sh", "-c", "exit 0"),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
cmd := exec.Command("tail", "-f", "/dev/null")
|
||||
err = cmd.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
cmd := exec.Command("tail", "-f", "/dev/null")
|
||||
err = cmd.Start()
|
||||
require.NoError(t, err)
|
||||
cmd2 := exec.Command("tail", "-f", "/dev/null")
|
||||
err = cmd2.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
cmd2 := exec.Command("tail", "-f", "/dev/null")
|
||||
err = cmd2.Start()
|
||||
require.NoError(t, err)
|
||||
err = cmd.Process.Kill()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = cmd.Process.Kill()
|
||||
require.NoError(t, err)
|
||||
err = cmd2.Process.Kill()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = cmd2.Process.Kill()
|
||||
require.NoError(t, err)
|
||||
expectedPIDs := []int{cmd.Process.Pid, cmd2.Process.Pid}
|
||||
|
||||
expectedPIDs := []int{cmd.Process.Pid, cmd2.Process.Pid}
|
||||
|
||||
for i := 0; i < len(expectedPIDs); i++ {
|
||||
select {
|
||||
case <-time.After(testutil.WaitShort):
|
||||
t.Fatalf("Timed out waiting for process")
|
||||
case pid := <-pids:
|
||||
require.Contains(t, expectedPIDs, pid)
|
||||
}
|
||||
for i := 0; i < len(expectedPIDs); i++ {
|
||||
select {
|
||||
case <-time.After(testutil.WaitShort):
|
||||
t.Fatalf("Timed out waiting for process")
|
||||
case pid := <-pids:
|
||||
require.Contains(t, expectedPIDs, pid)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
//nolint:paralleltest // Signal handling.
|
||||
@@ -73,7 +70,7 @@ func TestReapInterrupt(t *testing.T) {
|
||||
// Don't run the reaper test in CI. It does weird
|
||||
// things like forkexecing which may have unintended
|
||||
// consequences in CI.
|
||||
if _, ok := os.LookupEnv("CI"); ok {
|
||||
if testutil.InCI() {
|
||||
t.Skip("Detected CI, skipping reaper tests")
|
||||
}
|
||||
|
||||
|
||||
@@ -1,8 +1,25 @@
|
||||
package usershell
|
||||
|
||||
import "os"
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// Get returns the $SHELL environment variable.
|
||||
func Get(_ string) (string, error) {
|
||||
return os.Getenv("SHELL"), nil
|
||||
func Get(username string) (string, error) {
|
||||
// This command will output "UserShell: /bin/zsh" if successful, we
|
||||
// can ignore the error since we have fallback behavior.
|
||||
out, _ := exec.Command("dscl", ".", "-read", filepath.Join("/Users", username), "UserShell").Output()
|
||||
s, ok := strings.CutPrefix(string(out), "UserShell: ")
|
||||
if ok {
|
||||
return strings.TrimSpace(s), nil
|
||||
}
|
||||
if s = os.Getenv("SHELL"); s != "" {
|
||||
return s, nil
|
||||
}
|
||||
return "", xerrors.Errorf("shell for user %q not found via dscl or in $SHELL", username)
|
||||
}
|
||||
|
||||
@@ -27,5 +27,8 @@ func Get(username string) (string, error) {
|
||||
}
|
||||
return parts[6], nil
|
||||
}
|
||||
return "", xerrors.Errorf("user %q not found in /etc/passwd", username)
|
||||
if s := os.Getenv("SHELL"); s != "" {
|
||||
return s, nil
|
||||
}
|
||||
return "", xerrors.Errorf("shell for user %q not found in /etc/passwd or $SHELL", username)
|
||||
}
|
||||
|
||||
@@ -1,27 +0,0 @@
|
||||
//go:build !windows && !darwin
|
||||
// +build !windows,!darwin
|
||||
|
||||
package usershell_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/agent/usershell"
|
||||
)
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
t.Parallel()
|
||||
t.Run("Has", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
shell, err := usershell.Get("root")
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, shell)
|
||||
})
|
||||
t.Run("NotFound", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
_, err := usershell.Get("notauser")
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,46 @@
|
||||
package usershell_test
|
||||
|
||||
import (
|
||||
"os/user"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/agent/usershell"
|
||||
)
|
||||
|
||||
//nolint:paralleltest,tparallel // This test sets an environment variable.
|
||||
func TestGet(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.SkipNow()
|
||||
}
|
||||
|
||||
t.Run("Fallback", func(t *testing.T) {
|
||||
t.Setenv("SHELL", "/bin/sh")
|
||||
|
||||
t.Run("NonExistentUser", func(t *testing.T) {
|
||||
shell, err := usershell.Get("notauser")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/bin/sh", shell)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("NoFallback", func(t *testing.T) {
|
||||
// Disable env fallback for these tests.
|
||||
t.Setenv("SHELL", "")
|
||||
|
||||
t.Run("NotFound", func(t *testing.T) {
|
||||
_, err := usershell.Get("notauser")
|
||||
require.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("User", func(t *testing.T) {
|
||||
u, err := user.Current()
|
||||
require.NoError(t, err)
|
||||
shell, err := usershell.Get(u.Username)
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, shell)
|
||||
})
|
||||
})
|
||||
}
|
||||
+190
-42
@@ -18,23 +18,36 @@ import (
|
||||
"cloud.google.com/go/compute/metadata"
|
||||
"golang.org/x/xerrors"
|
||||
"gopkg.in/natefinch/lumberjack.v2"
|
||||
"tailscale.com/util/clientmetric"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/expfmt"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"cdr.dev/slog/sloggers/sloghuman"
|
||||
"cdr.dev/slog/sloggers/slogjson"
|
||||
"cdr.dev/slog/sloggers/slogstackdriver"
|
||||
"github.com/coder/coder/agent"
|
||||
"github.com/coder/coder/agent/reaper"
|
||||
"github.com/coder/coder/buildinfo"
|
||||
"github.com/coder/coder/cli/clibase"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/codersdk/agentsdk"
|
||||
)
|
||||
|
||||
func (r *RootCmd) workspaceAgent() *clibase.Cmd {
|
||||
var (
|
||||
auth string
|
||||
logDir string
|
||||
pprofAddress string
|
||||
noReap bool
|
||||
sshMaxTimeout time.Duration
|
||||
auth string
|
||||
logDir string
|
||||
pprofAddress string
|
||||
noReap bool
|
||||
sshMaxTimeout time.Duration
|
||||
tailnetListenPort int64
|
||||
prometheusAddress string
|
||||
debugAddress string
|
||||
slogHumanPath string
|
||||
slogJSONPath string
|
||||
slogStackdriverPath string
|
||||
)
|
||||
cmd := &clibase.Cmd{
|
||||
Use: "agent",
|
||||
@@ -45,19 +58,64 @@ func (r *RootCmd) workspaceAgent() *clibase.Cmd {
|
||||
ctx, cancel := context.WithCancel(inv.Context())
|
||||
defer cancel()
|
||||
|
||||
agentPorts := map[int]string{}
|
||||
var (
|
||||
ignorePorts = map[int]string{}
|
||||
isLinux = runtime.GOOS == "linux"
|
||||
|
||||
isLinux := runtime.GOOS == "linux"
|
||||
sinks = []slog.Sink{}
|
||||
logClosers = []func() error{}
|
||||
)
|
||||
defer func() {
|
||||
for _, closer := range logClosers {
|
||||
_ = closer()
|
||||
}
|
||||
}()
|
||||
|
||||
addSinkIfProvided := func(sinkFn func(io.Writer) slog.Sink, loc string) error {
|
||||
switch loc {
|
||||
case "":
|
||||
// Do nothing.
|
||||
|
||||
case "/dev/stderr":
|
||||
sinks = append(sinks, sinkFn(inv.Stderr))
|
||||
|
||||
case "/dev/stdout":
|
||||
sinks = append(sinks, sinkFn(inv.Stdout))
|
||||
|
||||
default:
|
||||
fi, err := os.OpenFile(loc, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o644)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("open log file %q: %w", loc, err)
|
||||
}
|
||||
sinks = append(sinks, sinkFn(fi))
|
||||
logClosers = append(logClosers, fi.Close)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := addSinkIfProvided(sloghuman.Sink, slogHumanPath); err != nil {
|
||||
return xerrors.Errorf("add human sink: %w", err)
|
||||
}
|
||||
if err := addSinkIfProvided(slogjson.Sink, slogJSONPath); err != nil {
|
||||
return xerrors.Errorf("add json sink: %w", err)
|
||||
}
|
||||
if err := addSinkIfProvided(slogstackdriver.Sink, slogStackdriverPath); err != nil {
|
||||
return xerrors.Errorf("add stackdriver sink: %w", err)
|
||||
}
|
||||
|
||||
// Spawn a reaper so that we don't accumulate a ton
|
||||
// of zombie processes.
|
||||
if reaper.IsInitProcess() && !noReap && isLinux {
|
||||
logWriter := &lumberjack.Logger{
|
||||
logWriter := &lumberjackWriteCloseFixer{w: &lumberjack.Logger{
|
||||
Filename: filepath.Join(logDir, "coder-agent-init.log"),
|
||||
MaxSize: 5, // MB
|
||||
}
|
||||
// Without this, rotated logs will never be deleted.
|
||||
MaxBackups: 1,
|
||||
}}
|
||||
defer logWriter.Close()
|
||||
logger := slog.Make(sloghuman.Sink(inv.Stderr), sloghuman.Sink(logWriter)).Leveled(slog.LevelDebug)
|
||||
|
||||
sinks = append(sinks, sloghuman.Sink(logWriter))
|
||||
logger := slog.Make(sinks...).Leveled(slog.LevelDebug)
|
||||
|
||||
logger.Info(ctx, "spawning reaper process")
|
||||
// Do not start a reaper on the child process. It's important
|
||||
@@ -68,7 +126,7 @@ func (r *RootCmd) workspaceAgent() *clibase.Cmd {
|
||||
reaper.WithCatchSignals(InterruptSignals...),
|
||||
)
|
||||
if err != nil {
|
||||
logger.Error(ctx, "failed to reap", slog.Error(err))
|
||||
logger.Error(ctx, "agent process reaper unable to fork", slog.Error(err))
|
||||
return xerrors.Errorf("fork reap: %w", err)
|
||||
}
|
||||
|
||||
@@ -87,28 +145,29 @@ func (r *RootCmd) workspaceAgent() *clibase.Cmd {
|
||||
ctx, stopNotify := signal.NotifyContext(ctx, InterruptSignals...)
|
||||
defer stopNotify()
|
||||
|
||||
// dumpHandler does signal handling, so we call it after the
|
||||
// DumpHandler does signal handling, so we call it after the
|
||||
// reaper.
|
||||
go dumpHandler(ctx)
|
||||
go DumpHandler(ctx)
|
||||
|
||||
ljLogger := &lumberjack.Logger{
|
||||
logWriter := &lumberjackWriteCloseFixer{w: &lumberjack.Logger{
|
||||
Filename: filepath.Join(logDir, "coder-agent.log"),
|
||||
MaxSize: 5, // MB
|
||||
}
|
||||
defer ljLogger.Close()
|
||||
logWriter := &closeWriter{w: ljLogger}
|
||||
// Without this, rotated logs will never be deleted.
|
||||
MaxBackups: 1,
|
||||
}}
|
||||
defer logWriter.Close()
|
||||
|
||||
logger := slog.Make(sloghuman.Sink(inv.Stderr), sloghuman.Sink(logWriter)).Leveled(slog.LevelDebug)
|
||||
sinks = append(sinks, sloghuman.Sink(logWriter))
|
||||
logger := slog.Make(sinks...).Leveled(slog.LevelDebug)
|
||||
|
||||
version := buildinfo.Version()
|
||||
logger.Info(ctx, "starting agent",
|
||||
logger.Info(ctx, "agent is starting now",
|
||||
slog.F("url", r.agentURL),
|
||||
slog.F("auth", auth),
|
||||
slog.F("version", version),
|
||||
)
|
||||
client := agentsdk.New(r.agentURL)
|
||||
client.SDK.Logger = logger
|
||||
client.SDK.SetLogger(logger)
|
||||
// Set a reasonable timeout so requests can't hang forever!
|
||||
// The timeout needs to be reasonably long, because requests
|
||||
// with large payloads can take a bit. e.g. startup scripts
|
||||
@@ -118,11 +177,18 @@ func (r *RootCmd) workspaceAgent() *clibase.Cmd {
|
||||
// Enable pprof handler
|
||||
// This prevents the pprof import from being accidentally deleted.
|
||||
_ = pprof.Handler
|
||||
pprofSrvClose := serveHandler(ctx, logger, nil, pprofAddress, "pprof")
|
||||
pprofSrvClose := ServeHandler(ctx, logger, nil, pprofAddress, "pprof")
|
||||
defer pprofSrvClose()
|
||||
// Do a best effort here. If this fails, it's not a big deal.
|
||||
if port, err := urlPort(pprofAddress); err == nil {
|
||||
agentPorts[port] = "pprof"
|
||||
if port, err := extractPort(pprofAddress); err == nil {
|
||||
ignorePorts[port] = "pprof"
|
||||
}
|
||||
|
||||
if port, err := extractPort(prometheusAddress); err == nil {
|
||||
ignorePorts[port] = "prometheus"
|
||||
}
|
||||
|
||||
if port, err := extractPort(debugAddress); err == nil {
|
||||
ignorePorts[port] = "debug"
|
||||
}
|
||||
|
||||
// exchangeToken returns a session token.
|
||||
@@ -186,10 +252,13 @@ func (r *RootCmd) workspaceAgent() *clibase.Cmd {
|
||||
return xerrors.Errorf("add executable to $PATH: %w", err)
|
||||
}
|
||||
|
||||
closer := agent.New(agent.Options{
|
||||
Client: client,
|
||||
Logger: logger,
|
||||
LogDir: logDir,
|
||||
prometheusRegistry := prometheus.NewRegistry()
|
||||
subsystem := inv.Environ.Get(agent.EnvAgentSubsystem)
|
||||
agnt := agent.New(agent.Options{
|
||||
Client: client,
|
||||
Logger: logger,
|
||||
LogDir: logDir,
|
||||
TailnetListenPort: uint16(tailnetListenPort),
|
||||
ExchangeToken: func(ctx context.Context) (string, error) {
|
||||
if exchangeToken == nil {
|
||||
return client.SDK.SessionToken(), nil
|
||||
@@ -204,11 +273,21 @@ func (r *RootCmd) workspaceAgent() *clibase.Cmd {
|
||||
EnvironmentVariables: map[string]string{
|
||||
"GIT_ASKPASS": executablePath,
|
||||
},
|
||||
AgentPorts: agentPorts,
|
||||
IgnorePorts: ignorePorts,
|
||||
SSHMaxTimeout: sshMaxTimeout,
|
||||
Subsystem: codersdk.AgentSubsystem(subsystem),
|
||||
|
||||
PrometheusRegistry: prometheusRegistry,
|
||||
})
|
||||
|
||||
prometheusSrvClose := ServeHandler(ctx, logger, prometheusMetricsHandler(prometheusRegistry, logger), prometheusAddress, "prometheus")
|
||||
defer prometheusSrvClose()
|
||||
|
||||
debugSrvClose := ServeHandler(ctx, logger, agnt.HTTPDebug(), debugAddress, "debug")
|
||||
defer debugSrvClose()
|
||||
|
||||
<-ctx.Done()
|
||||
return closer.Close()
|
||||
return agnt.Close()
|
||||
},
|
||||
}
|
||||
|
||||
@@ -242,18 +321,64 @@ func (r *RootCmd) workspaceAgent() *clibase.Cmd {
|
||||
Value: clibase.BoolOf(&noReap),
|
||||
},
|
||||
{
|
||||
Flag: "ssh-max-timeout",
|
||||
Default: "0",
|
||||
Flag: "ssh-max-timeout",
|
||||
// tcpip.KeepaliveIdleOption = 72h + 1min (forwardTCPSockOpts() in tailnet/conn.go)
|
||||
Default: "72h",
|
||||
Env: "CODER_AGENT_SSH_MAX_TIMEOUT",
|
||||
Description: "Specify the max timeout for a SSH connection.",
|
||||
Description: "Specify the max timeout for a SSH connection, it is advisable to set it to a minimum of 60s, but no more than 72h.",
|
||||
Value: clibase.DurationOf(&sshMaxTimeout),
|
||||
},
|
||||
{
|
||||
Flag: "tailnet-listen-port",
|
||||
Default: "0",
|
||||
Env: "CODER_AGENT_TAILNET_LISTEN_PORT",
|
||||
Description: "Specify a static port for Tailscale to use for listening.",
|
||||
Value: clibase.Int64Of(&tailnetListenPort),
|
||||
},
|
||||
{
|
||||
Flag: "prometheus-address",
|
||||
Default: "127.0.0.1:2112",
|
||||
Env: "CODER_AGENT_PROMETHEUS_ADDRESS",
|
||||
Value: clibase.StringOf(&prometheusAddress),
|
||||
Description: "The bind address to serve Prometheus metrics.",
|
||||
},
|
||||
{
|
||||
Flag: "debug-address",
|
||||
Default: "127.0.0.1:2113",
|
||||
Env: "CODER_AGENT_DEBUG_ADDRESS",
|
||||
Value: clibase.StringOf(&debugAddress),
|
||||
Description: "The bind address to serve a debug HTTP server.",
|
||||
},
|
||||
{
|
||||
Name: "Human Log Location",
|
||||
Description: "Output human-readable logs to a given file.",
|
||||
Flag: "log-human",
|
||||
Env: "CODER_AGENT_LOGGING_HUMAN",
|
||||
Default: "/dev/stderr",
|
||||
Value: clibase.StringOf(&slogHumanPath),
|
||||
},
|
||||
{
|
||||
Name: "JSON Log Location",
|
||||
Description: "Output JSON logs to a given file.",
|
||||
Flag: "log-json",
|
||||
Env: "CODER_AGENT_LOGGING_JSON",
|
||||
Default: "",
|
||||
Value: clibase.StringOf(&slogJSONPath),
|
||||
},
|
||||
{
|
||||
Name: "Stackdriver Log Location",
|
||||
Description: "Output Stackdriver compatible logs to a given file.",
|
||||
Flag: "log-stackdriver",
|
||||
Env: "CODER_AGENT_LOGGING_STACKDRIVER",
|
||||
Default: "",
|
||||
Value: clibase.StringOf(&slogStackdriverPath),
|
||||
},
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func serveHandler(ctx context.Context, logger slog.Logger, handler http.Handler, addr, name string) (closeFunc func()) {
|
||||
func ServeHandler(ctx context.Context, logger slog.Logger, handler http.Handler, addr, name string) (closeFunc func()) {
|
||||
logger.Debug(ctx, "http server listening", slog.F("addr", addr), slog.F("name", name))
|
||||
|
||||
// ReadHeaderTimeout is purposefully not enabled. It caused some issues with
|
||||
@@ -276,16 +401,16 @@ func serveHandler(ctx context.Context, logger slog.Logger, handler http.Handler,
|
||||
}
|
||||
}
|
||||
|
||||
// closeWriter is a wrapper around an io.WriteCloser that prevents
|
||||
// writes after Close. This is necessary because lumberjack will
|
||||
// re-open the file on write.
|
||||
type closeWriter struct {
|
||||
// lumberjackWriteCloseFixer is a wrapper around an io.WriteCloser that
|
||||
// prevents writes after Close. This is necessary because lumberjack
|
||||
// re-opens the file on Write.
|
||||
type lumberjackWriteCloseFixer struct {
|
||||
w io.WriteCloser
|
||||
mu sync.Mutex // Protects following.
|
||||
closed bool
|
||||
}
|
||||
|
||||
func (c *closeWriter) Close() error {
|
||||
func (c *lumberjackWriteCloseFixer) Close() error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
@@ -293,7 +418,7 @@ func (c *closeWriter) Close() error {
|
||||
return c.w.Close()
|
||||
}
|
||||
|
||||
func (c *closeWriter) Write(p []byte) (int, error) {
|
||||
func (c *lumberjackWriteCloseFixer) Write(p []byte) (int, error) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
@@ -327,10 +452,33 @@ func urlPort(u string) (int, error) {
|
||||
return -1, xerrors.Errorf("invalid url %q: %w", u, err)
|
||||
}
|
||||
if parsed.Port() != "" {
|
||||
port, err := strconv.ParseInt(parsed.Port(), 10, 64)
|
||||
if err == nil && port > 0 {
|
||||
port, err := strconv.ParseUint(parsed.Port(), 10, 16)
|
||||
if err == nil && port > 0 && port < 1<<16 {
|
||||
return int(port), nil
|
||||
}
|
||||
}
|
||||
return -1, xerrors.Errorf("invalid port: %s", u)
|
||||
}
|
||||
|
||||
func prometheusMetricsHandler(prometheusRegistry *prometheus.Registry, logger slog.Logger) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
|
||||
// Based on: https://github.com/tailscale/tailscale/blob/280255acae604796a1113861f5a84e6fa2dc6121/ipn/localapi/localapi.go#L489
|
||||
clientmetric.WritePrometheusExpositionFormat(w)
|
||||
|
||||
metricFamilies, err := prometheusRegistry.Gather()
|
||||
if err != nil {
|
||||
logger.Error(context.Background(), "Prometheus handler can't gather metric families", slog.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
for _, metricFamily := range metricFamilies {
|
||||
_, err = expfmt.MetricFamilyToText(w, metricFamily)
|
||||
if err != nil {
|
||||
logger.Error(context.Background(), "expfmt.MetricFamilyToText failed", slog.Error(err))
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -46,6 +46,12 @@ func Test_extractPort(t *testing.T) {
|
||||
urlString: "6060",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "127.0.0.1",
|
||||
urlString: "127.0.0.1:2113",
|
||||
want: 2113,
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
|
||||
+51
-9
@@ -12,8 +12,10 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/agent"
|
||||
"github.com/coder/coder/cli/clitest"
|
||||
"github.com/coder/coder/coderd/coderdtest"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/provisioner/echo"
|
||||
"github.com/coder/coder/provisionersdk/proto"
|
||||
"github.com/coder/coder/pty/ptytest"
|
||||
@@ -51,7 +53,8 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
|
||||
clitest.Start(t, inv)
|
||||
pty.ExpectMatch("starting agent")
|
||||
ctx := inv.Context()
|
||||
pty.ExpectMatchContext(ctx, "agent is starting now")
|
||||
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
|
||||
@@ -97,8 +100,7 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
//nolint:revive,staticcheck
|
||||
context.WithValue(inv.Context(), "azure-client", metadataClient),
|
||||
)
|
||||
ctx, cancelFunc := context.WithCancel(context.Background())
|
||||
defer cancelFunc()
|
||||
ctx := inv.Context()
|
||||
clitest.Start(t, inv)
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
workspace, err := client.Workspace(ctx, workspace.ID)
|
||||
@@ -110,7 +112,7 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
dialer, err := client.DialWorkspaceAgent(ctx, resources[0].Agents[0].ID, nil)
|
||||
require.NoError(t, err)
|
||||
defer dialer.Close()
|
||||
require.True(t, dialer.AwaitReachable(context.Background()))
|
||||
require.True(t, dialer.AwaitReachable(ctx))
|
||||
})
|
||||
|
||||
t.Run("AWS", func(t *testing.T) {
|
||||
@@ -151,17 +153,18 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
context.WithValue(inv.Context(), "aws-client", metadataClient),
|
||||
)
|
||||
clitest.Start(t, inv)
|
||||
ctx := inv.Context()
|
||||
coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
workspace, err := client.Workspace(inv.Context(), workspace.ID)
|
||||
workspace, err := client.Workspace(ctx, workspace.ID)
|
||||
require.NoError(t, err)
|
||||
resources := workspace.LatestBuild.Resources
|
||||
if assert.NotEmpty(t, resources) && assert.NotEmpty(t, resources[0].Agents) {
|
||||
assert.NotEmpty(t, resources[0].Agents[0].Version)
|
||||
}
|
||||
dialer, err := client.DialWorkspaceAgent(inv.Context(), resources[0].Agents[0].ID, nil)
|
||||
dialer, err := client.DialWorkspaceAgent(ctx, resources[0].Agents[0].ID, nil)
|
||||
require.NoError(t, err)
|
||||
defer dialer.Close()
|
||||
require.True(t, dialer.AwaitReachable(context.Background()))
|
||||
require.True(t, dialer.AwaitReachable(ctx))
|
||||
})
|
||||
|
||||
t.Run("GoogleCloud", func(t *testing.T) {
|
||||
@@ -202,7 +205,7 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
clitest.Start(t,
|
||||
inv.WithContext(
|
||||
//nolint:revive,staticcheck
|
||||
context.WithValue(context.Background(), "gcp-client", metadataClient),
|
||||
context.WithValue(inv.Context(), "gcp-client", metadataClient),
|
||||
),
|
||||
)
|
||||
|
||||
@@ -218,7 +221,7 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
dialer, err := client.DialWorkspaceAgent(ctx, resources[0].Agents[0].ID, nil)
|
||||
require.NoError(t, err)
|
||||
defer dialer.Close()
|
||||
require.True(t, dialer.AwaitReachable(context.Background()))
|
||||
require.True(t, dialer.AwaitReachable(ctx))
|
||||
sshClient, err := dialer.SSHClient(ctx)
|
||||
require.NoError(t, err)
|
||||
defer sshClient.Close()
|
||||
@@ -235,4 +238,43 @@ func TestWorkspaceAgent(t *testing.T) {
|
||||
_, err = uuid.Parse(strings.TrimSpace(string(token)))
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("PostStartup", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
authToken := uuid.NewString()
|
||||
client := coderdtest.New(t, &coderdtest.Options{
|
||||
IncludeProvisionerDaemon: true,
|
||||
})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: echo.ProvisionApplyWithAgent(authToken),
|
||||
})
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
workspace := coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJob(t, client, workspace.LatestBuild.ID)
|
||||
|
||||
logDir := t.TempDir()
|
||||
inv, _ := clitest.New(t,
|
||||
"agent",
|
||||
"--auth", "token",
|
||||
"--agent-token", authToken,
|
||||
"--agent-url", client.URL.String(),
|
||||
"--log-dir", logDir,
|
||||
)
|
||||
// Set the subsystem for the agent.
|
||||
inv.Environ.Set(agent.EnvAgentSubsystem, string(codersdk.AgentSubsystemEnvbox))
|
||||
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
|
||||
clitest.Start(t, inv)
|
||||
pty.ExpectMatchContext(inv.Context(), "agent is starting now")
|
||||
|
||||
resources := coderdtest.AwaitWorkspaceAgents(t, client, workspace.ID)
|
||||
require.Len(t, resources, 1)
|
||||
require.Len(t, resources[0].Agents, 1)
|
||||
require.Equal(t, codersdk.AgentSubsystemEnvbox, resources[0].Agents[0].Subsystem)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -14,15 +14,10 @@ import (
|
||||
|
||||
// Group describes a hierarchy of groups that an option or command belongs to.
|
||||
type Group struct {
|
||||
Parent *Group `json:"parent,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Children []Group `json:"children,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
}
|
||||
|
||||
func (g *Group) AddChild(child Group) {
|
||||
child.Parent = g
|
||||
g.Children = append(g.Children, child)
|
||||
Parent *Group `json:"parent,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
YAML string `json:"yaml,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
}
|
||||
|
||||
// Ancestry returns the group and all of its parents, in order.
|
||||
|
||||
+124
-92
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/spf13/pflag"
|
||||
"golang.org/x/exp/slices"
|
||||
"golang.org/x/xerrors"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// Cmd describes an executable command.
|
||||
@@ -76,10 +77,8 @@ func (c *Cmd) PrepareAll() error {
|
||||
}
|
||||
var merr error
|
||||
|
||||
slices.SortFunc(c.Options, func(a, b Option) bool {
|
||||
return a.Flag < b.Flag
|
||||
})
|
||||
for _, opt := range c.Options {
|
||||
for i := range c.Options {
|
||||
opt := &c.Options[i]
|
||||
if opt.Name == "" {
|
||||
switch {
|
||||
case opt.Flag != "":
|
||||
@@ -102,6 +101,10 @@ func (c *Cmd) PrepareAll() error {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
slices.SortFunc(c.Options, func(a, b Option) bool {
|
||||
return a.Name < b.Name
|
||||
})
|
||||
slices.SortFunc(c.Children, func(a, b *Cmd) bool {
|
||||
return a.Name() < b.Name()
|
||||
})
|
||||
@@ -142,6 +145,16 @@ func (c *Cmd) FullUsage() string {
|
||||
return strings.Join(uses, " ")
|
||||
}
|
||||
|
||||
// FullOptions returns the options of the command and its parents.
|
||||
func (c *Cmd) FullOptions() OptionSet {
|
||||
var opts OptionSet
|
||||
if c.Parent != nil {
|
||||
opts = append(opts, c.Parent.FullOptions()...)
|
||||
}
|
||||
opts = append(opts, c.Options...)
|
||||
return opts
|
||||
}
|
||||
|
||||
// Invoke creates a new invocation of the command, with
|
||||
// stdio discarded.
|
||||
//
|
||||
@@ -172,8 +185,8 @@ type Invocation struct {
|
||||
|
||||
// WithOS returns the invocation as a main package, filling in the invocation's unset
|
||||
// fields with OS defaults.
|
||||
func (i *Invocation) WithOS() *Invocation {
|
||||
return i.with(func(i *Invocation) {
|
||||
func (inv *Invocation) WithOS() *Invocation {
|
||||
return inv.with(func(i *Invocation) {
|
||||
i.Stdout = os.Stdout
|
||||
i.Stderr = os.Stderr
|
||||
i.Stdin = os.Stdin
|
||||
@@ -182,18 +195,18 @@ func (i *Invocation) WithOS() *Invocation {
|
||||
})
|
||||
}
|
||||
|
||||
func (i *Invocation) Context() context.Context {
|
||||
if i.ctx == nil {
|
||||
func (inv *Invocation) Context() context.Context {
|
||||
if inv.ctx == nil {
|
||||
return context.Background()
|
||||
}
|
||||
return i.ctx
|
||||
return inv.ctx
|
||||
}
|
||||
|
||||
func (i *Invocation) ParsedFlags() *pflag.FlagSet {
|
||||
if i.parsedFlags == nil {
|
||||
func (inv *Invocation) ParsedFlags() *pflag.FlagSet {
|
||||
if inv.parsedFlags == nil {
|
||||
panic("flags not parsed, has Run() been called?")
|
||||
}
|
||||
return i.parsedFlags
|
||||
return inv.parsedFlags
|
||||
}
|
||||
|
||||
type runState struct {
|
||||
@@ -218,30 +231,8 @@ func copyFlagSetWithout(fs *pflag.FlagSet, without string) *pflag.FlagSet {
|
||||
// run recursively executes the command and its children.
|
||||
// allArgs is wired through the stack so that global flags can be accepted
|
||||
// anywhere in the command invocation.
|
||||
func (i *Invocation) run(state *runState) error {
|
||||
err := i.Command.Options.SetDefaults()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("setting defaults: %w", err)
|
||||
}
|
||||
|
||||
// If we set the Default of an array but later see a flag for it, we
|
||||
// don't want to append, we want to replace. So, we need to keep the state
|
||||
// of defaulted array options.
|
||||
defaultedArrays := make(map[string]int)
|
||||
for _, opt := range i.Command.Options {
|
||||
sv, ok := opt.Value.(pflag.SliceValue)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if opt.Flag == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
defaultedArrays[opt.Flag] = len(sv.GetSlice())
|
||||
}
|
||||
|
||||
err = i.Command.Options.ParseEnv(i.Environ)
|
||||
func (inv *Invocation) run(state *runState) error {
|
||||
err := inv.Command.Options.ParseEnv(inv.Environ)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parsing env: %w", err)
|
||||
}
|
||||
@@ -249,8 +240,8 @@ func (i *Invocation) run(state *runState) error {
|
||||
// Now the fun part, argument parsing!
|
||||
|
||||
children := make(map[string]*Cmd)
|
||||
for _, child := range i.Command.Children {
|
||||
child.Parent = i.Command
|
||||
for _, child := range inv.Command.Children {
|
||||
child.Parent = inv.Command
|
||||
for _, name := range append(child.Aliases, child.Name()) {
|
||||
if _, ok := children[name]; ok {
|
||||
return xerrors.Errorf("duplicate command name: %s", name)
|
||||
@@ -259,49 +250,65 @@ func (i *Invocation) run(state *runState) error {
|
||||
}
|
||||
}
|
||||
|
||||
if i.parsedFlags == nil {
|
||||
i.parsedFlags = pflag.NewFlagSet(i.Command.Name(), pflag.ContinueOnError)
|
||||
if inv.parsedFlags == nil {
|
||||
inv.parsedFlags = pflag.NewFlagSet(inv.Command.Name(), pflag.ContinueOnError)
|
||||
// We handle Usage ourselves.
|
||||
i.parsedFlags.Usage = func() {}
|
||||
inv.parsedFlags.Usage = func() {}
|
||||
}
|
||||
|
||||
// If we find a duplicate flag, we want the deeper command's flag to override
|
||||
// the shallow one. Unfortunately, pflag has no way to remove a flag, so we
|
||||
// have to create a copy of the flagset without a value.
|
||||
i.Command.Options.FlagSet().VisitAll(func(f *pflag.Flag) {
|
||||
if i.parsedFlags.Lookup(f.Name) != nil {
|
||||
i.parsedFlags = copyFlagSetWithout(i.parsedFlags, f.Name)
|
||||
inv.Command.Options.FlagSet().VisitAll(func(f *pflag.Flag) {
|
||||
if inv.parsedFlags.Lookup(f.Name) != nil {
|
||||
inv.parsedFlags = copyFlagSetWithout(inv.parsedFlags, f.Name)
|
||||
}
|
||||
i.parsedFlags.AddFlag(f)
|
||||
inv.parsedFlags.AddFlag(f)
|
||||
})
|
||||
|
||||
var parsedArgs []string
|
||||
|
||||
if !i.Command.RawArgs {
|
||||
if !inv.Command.RawArgs {
|
||||
// Flag parsing will fail on intermediate commands in the command tree,
|
||||
// so we check the error after looking for a child command.
|
||||
state.flagParseErr = i.parsedFlags.Parse(state.allArgs)
|
||||
parsedArgs = i.parsedFlags.Args()
|
||||
state.flagParseErr = inv.parsedFlags.Parse(state.allArgs)
|
||||
parsedArgs = inv.parsedFlags.Args()
|
||||
}
|
||||
|
||||
i.parsedFlags.VisitAll(func(f *pflag.Flag) {
|
||||
i, ok := defaultedArrays[f.Name]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
// Set value sources for flags.
|
||||
for i, opt := range inv.Command.Options {
|
||||
if fl := inv.parsedFlags.Lookup(opt.Flag); fl != nil && fl.Changed {
|
||||
inv.Command.Options[i].ValueSource = ValueSourceFlag
|
||||
}
|
||||
}
|
||||
|
||||
if !f.Changed {
|
||||
return
|
||||
}
|
||||
// Read YAML configs, if any.
|
||||
for _, opt := range inv.Command.Options {
|
||||
path, ok := opt.Value.(*YAMLConfigPath)
|
||||
if !ok || path.String() == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
sv, ok := f.Value.(pflag.SliceValue)
|
||||
if !ok {
|
||||
panic("defaulted array option is not a slice value")
|
||||
}
|
||||
err := sv.Replace(sv.GetSlice()[i:])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
})
|
||||
byt, err := os.ReadFile(path.String())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("reading yaml: %w", err)
|
||||
}
|
||||
|
||||
var n yaml.Node
|
||||
err = yaml.Unmarshal(byt, &n)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("decoding yaml: %w", err)
|
||||
}
|
||||
|
||||
err = inv.Command.Options.UnmarshalYAML(&n)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("applying yaml: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
err = inv.Command.Options.SetDefaults()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("setting defaults: %w", err)
|
||||
}
|
||||
|
||||
// Run child command if found (next child only)
|
||||
@@ -310,64 +317,76 @@ func (i *Invocation) run(state *runState) error {
|
||||
if len(parsedArgs) > state.commandDepth {
|
||||
nextArg := parsedArgs[state.commandDepth]
|
||||
if child, ok := children[nextArg]; ok {
|
||||
child.Parent = i.Command
|
||||
i.Command = child
|
||||
child.Parent = inv.Command
|
||||
inv.Command = child
|
||||
state.commandDepth++
|
||||
return i.run(state)
|
||||
return inv.run(state)
|
||||
}
|
||||
}
|
||||
|
||||
// Flag parse errors are irrelevant for raw args commands.
|
||||
if !i.Command.RawArgs && state.flagParseErr != nil && !errors.Is(state.flagParseErr, pflag.ErrHelp) {
|
||||
if !inv.Command.RawArgs && state.flagParseErr != nil && !errors.Is(state.flagParseErr, pflag.ErrHelp) {
|
||||
return xerrors.Errorf(
|
||||
"parsing flags (%v) for %q: %w",
|
||||
state.allArgs,
|
||||
i.Command.FullName(), state.flagParseErr,
|
||||
inv.Command.FullName(), state.flagParseErr,
|
||||
)
|
||||
}
|
||||
|
||||
if i.Command.RawArgs {
|
||||
// All options should be set. Check all required options have sources,
|
||||
// meaning they were set by the user in some way (env, flag, etc).
|
||||
var missing []string
|
||||
for _, opt := range inv.Command.Options {
|
||||
if opt.Required && opt.ValueSource == ValueSourceNone {
|
||||
missing = append(missing, opt.Flag)
|
||||
}
|
||||
}
|
||||
if len(missing) > 0 {
|
||||
return xerrors.Errorf("Missing values for the required flags: %s", strings.Join(missing, ", "))
|
||||
}
|
||||
|
||||
if inv.Command.RawArgs {
|
||||
// If we're at the root command, then the name is omitted
|
||||
// from the arguments, so we can just use the entire slice.
|
||||
if state.commandDepth == 0 {
|
||||
i.Args = state.allArgs
|
||||
inv.Args = state.allArgs
|
||||
} else {
|
||||
argPos, err := findArg(i.Command.Name(), state.allArgs, i.parsedFlags)
|
||||
argPos, err := findArg(inv.Command.Name(), state.allArgs, inv.parsedFlags)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
i.Args = state.allArgs[argPos+1:]
|
||||
inv.Args = state.allArgs[argPos+1:]
|
||||
}
|
||||
} else {
|
||||
// In non-raw-arg mode, we want to skip over flags.
|
||||
i.Args = parsedArgs[state.commandDepth:]
|
||||
inv.Args = parsedArgs[state.commandDepth:]
|
||||
}
|
||||
|
||||
mw := i.Command.Middleware
|
||||
mw := inv.Command.Middleware
|
||||
if mw == nil {
|
||||
mw = Chain()
|
||||
}
|
||||
|
||||
ctx := i.ctx
|
||||
ctx := inv.ctx
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
i = i.WithContext(ctx)
|
||||
inv = inv.WithContext(ctx)
|
||||
|
||||
if i.Command.Handler == nil || errors.Is(state.flagParseErr, pflag.ErrHelp) {
|
||||
if i.Command.HelpHandler == nil {
|
||||
return xerrors.Errorf("no handler or help for command %s", i.Command.FullName())
|
||||
if inv.Command.Handler == nil || errors.Is(state.flagParseErr, pflag.ErrHelp) {
|
||||
if inv.Command.HelpHandler == nil {
|
||||
return xerrors.Errorf("no handler or help for command %s", inv.Command.FullName())
|
||||
}
|
||||
return i.Command.HelpHandler(i)
|
||||
return inv.Command.HelpHandler(inv)
|
||||
}
|
||||
|
||||
err = mw(i.Command.Handler)(i)
|
||||
err = mw(inv.Command.Handler)(inv)
|
||||
if err != nil {
|
||||
return &RunCommandError{
|
||||
Cmd: i.Command,
|
||||
Cmd: inv.Command,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
@@ -430,33 +449,46 @@ func findArg(want string, args []string, fs *pflag.FlagSet) (int, error) {
|
||||
// If two command share a flag name, the first command wins.
|
||||
//
|
||||
//nolint:revive
|
||||
func (i *Invocation) Run() (err error) {
|
||||
func (inv *Invocation) Run() (err error) {
|
||||
defer func() {
|
||||
// Pflag is panicky, so additional context is helpful in tests.
|
||||
if flag.Lookup("test.v") == nil {
|
||||
return
|
||||
}
|
||||
if r := recover(); r != nil {
|
||||
err = xerrors.Errorf("panic recovered for %s: %v", i.Command.FullName(), r)
|
||||
err = xerrors.Errorf("panic recovered for %s: %v", inv.Command.FullName(), r)
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
err = i.run(&runState{
|
||||
allArgs: i.Args,
|
||||
// We close Stdin to prevent deadlocks, e.g. when the command
|
||||
// has ended but an io.Copy is still reading from Stdin.
|
||||
defer func() {
|
||||
if inv.Stdin == nil {
|
||||
return
|
||||
}
|
||||
rc, ok := inv.Stdin.(io.ReadCloser)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
e := rc.Close()
|
||||
err = errors.Join(err, e)
|
||||
}()
|
||||
err = inv.run(&runState{
|
||||
allArgs: inv.Args,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// WithContext returns a copy of the Invocation with the given context.
|
||||
func (i *Invocation) WithContext(ctx context.Context) *Invocation {
|
||||
return i.with(func(i *Invocation) {
|
||||
func (inv *Invocation) WithContext(ctx context.Context) *Invocation {
|
||||
return inv.with(func(i *Invocation) {
|
||||
i.ctx = ctx
|
||||
})
|
||||
}
|
||||
|
||||
// with returns a copy of the Invocation with the given function applied.
|
||||
func (i *Invocation) with(fn func(*Invocation)) *Invocation {
|
||||
i2 := *i
|
||||
func (inv *Invocation) with(fn func(*Invocation)) *Invocation {
|
||||
i2 := *inv
|
||||
fn(&i2)
|
||||
return &i2
|
||||
}
|
||||
|
||||
+217
-3
@@ -3,6 +3,8 @@ package clibase_test
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
@@ -36,6 +38,8 @@ func TestCommand(t *testing.T) {
|
||||
verbose bool
|
||||
lower bool
|
||||
prefix string
|
||||
reqBool bool
|
||||
reqStr string
|
||||
)
|
||||
return &clibase.Cmd{
|
||||
Use: "root [subcommand]",
|
||||
@@ -52,6 +56,34 @@ func TestCommand(t *testing.T) {
|
||||
},
|
||||
},
|
||||
Children: []*clibase.Cmd{
|
||||
{
|
||||
Use: "required-flag --req-bool=true --req-string=foo",
|
||||
Short: "Example with required flags",
|
||||
Options: clibase.OptionSet{
|
||||
clibase.Option{
|
||||
Name: "req-bool",
|
||||
Flag: "req-bool",
|
||||
Value: clibase.BoolOf(&reqBool),
|
||||
Required: true,
|
||||
},
|
||||
clibase.Option{
|
||||
Name: "req-string",
|
||||
Flag: "req-string",
|
||||
Value: clibase.Validate(clibase.StringOf(&reqStr), func(value *clibase.String) error {
|
||||
ok := strings.Contains(value.String(), " ")
|
||||
if !ok {
|
||||
return xerrors.Errorf("string must contain a space")
|
||||
}
|
||||
return nil
|
||||
}),
|
||||
Required: true,
|
||||
},
|
||||
},
|
||||
Handler: func(i *clibase.Invocation) error {
|
||||
_, _ = i.Stdout.Write([]byte(fmt.Sprintf("%s-%t", reqStr, reqBool)))
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
Use: "toupper [word]",
|
||||
Short: "Converts a word to upper case",
|
||||
@@ -66,8 +98,8 @@ func TestCommand(t *testing.T) {
|
||||
Value: clibase.BoolOf(&lower),
|
||||
},
|
||||
},
|
||||
Handler: (func(i *clibase.Invocation) error {
|
||||
i.Stdout.Write([]byte(prefix))
|
||||
Handler: func(i *clibase.Invocation) error {
|
||||
_, _ = i.Stdout.Write([]byte(prefix))
|
||||
w := i.Args[0]
|
||||
if lower {
|
||||
w = strings.ToLower(w)
|
||||
@@ -83,7 +115,7 @@ func TestCommand(t *testing.T) {
|
||||
i.Stdout.Write([]byte("!!!"))
|
||||
}
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -211,6 +243,60 @@ func TestCommand(t *testing.T) {
|
||||
fio := fakeIO(i)
|
||||
require.Error(t, i.Run(), fio.Stdout.String())
|
||||
})
|
||||
|
||||
t.Run("RequiredFlagsMissing", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
i := cmd().Invoke(
|
||||
"required-flag",
|
||||
)
|
||||
fio := fakeIO(i)
|
||||
err := i.Run()
|
||||
require.Error(t, err, fio.Stdout.String())
|
||||
require.ErrorContains(t, err, "Missing values")
|
||||
})
|
||||
|
||||
t.Run("RequiredFlagsMissingBool", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
i := cmd().Invoke(
|
||||
"required-flag", "--req-string", "foo bar",
|
||||
)
|
||||
fio := fakeIO(i)
|
||||
err := i.Run()
|
||||
require.Error(t, err, fio.Stdout.String())
|
||||
require.ErrorContains(t, err, "Missing values for the required flags: req-bool")
|
||||
})
|
||||
|
||||
t.Run("RequiredFlagsMissingString", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
i := cmd().Invoke(
|
||||
"required-flag", "--req-bool", "true",
|
||||
)
|
||||
fio := fakeIO(i)
|
||||
err := i.Run()
|
||||
require.Error(t, err, fio.Stdout.String())
|
||||
require.ErrorContains(t, err, "Missing values for the required flags: req-string")
|
||||
})
|
||||
|
||||
t.Run("RequiredFlagsInvalid", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
i := cmd().Invoke(
|
||||
"required-flag", "--req-string", "nospace",
|
||||
)
|
||||
fio := fakeIO(i)
|
||||
err := i.Run()
|
||||
require.Error(t, err, fio.Stdout.String())
|
||||
require.ErrorContains(t, err, "string must contain a space")
|
||||
})
|
||||
|
||||
t.Run("RequiredFlagsOK", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
i := cmd().Invoke(
|
||||
"required-flag", "--req-bool", "true", "--req-string", "foo bar",
|
||||
)
|
||||
fio := fakeIO(i)
|
||||
err := i.Run()
|
||||
require.NoError(t, err, fio.Stdout.String())
|
||||
})
|
||||
}
|
||||
|
||||
func TestCommand_DeepNest(t *testing.T) {
|
||||
@@ -247,6 +333,7 @@ func TestCommand_FlagOverride(t *testing.T) {
|
||||
Use: "1",
|
||||
Options: clibase.OptionSet{
|
||||
{
|
||||
Name: "flag",
|
||||
Flag: "f",
|
||||
Value: clibase.DiscardValue,
|
||||
},
|
||||
@@ -256,6 +343,7 @@ func TestCommand_FlagOverride(t *testing.T) {
|
||||
Use: "2",
|
||||
Options: clibase.OptionSet{
|
||||
{
|
||||
Name: "flag",
|
||||
Flag: "f",
|
||||
Value: clibase.StringOf(&flag),
|
||||
},
|
||||
@@ -503,3 +591,129 @@ func TestCommand_SliceFlags(t *testing.T) {
|
||||
err = cmd("bad", "bad", "bad").Invoke().Run()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestCommand_EmptySlice(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cmd := func(want ...string) *clibase.Cmd {
|
||||
var got []string
|
||||
return &clibase.Cmd{
|
||||
Use: "root",
|
||||
Options: clibase.OptionSet{
|
||||
{
|
||||
Name: "arr",
|
||||
Flag: "arr",
|
||||
Default: "def,def,def",
|
||||
Env: "ARR",
|
||||
Value: clibase.StringArrayOf(&got),
|
||||
},
|
||||
},
|
||||
Handler: (func(i *clibase.Invocation) error {
|
||||
require.Equal(t, want, got)
|
||||
return nil
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// Base-case, uses default.
|
||||
err := cmd("def", "def", "def").Invoke().Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Empty-env uses default, too.
|
||||
inv := cmd("def", "def", "def").Invoke()
|
||||
inv.Environ.Set("ARR", "")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Reset to nothing at all via flag.
|
||||
inv = cmd().Invoke("--arr", "")
|
||||
inv.Environ.Set("ARR", "cant see")
|
||||
err = inv.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Reset to a specific value with flag.
|
||||
inv = cmd("great").Invoke("--arr", "great")
|
||||
inv.Environ.Set("ARR", "")
|
||||
err = inv.Run()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestCommand_DefaultsOverride(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
test := func(name string, want string, fn func(t *testing.T, inv *clibase.Invocation)) {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var (
|
||||
got string
|
||||
config clibase.YAMLConfigPath
|
||||
)
|
||||
cmd := &clibase.Cmd{
|
||||
Options: clibase.OptionSet{
|
||||
{
|
||||
Name: "url",
|
||||
Flag: "url",
|
||||
Default: "def.com",
|
||||
Env: "URL",
|
||||
Value: clibase.StringOf(&got),
|
||||
YAML: "url",
|
||||
},
|
||||
{
|
||||
Name: "config",
|
||||
Flag: "config",
|
||||
Default: "",
|
||||
Value: &config,
|
||||
},
|
||||
},
|
||||
Handler: (func(i *clibase.Invocation) error {
|
||||
_, _ = fmt.Fprintf(i.Stdout, "%s", got)
|
||||
return nil
|
||||
}),
|
||||
}
|
||||
|
||||
inv := cmd.Invoke()
|
||||
stdio := fakeIO(inv)
|
||||
fn(t, inv)
|
||||
err := inv.Run()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, want, stdio.Stdout.String())
|
||||
})
|
||||
}
|
||||
|
||||
test("DefaultOverNothing", "def.com", func(t *testing.T, inv *clibase.Invocation) {})
|
||||
|
||||
test("FlagOverDefault", "good.com", func(t *testing.T, inv *clibase.Invocation) {
|
||||
inv.Args = []string{"--url", "good.com"}
|
||||
})
|
||||
|
||||
test("EnvOverDefault", "good.com", func(t *testing.T, inv *clibase.Invocation) {
|
||||
inv.Environ.Set("URL", "good.com")
|
||||
})
|
||||
|
||||
test("FlagOverEnv", "good.com", func(t *testing.T, inv *clibase.Invocation) {
|
||||
inv.Environ.Set("URL", "bad.com")
|
||||
inv.Args = []string{"--url", "good.com"}
|
||||
})
|
||||
|
||||
test("FlagOverYAML", "good.com", func(t *testing.T, inv *clibase.Invocation) {
|
||||
fi, err := os.CreateTemp(t.TempDir(), "config.yaml")
|
||||
require.NoError(t, err)
|
||||
defer fi.Close()
|
||||
|
||||
_, err = fi.WriteString("url: bad.com")
|
||||
require.NoError(t, err)
|
||||
|
||||
inv.Args = []string{"--config", fi.Name(), "--url", "good.com"}
|
||||
})
|
||||
|
||||
test("YAMLOverDefault", "good.com", func(t *testing.T, inv *clibase.Invocation) {
|
||||
fi, err := os.CreateTemp(t.TempDir(), "config.yaml")
|
||||
require.NoError(t, err)
|
||||
defer fi.Close()
|
||||
|
||||
_, err = fi.WriteString("url: good.com")
|
||||
require.NoError(t, err)
|
||||
|
||||
inv.Args = []string{"--config", fi.Name()}
|
||||
})
|
||||
}
|
||||
|
||||
+66
-4
@@ -2,16 +2,31 @@ package clibase
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/spf13/pflag"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
type ValueSource string
|
||||
|
||||
const (
|
||||
ValueSourceNone ValueSource = ""
|
||||
ValueSourceFlag ValueSource = "flag"
|
||||
ValueSourceEnv ValueSource = "env"
|
||||
ValueSourceYAML ValueSource = "yaml"
|
||||
ValueSourceDefault ValueSource = "default"
|
||||
)
|
||||
|
||||
// Option is a configuration option for a CLI application.
|
||||
type Option struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
// Required means this value must be set by some means. It requires
|
||||
// `ValueSource != ValueSourceNone`
|
||||
// If `Default` is set, then `Required` is ignored.
|
||||
Required bool `json:"required,omitempty"`
|
||||
|
||||
// Flag is the long name of the flag used to configure this option. If unset,
|
||||
// flag configuring is disabled.
|
||||
@@ -46,6 +61,19 @@ type Option struct {
|
||||
UseInstead []Option `json:"use_instead,omitempty"`
|
||||
|
||||
Hidden bool `json:"hidden,omitempty"`
|
||||
|
||||
ValueSource ValueSource `json:"value_source,omitempty"`
|
||||
}
|
||||
|
||||
func (o Option) YAMLPath() string {
|
||||
if o.YAML == "" {
|
||||
return ""
|
||||
}
|
||||
var gs []string
|
||||
for _, g := range o.Group.Ancestry() {
|
||||
gs = append(gs, g.YAML)
|
||||
}
|
||||
return strings.Join(append(gs, o.YAML), ".")
|
||||
}
|
||||
|
||||
// OptionSet is a group of options that can be applied to a command.
|
||||
@@ -56,6 +84,17 @@ func (s *OptionSet) Add(opts ...Option) {
|
||||
*s = append(*s, opts...)
|
||||
}
|
||||
|
||||
// Filter will only return options that match the given filter. (return true)
|
||||
func (s OptionSet) Filter(filter func(opt Option) bool) OptionSet {
|
||||
cpy := make(OptionSet, 0)
|
||||
for _, opt := range s {
|
||||
if filter(opt) {
|
||||
cpy = append(cpy, opt)
|
||||
}
|
||||
}
|
||||
return cpy
|
||||
}
|
||||
|
||||
// FlagSet returns a pflag.FlagSet for the OptionSet.
|
||||
func (s *OptionSet) FlagSet() *pflag.FlagSet {
|
||||
if s == nil {
|
||||
@@ -115,7 +154,7 @@ func (s *OptionSet) ParseEnv(vs []EnvVar) error {
|
||||
envs[v.Name] = v.Value
|
||||
}
|
||||
|
||||
for _, opt := range *s {
|
||||
for i, opt := range *s {
|
||||
if opt.Env == "" {
|
||||
continue
|
||||
}
|
||||
@@ -126,10 +165,14 @@ func (s *OptionSet) ParseEnv(vs []EnvVar) error {
|
||||
// way for a user to change a Default value to an empty string from
|
||||
// the environment. Unfortunately, we have old configuration files
|
||||
// that rely on the faulty behavior.
|
||||
//
|
||||
// TODO: We should remove this hack in May 2023, when deployments
|
||||
// have had months to migrate to the new behavior.
|
||||
if !ok || envVal == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
(*s)[i].ValueSource = ValueSourceEnv
|
||||
if err := opt.Value.Set(envVal); err != nil {
|
||||
merr = multierror.Append(
|
||||
merr, xerrors.Errorf("parse %q: %w", opt.Name, err),
|
||||
@@ -140,8 +183,8 @@ func (s *OptionSet) ParseEnv(vs []EnvVar) error {
|
||||
return merr.ErrorOrNil()
|
||||
}
|
||||
|
||||
// SetDefaults sets the default values for each Option.
|
||||
// It should be called before all parsing (e.g. ParseFlags, ParseEnv).
|
||||
// SetDefaults sets the default values for each Option, skipping values
|
||||
// that already have a value source.
|
||||
func (s *OptionSet) SetDefaults() error {
|
||||
if s == nil {
|
||||
return nil
|
||||
@@ -149,10 +192,16 @@ func (s *OptionSet) SetDefaults() error {
|
||||
|
||||
var merr *multierror.Error
|
||||
|
||||
for _, opt := range *s {
|
||||
for i, opt := range *s {
|
||||
// Skip values that may have already been set by the user.
|
||||
if opt.ValueSource != ValueSourceNone {
|
||||
continue
|
||||
}
|
||||
|
||||
if opt.Default == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if opt.Value == nil {
|
||||
merr = multierror.Append(
|
||||
merr,
|
||||
@@ -163,6 +212,7 @@ func (s *OptionSet) SetDefaults() error {
|
||||
)
|
||||
continue
|
||||
}
|
||||
(*s)[i].ValueSource = ValueSourceDefault
|
||||
if err := opt.Value.Set(opt.Default); err != nil {
|
||||
merr = multierror.Append(
|
||||
merr, xerrors.Errorf("parse %q: %w", opt.Name, err),
|
||||
@@ -171,3 +221,15 @@ func (s *OptionSet) SetDefaults() error {
|
||||
}
|
||||
return merr.ErrorOrNil()
|
||||
}
|
||||
|
||||
// ByName returns the Option with the given name, or nil if no such option
|
||||
// exists.
|
||||
func (s *OptionSet) ByName(name string) *Option {
|
||||
for i := range *s {
|
||||
opt := &(*s)[i]
|
||||
if opt.Name == name {
|
||||
return opt
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -118,4 +118,52 @@ func TestOptionSet_ParseEnv(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, "defname", workspaceName)
|
||||
})
|
||||
|
||||
t.Run("StringSlice", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var actual clibase.StringArray
|
||||
expected := []string{"foo", "bar", "baz"}
|
||||
|
||||
os := clibase.OptionSet{
|
||||
clibase.Option{
|
||||
Name: "name",
|
||||
Value: &actual,
|
||||
Env: "NAMES",
|
||||
},
|
||||
}
|
||||
|
||||
err := os.SetDefaults()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = os.ParseEnv([]clibase.EnvVar{
|
||||
{Name: "NAMES", Value: "foo,bar,baz"},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, expected, actual)
|
||||
})
|
||||
|
||||
t.Run("StructMapStringString", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
var actual clibase.Struct[map[string]string]
|
||||
expected := map[string]string{"foo": "bar", "baz": "zap"}
|
||||
|
||||
os := clibase.OptionSet{
|
||||
clibase.Option{
|
||||
Name: "labels",
|
||||
Value: &actual,
|
||||
Env: "LABELS",
|
||||
},
|
||||
}
|
||||
|
||||
err := os.SetDefaults()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = os.ParseEnv([]clibase.EnvVar{
|
||||
{Name: "LABELS", Value: `{"foo":"bar","baz":"zap"}`},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, expected, actual.Value)
|
||||
})
|
||||
}
|
||||
|
||||
+101
-13
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -23,6 +24,40 @@ type NoOptDefValuer interface {
|
||||
NoOptDefValue() string
|
||||
}
|
||||
|
||||
// Validator is a wrapper around a pflag.Value that allows for validation
|
||||
// of the value after or before it has been set.
|
||||
type Validator[T pflag.Value] struct {
|
||||
Value T
|
||||
// validate is called after the value is set.
|
||||
validate func(T) error
|
||||
}
|
||||
|
||||
func Validate[T pflag.Value](opt T, validate func(value T) error) *Validator[T] {
|
||||
return &Validator[T]{Value: opt, validate: validate}
|
||||
}
|
||||
|
||||
func (i *Validator[T]) String() string {
|
||||
return i.Value.String()
|
||||
}
|
||||
|
||||
func (i *Validator[T]) Set(input string) error {
|
||||
err := i.Value.Set(input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if i.validate != nil {
|
||||
err = i.validate(i.Value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *Validator[T]) Type() string {
|
||||
return i.Value.Type()
|
||||
}
|
||||
|
||||
// values.go contains a standard set of value types that can be used as
|
||||
// Option Values.
|
||||
|
||||
@@ -146,6 +181,10 @@ func writeAsCSV(vals []string) string {
|
||||
}
|
||||
|
||||
func (s *StringArray) Set(v string) error {
|
||||
if v == "" {
|
||||
*s = nil
|
||||
return nil
|
||||
}
|
||||
ss, err := readAsCSV(v)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -186,23 +225,21 @@ func (d *Duration) String() string {
|
||||
return time.Duration(*d).String()
|
||||
}
|
||||
|
||||
func (d *Duration) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(d.String())
|
||||
}
|
||||
|
||||
func (d *Duration) UnmarshalJSON(b []byte) error {
|
||||
var s string
|
||||
err := json.Unmarshal(b, &s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return d.Set(s)
|
||||
}
|
||||
|
||||
func (Duration) Type() string {
|
||||
return "duration"
|
||||
}
|
||||
|
||||
func (d *Duration) MarshalYAML() (interface{}, error) {
|
||||
return yaml.Node{
|
||||
Kind: yaml.ScalarNode,
|
||||
Value: d.String(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *Duration) UnmarshalYAML(n *yaml.Node) error {
|
||||
return d.Set(n.Value)
|
||||
}
|
||||
|
||||
type URL url.URL
|
||||
|
||||
func URLOf(u *url.URL) *URL {
|
||||
@@ -223,6 +260,17 @@ func (u *URL) String() string {
|
||||
return uu.String()
|
||||
}
|
||||
|
||||
func (u *URL) MarshalYAML() (interface{}, error) {
|
||||
return yaml.Node{
|
||||
Kind: yaml.ScalarNode,
|
||||
Value: u.String(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (u *URL) UnmarshalYAML(n *yaml.Node) error {
|
||||
return u.Set(n.Value)
|
||||
}
|
||||
|
||||
func (u *URL) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(u.String())
|
||||
}
|
||||
@@ -286,6 +334,17 @@ func (hp *HostPort) UnmarshalJSON(b []byte) error {
|
||||
return hp.Set(s)
|
||||
}
|
||||
|
||||
func (hp *HostPort) MarshalYAML() (interface{}, error) {
|
||||
return yaml.Node{
|
||||
Kind: yaml.ScalarNode,
|
||||
Value: hp.String(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (hp *HostPort) UnmarshalYAML(n *yaml.Node) error {
|
||||
return hp.Set(n.Value)
|
||||
}
|
||||
|
||||
func (*HostPort) Type() string {
|
||||
return "host:port"
|
||||
}
|
||||
@@ -304,10 +363,12 @@ type Struct[T any] struct {
|
||||
Value T
|
||||
}
|
||||
|
||||
//nolint:revive
|
||||
func (s *Struct[T]) Set(v string) error {
|
||||
return yaml.Unmarshal([]byte(v), &s.Value)
|
||||
}
|
||||
|
||||
//nolint:revive
|
||||
func (s *Struct[T]) String() string {
|
||||
byt, err := yaml.Marshal(s.Value)
|
||||
if err != nil {
|
||||
@@ -326,9 +387,17 @@ func (s *Struct[T]) MarshalYAML() (interface{}, error) {
|
||||
}
|
||||
|
||||
func (s *Struct[T]) UnmarshalYAML(n *yaml.Node) error {
|
||||
// HACK: for compatibility with flags, we use nil slices instead of empty
|
||||
// slices. In most cases, nil slices and empty slices are treated
|
||||
// the same, so this behavior may be removed at some point.
|
||||
if typ := reflect.TypeOf(s.Value); typ.Kind() == reflect.Slice && len(n.Content) == 0 {
|
||||
reflect.ValueOf(&s.Value).Elem().Set(reflect.Zero(typ))
|
||||
return nil
|
||||
}
|
||||
return n.Decode(&s.Value)
|
||||
}
|
||||
|
||||
//nolint:revive
|
||||
func (s *Struct[T]) Type() string {
|
||||
return fmt.Sprintf("struct[%T]", s.Value)
|
||||
}
|
||||
@@ -391,3 +460,22 @@ func (e *Enum) Type() string {
|
||||
func (e *Enum) String() string {
|
||||
return *e.Value
|
||||
}
|
||||
|
||||
var _ pflag.Value = (*YAMLConfigPath)(nil)
|
||||
|
||||
// YAMLConfigPath is a special value type that encodes a path to a YAML
|
||||
// configuration file where options are read from.
|
||||
type YAMLConfigPath string
|
||||
|
||||
func (p *YAMLConfigPath) Set(v string) error {
|
||||
*p = YAMLConfigPath(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *YAMLConfigPath) String() string {
|
||||
return string(*p)
|
||||
}
|
||||
|
||||
func (*YAMLConfigPath) Type() string {
|
||||
return "yaml-config-path"
|
||||
}
|
||||
|
||||
+202
-12
@@ -1,12 +1,20 @@
|
||||
package clibase
|
||||
|
||||
import (
|
||||
"github.com/iancoleman/strcase"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/mitchellh/go-wordwrap"
|
||||
"golang.org/x/xerrors"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
var (
|
||||
_ yaml.Marshaler = new(OptionSet)
|
||||
_ yaml.Unmarshaler = new(OptionSet)
|
||||
)
|
||||
|
||||
// deepMapNode returns the mapping node at the given path,
|
||||
// creating it if it doesn't exist.
|
||||
func deepMapNode(n *yaml.Node, path []string, headComment string) *yaml.Node {
|
||||
@@ -36,27 +44,43 @@ func deepMapNode(n *yaml.Node, path []string, headComment string) *yaml.Node {
|
||||
return deepMapNode(&valueNode, path[1:], headComment)
|
||||
}
|
||||
|
||||
// ToYAML converts the option set to a YAML node, that can be
|
||||
// MarshalYAML converts the option set to a YAML node, that can be
|
||||
// converted into bytes via yaml.Marshal.
|
||||
//
|
||||
// The node is returned to enable post-processing higher up in
|
||||
// the stack.
|
||||
func (s OptionSet) ToYAML() (*yaml.Node, error) {
|
||||
//
|
||||
// It is isomorphic with FromYAML.
|
||||
func (s *OptionSet) MarshalYAML() (any, error) {
|
||||
root := yaml.Node{
|
||||
Kind: yaml.MappingNode,
|
||||
}
|
||||
|
||||
for _, opt := range s {
|
||||
for _, opt := range *s {
|
||||
if opt.YAML == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
defValue := opt.Default
|
||||
if defValue == "" {
|
||||
defValue = "<unset>"
|
||||
}
|
||||
comment := wordwrap.WrapString(
|
||||
fmt.Sprintf("%s\n(default: %s, type: %s)", opt.Description, defValue, opt.Value.Type()),
|
||||
80,
|
||||
)
|
||||
nameNode := yaml.Node{
|
||||
Kind: yaml.ScalarNode,
|
||||
Value: opt.YAML,
|
||||
HeadComment: wordwrap.WrapString(opt.Description, 80),
|
||||
HeadComment: comment,
|
||||
}
|
||||
var valueNode yaml.Node
|
||||
if m, ok := opt.Value.(yaml.Marshaler); ok {
|
||||
if opt.Value == nil {
|
||||
valueNode = yaml.Node{
|
||||
Kind: yaml.ScalarNode,
|
||||
Value: "null",
|
||||
}
|
||||
} else if m, ok := opt.Value.(yaml.Marshaler); ok {
|
||||
v, err := m.MarshalYAML()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf(
|
||||
@@ -71,21 +95,43 @@ func (s OptionSet) ToYAML() (*yaml.Node, error) {
|
||||
)
|
||||
}
|
||||
} else {
|
||||
valueNode = yaml.Node{
|
||||
Kind: yaml.ScalarNode,
|
||||
Value: opt.Value.String(),
|
||||
// The all-other types case.
|
||||
//
|
||||
// A bit of a hack, we marshal and then unmarshal to get
|
||||
// the underlying node.
|
||||
byt, err := yaml.Marshal(opt.Value)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf(
|
||||
"marshal %q: %w", opt.Name, err,
|
||||
)
|
||||
}
|
||||
|
||||
var docNode yaml.Node
|
||||
err = yaml.Unmarshal(byt, &docNode)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf(
|
||||
"unmarshal %q: %w", opt.Name, err,
|
||||
)
|
||||
}
|
||||
if len(docNode.Content) != 1 {
|
||||
return nil, xerrors.Errorf(
|
||||
"unmarshal %q: expected one node, got %d",
|
||||
opt.Name, len(docNode.Content),
|
||||
)
|
||||
}
|
||||
|
||||
valueNode = *docNode.Content[0]
|
||||
}
|
||||
var group []string
|
||||
for _, g := range opt.Group.Ancestry() {
|
||||
if g.Name == "" {
|
||||
if g.YAML == "" {
|
||||
return nil, xerrors.Errorf(
|
||||
"group name is empty for %q, groups: %+v",
|
||||
"group yaml name is empty for %q, groups: %+v",
|
||||
opt.Name,
|
||||
opt.Group,
|
||||
)
|
||||
}
|
||||
group = append(group, strcase.ToLowerCamel(g.Name))
|
||||
group = append(group, g.YAML)
|
||||
}
|
||||
var groupDesc string
|
||||
if opt.Group != nil {
|
||||
@@ -103,3 +149,147 @@ func (s OptionSet) ToYAML() (*yaml.Node, error) {
|
||||
}
|
||||
return &root, nil
|
||||
}
|
||||
|
||||
// mapYAMLNodes converts parent into a map with keys of form "group.subgroup.option"
|
||||
// and values as the corresponding YAML nodes.
|
||||
func mapYAMLNodes(parent *yaml.Node) (map[string]*yaml.Node, error) {
|
||||
if parent.Kind != yaml.MappingNode {
|
||||
return nil, xerrors.Errorf("expected mapping node, got type %v", parent.Kind)
|
||||
}
|
||||
if len(parent.Content)%2 != 0 {
|
||||
return nil, xerrors.Errorf("expected an even number of k/v pairs, got %d", len(parent.Content))
|
||||
}
|
||||
var (
|
||||
key string
|
||||
m = make(map[string]*yaml.Node, len(parent.Content)/2)
|
||||
merr error
|
||||
)
|
||||
for i, child := range parent.Content {
|
||||
if i%2 == 0 {
|
||||
if child.Kind != yaml.ScalarNode {
|
||||
// We immediately because the rest of the code is bound to fail
|
||||
// if we don't know to expect a key or a value.
|
||||
return nil, xerrors.Errorf("expected scalar node for key, got type %v", child.Kind)
|
||||
}
|
||||
key = child.Value
|
||||
continue
|
||||
}
|
||||
|
||||
// We don't know if this is a grouped simple option or complex option,
|
||||
// so we store both "key" and "group.key". Since we're storing pointers,
|
||||
// the additional memory is of little concern.
|
||||
m[key] = child
|
||||
if child.Kind != yaml.MappingNode {
|
||||
continue
|
||||
}
|
||||
|
||||
sub, err := mapYAMLNodes(child)
|
||||
if err != nil {
|
||||
merr = errors.Join(merr, xerrors.Errorf("mapping node %q: %w", key, err))
|
||||
continue
|
||||
}
|
||||
for k, v := range sub {
|
||||
m[key+"."+k] = v
|
||||
}
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (o *Option) setFromYAMLNode(n *yaml.Node) error {
|
||||
o.ValueSource = ValueSourceYAML
|
||||
if um, ok := o.Value.(yaml.Unmarshaler); ok {
|
||||
return um.UnmarshalYAML(n)
|
||||
}
|
||||
|
||||
switch n.Kind {
|
||||
case yaml.ScalarNode:
|
||||
return o.Value.Set(n.Value)
|
||||
case yaml.SequenceNode:
|
||||
// We treat empty values as nil for consistency with other option
|
||||
// mechanisms.
|
||||
if len(n.Content) == 0 {
|
||||
o.Value = nil
|
||||
return nil
|
||||
}
|
||||
return n.Decode(o.Value)
|
||||
case yaml.MappingNode:
|
||||
return xerrors.Errorf("mapping nodes must implement yaml.Unmarshaler")
|
||||
default:
|
||||
return xerrors.Errorf("unexpected node kind %v", n.Kind)
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalYAML converts the given YAML node into the option set.
|
||||
// It is isomorphic with ToYAML.
|
||||
func (s *OptionSet) UnmarshalYAML(rootNode *yaml.Node) error {
|
||||
// The rootNode will be a DocumentNode if it's read from a file. We do
|
||||
// not support multiple documents in a single file.
|
||||
if rootNode.Kind == yaml.DocumentNode {
|
||||
if len(rootNode.Content) != 1 {
|
||||
return xerrors.Errorf("expected one node in document, got %d", len(rootNode.Content))
|
||||
}
|
||||
rootNode = rootNode.Content[0]
|
||||
}
|
||||
|
||||
yamlNodes, err := mapYAMLNodes(rootNode)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("mapping nodes: %w", err)
|
||||
}
|
||||
|
||||
matchedNodes := make(map[string]*yaml.Node, len(yamlNodes))
|
||||
|
||||
var merr error
|
||||
for i := range *s {
|
||||
opt := &(*s)[i]
|
||||
if opt.YAML == "" {
|
||||
continue
|
||||
}
|
||||
var group []string
|
||||
for _, g := range opt.Group.Ancestry() {
|
||||
if g.YAML == "" {
|
||||
return xerrors.Errorf(
|
||||
"group yaml name is empty for %q, groups: %+v",
|
||||
opt.Name,
|
||||
opt.Group,
|
||||
)
|
||||
}
|
||||
group = append(group, g.YAML)
|
||||
delete(yamlNodes, strings.Join(group, "."))
|
||||
}
|
||||
|
||||
key := strings.Join(append(group, opt.YAML), ".")
|
||||
node, ok := yamlNodes[key]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
matchedNodes[key] = node
|
||||
if opt.ValueSource != ValueSourceNone {
|
||||
continue
|
||||
}
|
||||
if err := opt.setFromYAMLNode(node); err != nil {
|
||||
merr = errors.Join(merr, xerrors.Errorf("setting %q: %w", opt.YAML, err))
|
||||
}
|
||||
}
|
||||
|
||||
// Remove all matched nodes and their descendants from yamlNodes so we
|
||||
// can accurately report unknown options.
|
||||
for k := range yamlNodes {
|
||||
var key string
|
||||
for _, part := range strings.Split(k, ".") {
|
||||
if key != "" {
|
||||
key += "."
|
||||
}
|
||||
key += part
|
||||
if _, ok := matchedNodes[key]; ok {
|
||||
delete(yamlNodes, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
for k := range yamlNodes {
|
||||
merr = errors.Join(merr, xerrors.Errorf("unknown option %q", k))
|
||||
}
|
||||
|
||||
return merr
|
||||
}
|
||||
|
||||
+150
-5
@@ -3,13 +3,15 @@ package clibase_test
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/exp/slices"
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"github.com/coder/coder/cli/clibase"
|
||||
)
|
||||
|
||||
func TestOption_ToYAML(t *testing.T) {
|
||||
func TestOptionSet_YAML(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("RequireKey", func(t *testing.T) {
|
||||
@@ -23,9 +25,9 @@ func TestOption_ToYAML(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
node, err := os.ToYAML()
|
||||
node, err := os.MarshalYAML()
|
||||
require.NoError(t, err)
|
||||
require.Len(t, node.Content, 0)
|
||||
require.Len(t, node.(*yaml.Node).Content, 0)
|
||||
})
|
||||
|
||||
t.Run("SimpleString", func(t *testing.T) {
|
||||
@@ -39,7 +41,7 @@ func TestOption_ToYAML(t *testing.T) {
|
||||
Value: &workspaceName,
|
||||
Default: "billie",
|
||||
Description: "The workspace's name.",
|
||||
Group: &clibase.Group{Name: "Names"},
|
||||
Group: &clibase.Group{YAML: "names"},
|
||||
YAML: "workspaceName",
|
||||
},
|
||||
}
|
||||
@@ -47,7 +49,7 @@ func TestOption_ToYAML(t *testing.T) {
|
||||
err := os.SetDefaults()
|
||||
require.NoError(t, err)
|
||||
|
||||
n, err := os.ToYAML()
|
||||
n, err := os.MarshalYAML()
|
||||
require.NoError(t, err)
|
||||
// Visually inspect for now.
|
||||
byt, err := yaml.Marshal(n)
|
||||
@@ -55,3 +57,146 @@ func TestOption_ToYAML(t *testing.T) {
|
||||
t.Logf("Raw YAML:\n%s", string(byt))
|
||||
})
|
||||
}
|
||||
|
||||
func TestOptionSet_YAMLUnknownOptions(t *testing.T) {
|
||||
t.Parallel()
|
||||
os := clibase.OptionSet{
|
||||
{
|
||||
Name: "Workspace Name",
|
||||
Default: "billie",
|
||||
Description: "The workspace's name.",
|
||||
YAML: "workspaceName",
|
||||
Value: new(clibase.String),
|
||||
},
|
||||
}
|
||||
|
||||
const yamlDoc = `something: else`
|
||||
err := yaml.Unmarshal([]byte(yamlDoc), &os)
|
||||
require.Error(t, err)
|
||||
require.Empty(t, os[0].Value.String())
|
||||
|
||||
os[0].YAML = "something"
|
||||
|
||||
err = yaml.Unmarshal([]byte(yamlDoc), &os)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, "else", os[0].Value.String())
|
||||
}
|
||||
|
||||
// TestOptionSet_YAMLIsomorphism tests that the YAML representations of an
|
||||
// OptionSet converts to the same OptionSet when read back in.
|
||||
func TestOptionSet_YAMLIsomorphism(t *testing.T) {
|
||||
t.Parallel()
|
||||
// This is used to form a generic.
|
||||
//nolint:unused
|
||||
type kid struct {
|
||||
Name string `yaml:"name"`
|
||||
Age int `yaml:"age"`
|
||||
}
|
||||
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
os clibase.OptionSet
|
||||
zeroValue func() pflag.Value
|
||||
}{
|
||||
{
|
||||
name: "SimpleString",
|
||||
os: clibase.OptionSet{
|
||||
{
|
||||
Name: "Workspace Name",
|
||||
Default: "billie",
|
||||
Description: "The workspace's name.",
|
||||
Group: &clibase.Group{YAML: "names"},
|
||||
YAML: "workspaceName",
|
||||
},
|
||||
},
|
||||
zeroValue: func() pflag.Value {
|
||||
return clibase.StringOf(new(string))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Array",
|
||||
os: clibase.OptionSet{
|
||||
{
|
||||
YAML: "names",
|
||||
Default: "jill,jack,joan",
|
||||
},
|
||||
},
|
||||
zeroValue: func() pflag.Value {
|
||||
return clibase.StringArrayOf(&[]string{})
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ComplexObject",
|
||||
os: clibase.OptionSet{
|
||||
{
|
||||
YAML: "kids",
|
||||
Default: `- name: jill
|
||||
age: 12
|
||||
- name: jack
|
||||
age: 13`,
|
||||
},
|
||||
},
|
||||
zeroValue: func() pflag.Value {
|
||||
return &clibase.Struct[[]kid]{}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "DeepGroup",
|
||||
os: clibase.OptionSet{
|
||||
{
|
||||
YAML: "names",
|
||||
Default: "jill,jack,joan",
|
||||
Group: &clibase.Group{YAML: "kids", Parent: &clibase.Group{YAML: "family"}},
|
||||
},
|
||||
},
|
||||
zeroValue: func() pflag.Value {
|
||||
return clibase.StringArrayOf(&[]string{})
|
||||
},
|
||||
},
|
||||
} {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Set initial values.
|
||||
for i := range tc.os {
|
||||
tc.os[i].Value = tc.zeroValue()
|
||||
}
|
||||
err := tc.os.SetDefaults()
|
||||
require.NoError(t, err)
|
||||
|
||||
y, err := tc.os.MarshalYAML()
|
||||
require.NoError(t, err)
|
||||
|
||||
toByt, err := yaml.Marshal(y)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Logf("Raw YAML:\n%s", string(toByt))
|
||||
|
||||
var y2 yaml.Node
|
||||
err = yaml.Unmarshal(toByt, &y2)
|
||||
require.NoError(t, err)
|
||||
|
||||
os2 := slices.Clone(tc.os)
|
||||
for i := range os2 {
|
||||
os2[i].Value = tc.zeroValue()
|
||||
os2[i].ValueSource = clibase.ValueSourceNone
|
||||
}
|
||||
|
||||
// os2 values should be zeroed whereas tc.os should be
|
||||
// set to defaults.
|
||||
// This check makes sure we aren't mixing pointers.
|
||||
require.NotEqual(t, tc.os, os2)
|
||||
err = os2.UnmarshalYAML(&y2)
|
||||
require.NoError(t, err)
|
||||
|
||||
want := tc.os
|
||||
for i := range want {
|
||||
want[i].ValueSource = clibase.ValueSourceYAML
|
||||
}
|
||||
|
||||
require.Equal(t, tc.os, os2)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,360 @@
|
||||
package clistat
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/spf13/afero"
|
||||
"golang.org/x/xerrors"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
|
||||
// Paths for CGroupV1.
|
||||
// Ref: https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt
|
||||
const (
|
||||
// CPU usage of all tasks in cgroup in nanoseconds.
|
||||
cgroupV1CPUAcctUsage = "/sys/fs/cgroup/cpu,cpuacct/cpuacct.usage"
|
||||
// CFS quota and period for cgroup in MICROseconds
|
||||
cgroupV1CFSQuotaUs = "/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us"
|
||||
// CFS period for cgroup in MICROseconds
|
||||
cgroupV1CFSPeriodUs = "/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us"
|
||||
// Maximum memory usable by cgroup in bytes
|
||||
cgroupV1MemoryMaxUsageBytes = "/sys/fs/cgroup/memory/memory.limit_in_bytes"
|
||||
// Current memory usage of cgroup in bytes
|
||||
cgroupV1MemoryUsageBytes = "/sys/fs/cgroup/memory/memory.usage_in_bytes"
|
||||
// Other memory stats - we are interested in total_inactive_file
|
||||
cgroupV1MemoryStat = "/sys/fs/cgroup/memory/memory.stat"
|
||||
)
|
||||
|
||||
// Paths for CGroupV2.
|
||||
// Ref: https://docs.kernel.org/admin-guide/cgroup-v2.html
|
||||
const (
|
||||
// Contains quota and period in microseconds separated by a space.
|
||||
cgroupV2CPUMax = "/sys/fs/cgroup/cpu.max"
|
||||
// Contains current CPU usage under usage_usec
|
||||
cgroupV2CPUStat = "/sys/fs/cgroup/cpu.stat"
|
||||
// Contains current cgroup memory usage in bytes.
|
||||
cgroupV2MemoryUsageBytes = "/sys/fs/cgroup/memory.current"
|
||||
// Contains max cgroup memory usage in bytes.
|
||||
cgroupV2MemoryMaxBytes = "/sys/fs/cgroup/memory.max"
|
||||
// Other memory stats - we are interested in total_inactive_file
|
||||
cgroupV2MemoryStat = "/sys/fs/cgroup/memory.stat"
|
||||
)
|
||||
|
||||
// ContainerCPU returns the CPU usage of the container cgroup.
|
||||
// This is calculated as difference of two samples of the
|
||||
// CPU usage of the container cgroup.
|
||||
// The total is read from the relevant path in /sys/fs/cgroup.
|
||||
// If there is no limit set, the total is assumed to be the
|
||||
// number of host cores multiplied by the CFS period.
|
||||
// If the system is not containerized, this always returns nil.
|
||||
func (s *Statter) ContainerCPU() (*Result, error) {
|
||||
// Firstly, check if we are containerized.
|
||||
if ok, err := IsContainerized(s.fs); err != nil || !ok {
|
||||
return nil, nil //nolint: nilnil
|
||||
}
|
||||
|
||||
total, err := s.cGroupCPUTotal()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get total cpu: %w", err)
|
||||
}
|
||||
used1, err := s.cGroupCPUUsed()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get cgroup CPU usage: %w", err)
|
||||
}
|
||||
|
||||
// The measurements in /sys/fs/cgroup are counters.
|
||||
// We need to wait for a bit to get a difference.
|
||||
// Note that someone could reset the counter in the meantime.
|
||||
// We can't do anything about that.
|
||||
s.wait(s.sampleInterval)
|
||||
|
||||
used2, err := s.cGroupCPUUsed()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get cgroup CPU usage: %w", err)
|
||||
}
|
||||
|
||||
if used2 < used1 {
|
||||
// Someone reset the counter. Best we can do is count from zero.
|
||||
used1 = 0
|
||||
}
|
||||
|
||||
r := &Result{
|
||||
Unit: "cores",
|
||||
Used: used2 - used1,
|
||||
Prefix: PrefixDefault,
|
||||
}
|
||||
|
||||
if total > 0 {
|
||||
r.Total = ptr.To(total)
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (s *Statter) cGroupCPUTotal() (used float64, err error) {
|
||||
if s.isCGroupV2() {
|
||||
return s.cGroupV2CPUTotal()
|
||||
}
|
||||
|
||||
// Fall back to CGroupv1
|
||||
return s.cGroupV1CPUTotal()
|
||||
}
|
||||
|
||||
func (s *Statter) cGroupCPUUsed() (used float64, err error) {
|
||||
if s.isCGroupV2() {
|
||||
return s.cGroupV2CPUUsed()
|
||||
}
|
||||
|
||||
return s.cGroupV1CPUUsed()
|
||||
}
|
||||
|
||||
func (s *Statter) isCGroupV2() bool {
|
||||
// Check for the presence of /sys/fs/cgroup/cpu.max
|
||||
_, err := s.fs.Stat(cgroupV2CPUMax)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func (s *Statter) cGroupV2CPUUsed() (used float64, err error) {
|
||||
usageUs, err := readInt64Prefix(s.fs, cgroupV2CPUStat, "usage_usec")
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("get cgroupv2 cpu used: %w", err)
|
||||
}
|
||||
periodUs, err := readInt64SepIdx(s.fs, cgroupV2CPUMax, " ", 1)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("get cpu period: %w", err)
|
||||
}
|
||||
|
||||
return float64(usageUs) / float64(periodUs), nil
|
||||
}
|
||||
|
||||
func (s *Statter) cGroupV2CPUTotal() (total float64, err error) {
|
||||
var quotaUs, periodUs int64
|
||||
periodUs, err = readInt64SepIdx(s.fs, cgroupV2CPUMax, " ", 1)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("get cpu period: %w", err)
|
||||
}
|
||||
|
||||
quotaUs, err = readInt64SepIdx(s.fs, cgroupV2CPUMax, " ", 0)
|
||||
if err != nil {
|
||||
if xerrors.Is(err, strconv.ErrSyntax) {
|
||||
// If the value is not a valid integer, assume it is the string
|
||||
// 'max' and that there is no limit set.
|
||||
return -1, nil
|
||||
}
|
||||
return 0, xerrors.Errorf("get cpu quota: %w", err)
|
||||
}
|
||||
|
||||
return float64(quotaUs) / float64(periodUs), nil
|
||||
}
|
||||
|
||||
func (s *Statter) cGroupV1CPUTotal() (float64, error) {
|
||||
periodUs, err := readInt64(s.fs, cgroupV1CFSPeriodUs)
|
||||
if err != nil {
|
||||
// Try alternate path under /sys/fs/cpu
|
||||
var merr error
|
||||
merr = multierror.Append(merr, xerrors.Errorf("get cpu period: %w", err))
|
||||
periodUs, err = readInt64(s.fs, strings.Replace(cgroupV1CFSPeriodUs, "cpu,cpuacct", "cpu", 1))
|
||||
if err != nil {
|
||||
merr = multierror.Append(merr, xerrors.Errorf("get cpu period: %w", err))
|
||||
return 0, merr
|
||||
}
|
||||
}
|
||||
|
||||
quotaUs, err := readInt64(s.fs, cgroupV1CFSQuotaUs)
|
||||
if err != nil {
|
||||
// Try alternate path under /sys/fs/cpu
|
||||
var merr error
|
||||
merr = multierror.Append(merr, xerrors.Errorf("get cpu quota: %w", err))
|
||||
quotaUs, err = readInt64(s.fs, strings.Replace(cgroupV1CFSQuotaUs, "cpu,cpuacct", "cpu", 1))
|
||||
if err != nil {
|
||||
merr = multierror.Append(merr, xerrors.Errorf("get cpu quota: %w", err))
|
||||
return 0, merr
|
||||
}
|
||||
}
|
||||
|
||||
if quotaUs < 0 {
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
return float64(quotaUs) / float64(periodUs), nil
|
||||
}
|
||||
|
||||
func (s *Statter) cGroupV1CPUUsed() (float64, error) {
|
||||
usageNs, err := readInt64(s.fs, cgroupV1CPUAcctUsage)
|
||||
if err != nil {
|
||||
// Try alternate path under /sys/fs/cgroup/cpuacct
|
||||
var merr error
|
||||
merr = multierror.Append(merr, xerrors.Errorf("read cpu used: %w", err))
|
||||
usageNs, err = readInt64(s.fs, strings.Replace(cgroupV1CPUAcctUsage, "cpu,cpuacct", "cpuacct", 1))
|
||||
if err != nil {
|
||||
merr = multierror.Append(merr, xerrors.Errorf("read cpu used: %w", err))
|
||||
return 0, merr
|
||||
}
|
||||
}
|
||||
|
||||
// usage is in ns, convert to us
|
||||
usageNs /= 1000
|
||||
periodUs, err := readInt64(s.fs, cgroupV1CFSPeriodUs)
|
||||
if err != nil {
|
||||
// Try alternate path under /sys/fs/cpu
|
||||
var merr error
|
||||
merr = multierror.Append(merr, xerrors.Errorf("get cpu period: %w", err))
|
||||
periodUs, err = readInt64(s.fs, strings.Replace(cgroupV1CFSPeriodUs, "cpu,cpuacct", "cpu", 1))
|
||||
if err != nil {
|
||||
merr = multierror.Append(merr, xerrors.Errorf("get cpu period: %w", err))
|
||||
return 0, merr
|
||||
}
|
||||
}
|
||||
|
||||
return float64(usageNs) / float64(periodUs), nil
|
||||
}
|
||||
|
||||
// ContainerMemory returns the memory usage of the container cgroup.
|
||||
// If the system is not containerized, this always returns nil.
|
||||
func (s *Statter) ContainerMemory(p Prefix) (*Result, error) {
|
||||
if ok, err := IsContainerized(s.fs); err != nil || !ok {
|
||||
return nil, nil //nolint:nilnil
|
||||
}
|
||||
|
||||
if s.isCGroupV2() {
|
||||
return s.cGroupV2Memory(p)
|
||||
}
|
||||
|
||||
// Fall back to CGroupv1
|
||||
return s.cGroupV1Memory(p)
|
||||
}
|
||||
|
||||
func (s *Statter) cGroupV2Memory(p Prefix) (*Result, error) {
|
||||
r := &Result{
|
||||
Unit: "B",
|
||||
Prefix: p,
|
||||
}
|
||||
maxUsageBytes, err := readInt64(s.fs, cgroupV2MemoryMaxBytes)
|
||||
if err != nil {
|
||||
if !xerrors.Is(err, strconv.ErrSyntax) {
|
||||
return nil, xerrors.Errorf("read memory total: %w", err)
|
||||
}
|
||||
// If the value is not a valid integer, assume it is the string
|
||||
// 'max' and that there is no limit set.
|
||||
} else {
|
||||
r.Total = ptr.To(float64(maxUsageBytes))
|
||||
}
|
||||
|
||||
currUsageBytes, err := readInt64(s.fs, cgroupV2MemoryUsageBytes)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("read memory usage: %w", err)
|
||||
}
|
||||
|
||||
inactiveFileBytes, err := readInt64Prefix(s.fs, cgroupV2MemoryStat, "inactive_file")
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("read memory stats: %w", err)
|
||||
}
|
||||
|
||||
r.Used = float64(currUsageBytes - inactiveFileBytes)
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (s *Statter) cGroupV1Memory(p Prefix) (*Result, error) {
|
||||
r := &Result{
|
||||
Unit: "B",
|
||||
Prefix: p,
|
||||
}
|
||||
maxUsageBytes, err := readInt64(s.fs, cgroupV1MemoryMaxUsageBytes)
|
||||
if err != nil {
|
||||
if !xerrors.Is(err, strconv.ErrSyntax) {
|
||||
return nil, xerrors.Errorf("read memory total: %w", err)
|
||||
}
|
||||
// I haven't found an instance where this isn't a valid integer.
|
||||
// Nonetheless, if it is not, assume there is no limit set.
|
||||
maxUsageBytes = -1
|
||||
}
|
||||
|
||||
// need a space after total_rss so we don't hit something else
|
||||
usageBytes, err := readInt64(s.fs, cgroupV1MemoryUsageBytes)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("read memory usage: %w", err)
|
||||
}
|
||||
|
||||
totalInactiveFileBytes, err := readInt64Prefix(s.fs, cgroupV1MemoryStat, "total_inactive_file")
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("read memory stats: %w", err)
|
||||
}
|
||||
|
||||
// If max usage bytes is -1, there is no memory limit set.
|
||||
if maxUsageBytes > 0 {
|
||||
r.Total = ptr.To(float64(maxUsageBytes))
|
||||
}
|
||||
|
||||
// Total memory used is usage - total_inactive_file
|
||||
r.Used = float64(usageBytes - totalInactiveFileBytes)
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// read an int64 value from path
|
||||
func readInt64(fs afero.Fs, path string) (int64, error) {
|
||||
data, err := afero.ReadFile(fs, path)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("read %s: %w", path, err)
|
||||
}
|
||||
|
||||
val, err := strconv.ParseInt(string(bytes.TrimSpace(data)), 10, 64)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("parse %s: %w", path, err)
|
||||
}
|
||||
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// read an int64 value from path at field idx separated by sep
|
||||
func readInt64SepIdx(fs afero.Fs, path, sep string, idx int) (int64, error) {
|
||||
data, err := afero.ReadFile(fs, path)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("read %s: %w", path, err)
|
||||
}
|
||||
|
||||
parts := strings.Split(string(data), sep)
|
||||
if len(parts) < idx {
|
||||
return 0, xerrors.Errorf("expected line %q to have at least %d parts", string(data), idx+1)
|
||||
}
|
||||
|
||||
val, err := strconv.ParseInt(strings.TrimSpace(parts[idx]), 10, 64)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("parse %s: %w", path, err)
|
||||
}
|
||||
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// read the first int64 value from path prefixed with prefix
|
||||
func readInt64Prefix(fs afero.Fs, path, prefix string) (int64, error) {
|
||||
data, err := afero.ReadFile(fs, path)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("read %s: %w", path, err)
|
||||
}
|
||||
|
||||
scn := bufio.NewScanner(bytes.NewReader(data))
|
||||
for scn.Scan() {
|
||||
line := strings.TrimSpace(scn.Text())
|
||||
if !strings.HasPrefix(line, prefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
parts := strings.Fields(line)
|
||||
if len(parts) != 2 {
|
||||
return 0, xerrors.Errorf("parse %s: expected two fields but got %s", path, line)
|
||||
}
|
||||
|
||||
val, err := strconv.ParseInt(strings.TrimSpace(parts[1]), 10, 64)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("parse %s: %w", path, err)
|
||||
}
|
||||
|
||||
return val, nil
|
||||
}
|
||||
|
||||
return 0, xerrors.Errorf("parse %s: did not find line with prefix %s", path, prefix)
|
||||
}
|
||||
@@ -0,0 +1,70 @@
|
||||
package clistat
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/afero"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
const (
|
||||
procMounts = "/proc/mounts"
|
||||
procOneCgroup = "/proc/1/cgroup"
|
||||
kubernetesDefaultServiceAccountToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" //nolint:gosec
|
||||
)
|
||||
|
||||
// IsContainerized returns whether the host is containerized.
|
||||
// This is adapted from https://github.com/elastic/go-sysinfo/tree/main/providers/linux/container.go#L31
|
||||
// with modifications to support Sysbox containers.
|
||||
// On non-Linux platforms, it always returns false.
|
||||
func IsContainerized(fs afero.Fs) (ok bool, err error) {
|
||||
cgData, err := afero.ReadFile(fs, procOneCgroup)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, xerrors.Errorf("read file %s: %w", procOneCgroup, err)
|
||||
}
|
||||
|
||||
scn := bufio.NewScanner(bytes.NewReader(cgData))
|
||||
for scn.Scan() {
|
||||
line := scn.Bytes()
|
||||
if bytes.Contains(line, []byte("docker")) ||
|
||||
bytes.Contains(line, []byte(".slice")) ||
|
||||
bytes.Contains(line, []byte("lxc")) ||
|
||||
bytes.Contains(line, []byte("kubepods")) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Sometimes the above method of sniffing /proc/1/cgroup isn't reliable.
|
||||
// If a Kubernetes service account token is present, that's
|
||||
// also a good indication that we are in a container.
|
||||
_, err = afero.ReadFile(fs, kubernetesDefaultServiceAccountToken)
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Last-ditch effort to detect Sysbox containers.
|
||||
// Check if we have anything mounted as type sysboxfs in /proc/mounts
|
||||
mountsData, err := afero.ReadFile(fs, procMounts)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, xerrors.Errorf("read file %s: %w", procMounts, err)
|
||||
}
|
||||
|
||||
scn = bufio.NewScanner(bytes.NewReader(mountsData))
|
||||
for scn.Scan() {
|
||||
line := scn.Bytes()
|
||||
if bytes.Contains(line, []byte("sysboxfs")) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If we get here, we are _probably_ not running in a container.
|
||||
return false, nil
|
||||
}
|
||||
@@ -0,0 +1,27 @@
|
||||
//go:build !windows
|
||||
|
||||
package clistat
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
|
||||
// Disk returns the disk usage of the given path.
|
||||
// If path is empty, it returns the usage of the root directory.
|
||||
func (*Statter) Disk(p Prefix, path string) (*Result, error) {
|
||||
if path == "" {
|
||||
path = "/"
|
||||
}
|
||||
var stat syscall.Statfs_t
|
||||
if err := syscall.Statfs(path, &stat); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var r Result
|
||||
r.Total = ptr.To(float64(stat.Blocks * uint64(stat.Bsize)))
|
||||
r.Used = float64(stat.Blocks-stat.Bfree) * float64(stat.Bsize)
|
||||
r.Unit = "B"
|
||||
r.Prefix = p
|
||||
return &r, nil
|
||||
}
|
||||
@@ -0,0 +1,36 @@
|
||||
package clistat
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/windows"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
|
||||
// Disk returns the disk usage of the given path.
|
||||
// If path is empty, it defaults to C:\
|
||||
func (*Statter) Disk(p Prefix, path string) (*Result, error) {
|
||||
if path == "" {
|
||||
path = `C:\`
|
||||
}
|
||||
|
||||
pathPtr, err := windows.UTF16PtrFromString(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var freeBytes, totalBytes, availBytes uint64
|
||||
if err := windows.GetDiskFreeSpaceEx(
|
||||
pathPtr,
|
||||
&freeBytes,
|
||||
&totalBytes,
|
||||
&availBytes,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var r Result
|
||||
r.Total = ptr.To(float64(totalBytes))
|
||||
r.Used = float64(totalBytes - freeBytes)
|
||||
r.Unit = "B"
|
||||
r.Prefix = p
|
||||
return &r, nil
|
||||
}
|
||||
@@ -0,0 +1,236 @@
|
||||
package clistat
|
||||
|
||||
import (
|
||||
"math"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/elastic/go-sysinfo"
|
||||
"github.com/spf13/afero"
|
||||
"golang.org/x/xerrors"
|
||||
"tailscale.com/types/ptr"
|
||||
|
||||
sysinfotypes "github.com/elastic/go-sysinfo/types"
|
||||
)
|
||||
|
||||
// Prefix is a scale multiplier for a result.
|
||||
// Used when creating a human-readable representation.
|
||||
type Prefix float64
|
||||
|
||||
const (
|
||||
PrefixDefault = 1.0
|
||||
PrefixKibi = 1024.0
|
||||
PrefixMebi = PrefixKibi * 1024.0
|
||||
PrefixGibi = PrefixMebi * 1024.0
|
||||
PrefixTebi = PrefixGibi * 1024.0
|
||||
)
|
||||
|
||||
var (
|
||||
PrefixHumanKibi = "Ki"
|
||||
PrefixHumanMebi = "Mi"
|
||||
PrefixHumanGibi = "Gi"
|
||||
PrefixHumanTebi = "Ti"
|
||||
)
|
||||
|
||||
func (s *Prefix) String() string {
|
||||
switch *s {
|
||||
case PrefixKibi:
|
||||
return "Ki"
|
||||
case PrefixMebi:
|
||||
return "Mi"
|
||||
case PrefixGibi:
|
||||
return "Gi"
|
||||
case PrefixTebi:
|
||||
return "Ti"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func ParsePrefix(s string) Prefix {
|
||||
switch s {
|
||||
case PrefixHumanKibi:
|
||||
return PrefixKibi
|
||||
case PrefixHumanMebi:
|
||||
return PrefixMebi
|
||||
case PrefixHumanGibi:
|
||||
return PrefixGibi
|
||||
case PrefixHumanTebi:
|
||||
return PrefixTebi
|
||||
default:
|
||||
return PrefixDefault
|
||||
}
|
||||
}
|
||||
|
||||
// Result is a generic result type for a statistic.
|
||||
// Total is the total amount of the resource available.
|
||||
// It is nil if the resource is not a finite quantity.
|
||||
// Unit is the unit of the resource.
|
||||
// Used is the amount of the resource used.
|
||||
type Result struct {
|
||||
Total *float64 `json:"total"`
|
||||
Unit string `json:"unit"`
|
||||
Used float64 `json:"used"`
|
||||
Prefix Prefix `json:"-"`
|
||||
}
|
||||
|
||||
// String returns a human-readable representation of the result.
|
||||
func (r *Result) String() string {
|
||||
if r == nil {
|
||||
return "-"
|
||||
}
|
||||
|
||||
scale := 1.0
|
||||
if r.Prefix != 0.0 {
|
||||
scale = float64(r.Prefix)
|
||||
}
|
||||
|
||||
var sb strings.Builder
|
||||
var usedScaled, totalScaled float64
|
||||
usedScaled = r.Used / scale
|
||||
_, _ = sb.WriteString(humanizeFloat(usedScaled))
|
||||
if r.Total != (*float64)(nil) {
|
||||
_, _ = sb.WriteString("/")
|
||||
totalScaled = *r.Total / scale
|
||||
_, _ = sb.WriteString(humanizeFloat(totalScaled))
|
||||
}
|
||||
|
||||
_, _ = sb.WriteString(" ")
|
||||
_, _ = sb.WriteString(r.Prefix.String())
|
||||
_, _ = sb.WriteString(r.Unit)
|
||||
|
||||
if r.Total != (*float64)(nil) && *r.Total > 0 {
|
||||
_, _ = sb.WriteString(" (")
|
||||
pct := r.Used / *r.Total * 100.0
|
||||
_, _ = sb.WriteString(strconv.FormatFloat(pct, 'f', 0, 64))
|
||||
_, _ = sb.WriteString("%)")
|
||||
}
|
||||
|
||||
return strings.TrimSpace(sb.String())
|
||||
}
|
||||
|
||||
func humanizeFloat(f float64) string {
|
||||
// humanize.FtoaWithDigits does not round correctly.
|
||||
prec := precision(f)
|
||||
rat := math.Pow(10, float64(prec))
|
||||
rounded := math.Round(f*rat) / rat
|
||||
return strconv.FormatFloat(rounded, 'f', -1, 64)
|
||||
}
|
||||
|
||||
// limit precision to 3 digits at most to preserve space
|
||||
func precision(f float64) int {
|
||||
fabs := math.Abs(f)
|
||||
if fabs == 0.0 {
|
||||
return 0
|
||||
}
|
||||
if fabs < 1.0 {
|
||||
return 3
|
||||
}
|
||||
if fabs < 10.0 {
|
||||
return 2
|
||||
}
|
||||
if fabs < 100.0 {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Statter is a system statistics collector.
|
||||
// It is a thin wrapper around the elastic/go-sysinfo library.
|
||||
type Statter struct {
|
||||
hi sysinfotypes.Host
|
||||
fs afero.Fs
|
||||
sampleInterval time.Duration
|
||||
nproc int
|
||||
wait func(time.Duration)
|
||||
}
|
||||
|
||||
type Option func(*Statter)
|
||||
|
||||
// WithSampleInterval sets the sample interval for the statter.
|
||||
func WithSampleInterval(d time.Duration) Option {
|
||||
return func(s *Statter) {
|
||||
s.sampleInterval = d
|
||||
}
|
||||
}
|
||||
|
||||
// WithFS sets the fs for the statter.
|
||||
func WithFS(fs afero.Fs) Option {
|
||||
return func(s *Statter) {
|
||||
s.fs = fs
|
||||
}
|
||||
}
|
||||
|
||||
func New(opts ...Option) (*Statter, error) {
|
||||
hi, err := sysinfo.Host()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get host info: %w", err)
|
||||
}
|
||||
s := &Statter{
|
||||
hi: hi,
|
||||
fs: afero.NewReadOnlyFs(afero.NewOsFs()),
|
||||
sampleInterval: 100 * time.Millisecond,
|
||||
nproc: runtime.NumCPU(),
|
||||
wait: func(d time.Duration) {
|
||||
<-time.After(d)
|
||||
},
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(s)
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// HostCPU returns the CPU usage of the host. This is calculated by
|
||||
// taking two samples of CPU usage and calculating the difference.
|
||||
// Total will always be equal to the number of cores.
|
||||
// Used will be an estimate of the number of cores used during the sample interval.
|
||||
// This is calculated by taking the difference between the total and idle HostCPU time
|
||||
// and scaling it by the number of cores.
|
||||
// Units are in "cores".
|
||||
func (s *Statter) HostCPU() (*Result, error) {
|
||||
r := &Result{
|
||||
Unit: "cores",
|
||||
Total: ptr.To(float64(s.nproc)),
|
||||
Prefix: PrefixDefault,
|
||||
}
|
||||
c1, err := s.hi.CPUTime()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get first cpu sample: %w", err)
|
||||
}
|
||||
s.wait(s.sampleInterval)
|
||||
c2, err := s.hi.CPUTime()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get second cpu sample: %w", err)
|
||||
}
|
||||
total := c2.Total() - c1.Total()
|
||||
if total == 0 {
|
||||
return r, nil // no change
|
||||
}
|
||||
idle := c2.Idle - c1.Idle
|
||||
used := total - idle
|
||||
scaleFactor := float64(s.nproc) / total.Seconds()
|
||||
r.Used = used.Seconds() * scaleFactor
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// HostMemory returns the memory usage of the host, in gigabytes.
|
||||
func (s *Statter) HostMemory(p Prefix) (*Result, error) {
|
||||
r := &Result{
|
||||
Unit: "B",
|
||||
Prefix: p,
|
||||
}
|
||||
hm, err := s.hi.Memory()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get memory info: %w", err)
|
||||
}
|
||||
r.Total = ptr.To(float64(hm.Total))
|
||||
// On Linux, hm.Used equates to MemTotal - MemFree in /proc/stat.
|
||||
// This includes buffers and cache.
|
||||
// So use MemAvailable instead, which only equates to physical memory.
|
||||
// On Windows, this is also calculated as Total - Available.
|
||||
r.Used = float64(hm.Total - hm.Available)
|
||||
return r, nil
|
||||
}
|
||||
@@ -0,0 +1,398 @@
|
||||
package clistat
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/afero"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
|
||||
func TestResultString(t *testing.T) {
|
||||
t.Parallel()
|
||||
for _, tt := range []struct {
|
||||
Expected string
|
||||
Result Result
|
||||
}{
|
||||
{
|
||||
Expected: "1.23/5.68 quatloos (22%)",
|
||||
Result: Result{Used: 1.234, Total: ptr.To(5.678), Unit: "quatloos"},
|
||||
},
|
||||
{
|
||||
Expected: "0/0 HP",
|
||||
Result: Result{Used: 0.0, Total: ptr.To(0.0), Unit: "HP"},
|
||||
},
|
||||
{
|
||||
Expected: "123 seconds",
|
||||
Result: Result{Used: 123.01, Total: nil, Unit: "seconds"},
|
||||
},
|
||||
{
|
||||
Expected: "12.3",
|
||||
Result: Result{Used: 12.34, Total: nil, Unit: ""},
|
||||
},
|
||||
{
|
||||
Expected: "1.5 KiB",
|
||||
Result: Result{Used: 1536, Total: nil, Unit: "B", Prefix: PrefixKibi},
|
||||
},
|
||||
{
|
||||
Expected: "1.23 things",
|
||||
Result: Result{Used: 1.234, Total: nil, Unit: "things"},
|
||||
},
|
||||
{
|
||||
Expected: "0/100 TiB (0%)",
|
||||
Result: Result{Used: 1, Total: ptr.To(100.0 * float64(PrefixTebi)), Unit: "B", Prefix: PrefixTebi},
|
||||
},
|
||||
{
|
||||
Expected: "0.5/8 cores (6%)",
|
||||
Result: Result{Used: 0.5, Total: ptr.To(8.0), Unit: "cores"},
|
||||
},
|
||||
} {
|
||||
assert.Equal(t, tt.Expected, tt.Result.String())
|
||||
}
|
||||
}
|
||||
|
||||
// TestStatter exercises the Statter against the real host (smoke
// assertions only) and against fake cgroup v1/v2 filesystems where the
// expected values are fully controlled.
func TestStatter(t *testing.T) {
	t.Parallel()

	// We cannot make many assertions about the data we get back
	// for host-specific measurements because these tests could
	// and should run successfully on any OS.
	// The best we can do is assert that it is non-zero.
	t.Run("HostOnly", func(t *testing.T) {
		t.Parallel()
		fs := initFS(t, fsHostOnly)
		s, err := New(WithFS(fs))
		require.NoError(t, err)
		t.Run("HostCPU", func(t *testing.T) {
			t.Parallel()
			cpu, err := s.HostCPU()
			require.NoError(t, err)
			// assert.NotZero(t, cpu.Used) // HostCPU can sometimes be zero.
			assert.NotZero(t, cpu.Total)
			assert.Equal(t, "cores", cpu.Unit)
		})

		t.Run("HostMemory", func(t *testing.T) {
			t.Parallel()
			mem, err := s.HostMemory(PrefixDefault)
			require.NoError(t, err)
			assert.NotZero(t, mem.Used)
			assert.NotZero(t, mem.Total)
			assert.Equal(t, "B", mem.Unit)
		})

		t.Run("HostDisk", func(t *testing.T) {
			t.Parallel()
			disk, err := s.Disk(PrefixDefault, "") // default to home dir
			require.NoError(t, err)
			assert.NotZero(t, disk.Used)
			assert.NotZero(t, disk.Total)
			assert.Equal(t, "B", disk.Unit)
		})
	})

	// Sometimes we do need to "fake" some stuff
	// that happens while we wait.
	withWait := func(waitF func(time.Duration)) Option {
		return func(s *Statter) {
			s.wait = waitF
		}
	}

	// Other times we just want things to run fast.
	withNoWait := func(s *Statter) {
		s.wait = func(time.Duration) {}
	}

	// We don't want to use the actual host CPU here.
	withNproc := func(n int) Option {
		return func(s *Statter) {
			s.nproc = n
		}
	}

	// For container-specific measurements, everything we need
	// can be read from the filesystem. We control the FS, so
	// we control the data.
	t.Run("CGroupV1", func(t *testing.T) {
		t.Parallel()
		t.Run("ContainerCPU/Limit", func(t *testing.T) {
			t.Parallel()
			fs := initFS(t, fsContainerCgroupV1)
			fakeWait := func(time.Duration) {
				// Fake 1 second in ns of usage
				// NOTE(review): 100000000 ns is 0.1s, not 1s — confirm
				// against ContainerCPU's expected sample scaling.
				mungeFS(t, fs, cgroupV1CPUAcctUsage, "100000000")
			}
			s, err := New(WithFS(fs), withWait(fakeWait))
			require.NoError(t, err)
			cpu, err := s.ContainerCPU()
			require.NoError(t, err)
			require.NotNil(t, cpu)
			assert.Equal(t, 1.0, cpu.Used)
			require.NotNil(t, cpu.Total)
			// 250000/100000 from the cfs quota/period fixtures.
			assert.Equal(t, 2.5, *cpu.Total)
			assert.Equal(t, "cores", cpu.Unit)
		})

		t.Run("ContainerCPU/NoLimit", func(t *testing.T) {
			t.Parallel()
			fs := initFS(t, fsContainerCgroupV1NoLimit)
			fakeWait := func(time.Duration) {
				// Fake 1 second in ns of usage
				mungeFS(t, fs, cgroupV1CPUAcctUsage, "100000000")
			}
			s, err := New(WithFS(fs), withNproc(2), withWait(fakeWait))
			require.NoError(t, err)
			cpu, err := s.ContainerCPU()
			require.NoError(t, err)
			require.NotNil(t, cpu)
			assert.Equal(t, 1.0, cpu.Used)
			// Quota of -1 means no limit: Total must be nil.
			require.Nil(t, cpu.Total)
			assert.Equal(t, "cores", cpu.Unit)
		})

		t.Run("ContainerCPU/AltPath", func(t *testing.T) {
			t.Parallel()
			fs := initFS(t, fsContainerCgroupV1AltPath)
			fakeWait := func(time.Duration) {
				// Fake 1 second in ns of usage
				mungeFS(t, fs, "/sys/fs/cgroup/cpuacct/cpuacct.usage", "100000000")
			}
			s, err := New(WithFS(fs), withNproc(2), withWait(fakeWait))
			require.NoError(t, err)
			cpu, err := s.ContainerCPU()
			require.NoError(t, err)
			require.NotNil(t, cpu)
			assert.Equal(t, 1.0, cpu.Used)
			require.NotNil(t, cpu.Total)
			assert.Equal(t, 2.5, *cpu.Total)
			assert.Equal(t, "cores", cpu.Unit)
		})

		t.Run("ContainerMemory", func(t *testing.T) {
			t.Parallel()
			fs := initFS(t, fsContainerCgroupV1)
			s, err := New(WithFS(fs), withNoWait)
			require.NoError(t, err)
			mem, err := s.ContainerMemory(PrefixDefault)
			require.NoError(t, err)
			require.NotNil(t, mem)
			// 536870912 usage minus 268435456 total_inactive_file.
			assert.Equal(t, 268435456.0, mem.Used)
			assert.NotNil(t, mem.Total)
			assert.Equal(t, 1073741824.0, *mem.Total)
			assert.Equal(t, "B", mem.Unit)
		})

		t.Run("ContainerMemory/NoLimit", func(t *testing.T) {
			t.Parallel()
			fs := initFS(t, fsContainerCgroupV1NoLimit)
			s, err := New(WithFS(fs), withNoWait)
			require.NoError(t, err)
			mem, err := s.ContainerMemory(PrefixDefault)
			require.NoError(t, err)
			require.NotNil(t, mem)
			assert.Equal(t, 268435456.0, mem.Used)
			// A limit of "max" means unlimited: Total must be nil.
			assert.Nil(t, mem.Total)
			assert.Equal(t, "B", mem.Unit)
		})
	})

	t.Run("CGroupV2", func(t *testing.T) {
		t.Parallel()

		t.Run("ContainerCPU/Limit", func(t *testing.T) {
			t.Parallel()
			fs := initFS(t, fsContainerCgroupV2)
			fakeWait := func(time.Duration) {
				mungeFS(t, fs, cgroupV2CPUStat, "usage_usec 100000")
			}
			s, err := New(WithFS(fs), withWait(fakeWait))
			require.NoError(t, err)
			cpu, err := s.ContainerCPU()
			require.NoError(t, err)
			require.NotNil(t, cpu)
			assert.Equal(t, 1.0, cpu.Used)
			require.NotNil(t, cpu.Total)
			assert.Equal(t, 2.5, *cpu.Total)
			assert.Equal(t, "cores", cpu.Unit)
		})

		t.Run("ContainerCPU/NoLimit", func(t *testing.T) {
			t.Parallel()
			fs := initFS(t, fsContainerCgroupV2NoLimit)
			fakeWait := func(time.Duration) {
				mungeFS(t, fs, cgroupV2CPUStat, "usage_usec 100000")
			}
			s, err := New(WithFS(fs), withNproc(2), withWait(fakeWait))
			require.NoError(t, err)
			cpu, err := s.ContainerCPU()
			require.NoError(t, err)
			require.NotNil(t, cpu)
			assert.Equal(t, 1.0, cpu.Used)
			// cpu.max of "max 100000" means no limit: Total must be nil.
			require.Nil(t, cpu.Total)
			assert.Equal(t, "cores", cpu.Unit)
		})

		t.Run("ContainerMemory/Limit", func(t *testing.T) {
			t.Parallel()
			fs := initFS(t, fsContainerCgroupV2)
			s, err := New(WithFS(fs), withNoWait)
			require.NoError(t, err)
			mem, err := s.ContainerMemory(PrefixDefault)
			require.NoError(t, err)
			require.NotNil(t, mem)
			assert.Equal(t, 268435456.0, mem.Used)
			assert.NotNil(t, mem.Total)
			assert.Equal(t, 1073741824.0, *mem.Total)
			assert.Equal(t, "B", mem.Unit)
		})

		t.Run("ContainerMemory/NoLimit", func(t *testing.T) {
			t.Parallel()
			fs := initFS(t, fsContainerCgroupV2NoLimit)
			s, err := New(WithFS(fs), withNoWait)
			require.NoError(t, err)
			mem, err := s.ContainerMemory(PrefixDefault)
			require.NoError(t, err)
			require.NotNil(t, mem)
			assert.Equal(t, 268435456.0, mem.Used)
			assert.Nil(t, mem.Total)
			assert.Equal(t, "B", mem.Unit)
		})
	})
}
|
||||
|
||||
func TestIsContainerized(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, tt := range []struct {
|
||||
Name string
|
||||
FS map[string]string
|
||||
Expected bool
|
||||
Error string
|
||||
}{
|
||||
{
|
||||
Name: "Empty",
|
||||
FS: map[string]string{},
|
||||
Expected: false,
|
||||
Error: "",
|
||||
},
|
||||
{
|
||||
Name: "BareMetal",
|
||||
FS: fsHostOnly,
|
||||
Expected: false,
|
||||
Error: "",
|
||||
},
|
||||
{
|
||||
Name: "Docker",
|
||||
FS: fsContainerCgroupV1,
|
||||
Expected: true,
|
||||
Error: "",
|
||||
},
|
||||
{
|
||||
Name: "Sysbox",
|
||||
FS: fsContainerSysbox,
|
||||
Expected: true,
|
||||
Error: "",
|
||||
},
|
||||
} {
|
||||
tt := tt
|
||||
t.Run(tt.Name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
fs := initFS(t, tt.FS)
|
||||
actual, err := IsContainerized(fs)
|
||||
if tt.Error == "" {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.Expected, actual)
|
||||
} else {
|
||||
assert.ErrorContains(t, err, tt.Error)
|
||||
assert.False(t, actual)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// helper function for initializing a fs
|
||||
func initFS(t testing.TB, m map[string]string) afero.Fs {
|
||||
t.Helper()
|
||||
fs := afero.NewMemMapFs()
|
||||
for k, v := range m {
|
||||
mungeFS(t, fs, k, v)
|
||||
}
|
||||
return fs
|
||||
}
|
||||
|
||||
// helper function for writing v to fs under path k
|
||||
func mungeFS(t testing.TB, fs afero.Fs, k, v string) {
|
||||
t.Helper()
|
||||
require.NoError(t, afero.WriteFile(fs, k, []byte(v+"\n"), 0o600))
|
||||
}
|
||||
|
||||
var (
	// fsHostOnly mimics a bare-metal host: root cgroup and a plain
	// ext4 root mount.
	fsHostOnly = map[string]string{
		procOneCgroup: "0::/",
		procMounts:    "/dev/sda1 / ext4 rw,relatime 0 0",
	}
	// fsContainerSysbox mimics a sysbox container; detection keys off
	// the sysboxfs mount on /proc/sys.
	fsContainerSysbox = map[string]string{
		procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
		procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
sysboxfs /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
		cgroupV2CPUMax:  "250000 100000",
		cgroupV2CPUStat: "usage_usec 0",
	}
	// fsContainerCgroupV2 is a Docker container with cgroup v2 files,
	// a 2.5-core CPU limit (250000/100000) and a 1 GiB memory limit.
	fsContainerCgroupV2 = map[string]string{
		procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
		procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
		cgroupV2CPUMax:  "250000 100000",
		cgroupV2CPUStat: "usage_usec 0",
		cgroupV2MemoryMaxBytes:   "1073741824",
		cgroupV2MemoryUsageBytes: "536870912",
		cgroupV2MemoryStat:       "inactive_file 268435456",
	}
	// fsContainerCgroupV2NoLimit is the same container with no CPU
	// limit ("max") and no memory limit ("max").
	fsContainerCgroupV2NoLimit = map[string]string{
		procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
		procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
		cgroupV2CPUMax:  "max 100000",
		cgroupV2CPUStat: "usage_usec 0",
		cgroupV2MemoryMaxBytes:   "max",
		cgroupV2MemoryUsageBytes: "536870912",
		cgroupV2MemoryStat:       "inactive_file 268435456",
	}
	// fsContainerCgroupV1 is a Docker container with cgroup v1 files,
	// a 2.5-core CFS quota and a 1 GiB memory limit.
	fsContainerCgroupV1 = map[string]string{
		procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
		procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
		cgroupV1CPUAcctUsage: "0",
		cgroupV1CFSQuotaUs:   "250000",
		cgroupV1CFSPeriodUs:  "100000",
		cgroupV1MemoryMaxUsageBytes: "1073741824",
		cgroupV1MemoryUsageBytes:    "536870912",
		cgroupV1MemoryStat:          "total_inactive_file 268435456",
	}
	// fsContainerCgroupV1NoLimit is the same container with no CPU
	// limit (quota -1) and no memory limit.
	fsContainerCgroupV1NoLimit = map[string]string{
		procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
		procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
		cgroupV1CPUAcctUsage: "0",
		cgroupV1CFSQuotaUs:   "-1",
		cgroupV1CFSPeriodUs:  "100000",
		cgroupV1MemoryMaxUsageBytes: "max", // I have never seen this in the wild
		cgroupV1MemoryUsageBytes:    "536870912",
		cgroupV1MemoryStat:          "total_inactive_file 268435456",
	}
	// fsContainerCgroupV1AltPath is a cgroup v1 container whose CPU
	// files live under the alternate /sys/fs/cgroup/{cpuacct,cpu}
	// hierarchy rather than the combined cpu,cpuacct one.
	fsContainerCgroupV1AltPath = map[string]string{
		procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
		procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
		"/sys/fs/cgroup/cpuacct/cpuacct.usage": "0",
		"/sys/fs/cgroup/cpu/cpu.cfs_quota_us":  "250000",
		"/sys/fs/cgroup/cpu/cpu.cfs_period_us": "100000",
		cgroupV1MemoryMaxUsageBytes: "1073741824",
		cgroupV1MemoryUsageBytes:    "536870912",
		cgroupV1MemoryStat:          "total_inactive_file 268435456",
	}
)
|
||||
+44
-26
@@ -6,7 +6,6 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -18,6 +17,8 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"cdr.dev/slog/sloggers/slogtest"
|
||||
"github.com/coder/coder/cli"
|
||||
"github.com/coder/coder/cli/clibase"
|
||||
"github.com/coder/coder/cli/config"
|
||||
@@ -39,7 +40,7 @@ func New(t *testing.T, args ...string) (*clibase.Invocation, config.Root) {
|
||||
|
||||
type logWriter struct {
|
||||
prefix string
|
||||
t *testing.T
|
||||
log slog.Logger
|
||||
}
|
||||
|
||||
func (l *logWriter) Write(p []byte) (n int, err error) {
|
||||
@@ -47,8 +48,9 @@ func (l *logWriter) Write(p []byte) (n int, err error) {
|
||||
if trimmed == "" {
|
||||
return len(p), nil
|
||||
}
|
||||
l.t.Log(
|
||||
l.prefix + ": " + trimmed,
|
||||
l.log.Info(
|
||||
context.Background(),
|
||||
l.prefix+": "+trimmed,
|
||||
)
|
||||
return len(p), nil
|
||||
}
|
||||
@@ -57,12 +59,13 @@ func NewWithCommand(
|
||||
t *testing.T, cmd *clibase.Cmd, args ...string,
|
||||
) (*clibase.Invocation, config.Root) {
|
||||
configDir := config.Root(t.TempDir())
|
||||
logger := slogtest.Make(t, nil)
|
||||
i := &clibase.Invocation{
|
||||
Command: cmd,
|
||||
Args: append([]string{"--global-config", string(configDir)}, args...),
|
||||
Stdin: io.LimitReader(nil, 0),
|
||||
Stdout: (&logWriter{prefix: "stdout", t: t}),
|
||||
Stderr: (&logWriter{prefix: "stderr", t: t}),
|
||||
Stdout: (&logWriter{prefix: "stdout", log: logger}),
|
||||
Stderr: (&logWriter{prefix: "stderr", log: logger}),
|
||||
}
|
||||
t.Logf("invoking command: %s %s", cmd.Name(), strings.Join(i.Args, " "))
|
||||
|
||||
@@ -82,7 +85,10 @@ func SetupConfig(t *testing.T, client *codersdk.Client, root config.Root) {
|
||||
// new temporary testing directory.
|
||||
func CreateTemplateVersionSource(t *testing.T, responses *echo.Responses) string {
|
||||
directory := t.TempDir()
|
||||
f, err := ioutil.TempFile(directory, "*.tf")
|
||||
f, err := os.CreateTemp(directory, "*.tf")
|
||||
require.NoError(t, err)
|
||||
_ = f.Close()
|
||||
f, err = os.Create(filepath.Join(directory, ".terraform.lock.hcl"))
|
||||
require.NoError(t, err)
|
||||
_ = f.Close()
|
||||
data, err := echo.Tar(responses)
|
||||
@@ -127,15 +133,23 @@ func extractTar(t *testing.T, data []byte, directory string) {
|
||||
}
|
||||
}
|
||||
|
||||
// Start runs the command in a goroutine and cleans it up when
|
||||
// the test completed.
|
||||
// Start runs the command in a goroutine and cleans it up when the test
|
||||
// completed.
|
||||
func Start(t *testing.T, inv *clibase.Invocation) {
|
||||
t.Helper()
|
||||
|
||||
closeCh := make(chan struct{})
|
||||
// StartWithWaiter adds its own `t.Cleanup`, so we need to be sure it's added
|
||||
// before ours.
|
||||
waiter := StartWithWaiter(t, inv)
|
||||
t.Cleanup(func() {
|
||||
waiter.Cancel()
|
||||
<-closeCh
|
||||
})
|
||||
|
||||
go func() {
|
||||
defer close(closeCh)
|
||||
err := StartWithWaiter(t, inv).Wait()
|
||||
err := waiter.Wait()
|
||||
switch {
|
||||
case errors.Is(err, context.Canceled):
|
||||
return
|
||||
@@ -143,10 +157,6 @@ func Start(t *testing.T, inv *clibase.Invocation) {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}()
|
||||
|
||||
t.Cleanup(func() {
|
||||
<-closeCh
|
||||
})
|
||||
}
|
||||
|
||||
// Run runs the command and asserts that there is no error.
|
||||
@@ -160,17 +170,22 @@ func Run(t *testing.T, inv *clibase.Invocation) {
|
||||
type ErrorWaiter struct {
|
||||
waitOnce sync.Once
|
||||
cachedError error
|
||||
cancelFunc context.CancelFunc
|
||||
|
||||
c <-chan error
|
||||
t *testing.T
|
||||
}
|
||||
|
||||
func (w *ErrorWaiter) Cancel() {
|
||||
w.cancelFunc()
|
||||
}
|
||||
|
||||
func (w *ErrorWaiter) Wait() error {
|
||||
w.waitOnce.Do(func() {
|
||||
var ok bool
|
||||
w.cachedError, ok = <-w.c
|
||||
if !ok {
|
||||
panic("unexpoected channel close")
|
||||
panic("unexpected channel close")
|
||||
}
|
||||
})
|
||||
return w.cachedError
|
||||
@@ -196,18 +211,18 @@ func (w *ErrorWaiter) RequireAs(want interface{}) {
|
||||
require.ErrorAs(w.t, w.Wait(), want)
|
||||
}
|
||||
|
||||
// StartWithWaiter runs the command in a goroutine but returns the error
|
||||
// instead of asserting it. This is useful for testing error cases.
|
||||
// StartWithWaiter runs the command in a goroutine but returns the error instead
|
||||
// of asserting it. This is useful for testing error cases.
|
||||
func StartWithWaiter(t *testing.T, inv *clibase.Invocation) *ErrorWaiter {
|
||||
t.Helper()
|
||||
|
||||
errCh := make(chan error, 1)
|
||||
|
||||
var cleaningUp atomic.Bool
|
||||
|
||||
var (
|
||||
ctx = inv.Context()
|
||||
cancel func()
|
||||
|
||||
cleaningUp atomic.Bool
|
||||
errCh = make(chan error, 1)
|
||||
doneCh = make(chan struct{})
|
||||
)
|
||||
if _, ok := ctx.Deadline(); !ok {
|
||||
ctx, cancel = context.WithDeadline(ctx, time.Now().Add(testutil.WaitMedium))
|
||||
@@ -218,14 +233,17 @@ func StartWithWaiter(t *testing.T, inv *clibase.Invocation) *ErrorWaiter {
|
||||
inv = inv.WithContext(ctx)
|
||||
|
||||
go func() {
|
||||
defer close(doneCh)
|
||||
defer close(errCh)
|
||||
err := inv.Run()
|
||||
if cleaningUp.Load() && errors.Is(err, context.DeadlineExceeded) {
|
||||
// If we're cleaning up, this error is likely related to the
|
||||
// CLI teardown process. E.g., the server could be slow to shut
|
||||
// down Postgres.
|
||||
// If we're cleaning up, this error is likely related to the CLI
|
||||
// teardown process. E.g., the server could be slow to shut down
|
||||
// Postgres.
|
||||
t.Logf("command %q timed out during test cleanup", inv.Command.FullName())
|
||||
}
|
||||
// Whether or not this fails the test is left to the caller.
|
||||
t.Logf("command %q exited with error: %v", inv.Command.FullName(), err)
|
||||
errCh <- err
|
||||
}()
|
||||
|
||||
@@ -233,7 +251,7 @@ func StartWithWaiter(t *testing.T, inv *clibase.Invocation) *ErrorWaiter {
|
||||
t.Cleanup(func() {
|
||||
cancel()
|
||||
cleaningUp.Store(true)
|
||||
<-errCh
|
||||
<-doneCh
|
||||
})
|
||||
return &ErrorWaiter{c: errCh, t: t}
|
||||
return &ErrorWaiter{c: errCh, t: t, cancelFunc: cancel}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,222 @@
|
||||
package clitest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/charmbracelet/lipgloss"
|
||||
"github.com/muesli/termenv"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/cli/clibase"
|
||||
"github.com/coder/coder/cli/config"
|
||||
"github.com/coder/coder/coderd/coderdtest"
|
||||
"github.com/coder/coder/coderd/database/dbtestutil"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/testutil"
|
||||
)
|
||||
|
||||
// UpdateGoldenFiles indicates golden files should be updated.
// To update the golden files:
// make update-golden-files
var UpdateGoldenFiles = flag.Bool("update", false, "update .golden files")

// timestampRegex matches RFC3339-style timestamps (case-insensitive,
// optional fractional seconds) so they can be replaced with a stable
// placeholder in golden-file comparisons.
var timestampRegex = regexp.MustCompile(`(?i)\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(.\d+)?Z`)
|
||||
|
||||
// CommandHelpCase names a single CLI invocation whose --help output is
// checked against a golden file.
type CommandHelpCase struct {
	Name string
	Cmd  []string
}

// DefaultCases returns the help invocations that every CLI variant
// should cover: the root command and the server subcommand.
func DefaultCases() []CommandHelpCase {
	return []CommandHelpCase{
		{Name: "coder --help", Cmd: []string{"--help"}},
		{Name: "coder server --help", Cmd: []string{"server", "--help"}},
	}
}
|
||||
|
||||
// TestCommandHelp will test the help output of the given commands
// using golden files. Any visible command path not already listed in
// cases is appended automatically, so new commands cannot silently
// skip golden coverage.
//
//nolint:tparallel,paralleltest
func TestCommandHelp(t *testing.T, getRoot func(t *testing.T) *clibase.Cmd, cases []CommandHelpCase) {
	ogColorProfile := lipgloss.ColorProfile()
	// ANSI256 escape codes are far easier for humans to parse in a diff,
	// but TrueColor is probably more popular with modern terminals.
	lipgloss.SetColorProfile(termenv.ANSI)
	t.Cleanup(func() {
		lipgloss.SetColorProfile(ogColorProfile)
	})
	rootClient, replacements := prepareTestData(t)

	root := getRoot(t)

	// Add a case for every visible command path that the caller did not
	// already provide explicitly.
ExtractCommandPathsLoop:
	for _, cp := range extractVisibleCommandPaths(nil, root.Children) {
		name := fmt.Sprintf("coder %s --help", strings.Join(cp, " "))
		cmd := append(cp, "--help")
		for _, tt := range cases {
			if tt.Name == name {
				continue ExtractCommandPathsLoop
			}
		}
		cases = append(cases, CommandHelpCase{Name: name, Cmd: cmd})
	}

	for _, tt := range cases {
		tt := tt
		t.Run(tt.Name, func(t *testing.T) {
			t.Parallel()
			ctx := testutil.Context(t, testutil.WaitLong)

			var outBuf bytes.Buffer

			// Each case needs its own command tree to run concurrently.
			caseCmd := getRoot(t)

			inv, cfg := NewWithCommand(t, caseCmd, tt.Cmd...)
			inv.Stderr = &outBuf
			inv.Stdout = &outBuf
			inv.Environ.Set("CODER_URL", rootClient.URL.String())
			inv.Environ.Set("CODER_SESSION_TOKEN", rootClient.SessionToken())
			inv.Environ.Set("CODER_CACHE_DIRECTORY", "~/.cache")

			SetupConfig(t, rootClient, cfg)

			StartWithWaiter(t, inv.WithContext(ctx)).RequireSuccess()

			actual := outBuf.Bytes()
			if len(actual) == 0 {
				t.Fatal("no output")
			}

			// Substitute unstable generated IDs with placeholders.
			for k, v := range replacements {
				actual = bytes.ReplaceAll(actual, []byte(k), []byte(v))
			}

			actual = NormalizeGoldenFile(t, actual)
			goldenPath := filepath.Join("testdata", strings.Replace(tt.Name, " ", "_", -1)+".golden")
			if *UpdateGoldenFiles {
				t.Logf("update golden file for: %q: %s", tt.Name, goldenPath)
				err := os.WriteFile(goldenPath, actual, 0o600)
				require.NoError(t, err, "update golden file")
			}

			expected, err := os.ReadFile(goldenPath)
			require.NoError(t, err, "read golden file, run \"make update-golden-files\" and commit the changes")

			expected = NormalizeGoldenFile(t, expected)
			require.Equal(
				t, string(expected), string(actual),
				"golden file mismatch: %s, run \"make update-golden-files\", verify and commit the changes",
				goldenPath,
			)
		})
	}
}
|
||||
|
||||
// NormalizeGoldenFile replaces any strings that are system or timing dependent
|
||||
// with a placeholder so that the golden files can be compared with a simple
|
||||
// equality check.
|
||||
func NormalizeGoldenFile(t *testing.T, byt []byte) []byte {
|
||||
// Replace any timestamps with a placeholder.
|
||||
byt = timestampRegex.ReplaceAll(byt, []byte("[timestamp]"))
|
||||
|
||||
homeDir, err := os.UserHomeDir()
|
||||
require.NoError(t, err)
|
||||
|
||||
configDir := config.DefaultDir()
|
||||
byt = bytes.ReplaceAll(byt, []byte(configDir), []byte("~/.config/coderv2"))
|
||||
|
||||
byt = bytes.ReplaceAll(byt, []byte(codersdk.DefaultCacheDir()), []byte("[cache dir]"))
|
||||
|
||||
// The home directory changes depending on the test environment.
|
||||
byt = bytes.ReplaceAll(byt, []byte(homeDir), []byte("~"))
|
||||
for _, r := range []struct {
|
||||
old string
|
||||
new string
|
||||
}{
|
||||
{"\r\n", "\n"},
|
||||
{`~\.cache\coder`, "~/.cache/coder"},
|
||||
{`C:\Users\RUNNER~1\AppData\Local\Temp`, "/tmp"},
|
||||
{os.TempDir(), "/tmp"},
|
||||
} {
|
||||
byt = bytes.ReplaceAll(byt, []byte(r.old), []byte(r.new))
|
||||
}
|
||||
return byt
|
||||
}
|
||||
|
||||
func extractVisibleCommandPaths(cmdPath []string, cmds []*clibase.Cmd) [][]string {
|
||||
var cmdPaths [][]string
|
||||
for _, c := range cmds {
|
||||
if c.Hidden {
|
||||
continue
|
||||
}
|
||||
cmdPath := append(cmdPath, c.Name())
|
||||
cmdPaths = append(cmdPaths, cmdPath)
|
||||
cmdPaths = append(cmdPaths, extractVisibleCommandPaths(cmdPath, c.Children)...)
|
||||
}
|
||||
return cmdPaths
|
||||
}
|
||||
|
||||
// prepareTestData spins up a coderd with a seeded database (two users,
// a template version, a template, and a built workspace) and returns a
// client for it plus a map from the generated identifiers to stable
// placeholder strings for use in golden files.
func prepareTestData(t *testing.T) (*codersdk.Client, map[string]string) {
	t.Helper()

	ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
	defer cancel()

	db, pubsub := dbtestutil.NewDB(t)
	rootClient := coderdtest.New(t, &coderdtest.Options{
		Database:                 db,
		Pubsub:                   pubsub,
		IncludeProvisionerDaemon: true,
	})
	firstUser := coderdtest.CreateFirstUser(t, rootClient)
	secondUser, err := rootClient.CreateUser(ctx, codersdk.CreateUserRequest{
		Email:          "testuser2@coder.com",
		Username:       "testuser2",
		Password:       coderdtest.FirstUserParams.Password,
		OrganizationID: firstUser.OrganizationID,
	})
	require.NoError(t, err)
	version := coderdtest.CreateTemplateVersion(t, rootClient, firstUser.OrganizationID, nil)
	version = coderdtest.AwaitTemplateVersionJob(t, rootClient, version.ID)
	template := coderdtest.CreateTemplate(t, rootClient, firstUser.OrganizationID, version.ID, func(req *codersdk.CreateTemplateRequest) {
		req.Name = "test-template"
	})
	workspace := coderdtest.CreateWorkspace(t, rootClient, firstUser.OrganizationID, template.ID, func(req *codersdk.CreateWorkspaceRequest) {
		req.Name = "test-workspace"
	})
	workspaceBuild := coderdtest.AwaitWorkspaceBuildJob(t, rootClient, workspace.LatestBuild.ID)

	// Map every run-specific UUID/name to a deterministic placeholder.
	replacements := map[string]string{
		firstUser.UserID.String():            "[first user ID]",
		secondUser.ID.String():               "[second user ID]",
		firstUser.OrganizationID.String():    "[first org ID]",
		version.ID.String():                  "[version ID]",
		version.Name:                         "[version name]",
		version.Job.ID.String():              "[version job ID]",
		version.Job.FileID.String():          "[version file ID]",
		version.Job.WorkerID.String():        "[version worker ID]",
		template.ID.String():                 "[template ID]",
		workspace.ID.String():                "[workspace ID]",
		workspaceBuild.ID.String():           "[workspace build ID]",
		workspaceBuild.Job.ID.String():       "[workspace build job ID]",
		workspaceBuild.Job.FileID.String():   "[workspace build file ID]",
		workspaceBuild.Job.WorkerID.String(): "[workspace build worker ID]",
	}

	return rootClient, replacements
}
|
||||
@@ -0,0 +1,24 @@
|
||||
package clitest
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/coder/coder/cli/clibase"
|
||||
)
|
||||
|
||||
// HandlersOK asserts that all commands have a handler.
|
||||
// Without a handler, the command has no default behavior. Even for
|
||||
// non-root commands (like 'groups' or 'users'), a handler is required.
|
||||
// These handlers are likely just the 'help' handler, but this must be
|
||||
// explicitly set.
|
||||
func HandlersOK(t *testing.T, cmd *clibase.Cmd) {
|
||||
cmd.Walk(func(cmd *clibase.Cmd) {
|
||||
if cmd.Handler == nil {
|
||||
// If you see this error, make the Handler a helper invoker.
|
||||
// Handler: func(inv *clibase.Invocation) error {
|
||||
// return inv.Command.HelpHandler(inv)
|
||||
// },
|
||||
t.Errorf("command %q has no handler, change to a helper invoker using: 'inv.Command.HelpHandler(inv)'", cmd.Name())
|
||||
}
|
||||
})
|
||||
}
|
||||
+206
-220
@@ -2,263 +2,249 @@ package cliui
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/briandowns/spinner"
|
||||
"github.com/muesli/reflow/indent"
|
||||
"github.com/muesli/reflow/wordwrap"
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/codersdk"
|
||||
)
|
||||
|
||||
var (
|
||||
AgentStartError = xerrors.New("agent startup exited with non-zero exit status")
|
||||
AgentShuttingDown = xerrors.New("agent is shutting down")
|
||||
)
|
||||
var errAgentShuttingDown = xerrors.New("agent is shutting down")
|
||||
|
||||
type AgentOptions struct {
|
||||
WorkspaceName string
|
||||
Fetch func(context.Context) (codersdk.WorkspaceAgent, error)
|
||||
FetchInterval time.Duration
|
||||
WarnInterval time.Duration
|
||||
NoWait bool // If true, don't wait for the agent to be ready.
|
||||
Fetch func(ctx context.Context, agentID uuid.UUID) (codersdk.WorkspaceAgent, error)
|
||||
FetchLogs func(ctx context.Context, agentID uuid.UUID, after int64, follow bool) (<-chan []codersdk.WorkspaceAgentLog, io.Closer, error)
|
||||
Wait bool // If true, wait for the agent to be ready (startup script).
|
||||
}
|
||||
|
||||
// Agent displays a spinning indicator that waits for a workspace agent to connect.
|
||||
func Agent(ctx context.Context, writer io.Writer, opts AgentOptions) error {
|
||||
func Agent(ctx context.Context, writer io.Writer, agentID uuid.UUID, opts AgentOptions) error {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
if opts.FetchInterval == 0 {
|
||||
opts.FetchInterval = 500 * time.Millisecond
|
||||
}
|
||||
if opts.WarnInterval == 0 {
|
||||
opts.WarnInterval = 30 * time.Second
|
||||
if opts.FetchLogs == nil {
|
||||
opts.FetchLogs = func(_ context.Context, _ uuid.UUID, _ int64, _ bool) (<-chan []codersdk.WorkspaceAgentLog, io.Closer, error) {
|
||||
c := make(chan []codersdk.WorkspaceAgentLog)
|
||||
close(c)
|
||||
return c, closeFunc(func() error { return nil }), nil
|
||||
}
|
||||
}
|
||||
var resourceMutex sync.Mutex
|
||||
agent, err := opts.Fetch(ctx)
|
||||
|
||||
type fetchAgent struct {
|
||||
agent codersdk.WorkspaceAgent
|
||||
err error
|
||||
}
|
||||
fetchedAgent := make(chan fetchAgent, 1)
|
||||
go func() {
|
||||
t := time.NewTimer(0)
|
||||
defer t.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-t.C:
|
||||
agent, err := opts.Fetch(ctx, agentID)
|
||||
select {
|
||||
case <-fetchedAgent:
|
||||
default:
|
||||
}
|
||||
if err != nil {
|
||||
fetchedAgent <- fetchAgent{err: xerrors.Errorf("fetch workspace agent: %w", err)}
|
||||
return
|
||||
}
|
||||
fetchedAgent <- fetchAgent{agent: agent}
|
||||
t.Reset(opts.FetchInterval)
|
||||
}
|
||||
}
|
||||
}()
|
||||
fetch := func() (codersdk.WorkspaceAgent, error) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return codersdk.WorkspaceAgent{}, ctx.Err()
|
||||
case f := <-fetchedAgent:
|
||||
if f.err != nil {
|
||||
return codersdk.WorkspaceAgent{}, f.err
|
||||
}
|
||||
return f.agent, nil
|
||||
}
|
||||
}
|
||||
|
||||
agent, err := fetch()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fetch: %w", err)
|
||||
}
|
||||
|
||||
// Fast path if the agent is ready (avoid showing connecting prompt).
|
||||
// We don't take the fast path for opts.NoWait yet because we want to
|
||||
// show the message.
|
||||
if agent.Status == codersdk.WorkspaceAgentConnected &&
|
||||
(agent.LoginBeforeReady || agent.LifecycleState == codersdk.WorkspaceAgentLifecycleReady) {
|
||||
return nil
|
||||
}
|
||||
sw := &stageWriter{w: writer}
|
||||
|
||||
ctx, cancel := signal.NotifyContext(ctx, os.Interrupt)
|
||||
defer cancel()
|
||||
|
||||
spin := spinner.New(spinner.CharSets[78], 100*time.Millisecond, spinner.WithColor("fgHiGreen"))
|
||||
spin.Writer = writer
|
||||
spin.ForceOutput = true
|
||||
spin.Suffix = waitingMessage(agent, opts).Spin
|
||||
|
||||
waitMessage := &message{}
|
||||
showMessage := func() {
|
||||
resourceMutex.Lock()
|
||||
defer resourceMutex.Unlock()
|
||||
|
||||
m := waitingMessage(agent, opts)
|
||||
if m.Prompt == waitMessage.Prompt {
|
||||
return
|
||||
}
|
||||
moveUp := ""
|
||||
if waitMessage.Prompt != "" {
|
||||
// If this is an update, move a line up
|
||||
// to keep it tidy and aligned.
|
||||
moveUp = "\033[1A"
|
||||
}
|
||||
waitMessage = m
|
||||
|
||||
// Stop the spinner while we write our message.
|
||||
spin.Stop()
|
||||
spin.Suffix = waitMessage.Spin
|
||||
// Clear the line and (if necessary) move up a line to write our message.
|
||||
_, _ = fmt.Fprintf(writer, "\033[2K%s\n%s\n", moveUp, waitMessage.Prompt)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
default:
|
||||
// Safe to resume operation.
|
||||
if spin.Suffix != "" {
|
||||
spin.Start()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fast path for showing the error message even when using no wait,
|
||||
// we do this just before starting the spinner to avoid needless
|
||||
// spinning.
|
||||
if agent.Status == codersdk.WorkspaceAgentConnected &&
|
||||
!agent.LoginBeforeReady && opts.NoWait {
|
||||
showMessage()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start spinning after fast paths are handled.
|
||||
if spin.Suffix != "" {
|
||||
spin.Start()
|
||||
}
|
||||
defer spin.Stop()
|
||||
|
||||
warnAfter := time.NewTimer(opts.WarnInterval)
|
||||
defer warnAfter.Stop()
|
||||
warningShown := make(chan struct{})
|
||||
go func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
close(warningShown)
|
||||
case <-warnAfter.C:
|
||||
close(warningShown)
|
||||
showMessage()
|
||||
}
|
||||
}()
|
||||
|
||||
fetchInterval := time.NewTicker(opts.FetchInterval)
|
||||
defer fetchInterval.Stop()
|
||||
showStartupLogs := false
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-fetchInterval.C:
|
||||
// It doesn't matter if we're connected or not, if the agent is
|
||||
// shutting down, we don't know if it's coming back.
|
||||
if agent.LifecycleState.ShuttingDown() {
|
||||
return errAgentShuttingDown
|
||||
}
|
||||
resourceMutex.Lock()
|
||||
agent, err = opts.Fetch(ctx)
|
||||
if err != nil {
|
||||
resourceMutex.Unlock()
|
||||
return xerrors.Errorf("fetch: %w", err)
|
||||
}
|
||||
resourceMutex.Unlock()
|
||||
|
||||
switch agent.Status {
|
||||
case codersdk.WorkspaceAgentConnected:
|
||||
// NOTE(mafredri): Once we have access to the workspace agent's
|
||||
// startup script logs, we can show them here.
|
||||
// https://github.com/coder/coder/issues/2957
|
||||
if !agent.LoginBeforeReady && !opts.NoWait {
|
||||
switch agent.LifecycleState {
|
||||
case codersdk.WorkspaceAgentLifecycleReady:
|
||||
return nil
|
||||
case codersdk.WorkspaceAgentLifecycleStartTimeout:
|
||||
showMessage()
|
||||
case codersdk.WorkspaceAgentLifecycleStartError:
|
||||
showMessage()
|
||||
return AgentStartError
|
||||
case codersdk.WorkspaceAgentLifecycleShuttingDown, codersdk.WorkspaceAgentLifecycleShutdownTimeout,
|
||||
codersdk.WorkspaceAgentLifecycleShutdownError, codersdk.WorkspaceAgentLifecycleOff:
|
||||
showMessage()
|
||||
return AgentShuttingDown
|
||||
default:
|
||||
select {
|
||||
case <-warningShown:
|
||||
showMessage()
|
||||
default:
|
||||
// This state is normal, we don't want
|
||||
// to show a message prematurely.
|
||||
case codersdk.WorkspaceAgentConnecting, codersdk.WorkspaceAgentTimeout:
|
||||
// Since we were waiting for the agent to connect, also show
|
||||
// startup logs if applicable.
|
||||
showStartupLogs = true
|
||||
|
||||
stage := "Waiting for the workspace agent to connect"
|
||||
sw.Start(stage)
|
||||
for agent.Status == codersdk.WorkspaceAgentConnecting {
|
||||
if agent, err = fetch(); err != nil {
|
||||
return xerrors.Errorf("fetch: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if agent.Status == codersdk.WorkspaceAgentTimeout {
|
||||
now := time.Now()
|
||||
sw.Log(now, codersdk.LogLevelInfo, "The workspace agent is having trouble connecting, wait for it to connect or restart your workspace.")
|
||||
sw.Log(now, codersdk.LogLevelInfo, troubleshootingMessage(agent, "https://coder.com/docs/v2/latest/templates#agent-connection-issues"))
|
||||
for agent.Status == codersdk.WorkspaceAgentTimeout {
|
||||
if agent, err = fetch(); err != nil {
|
||||
return xerrors.Errorf("fetch: %w", err)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
return nil
|
||||
case codersdk.WorkspaceAgentTimeout, codersdk.WorkspaceAgentDisconnected:
|
||||
showMessage()
|
||||
}
|
||||
}
|
||||
}
|
||||
sw.Complete(stage, agent.FirstConnectedAt.Sub(agent.CreatedAt))
|
||||
|
||||
type message struct {
|
||||
Spin string
|
||||
Prompt string
|
||||
Troubleshoot bool
|
||||
}
|
||||
|
||||
func waitingMessage(agent codersdk.WorkspaceAgent, opts AgentOptions) (m *message) {
|
||||
m = &message{
|
||||
Spin: fmt.Sprintf("Waiting for connection from %s...", Styles.Field.Render(agent.Name)),
|
||||
Prompt: "Don't panic, your workspace is booting up!",
|
||||
}
|
||||
defer func() {
|
||||
if agent.Status == codersdk.WorkspaceAgentConnected && opts.NoWait {
|
||||
m.Spin = ""
|
||||
}
|
||||
if m.Spin != "" {
|
||||
m.Spin = " " + m.Spin
|
||||
}
|
||||
|
||||
// We don't want to wrap the troubleshooting URL, so we'll handle word
|
||||
// wrapping ourselves (vs using lipgloss).
|
||||
w := wordwrap.NewWriter(Styles.Paragraph.GetWidth() - Styles.Paragraph.GetMarginLeft()*2)
|
||||
w.Breakpoints = []rune{' ', '\n'}
|
||||
|
||||
_, _ = fmt.Fprint(w, m.Prompt)
|
||||
if m.Troubleshoot {
|
||||
if agent.TroubleshootingURL != "" {
|
||||
_, _ = fmt.Fprintf(w, " See troubleshooting instructions at:\n%s", agent.TroubleshootingURL)
|
||||
} else {
|
||||
_, _ = fmt.Fprint(w, " Wait for it to (re)connect or restart your workspace.")
|
||||
case codersdk.WorkspaceAgentConnected:
|
||||
if !showStartupLogs && agent.LifecycleState == codersdk.WorkspaceAgentLifecycleReady {
|
||||
// The workspace is ready, there's nothing to do but connect.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
_, _ = fmt.Fprint(w, "\n")
|
||||
|
||||
// We want to prefix the prompt with a caret, but we want text on the
|
||||
// following lines to align with the text on the first line (i.e. added
|
||||
// spacing).
|
||||
ind := " " + Styles.Prompt.String()
|
||||
iw := indent.NewWriter(1, func(w io.Writer) {
|
||||
_, _ = w.Write([]byte(ind))
|
||||
ind = " " // Set indentation to space after initial prompt.
|
||||
})
|
||||
_, _ = fmt.Fprint(iw, w.String())
|
||||
m.Prompt = iw.String()
|
||||
}()
|
||||
stage := "Running workspace agent startup script"
|
||||
follow := opts.Wait
|
||||
if !follow {
|
||||
stage += " (non-blocking)"
|
||||
}
|
||||
sw.Start(stage)
|
||||
|
||||
switch agent.Status {
|
||||
case codersdk.WorkspaceAgentTimeout:
|
||||
m.Prompt = "The workspace agent is having trouble connecting."
|
||||
case codersdk.WorkspaceAgentDisconnected:
|
||||
m.Prompt = "The workspace agent lost connection!"
|
||||
case codersdk.WorkspaceAgentConnected:
|
||||
m.Spin = fmt.Sprintf("Waiting for %s to become ready...", Styles.Field.Render(agent.Name))
|
||||
m.Prompt = "Don't panic, your workspace agent has connected and the workspace is getting ready!"
|
||||
if opts.NoWait {
|
||||
m.Prompt = "Your workspace is still getting ready, it may be in an incomplete state."
|
||||
}
|
||||
err = func() error { // Use func because of defer in for loop.
|
||||
logStream, logsCloser, err := opts.FetchLogs(ctx, agent.ID, 0, follow)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fetch workspace agent startup logs: %w", err)
|
||||
}
|
||||
defer logsCloser.Close()
|
||||
|
||||
var lastLog codersdk.WorkspaceAgentLog
|
||||
fetchedAgentWhileFollowing := fetchedAgent
|
||||
if !follow {
|
||||
fetchedAgentWhileFollowing = nil
|
||||
}
|
||||
for {
|
||||
// This select is essentially and inline `fetch()`.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case f := <-fetchedAgentWhileFollowing:
|
||||
if f.err != nil {
|
||||
return xerrors.Errorf("fetch: %w", f.err)
|
||||
}
|
||||
agent = f.agent
|
||||
|
||||
// If the agent is no longer starting, stop following
|
||||
// logs because FetchLogs will keep streaming forever.
|
||||
// We do one last non-follow request to ensure we have
|
||||
// fetched all logs.
|
||||
if !agent.LifecycleState.Starting() {
|
||||
_ = logsCloser.Close()
|
||||
fetchedAgentWhileFollowing = nil
|
||||
|
||||
logStream, logsCloser, err = opts.FetchLogs(ctx, agent.ID, lastLog.ID, false)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fetch workspace agent startup logs: %w", err)
|
||||
}
|
||||
// Logs are already primed, so we can call close.
|
||||
_ = logsCloser.Close()
|
||||
}
|
||||
case logs, ok := <-logStream:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
for _, log := range logs {
|
||||
sw.Log(log.CreatedAt, log.Level, log.Output)
|
||||
lastLog = log
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for follow && agent.LifecycleState.Starting() {
|
||||
if agent, err = fetch(); err != nil {
|
||||
return xerrors.Errorf("fetch: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
switch agent.LifecycleState {
|
||||
case codersdk.WorkspaceAgentLifecycleStartTimeout:
|
||||
m.Prompt = "The workspace is taking longer than expected to get ready, the agent startup script is still executing."
|
||||
case codersdk.WorkspaceAgentLifecycleStartError:
|
||||
m.Spin = ""
|
||||
m.Prompt = "The workspace ran into a problem while getting ready, the agent startup script exited with non-zero status."
|
||||
default:
|
||||
switch agent.LifecycleState {
|
||||
case codersdk.WorkspaceAgentLifecycleShutdownTimeout:
|
||||
m.Spin = ""
|
||||
m.Prompt = "The workspace is shutting down, but is taking longer than expected to shut down and the agent shutdown script is still executing."
|
||||
m.Troubleshoot = true
|
||||
case codersdk.WorkspaceAgentLifecycleShutdownError:
|
||||
m.Spin = ""
|
||||
m.Prompt = "The workspace ran into a problem while shutting down, the agent shutdown script exited with non-zero status."
|
||||
m.Troubleshoot = true
|
||||
case codersdk.WorkspaceAgentLifecycleShuttingDown:
|
||||
m.Spin = ""
|
||||
m.Prompt = "The workspace is shutting down."
|
||||
case codersdk.WorkspaceAgentLifecycleOff:
|
||||
m.Spin = ""
|
||||
m.Prompt = "The workspace is not running."
|
||||
case codersdk.WorkspaceAgentLifecycleReady:
|
||||
sw.Complete(stage, agent.ReadyAt.Sub(*agent.StartedAt))
|
||||
case codersdk.WorkspaceAgentLifecycleStartError:
|
||||
sw.Fail(stage, agent.ReadyAt.Sub(*agent.StartedAt))
|
||||
// Use zero time (omitted) to separate these from the startup logs.
|
||||
sw.Log(time.Time{}, codersdk.LogLevelWarn, "Warning: The startup script exited with an error and your workspace may be incomplete.")
|
||||
sw.Log(time.Time{}, codersdk.LogLevelWarn, troubleshootingMessage(agent, "https://coder.com/docs/v2/latest/templates#startup-script-exited-with-an-error"))
|
||||
default:
|
||||
switch {
|
||||
case agent.LifecycleState.Starting():
|
||||
// Use zero time (omitted) to separate these from the startup logs.
|
||||
sw.Log(time.Time{}, codersdk.LogLevelWarn, "Notice: The startup script is still running and your workspace may be incomplete.")
|
||||
sw.Log(time.Time{}, codersdk.LogLevelWarn, troubleshootingMessage(agent, "https://coder.com/docs/v2/latest/templates#your-workspace-may-be-incomplete"))
|
||||
// Note: We don't complete or fail the stage here, it's
|
||||
// intentionally left open to indicate this stage didn't
|
||||
// complete.
|
||||
case agent.LifecycleState.ShuttingDown():
|
||||
// We no longer know if the startup script failed or not,
|
||||
// but we need to tell the user something.
|
||||
sw.Complete(stage, agent.ReadyAt.Sub(*agent.StartedAt))
|
||||
return errAgentShuttingDown
|
||||
}
|
||||
}
|
||||
// Not a failure state, no troubleshooting necessary.
|
||||
return m
|
||||
|
||||
return nil
|
||||
|
||||
case codersdk.WorkspaceAgentDisconnected:
|
||||
// If the agent was still starting during disconnect, we'll
|
||||
// show startup logs.
|
||||
showStartupLogs = agent.LifecycleState.Starting()
|
||||
|
||||
stage := "The workspace agent lost connection"
|
||||
sw.Start(stage)
|
||||
sw.Log(time.Now(), codersdk.LogLevelWarn, "Wait for it to reconnect or restart your workspace.")
|
||||
sw.Log(time.Now(), codersdk.LogLevelWarn, troubleshootingMessage(agent, "https://coder.com/docs/v2/latest/templates#agent-connection-issues"))
|
||||
for agent.Status == codersdk.WorkspaceAgentDisconnected {
|
||||
if agent, err = fetch(); err != nil {
|
||||
return xerrors.Errorf("fetch: %w", err)
|
||||
}
|
||||
}
|
||||
sw.Complete(stage, agent.LastConnectedAt.Sub(*agent.DisconnectedAt))
|
||||
}
|
||||
default:
|
||||
// Not a failure state, no troubleshooting necessary.
|
||||
return m
|
||||
}
|
||||
m.Troubleshoot = true
|
||||
}
|
||||
|
||||
func troubleshootingMessage(agent codersdk.WorkspaceAgent, url string) string {
|
||||
m := "For more information and troubleshooting, see " + url
|
||||
if agent.TroubleshootingURL != "" {
|
||||
m += " and " + agent.TroubleshootingURL
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
type closeFunc func() error
|
||||
|
||||
func (c closeFunc) Close() error {
|
||||
return c()
|
||||
}
|
||||
|
||||
+364
-340
@@ -1,363 +1,387 @@
|
||||
package cliui_test
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/atomic"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/coder/coder/cli/clibase"
|
||||
"github.com/coder/coder/cli/clitest"
|
||||
"github.com/coder/coder/cli/cliui"
|
||||
"github.com/coder/coder/coderd/util/ptr"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/pty/ptytest"
|
||||
"github.com/coder/coder/testutil"
|
||||
)
|
||||
|
||||
func TestAgent(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
var disconnected atomic.Bool
|
||||
ptty := ptytest.New(t)
|
||||
cmd := &clibase.Cmd{
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
err := cliui.Agent(inv.Context(), inv.Stdout, cliui.AgentOptions{
|
||||
WorkspaceName: "example",
|
||||
Fetch: func(_ context.Context) (codersdk.WorkspaceAgent, error) {
|
||||
agent := codersdk.WorkspaceAgent{
|
||||
Status: codersdk.WorkspaceAgentDisconnected,
|
||||
LoginBeforeReady: true,
|
||||
}
|
||||
if disconnected.Load() {
|
||||
agent.Status = codersdk.WorkspaceAgentConnected
|
||||
}
|
||||
return agent, nil
|
||||
},
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
iter []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error
|
||||
logs chan []codersdk.WorkspaceAgentLog
|
||||
opts cliui.AgentOptions
|
||||
want []string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "Initial connection",
|
||||
opts: cliui.AgentOptions{
|
||||
FetchInterval: time.Millisecond,
|
||||
WarnInterval: 10 * time.Millisecond,
|
||||
})
|
||||
return err
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnecting
|
||||
return nil
|
||||
},
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnected
|
||||
agent.FirstConnectedAt = ptr.Ref(time.Now())
|
||||
return nil
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
"⧗ Waiting for the workspace agent to connect",
|
||||
"✔ Waiting for the workspace agent to connect",
|
||||
"⧗ Running workspace agent startup script (non-blocking)",
|
||||
"Notice: The startup script is still running and your workspace may be incomplete.",
|
||||
"For more information and troubleshooting, see",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Initial connection timeout",
|
||||
opts: cliui.AgentOptions{
|
||||
FetchInterval: 1 * time.Millisecond,
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnecting
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycleStarting
|
||||
agent.StartedAt = ptr.Ref(time.Now())
|
||||
return nil
|
||||
},
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentTimeout
|
||||
return nil
|
||||
},
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnected
|
||||
agent.FirstConnectedAt = ptr.Ref(time.Now())
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycleReady
|
||||
agent.ReadyAt = ptr.Ref(time.Now())
|
||||
return nil
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
"⧗ Waiting for the workspace agent to connect",
|
||||
"The workspace agent is having trouble connecting, wait for it to connect or restart your workspace.",
|
||||
"For more information and troubleshooting, see",
|
||||
"✔ Waiting for the workspace agent to connect",
|
||||
"⧗ Running workspace agent startup script (non-blocking)",
|
||||
"✔ Running workspace agent startup script (non-blocking)",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Disconnected",
|
||||
opts: cliui.AgentOptions{
|
||||
FetchInterval: 1 * time.Millisecond,
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentDisconnected
|
||||
agent.FirstConnectedAt = ptr.Ref(time.Now().Add(-1 * time.Minute))
|
||||
agent.LastConnectedAt = ptr.Ref(time.Now().Add(-1 * time.Minute))
|
||||
agent.DisconnectedAt = ptr.Ref(time.Now())
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycleReady
|
||||
agent.StartedAt = ptr.Ref(time.Now().Add(-1 * time.Minute))
|
||||
agent.ReadyAt = ptr.Ref(time.Now())
|
||||
return nil
|
||||
},
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnected
|
||||
agent.LastConnectedAt = ptr.Ref(time.Now())
|
||||
return nil
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
"⧗ The workspace agent lost connection",
|
||||
"Wait for it to reconnect or restart your workspace.",
|
||||
"For more information and troubleshooting, see",
|
||||
"✔ The workspace agent lost connection",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Startup script logs",
|
||||
opts: cliui.AgentOptions{
|
||||
FetchInterval: time.Millisecond,
|
||||
Wait: true,
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnected
|
||||
agent.FirstConnectedAt = ptr.Ref(time.Now())
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycleStarting
|
||||
agent.StartedAt = ptr.Ref(time.Now())
|
||||
logs <- []codersdk.WorkspaceAgentLog{
|
||||
{
|
||||
CreatedAt: time.Now(),
|
||||
Output: "Hello world",
|
||||
},
|
||||
}
|
||||
return nil
|
||||
},
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycleReady
|
||||
agent.ReadyAt = ptr.Ref(time.Now())
|
||||
logs <- []codersdk.WorkspaceAgentLog{
|
||||
{
|
||||
CreatedAt: time.Now(),
|
||||
Output: "Bye now",
|
||||
},
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
"⧗ Running workspace agent startup script",
|
||||
"Hello world",
|
||||
"Bye now",
|
||||
"✔ Running workspace agent startup script",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Startup script exited with error",
|
||||
opts: cliui.AgentOptions{
|
||||
FetchInterval: time.Millisecond,
|
||||
Wait: true,
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnected
|
||||
agent.FirstConnectedAt = ptr.Ref(time.Now())
|
||||
agent.StartedAt = ptr.Ref(time.Now())
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycleStartError
|
||||
agent.ReadyAt = ptr.Ref(time.Now())
|
||||
logs <- []codersdk.WorkspaceAgentLog{
|
||||
{
|
||||
CreatedAt: time.Now(),
|
||||
Output: "Hello world",
|
||||
},
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
"⧗ Running workspace agent startup script",
|
||||
"Hello world",
|
||||
"✘ Running workspace agent startup script",
|
||||
"Warning: The startup script exited with an error and your workspace may be incomplete.",
|
||||
"For more information and troubleshooting, see",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Error when shutting down",
|
||||
opts: cliui.AgentOptions{
|
||||
FetchInterval: time.Millisecond,
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentDisconnected
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycleOff
|
||||
return nil
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Error when shutting down while waiting",
|
||||
opts: cliui.AgentOptions{
|
||||
FetchInterval: time.Millisecond,
|
||||
Wait: true,
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnected
|
||||
agent.FirstConnectedAt = ptr.Ref(time.Now())
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycleStarting
|
||||
agent.StartedAt = ptr.Ref(time.Now())
|
||||
logs <- []codersdk.WorkspaceAgentLog{
|
||||
{
|
||||
CreatedAt: time.Now(),
|
||||
Output: "Hello world",
|
||||
},
|
||||
}
|
||||
return nil
|
||||
},
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, logs chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.ReadyAt = ptr.Ref(time.Now())
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycleShuttingDown
|
||||
return nil
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
"⧗ Running workspace agent startup script",
|
||||
"Hello world",
|
||||
"✔ Running workspace agent startup script",
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Error during fetch",
|
||||
opts: cliui.AgentOptions{
|
||||
FetchInterval: time.Millisecond,
|
||||
Wait: true,
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentConnecting
|
||||
return nil
|
||||
},
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
return xerrors.New("bad")
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
"⧗ Waiting for the workspace agent to connect",
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Shows agent troubleshooting URL",
|
||||
opts: cliui.AgentOptions{
|
||||
FetchInterval: time.Millisecond,
|
||||
Wait: true,
|
||||
},
|
||||
iter: []func(context.Context, *codersdk.WorkspaceAgent, chan []codersdk.WorkspaceAgentLog) error{
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
agent.Status = codersdk.WorkspaceAgentTimeout
|
||||
agent.TroubleshootingURL = "https://troubleshoot"
|
||||
return nil
|
||||
},
|
||||
func(_ context.Context, agent *codersdk.WorkspaceAgent, _ chan []codersdk.WorkspaceAgentLog) error {
|
||||
return xerrors.New("bad")
|
||||
},
|
||||
},
|
||||
want: []string{
|
||||
"⧗ Waiting for the workspace agent to connect",
|
||||
"The workspace agent is having trouble connecting, wait for it to connect or restart your workspace.",
|
||||
"https://troubleshoot",
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
} {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
var buf bytes.Buffer
|
||||
agent := codersdk.WorkspaceAgent{
|
||||
ID: uuid.New(),
|
||||
Status: codersdk.WorkspaceAgentConnecting,
|
||||
StartupScriptBehavior: codersdk.WorkspaceAgentStartupScriptBehaviorNonBlocking,
|
||||
CreatedAt: time.Now(),
|
||||
LifecycleState: codersdk.WorkspaceAgentLifecycleCreated,
|
||||
}
|
||||
logs := make(chan []codersdk.WorkspaceAgentLog, 1)
|
||||
|
||||
cmd := &clibase.Cmd{
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
tc.opts.Fetch = func(_ context.Context, _ uuid.UUID) (codersdk.WorkspaceAgent, error) {
|
||||
var err error
|
||||
if len(tc.iter) > 0 {
|
||||
err = tc.iter[0](ctx, &agent, logs)
|
||||
tc.iter = tc.iter[1:]
|
||||
}
|
||||
return agent, err
|
||||
}
|
||||
tc.opts.FetchLogs = func(ctx context.Context, _ uuid.UUID, _ int64, follow bool) (<-chan []codersdk.WorkspaceAgentLog, io.Closer, error) {
|
||||
if follow {
|
||||
return logs, closeFunc(func() error { return nil }), nil
|
||||
}
|
||||
|
||||
fetchLogs := make(chan []codersdk.WorkspaceAgentLog, 1)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, nil, ctx.Err()
|
||||
case l := <-logs:
|
||||
fetchLogs <- l
|
||||
default:
|
||||
}
|
||||
close(fetchLogs)
|
||||
return fetchLogs, closeFunc(func() error { return nil }), nil
|
||||
}
|
||||
err := cliui.Agent(inv.Context(), &buf, uuid.Nil, tc.opts)
|
||||
return err
|
||||
},
|
||||
}
|
||||
inv := cmd.Invoke()
|
||||
|
||||
w := clitest.StartWithWaiter(t, inv)
|
||||
if tc.wantErr {
|
||||
w.RequireError()
|
||||
} else {
|
||||
w.RequireSuccess()
|
||||
}
|
||||
|
||||
s := bufio.NewScanner(&buf)
|
||||
for s.Scan() {
|
||||
line := s.Text()
|
||||
t.Log(line)
|
||||
if len(tc.want) == 0 {
|
||||
require.Fail(t, "unexpected line: "+line)
|
||||
}
|
||||
require.Contains(t, line, tc.want[0])
|
||||
tc.want = tc.want[1:]
|
||||
}
|
||||
require.NoError(t, s.Err())
|
||||
if len(tc.want) > 0 {
|
||||
require.Fail(t, "missing lines: "+strings.Join(tc.want, ", "))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
inv := cmd.Invoke()
|
||||
ptty.Attach(inv)
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
ptty.ExpectMatchContext(ctx, "lost connection")
|
||||
disconnected.Store(true)
|
||||
<-done
|
||||
}
|
||||
|
||||
func TestAgent_TimeoutWithTroubleshootingURL(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
wantURL := "https://coder.com/troubleshoot"
|
||||
|
||||
var connected, timeout atomic.Bool
|
||||
cmd := &clibase.Cmd{
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
err := cliui.Agent(inv.Context(), inv.Stdout, cliui.AgentOptions{
|
||||
WorkspaceName: "example",
|
||||
Fetch: func(_ context.Context) (codersdk.WorkspaceAgent, error) {
|
||||
agent := codersdk.WorkspaceAgent{
|
||||
Status: codersdk.WorkspaceAgentConnecting,
|
||||
TroubleshootingURL: wantURL,
|
||||
LoginBeforeReady: true,
|
||||
}
|
||||
switch {
|
||||
case !connected.Load() && timeout.Load():
|
||||
agent.Status = codersdk.WorkspaceAgentTimeout
|
||||
case connected.Load():
|
||||
agent.Status = codersdk.WorkspaceAgentConnected
|
||||
}
|
||||
return agent, nil
|
||||
},
|
||||
FetchInterval: time.Millisecond,
|
||||
WarnInterval: 5 * time.Millisecond,
|
||||
})
|
||||
return err
|
||||
},
|
||||
}
|
||||
ptty := ptytest.New(t)
|
||||
|
||||
inv := cmd.Invoke()
|
||||
ptty.Attach(inv)
|
||||
done := make(chan error, 1)
|
||||
go func() {
|
||||
done <- inv.WithContext(ctx).Run()
|
||||
}()
|
||||
ptty.ExpectMatchContext(ctx, "Don't panic, your workspace is booting")
|
||||
timeout.Store(true)
|
||||
ptty.ExpectMatchContext(ctx, wantURL)
|
||||
connected.Store(true)
|
||||
require.NoError(t, <-done)
|
||||
}
|
||||
|
||||
func TestAgent_StartupTimeout(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
wantURL := "https://coder.com/this-is-a-really-long-troubleshooting-url-that-should-not-wrap"
|
||||
|
||||
var status, state atomic.String
|
||||
setStatus := func(s codersdk.WorkspaceAgentStatus) { status.Store(string(s)) }
|
||||
setState := func(s codersdk.WorkspaceAgentLifecycle) { state.Store(string(s)) }
|
||||
|
||||
cmd := &clibase.Cmd{
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
err := cliui.Agent(inv.Context(), inv.Stdout, cliui.AgentOptions{
|
||||
WorkspaceName: "example",
|
||||
Fetch: func(_ context.Context) (codersdk.WorkspaceAgent, error) {
|
||||
agent := codersdk.WorkspaceAgent{
|
||||
Status: codersdk.WorkspaceAgentConnecting,
|
||||
LoginBeforeReady: false,
|
||||
LifecycleState: codersdk.WorkspaceAgentLifecycleCreated,
|
||||
TroubleshootingURL: wantURL,
|
||||
}
|
||||
|
||||
if s := status.Load(); s != "" {
|
||||
agent.Status = codersdk.WorkspaceAgentStatus(s)
|
||||
}
|
||||
if s := state.Load(); s != "" {
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycle(s)
|
||||
}
|
||||
return agent, nil
|
||||
},
|
||||
FetchInterval: time.Millisecond,
|
||||
WarnInterval: time.Millisecond,
|
||||
NoWait: false,
|
||||
})
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
ptty := ptytest.New(t)
|
||||
|
||||
inv := cmd.Invoke()
|
||||
ptty.Attach(inv)
|
||||
done := make(chan error, 1)
|
||||
go func() {
|
||||
done <- inv.WithContext(ctx).Run()
|
||||
}()
|
||||
setStatus(codersdk.WorkspaceAgentConnecting)
|
||||
ptty.ExpectMatchContext(ctx, "Don't panic, your workspace is booting")
|
||||
setStatus(codersdk.WorkspaceAgentConnected)
|
||||
setState(codersdk.WorkspaceAgentLifecycleStarting)
|
||||
ptty.ExpectMatchContext(ctx, "workspace is getting ready")
|
||||
setState(codersdk.WorkspaceAgentLifecycleStartTimeout)
|
||||
ptty.ExpectMatchContext(ctx, "is taking longer")
|
||||
ptty.ExpectMatchContext(ctx, wantURL)
|
||||
setState(codersdk.WorkspaceAgentLifecycleReady)
|
||||
require.NoError(t, <-done)
|
||||
}
|
||||
|
||||
func TestAgent_StartErrorExit(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
wantURL := "https://coder.com/this-is-a-really-long-troubleshooting-url-that-should-not-wrap"
|
||||
|
||||
var status, state atomic.String
|
||||
setStatus := func(s codersdk.WorkspaceAgentStatus) { status.Store(string(s)) }
|
||||
setState := func(s codersdk.WorkspaceAgentLifecycle) { state.Store(string(s)) }
|
||||
cmd := &clibase.Cmd{
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
err := cliui.Agent(inv.Context(), inv.Stdout, cliui.AgentOptions{
|
||||
WorkspaceName: "example",
|
||||
Fetch: func(_ context.Context) (codersdk.WorkspaceAgent, error) {
|
||||
agent := codersdk.WorkspaceAgent{
|
||||
Status: codersdk.WorkspaceAgentConnecting,
|
||||
LoginBeforeReady: false,
|
||||
LifecycleState: codersdk.WorkspaceAgentLifecycleCreated,
|
||||
TroubleshootingURL: wantURL,
|
||||
}
|
||||
|
||||
if s := status.Load(); s != "" {
|
||||
agent.Status = codersdk.WorkspaceAgentStatus(s)
|
||||
}
|
||||
if s := state.Load(); s != "" {
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycle(s)
|
||||
}
|
||||
return agent, nil
|
||||
},
|
||||
FetchInterval: time.Millisecond,
|
||||
WarnInterval: 60 * time.Second,
|
||||
NoWait: false,
|
||||
})
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
ptty := ptytest.New(t)
|
||||
|
||||
inv := cmd.Invoke()
|
||||
ptty.Attach(inv)
|
||||
done := make(chan error, 1)
|
||||
go func() {
|
||||
done <- inv.WithContext(ctx).Run()
|
||||
}()
|
||||
setStatus(codersdk.WorkspaceAgentConnected)
|
||||
setState(codersdk.WorkspaceAgentLifecycleStarting)
|
||||
ptty.ExpectMatchContext(ctx, "to become ready...")
|
||||
setState(codersdk.WorkspaceAgentLifecycleStartError)
|
||||
ptty.ExpectMatchContext(ctx, "ran into a problem")
|
||||
err := <-done
|
||||
require.ErrorIs(t, err, cliui.AgentStartError, "lifecycle start_error should exit with error")
|
||||
}
|
||||
|
||||
func TestAgent_NoWait(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
wantURL := "https://coder.com/this-is-a-really-long-troubleshooting-url-that-should-not-wrap"
|
||||
|
||||
var status, state atomic.String
|
||||
setStatus := func(s codersdk.WorkspaceAgentStatus) { status.Store(string(s)) }
|
||||
setState := func(s codersdk.WorkspaceAgentLifecycle) { state.Store(string(s)) }
|
||||
cmd := &clibase.Cmd{
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
err := cliui.Agent(inv.Context(), inv.Stdout, cliui.AgentOptions{
|
||||
WorkspaceName: "example",
|
||||
Fetch: func(_ context.Context) (codersdk.WorkspaceAgent, error) {
|
||||
agent := codersdk.WorkspaceAgent{
|
||||
Status: codersdk.WorkspaceAgentConnecting,
|
||||
LoginBeforeReady: false,
|
||||
LifecycleState: codersdk.WorkspaceAgentLifecycleCreated,
|
||||
TroubleshootingURL: wantURL,
|
||||
}
|
||||
|
||||
if s := status.Load(); s != "" {
|
||||
agent.Status = codersdk.WorkspaceAgentStatus(s)
|
||||
}
|
||||
if s := state.Load(); s != "" {
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycle(s)
|
||||
}
|
||||
return agent, nil
|
||||
},
|
||||
FetchInterval: time.Millisecond,
|
||||
WarnInterval: time.Second,
|
||||
NoWait: true,
|
||||
})
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
ptty := ptytest.New(t)
|
||||
|
||||
inv := cmd.Invoke()
|
||||
ptty.Attach(inv)
|
||||
done := make(chan error, 1)
|
||||
go func() {
|
||||
done <- inv.WithContext(ctx).Run()
|
||||
}()
|
||||
setStatus(codersdk.WorkspaceAgentConnecting)
|
||||
ptty.ExpectMatchContext(ctx, "Don't panic, your workspace is booting")
|
||||
|
||||
setStatus(codersdk.WorkspaceAgentConnected)
|
||||
require.NoError(t, <-done, "created - should exit early")
|
||||
|
||||
setState(codersdk.WorkspaceAgentLifecycleStarting)
|
||||
go func() { done <- inv.WithContext(ctx).Run() }()
|
||||
require.NoError(t, <-done, "starting - should exit early")
|
||||
|
||||
setState(codersdk.WorkspaceAgentLifecycleStartTimeout)
|
||||
go func() { done <- inv.WithContext(ctx).Run() }()
|
||||
require.NoError(t, <-done, "start timeout - should exit early")
|
||||
|
||||
setState(codersdk.WorkspaceAgentLifecycleStartError)
|
||||
go func() { done <- inv.WithContext(ctx).Run() }()
|
||||
require.NoError(t, <-done, "start error - should exit early")
|
||||
|
||||
setState(codersdk.WorkspaceAgentLifecycleReady)
|
||||
go func() { done <- inv.WithContext(ctx).Run() }()
|
||||
require.NoError(t, <-done, "ready - should exit early")
|
||||
}
|
||||
|
||||
func TestAgent_LoginBeforeReadyEnabled(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitShort)
|
||||
defer cancel()
|
||||
|
||||
wantURL := "https://coder.com/this-is-a-really-long-troubleshooting-url-that-should-not-wrap"
|
||||
|
||||
var status, state atomic.String
|
||||
setStatus := func(s codersdk.WorkspaceAgentStatus) { status.Store(string(s)) }
|
||||
setState := func(s codersdk.WorkspaceAgentLifecycle) { state.Store(string(s)) }
|
||||
cmd := &clibase.Cmd{
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
err := cliui.Agent(inv.Context(), inv.Stdout, cliui.AgentOptions{
|
||||
WorkspaceName: "example",
|
||||
Fetch: func(_ context.Context) (codersdk.WorkspaceAgent, error) {
|
||||
agent := codersdk.WorkspaceAgent{
|
||||
Status: codersdk.WorkspaceAgentConnecting,
|
||||
LoginBeforeReady: true,
|
||||
LifecycleState: codersdk.WorkspaceAgentLifecycleCreated,
|
||||
TroubleshootingURL: wantURL,
|
||||
}
|
||||
|
||||
if s := status.Load(); s != "" {
|
||||
agent.Status = codersdk.WorkspaceAgentStatus(s)
|
||||
}
|
||||
if s := state.Load(); s != "" {
|
||||
agent.LifecycleState = codersdk.WorkspaceAgentLifecycle(s)
|
||||
}
|
||||
return agent, nil
|
||||
},
|
||||
FetchInterval: time.Millisecond,
|
||||
WarnInterval: time.Second,
|
||||
NoWait: false,
|
||||
})
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
inv := cmd.Invoke()
|
||||
|
||||
ptty := ptytest.New(t)
|
||||
ptty.Attach(inv)
|
||||
done := make(chan error, 1)
|
||||
go func() {
|
||||
done <- inv.WithContext(ctx).Run()
|
||||
}()
|
||||
setStatus(codersdk.WorkspaceAgentConnecting)
|
||||
ptty.ExpectMatchContext(ctx, "Don't panic, your workspace is booting")
|
||||
|
||||
setStatus(codersdk.WorkspaceAgentConnected)
|
||||
require.NoError(t, <-done, "created - should exit early")
|
||||
|
||||
setState(codersdk.WorkspaceAgentLifecycleStarting)
|
||||
go func() { done <- inv.WithContext(ctx).Run() }()
|
||||
require.NoError(t, <-done, "starting - should exit early")
|
||||
|
||||
setState(codersdk.WorkspaceAgentLifecycleStartTimeout)
|
||||
go func() { done <- inv.WithContext(ctx).Run() }()
|
||||
require.NoError(t, <-done, "start timeout - should exit early")
|
||||
|
||||
setState(codersdk.WorkspaceAgentLifecycleStartError)
|
||||
go func() { done <- inv.WithContext(ctx).Run() }()
|
||||
require.NoError(t, <-done, "start error - should exit early")
|
||||
|
||||
setState(codersdk.WorkspaceAgentLifecycleReady)
|
||||
go func() { done <- inv.WithContext(ctx).Run() }()
|
||||
require.NoError(t, <-done, "ready - should exit early")
|
||||
t.Run("NotInfinite", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
var fetchCalled uint64
|
||||
|
||||
cmd := &clibase.Cmd{
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
buf := bytes.Buffer{}
|
||||
err := cliui.Agent(inv.Context(), &buf, uuid.Nil, cliui.AgentOptions{
|
||||
FetchInterval: 10 * time.Millisecond,
|
||||
Fetch: func(ctx context.Context, agentID uuid.UUID) (codersdk.WorkspaceAgent, error) {
|
||||
atomic.AddUint64(&fetchCalled, 1)
|
||||
|
||||
return codersdk.WorkspaceAgent{
|
||||
Status: codersdk.WorkspaceAgentConnected,
|
||||
LifecycleState: codersdk.WorkspaceAgentLifecycleReady,
|
||||
}, nil
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
require.Never(t, func() bool {
|
||||
called := atomic.LoadUint64(&fetchCalled)
|
||||
return called > 5 || called == 0
|
||||
}, time.Second, 100*time.Millisecond)
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
require.NoError(t, cmd.Invoke().Run())
|
||||
})
|
||||
}
|
||||
|
||||
+48
-33
@@ -1,27 +1,20 @@
|
||||
package cliui
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/charmbracelet/charm/ui/common"
|
||||
"github.com/charmbracelet/lipgloss"
|
||||
"github.com/muesli/termenv"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
var (
|
||||
Canceled = xerrors.New("canceled")
|
||||
var Canceled = xerrors.New("canceled")
|
||||
|
||||
defaultStyles = common.DefaultStyles()
|
||||
)
|
||||
// DefaultStyles compose visual elements of the UI.
|
||||
var DefaultStyles Styles
|
||||
|
||||
// ValidateNotEmpty is a helper function to disallow empty inputs!
|
||||
func ValidateNotEmpty(s string) error {
|
||||
if s == "" {
|
||||
return xerrors.New("Must be provided!")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Styles compose visual elements of the UI!
|
||||
var Styles = struct {
|
||||
type Styles struct {
|
||||
Bold,
|
||||
Checkmark,
|
||||
Code,
|
||||
@@ -38,23 +31,45 @@ var Styles = struct {
|
||||
Logo,
|
||||
Warn,
|
||||
Wrap lipgloss.Style
|
||||
}{
|
||||
Bold: lipgloss.NewStyle().Bold(true),
|
||||
Checkmark: defaultStyles.Checkmark,
|
||||
Code: defaultStyles.Code,
|
||||
Crossmark: defaultStyles.Error.Copy().SetString("✘"),
|
||||
DateTimeStamp: defaultStyles.LabelDim,
|
||||
Error: defaultStyles.Error,
|
||||
Field: defaultStyles.Code.Copy().Foreground(lipgloss.AdaptiveColor{Light: "#000000", Dark: "#FFFFFF"}),
|
||||
Keyword: defaultStyles.Keyword,
|
||||
Paragraph: defaultStyles.Paragraph,
|
||||
Placeholder: lipgloss.NewStyle().Foreground(lipgloss.AdaptiveColor{Light: "#585858", Dark: "#4d46b3"}),
|
||||
Prompt: defaultStyles.Prompt.Copy().Foreground(lipgloss.AdaptiveColor{Light: "#9B9B9B", Dark: "#5C5C5C"}),
|
||||
FocusedPrompt: defaultStyles.FocusedPrompt.Copy().Foreground(lipgloss.Color("#651fff")),
|
||||
Fuchsia: defaultStyles.SelectedMenuItem.Copy(),
|
||||
Logo: defaultStyles.Logo.Copy().SetString("Coder"),
|
||||
Warn: lipgloss.NewStyle().Foreground(
|
||||
lipgloss.AdaptiveColor{Light: "#04B575", Dark: "#ECFD65"},
|
||||
),
|
||||
Wrap: lipgloss.NewStyle().Width(80),
|
||||
}
|
||||
|
||||
func init() {
|
||||
lipgloss.SetDefaultRenderer(
|
||||
lipgloss.NewRenderer(os.Stdout, termenv.WithColorCache(true)),
|
||||
)
|
||||
|
||||
// All Styles are set after we change the DefaultRenderer so that the ColorCache
|
||||
// is in effect, mitigating the severe performance issue seen here:
|
||||
// https://github.com/coder/coder/issues/7884.
|
||||
|
||||
charmStyles := common.DefaultStyles()
|
||||
|
||||
DefaultStyles = Styles{
|
||||
Bold: lipgloss.NewStyle().Bold(true),
|
||||
Checkmark: charmStyles.Checkmark,
|
||||
Code: charmStyles.Code,
|
||||
Crossmark: charmStyles.Error.Copy().SetString("✘"),
|
||||
DateTimeStamp: charmStyles.LabelDim,
|
||||
Error: charmStyles.Error,
|
||||
Field: charmStyles.Code.Copy().Foreground(lipgloss.AdaptiveColor{Light: "#000000", Dark: "#FFFFFF"}),
|
||||
Keyword: charmStyles.Keyword,
|
||||
Paragraph: charmStyles.Paragraph,
|
||||
Placeholder: lipgloss.NewStyle().Foreground(lipgloss.AdaptiveColor{Light: "#585858", Dark: "#4d46b3"}),
|
||||
Prompt: charmStyles.Prompt.Copy().Foreground(lipgloss.AdaptiveColor{Light: "#9B9B9B", Dark: "#5C5C5C"}),
|
||||
FocusedPrompt: charmStyles.FocusedPrompt.Copy().Foreground(lipgloss.Color("#651fff")),
|
||||
Fuchsia: charmStyles.SelectedMenuItem.Copy(),
|
||||
Logo: charmStyles.Logo.Copy().SetString("Coder"),
|
||||
Warn: lipgloss.NewStyle().Foreground(
|
||||
lipgloss.AdaptiveColor{Light: "#04B575", Dark: "#ECFD65"},
|
||||
),
|
||||
Wrap: lipgloss.NewStyle().Width(80),
|
||||
}
|
||||
}
|
||||
|
||||
// ValidateNotEmpty is a helper function to disallow empty inputs!
|
||||
func ValidateNotEmpty(s string) error {
|
||||
if s == "" {
|
||||
return xerrors.New("Must be provided!")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package cliui_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/url"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -33,7 +32,7 @@ func TestGitAuth(t *testing.T) {
|
||||
ID: "github",
|
||||
Type: codersdk.GitProviderGitHub,
|
||||
Authenticated: fetched.Load(),
|
||||
AuthenticateURL: "https://example.com/gitauth/github?redirect=" + url.QueryEscape("/gitauth?notify"),
|
||||
AuthenticateURL: "https://example.com/gitauth/github",
|
||||
}}, nil
|
||||
},
|
||||
FetchInterval: time.Millisecond,
|
||||
|
||||
+2
-2
@@ -35,7 +35,7 @@ func (m cliMessage) String() string {
|
||||
// Warn writes a log to the writer provided.
|
||||
func Warn(wtr io.Writer, header string, lines ...string) {
|
||||
_, _ = fmt.Fprint(wtr, cliMessage{
|
||||
Style: Styles.Warn,
|
||||
Style: DefaultStyles.Warn.Copy(),
|
||||
Prefix: "WARN: ",
|
||||
Header: header,
|
||||
Lines: lines,
|
||||
@@ -63,7 +63,7 @@ func Infof(wtr io.Writer, fmtStr string, args ...interface{}) {
|
||||
// Error writes a log to the writer provided.
|
||||
func Error(wtr io.Writer, header string, lines ...string) {
|
||||
_, _ = fmt.Fprint(wtr, cliMessage{
|
||||
Style: Styles.Error,
|
||||
Style: DefaultStyles.Error.Copy(),
|
||||
Prefix: "ERROR: ",
|
||||
Header: header,
|
||||
Lines: lines,
|
||||
|
||||
@@ -3,6 +3,7 @@ package cliui
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
@@ -171,3 +172,55 @@ func (jsonFormat) Format(_ context.Context, data any) (string, error) {
|
||||
|
||||
return string(outBytes), nil
|
||||
}
|
||||
|
||||
type textFormat struct{}
|
||||
|
||||
var _ OutputFormat = textFormat{}
|
||||
|
||||
// TextFormat is a formatter that just outputs unstructured text.
|
||||
// It uses fmt.Sprintf under the hood.
|
||||
func TextFormat() OutputFormat {
|
||||
return textFormat{}
|
||||
}
|
||||
|
||||
func (textFormat) ID() string {
|
||||
return "text"
|
||||
}
|
||||
|
||||
func (textFormat) AttachOptions(_ *clibase.OptionSet) {}
|
||||
|
||||
func (textFormat) Format(_ context.Context, data any) (string, error) {
|
||||
return fmt.Sprintf("%s", data), nil
|
||||
}
|
||||
|
||||
// DataChangeFormat allows manipulating the data passed to an output format.
|
||||
// This is because sometimes the data needs to be manipulated before it can be
|
||||
// passed to the output format.
|
||||
// For example, you may want to pass something different to the text formatter
|
||||
// than what you pass to the json formatter.
|
||||
type DataChangeFormat struct {
|
||||
format OutputFormat
|
||||
change func(data any) (any, error)
|
||||
}
|
||||
|
||||
// ChangeFormatterData allows manipulating the data passed to an output
|
||||
// format.
|
||||
func ChangeFormatterData(format OutputFormat, change func(data any) (any, error)) *DataChangeFormat {
|
||||
return &DataChangeFormat{format: format, change: change}
|
||||
}
|
||||
|
||||
func (d *DataChangeFormat) ID() string {
|
||||
return d.format.ID()
|
||||
}
|
||||
|
||||
func (d *DataChangeFormat) AttachOptions(opts *clibase.OptionSet) {
|
||||
d.format.AttachOptions(opts)
|
||||
}
|
||||
|
||||
func (d *DataChangeFormat) Format(ctx context.Context, data any) (string, error) {
|
||||
newData, err := d.change(data)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return d.format.Format(ctx, newData)
|
||||
}
|
||||
|
||||
@@ -50,6 +50,9 @@ func Test_OutputFormatter(t *testing.T) {
|
||||
require.Panics(t, func() {
|
||||
cliui.NewOutputFormatter(cliui.JSONFormat())
|
||||
})
|
||||
require.NotPanics(t, func() {
|
||||
cliui.NewOutputFormatter(cliui.JSONFormat(), cliui.TextFormat())
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("NoMissingFormatID", func(t *testing.T) {
|
||||
|
||||
+14
-56
@@ -6,63 +6,21 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/coder/coder/cli/clibase"
|
||||
"github.com/coder/coder/coderd/parameter"
|
||||
"github.com/coder/coder/codersdk"
|
||||
)
|
||||
|
||||
func ParameterSchema(inv *clibase.Invocation, parameterSchema codersdk.ParameterSchema) (string, error) {
|
||||
_, _ = fmt.Fprintln(inv.Stdout, Styles.Bold.Render("var."+parameterSchema.Name))
|
||||
if parameterSchema.Description != "" {
|
||||
_, _ = fmt.Fprintln(inv.Stdout, " "+strings.TrimSpace(strings.Join(strings.Split(parameterSchema.Description, "\n"), "\n "))+"\n")
|
||||
}
|
||||
|
||||
var err error
|
||||
var options []string
|
||||
if parameterSchema.ValidationCondition != "" {
|
||||
options, _, err = parameter.Contains(parameterSchema.ValidationCondition)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
var value string
|
||||
if len(options) > 0 {
|
||||
// Move the cursor up a single line for nicer display!
|
||||
_, _ = fmt.Fprint(inv.Stdout, "\033[1A")
|
||||
value, err = Select(inv, SelectOptions{
|
||||
Options: options,
|
||||
Default: parameterSchema.DefaultSourceValue,
|
||||
HideSearch: true,
|
||||
})
|
||||
if err == nil {
|
||||
_, _ = fmt.Fprintln(inv.Stdout)
|
||||
_, _ = fmt.Fprintln(inv.Stdout, " "+Styles.Prompt.String()+Styles.Field.Render(value))
|
||||
}
|
||||
} else {
|
||||
text := "Enter a value"
|
||||
if parameterSchema.DefaultSourceValue != "" {
|
||||
text += fmt.Sprintf(" (default: %q)", parameterSchema.DefaultSourceValue)
|
||||
}
|
||||
text += ":"
|
||||
|
||||
value, err = Prompt(inv, PromptOptions{
|
||||
Text: Styles.Bold.Render(text),
|
||||
})
|
||||
value = strings.TrimSpace(value)
|
||||
}
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// If they didn't specify anything, use the default value if set.
|
||||
if len(options) == 0 && value == "" {
|
||||
value = parameterSchema.DefaultSourceValue
|
||||
}
|
||||
|
||||
return value, nil
|
||||
}
|
||||
|
||||
func RichParameter(inv *clibase.Invocation, templateVersionParameter codersdk.TemplateVersionParameter) (string, error) {
|
||||
_, _ = fmt.Fprintln(inv.Stdout, Styles.Bold.Render(templateVersionParameter.Name))
|
||||
label := templateVersionParameter.Name
|
||||
if templateVersionParameter.DisplayName != "" {
|
||||
label = templateVersionParameter.DisplayName
|
||||
}
|
||||
|
||||
if templateVersionParameter.Ephemeral {
|
||||
label += DefaultStyles.Warn.Render(" (build option)")
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintln(inv.Stdout, DefaultStyles.Bold.Render(label))
|
||||
|
||||
if templateVersionParameter.DescriptionPlaintext != "" {
|
||||
_, _ = fmt.Fprintln(inv.Stdout, " "+strings.TrimSpace(strings.Join(strings.Split(templateVersionParameter.DescriptionPlaintext, "\n"), "\n "))+"\n")
|
||||
}
|
||||
@@ -87,7 +45,7 @@ func RichParameter(inv *clibase.Invocation, templateVersionParameter codersdk.Te
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintln(inv.Stdout)
|
||||
_, _ = fmt.Fprintln(inv.Stdout, " "+Styles.Prompt.String()+Styles.Field.Render(strings.Join(values, ", ")))
|
||||
_, _ = fmt.Fprintln(inv.Stdout, " "+DefaultStyles.Prompt.String()+DefaultStyles.Field.Render(strings.Join(values, ", ")))
|
||||
value = string(v)
|
||||
}
|
||||
} else if len(templateVersionParameter.Options) > 0 {
|
||||
@@ -101,7 +59,7 @@ func RichParameter(inv *clibase.Invocation, templateVersionParameter codersdk.Te
|
||||
})
|
||||
if err == nil {
|
||||
_, _ = fmt.Fprintln(inv.Stdout)
|
||||
_, _ = fmt.Fprintln(inv.Stdout, " "+Styles.Prompt.String()+Styles.Field.Render(richParameterOption.Name))
|
||||
_, _ = fmt.Fprintln(inv.Stdout, " "+DefaultStyles.Prompt.String()+DefaultStyles.Field.Render(richParameterOption.Name))
|
||||
value = richParameterOption.Value
|
||||
}
|
||||
} else {
|
||||
@@ -112,7 +70,7 @@ func RichParameter(inv *clibase.Invocation, templateVersionParameter codersdk.Te
|
||||
text += ":"
|
||||
|
||||
value, err = Prompt(inv, PromptOptions{
|
||||
Text: Styles.Bold.Render(text),
|
||||
Text: DefaultStyles.Bold.Render(text),
|
||||
Validate: func(value string) error {
|
||||
return validateRichPrompt(value, templateVersionParameter)
|
||||
},
|
||||
|
||||
+8
-8
@@ -55,21 +55,21 @@ func Prompt(inv *clibase.Invocation, opts PromptOptions) (string, error) {
|
||||
}
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprint(inv.Stdout, Styles.FocusedPrompt.String()+opts.Text+" ")
|
||||
_, _ = fmt.Fprint(inv.Stdout, DefaultStyles.FocusedPrompt.String()+opts.Text+" ")
|
||||
if opts.IsConfirm {
|
||||
if len(opts.Default) == 0 {
|
||||
opts.Default = ConfirmYes
|
||||
}
|
||||
renderedYes := Styles.Placeholder.Render(ConfirmYes)
|
||||
renderedNo := Styles.Placeholder.Render(ConfirmNo)
|
||||
renderedYes := DefaultStyles.Placeholder.Render(ConfirmYes)
|
||||
renderedNo := DefaultStyles.Placeholder.Render(ConfirmNo)
|
||||
if opts.Default == ConfirmYes {
|
||||
renderedYes = Styles.Bold.Render(ConfirmYes)
|
||||
renderedYes = DefaultStyles.Bold.Render(ConfirmYes)
|
||||
} else {
|
||||
renderedNo = Styles.Bold.Render(ConfirmNo)
|
||||
renderedNo = DefaultStyles.Bold.Render(ConfirmNo)
|
||||
}
|
||||
_, _ = fmt.Fprint(inv.Stdout, Styles.Placeholder.Render("("+renderedYes+Styles.Placeholder.Render("/"+renderedNo+Styles.Placeholder.Render(") "))))
|
||||
_, _ = fmt.Fprint(inv.Stdout, DefaultStyles.Placeholder.Render("("+renderedYes+DefaultStyles.Placeholder.Render("/"+renderedNo+DefaultStyles.Placeholder.Render(") "))))
|
||||
} else if opts.Default != "" {
|
||||
_, _ = fmt.Fprint(inv.Stdout, Styles.Placeholder.Render("("+opts.Default+") "))
|
||||
_, _ = fmt.Fprint(inv.Stdout, DefaultStyles.Placeholder.Render("("+opts.Default+") "))
|
||||
}
|
||||
interrupt := make(chan os.Signal, 1)
|
||||
|
||||
@@ -126,7 +126,7 @@ func Prompt(inv *clibase.Invocation, opts PromptOptions) (string, error) {
|
||||
if opts.Validate != nil {
|
||||
err := opts.Validate(line)
|
||||
if err != nil {
|
||||
_, _ = fmt.Fprintln(inv.Stdout, defaultStyles.Error.Render(err.Error()))
|
||||
_, _ = fmt.Fprintln(inv.Stdout, DefaultStyles.Error.Render(err.Error()))
|
||||
return Prompt(inv, opts)
|
||||
}
|
||||
}
|
||||
|
||||
+83
-48
@@ -7,6 +7,7 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -63,35 +64,32 @@ func ProvisionerJob(ctx context.Context, writer io.Writer, opts ProvisionerJobOp
|
||||
var (
|
||||
currentStage = "Queued"
|
||||
currentStageStartedAt = time.Now().UTC()
|
||||
didLogBetweenStage = false
|
||||
|
||||
errChan = make(chan error, 1)
|
||||
job codersdk.ProvisionerJob
|
||||
jobMutex sync.Mutex
|
||||
)
|
||||
|
||||
sw := &stageWriter{w: writer, verbose: opts.Verbose, silentLogs: opts.Silent}
|
||||
|
||||
printStage := func() {
|
||||
_, _ = fmt.Fprintf(writer, Styles.Prompt.Render("⧗")+"%s\n", Styles.Field.Render(currentStage))
|
||||
sw.Start(currentStage)
|
||||
}
|
||||
|
||||
updateStage := func(stage string, startedAt time.Time) {
|
||||
if currentStage != "" {
|
||||
prefix := ""
|
||||
if !didLogBetweenStage {
|
||||
prefix = "\033[1A\r"
|
||||
}
|
||||
mark := Styles.Checkmark
|
||||
duration := startedAt.Sub(currentStageStartedAt)
|
||||
if job.CompletedAt != nil && job.Status != codersdk.ProvisionerJobSucceeded {
|
||||
mark = Styles.Crossmark
|
||||
sw.Fail(currentStage, duration)
|
||||
} else {
|
||||
sw.Complete(currentStage, duration)
|
||||
}
|
||||
_, _ = fmt.Fprintf(writer, prefix+mark.String()+Styles.Placeholder.Render(" %s [%dms]")+"\n", currentStage, startedAt.Sub(currentStageStartedAt).Milliseconds())
|
||||
}
|
||||
if stage == "" {
|
||||
return
|
||||
}
|
||||
currentStage = stage
|
||||
currentStageStartedAt = startedAt
|
||||
didLogBetweenStage = false
|
||||
printStage()
|
||||
}
|
||||
|
||||
@@ -129,7 +127,7 @@ func ProvisionerJob(ctx context.Context, writer io.Writer, opts ProvisionerJobOp
|
||||
return
|
||||
}
|
||||
}
|
||||
_, _ = fmt.Fprintf(writer, "\033[2K\r\n"+Styles.FocusedPrompt.String()+Styles.Bold.Render("Gracefully canceling...")+"\n\n")
|
||||
_, _ = fmt.Fprintf(writer, DefaultStyles.FocusedPrompt.String()+DefaultStyles.Bold.Render("Gracefully canceling...")+"\n\n")
|
||||
err := opts.Cancel()
|
||||
if err != nil {
|
||||
errChan <- xerrors.Errorf("cancel: %w", err)
|
||||
@@ -149,30 +147,15 @@ func ProvisionerJob(ctx context.Context, writer io.Writer, opts ProvisionerJobOp
|
||||
}
|
||||
defer closer.Close()
|
||||
|
||||
var (
|
||||
// logOutput is where log output is written
|
||||
logOutput = writer
|
||||
// logBuffer is where logs are buffered if opts.Silent is true
|
||||
logBuffer = &bytes.Buffer{}
|
||||
)
|
||||
if opts.Silent {
|
||||
logOutput = logBuffer
|
||||
}
|
||||
flushLogBuffer := func() {
|
||||
if opts.Silent {
|
||||
_, _ = io.Copy(writer, logBuffer)
|
||||
}
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(opts.FetchInterval)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case err = <-errChan:
|
||||
flushLogBuffer()
|
||||
sw.Fail(currentStage, time.Since(currentStageStartedAt))
|
||||
return err
|
||||
case <-ctx.Done():
|
||||
flushLogBuffer()
|
||||
sw.Fail(currentStage, time.Since(currentStageStartedAt))
|
||||
return ctx.Err()
|
||||
case <-ticker.C:
|
||||
updateJob()
|
||||
@@ -196,37 +179,89 @@ func ProvisionerJob(ctx context.Context, writer io.Writer, opts ProvisionerJobOp
|
||||
Message: job.Error,
|
||||
Code: job.ErrorCode,
|
||||
}
|
||||
sw.Fail(currentStage, time.Since(currentStageStartedAt))
|
||||
jobMutex.Unlock()
|
||||
flushLogBuffer()
|
||||
return err
|
||||
}
|
||||
|
||||
output := ""
|
||||
switch log.Level {
|
||||
case codersdk.LogLevelTrace, codersdk.LogLevelDebug:
|
||||
if !opts.Verbose {
|
||||
continue
|
||||
}
|
||||
output = Styles.Placeholder.Render(log.Output)
|
||||
case codersdk.LogLevelError:
|
||||
output = defaultStyles.Error.Render(log.Output)
|
||||
case codersdk.LogLevelWarn:
|
||||
output = Styles.Warn.Render(log.Output)
|
||||
case codersdk.LogLevelInfo:
|
||||
output = log.Output
|
||||
}
|
||||
|
||||
jobMutex.Lock()
|
||||
if log.Stage != currentStage && log.Stage != "" {
|
||||
updateStage(log.Stage, log.CreatedAt)
|
||||
jobMutex.Unlock()
|
||||
continue
|
||||
}
|
||||
_, _ = fmt.Fprintf(logOutput, "%s %s\n", Styles.Placeholder.Render(" "), output)
|
||||
if !opts.Silent {
|
||||
didLogBetweenStage = true
|
||||
}
|
||||
sw.Log(log.CreatedAt, log.Level, log.Output)
|
||||
jobMutex.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type stageWriter struct {
|
||||
w io.Writer
|
||||
verbose bool
|
||||
silentLogs bool
|
||||
logBuf bytes.Buffer
|
||||
}
|
||||
|
||||
func (s *stageWriter) Start(stage string) {
|
||||
_, _ = fmt.Fprintf(s.w, "==> ⧗ %s\n", stage)
|
||||
}
|
||||
|
||||
func (s *stageWriter) Complete(stage string, duration time.Duration) {
|
||||
s.end(stage, duration, true)
|
||||
}
|
||||
|
||||
func (s *stageWriter) Fail(stage string, duration time.Duration) {
|
||||
s.flushLogs()
|
||||
s.end(stage, duration, false)
|
||||
}
|
||||
|
||||
//nolint:revive
|
||||
func (s *stageWriter) end(stage string, duration time.Duration, ok bool) {
|
||||
s.logBuf.Reset()
|
||||
|
||||
mark := "✔"
|
||||
if !ok {
|
||||
mark = "✘"
|
||||
}
|
||||
if duration < 0 {
|
||||
duration = 0
|
||||
}
|
||||
_, _ = fmt.Fprintf(s.w, "=== %s %s [%dms]\n", mark, stage, duration.Milliseconds())
|
||||
}
|
||||
|
||||
func (s *stageWriter) Log(createdAt time.Time, level codersdk.LogLevel, line string) {
|
||||
w := s.w
|
||||
if s.silentLogs {
|
||||
w = &s.logBuf
|
||||
}
|
||||
|
||||
render := func(s ...string) string { return strings.Join(s, " ") }
|
||||
|
||||
var lines []string
|
||||
if !createdAt.IsZero() {
|
||||
lines = append(lines, createdAt.Local().Format("2006-01-02 15:04:05.000Z07:00"))
|
||||
}
|
||||
lines = append(lines, line)
|
||||
|
||||
switch level {
|
||||
case codersdk.LogLevelTrace, codersdk.LogLevelDebug:
|
||||
if !s.verbose {
|
||||
return
|
||||
}
|
||||
render = DefaultStyles.Placeholder.Render
|
||||
case codersdk.LogLevelError:
|
||||
render = DefaultStyles.Error.Render
|
||||
case codersdk.LogLevelWarn:
|
||||
render = DefaultStyles.Warn.Render
|
||||
case codersdk.LogLevelInfo:
|
||||
}
|
||||
_, _ = fmt.Fprintf(w, "%s\n", render(lines...))
|
||||
}
|
||||
|
||||
func (s *stageWriter) flushLogs() {
|
||||
if s.silentLogs {
|
||||
_, _ = io.Copy(s.w, &s.logBuf)
|
||||
}
|
||||
s.logBuf.Reset()
|
||||
}
|
||||
|
||||
@@ -82,6 +82,8 @@ func TestProvisionerJob(t *testing.T) {
|
||||
// This cannot be ran in parallel because it uses a signal.
|
||||
// nolint:paralleltest
|
||||
t.Run("Cancel", func(t *testing.T) {
|
||||
t.Skip("This test issues an interrupt signal which will propagate to the test runner.")
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
// Sending interrupt signal isn't supported on Windows!
|
||||
t.SkipNow()
|
||||
|
||||
+25
-16
@@ -50,6 +50,7 @@ func WorkspaceResources(writer io.Writer, resources []codersdk.WorkspaceResource
|
||||
row := table.Row{"Resource"}
|
||||
if !options.HideAgentState {
|
||||
row = append(row, "Status")
|
||||
row = append(row, "Health")
|
||||
row = append(row, "Version")
|
||||
}
|
||||
if !options.HideAccess {
|
||||
@@ -78,7 +79,8 @@ func WorkspaceResources(writer io.Writer, resources []codersdk.WorkspaceResource
|
||||
|
||||
// Display a line for the resource.
|
||||
tableWriter.AppendRow(table.Row{
|
||||
Styles.Bold.Render(resourceAddress),
|
||||
DefaultStyles.Bold.Render(resourceAddress),
|
||||
"",
|
||||
"",
|
||||
"",
|
||||
})
|
||||
@@ -93,20 +95,20 @@ func WorkspaceResources(writer io.Writer, resources []codersdk.WorkspaceResource
|
||||
fmt.Sprintf("%s─ %s (%s, %s)", pipe, agent.Name, agent.OperatingSystem, agent.Architecture),
|
||||
}
|
||||
if !options.HideAgentState {
|
||||
var agentStatus string
|
||||
var agentVersion string
|
||||
var agentStatus, agentHealth, agentVersion string
|
||||
if !options.HideAgentState {
|
||||
agentStatus = renderAgentStatus(agent)
|
||||
agentHealth = renderAgentHealth(agent)
|
||||
agentVersion = renderAgentVersion(agent.Version, options.ServerVersion)
|
||||
}
|
||||
row = append(row, agentStatus, agentVersion)
|
||||
row = append(row, agentStatus, agentHealth, agentVersion)
|
||||
}
|
||||
if !options.HideAccess {
|
||||
sshCommand := "coder ssh " + options.WorkspaceName
|
||||
if totalAgents > 1 {
|
||||
sshCommand += "." + agent.Name
|
||||
}
|
||||
sshCommand = Styles.Code.Render(sshCommand)
|
||||
sshCommand = DefaultStyles.Code.Render(sshCommand)
|
||||
row = append(row, sshCommand)
|
||||
}
|
||||
tableWriter.AppendRow(row)
|
||||
@@ -121,36 +123,43 @@ func renderAgentStatus(agent codersdk.WorkspaceAgent) string {
|
||||
switch agent.Status {
|
||||
case codersdk.WorkspaceAgentConnecting:
|
||||
since := database.Now().Sub(agent.CreatedAt)
|
||||
return Styles.Warn.Render("⦾ connecting") + " " +
|
||||
Styles.Placeholder.Render("["+strconv.Itoa(int(since.Seconds()))+"s]")
|
||||
return DefaultStyles.Warn.Render("⦾ connecting") + " " +
|
||||
DefaultStyles.Placeholder.Render("["+strconv.Itoa(int(since.Seconds()))+"s]")
|
||||
case codersdk.WorkspaceAgentDisconnected:
|
||||
since := database.Now().Sub(*agent.DisconnectedAt)
|
||||
return Styles.Error.Render("⦾ disconnected") + " " +
|
||||
Styles.Placeholder.Render("["+strconv.Itoa(int(since.Seconds()))+"s]")
|
||||
return DefaultStyles.Error.Render("⦾ disconnected") + " " +
|
||||
DefaultStyles.Placeholder.Render("["+strconv.Itoa(int(since.Seconds()))+"s]")
|
||||
case codersdk.WorkspaceAgentTimeout:
|
||||
since := database.Now().Sub(agent.CreatedAt)
|
||||
return fmt.Sprintf(
|
||||
"%s %s",
|
||||
Styles.Warn.Render("⦾ timeout"),
|
||||
Styles.Placeholder.Render("["+strconv.Itoa(int(since.Seconds()))+"s]"),
|
||||
DefaultStyles.Warn.Render("⦾ timeout"),
|
||||
DefaultStyles.Placeholder.Render("["+strconv.Itoa(int(since.Seconds()))+"s]"),
|
||||
)
|
||||
case codersdk.WorkspaceAgentConnected:
|
||||
return Styles.Keyword.Render("⦿ connected")
|
||||
return DefaultStyles.Keyword.Render("⦿ connected")
|
||||
default:
|
||||
return Styles.Warn.Render("○ unknown")
|
||||
return DefaultStyles.Warn.Render("○ unknown")
|
||||
}
|
||||
}
|
||||
|
||||
func renderAgentHealth(agent codersdk.WorkspaceAgent) string {
|
||||
if agent.Health.Healthy {
|
||||
return DefaultStyles.Keyword.Render("✔ healthy")
|
||||
}
|
||||
return DefaultStyles.Error.Render("✘ " + agent.Health.Reason)
|
||||
}
|
||||
|
||||
func renderAgentVersion(agentVersion, serverVersion string) string {
|
||||
if agentVersion == "" {
|
||||
agentVersion = "(unknown)"
|
||||
}
|
||||
if !semver.IsValid(serverVersion) || !semver.IsValid(agentVersion) {
|
||||
return Styles.Placeholder.Render(agentVersion)
|
||||
return DefaultStyles.Placeholder.Render(agentVersion)
|
||||
}
|
||||
outdated := semver.Compare(agentVersion, serverVersion) < 0
|
||||
if outdated {
|
||||
return Styles.Warn.Render(agentVersion + " (outdated)")
|
||||
return DefaultStyles.Warn.Render(agentVersion + " (outdated)")
|
||||
}
|
||||
return Styles.Keyword.Render(agentVersion)
|
||||
return DefaultStyles.Keyword.Render(agentVersion)
|
||||
}
|
||||
|
||||
@@ -29,6 +29,7 @@ func TestWorkspaceResources(t *testing.T) {
|
||||
LifecycleState: codersdk.WorkspaceAgentLifecycleCreated,
|
||||
Architecture: "amd64",
|
||||
OperatingSystem: "linux",
|
||||
Health: codersdk.WorkspaceAgentHealth{Healthy: true},
|
||||
}},
|
||||
}}, cliui.WorkspaceResourcesOptions{
|
||||
WorkspaceName: "example",
|
||||
@@ -65,6 +66,7 @@ func TestWorkspaceResources(t *testing.T) {
|
||||
Name: "dev",
|
||||
OperatingSystem: "linux",
|
||||
Architecture: "amd64",
|
||||
Health: codersdk.WorkspaceAgentHealth{Healthy: true},
|
||||
}},
|
||||
}, {
|
||||
Transition: codersdk.WorkspaceTransitionStart,
|
||||
@@ -76,6 +78,7 @@ func TestWorkspaceResources(t *testing.T) {
|
||||
Name: "go",
|
||||
Architecture: "amd64",
|
||||
OperatingSystem: "linux",
|
||||
Health: codersdk.WorkspaceAgentHealth{Healthy: true},
|
||||
}, {
|
||||
DisconnectedAt: &disconnected,
|
||||
Status: codersdk.WorkspaceAgentDisconnected,
|
||||
@@ -83,6 +86,10 @@ func TestWorkspaceResources(t *testing.T) {
|
||||
Name: "postgres",
|
||||
Architecture: "amd64",
|
||||
OperatingSystem: "linux",
|
||||
Health: codersdk.WorkspaceAgentHealth{
|
||||
Healthy: false,
|
||||
Reason: "agent has lost connection",
|
||||
},
|
||||
}},
|
||||
}}, cliui.WorkspaceResourcesOptions{
|
||||
WorkspaceName: "dev",
|
||||
@@ -94,6 +101,12 @@ func TestWorkspaceResources(t *testing.T) {
|
||||
}()
|
||||
ptty.ExpectMatch("google_compute_disk.root")
|
||||
ptty.ExpectMatch("google_compute_instance.dev")
|
||||
ptty.ExpectMatch("healthy")
|
||||
ptty.ExpectMatch("coder ssh dev.dev")
|
||||
ptty.ExpectMatch("kubernetes_pod.dev")
|
||||
ptty.ExpectMatch("healthy")
|
||||
ptty.ExpectMatch("coder ssh dev.go")
|
||||
ptty.ExpectMatch("agent has lost connection")
|
||||
ptty.ExpectMatch("coder ssh dev.postgres")
|
||||
<-done
|
||||
})
|
||||
|
||||
+6
-1
@@ -70,17 +70,22 @@ type RichSelectOptions struct {
|
||||
// RichSelect displays a list of user options including name and description.
|
||||
func RichSelect(inv *clibase.Invocation, richOptions RichSelectOptions) (*codersdk.TemplateVersionParameterOption, error) {
|
||||
opts := make([]string, len(richOptions.Options))
|
||||
var defaultOpt string
|
||||
for i, option := range richOptions.Options {
|
||||
line := option.Name
|
||||
if len(option.Description) > 0 {
|
||||
line += ": " + option.Description
|
||||
}
|
||||
opts[i] = line
|
||||
|
||||
if option.Value == richOptions.Default {
|
||||
defaultOpt = line
|
||||
}
|
||||
}
|
||||
|
||||
selected, err := Select(inv, SelectOptions{
|
||||
Options: opts,
|
||||
Default: richOptions.Default,
|
||||
Default: defaultOpt,
|
||||
Size: richOptions.Size,
|
||||
HideSearch: richOptions.HideSearch,
|
||||
})
|
||||
|
||||
+24
-9
@@ -188,32 +188,39 @@ func DisplayTable(out any, sort string, filterColumns []string) (string, error)
|
||||
// returned. If the table tag is malformed, an error is returned.
|
||||
//
|
||||
// The returned name is transformed from "snake_case" to "normal text".
|
||||
func parseTableStructTag(field reflect.StructField) (name string, defaultSort, recursive bool, err error) {
|
||||
func parseTableStructTag(field reflect.StructField) (name string, defaultSort, recursive bool, skipParentName bool, err error) {
|
||||
tags, err := structtag.Parse(string(field.Tag))
|
||||
if err != nil {
|
||||
return "", false, false, xerrors.Errorf("parse struct field tag %q: %w", string(field.Tag), err)
|
||||
return "", false, false, false, xerrors.Errorf("parse struct field tag %q: %w", string(field.Tag), err)
|
||||
}
|
||||
|
||||
tag, err := tags.Get("table")
|
||||
if err != nil || tag.Name == "-" {
|
||||
// tags.Get only returns an error if the tag is not found.
|
||||
return "", false, false, nil
|
||||
return "", false, false, false, nil
|
||||
}
|
||||
|
||||
defaultSortOpt := false
|
||||
recursiveOpt := false
|
||||
skipParentNameOpt := false
|
||||
for _, opt := range tag.Options {
|
||||
switch opt {
|
||||
case "default_sort":
|
||||
defaultSortOpt = true
|
||||
case "recursive":
|
||||
recursiveOpt = true
|
||||
case "recursive_inline":
|
||||
// recursive_inline is a helper to make recursive tables look nicer.
|
||||
// It skips prefixing the parent name to the child name. If you do this,
|
||||
// make sure the child name is unique across all nested structs in the parent.
|
||||
recursiveOpt = true
|
||||
skipParentNameOpt = true
|
||||
default:
|
||||
return "", false, false, xerrors.Errorf("unknown option %q in struct field tag", opt)
|
||||
return "", false, false, false, xerrors.Errorf("unknown option %q in struct field tag", opt)
|
||||
}
|
||||
}
|
||||
|
||||
return strings.ReplaceAll(tag.Name, "_", " "), defaultSortOpt, recursiveOpt, nil
|
||||
return strings.ReplaceAll(tag.Name, "_", " "), defaultSortOpt, recursiveOpt, skipParentNameOpt, nil
|
||||
}
|
||||
|
||||
func isStructOrStructPointer(t reflect.Type) bool {
|
||||
@@ -235,7 +242,7 @@ func typeToTableHeaders(t reflect.Type) ([]string, string, error) {
|
||||
defaultSortName := ""
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
name, defaultSort, recursive, err := parseTableStructTag(field)
|
||||
name, defaultSort, recursive, skip, err := parseTableStructTag(field)
|
||||
if err != nil {
|
||||
return nil, "", xerrors.Errorf("parse struct tags for field %q in type %q: %w", field.Name, t.String(), err)
|
||||
}
|
||||
@@ -260,7 +267,11 @@ func typeToTableHeaders(t reflect.Type) ([]string, string, error) {
|
||||
return nil, "", xerrors.Errorf("get child field header names for field %q in type %q: %w", field.Name, fieldType.String(), err)
|
||||
}
|
||||
for _, childName := range childNames {
|
||||
headers = append(headers, fmt.Sprintf("%s %s", name, childName))
|
||||
fullName := fmt.Sprintf("%s %s", name, childName)
|
||||
if skip {
|
||||
fullName = childName
|
||||
}
|
||||
headers = append(headers, fullName)
|
||||
}
|
||||
continue
|
||||
}
|
||||
@@ -296,7 +307,7 @@ func valueToTableMap(val reflect.Value) (map[string]any, error) {
|
||||
for i := 0; i < val.NumField(); i++ {
|
||||
field := val.Type().Field(i)
|
||||
fieldVal := val.Field(i)
|
||||
name, _, recursive, err := parseTableStructTag(field)
|
||||
name, _, recursive, skip, err := parseTableStructTag(field)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parse struct tags for field %q in type %T: %w", field.Name, val, err)
|
||||
}
|
||||
@@ -318,7 +329,11 @@ func valueToTableMap(val reflect.Value) (map[string]any, error) {
|
||||
return nil, xerrors.Errorf("get child field values for field %q in type %q: %w", field.Name, fieldType.String(), err)
|
||||
}
|
||||
for childName, childValue := range childMap {
|
||||
row[fmt.Sprintf("%s %s", name, childName)] = childValue
|
||||
fullName := fmt.Sprintf("%s %s", name, childName)
|
||||
if skip {
|
||||
fullName = childName
|
||||
}
|
||||
row[fullName] = childValue
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -49,6 +49,11 @@ type tableTest3 struct {
|
||||
Sub tableTest2 `table:"inner,recursive,default_sort"`
|
||||
}
|
||||
|
||||
type tableTest4 struct {
|
||||
Inline tableTest2 `table:"ignored,recursive_inline"`
|
||||
SortField string `table:"sort_field,default_sort"`
|
||||
}
|
||||
|
||||
func Test_DisplayTable(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -188,6 +193,31 @@ foo foo1 foo3 2022-08-02T15:49:10Z
|
||||
compareTables(t, expected, out)
|
||||
})
|
||||
|
||||
t.Run("Inline", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
expected := `
|
||||
NAME AGE
|
||||
Alice 25
|
||||
`
|
||||
|
||||
inlineIn := []tableTest4{
|
||||
{
|
||||
Inline: tableTest2{
|
||||
Name: stringWrapper{
|
||||
str: "Alice",
|
||||
},
|
||||
Age: 25,
|
||||
NotIncluded: "IgnoreMe",
|
||||
},
|
||||
},
|
||||
}
|
||||
out, err := cliui.DisplayTable(inlineIn, "", []string{"name", "age"})
|
||||
log.Println("rendered table:\n" + out)
|
||||
require.NoError(t, err)
|
||||
compareTables(t, expected, out)
|
||||
})
|
||||
|
||||
// This test ensures that safeties against invalid use of `table` tags
|
||||
// causes errors (even without data).
|
||||
t.Run("Errors", func(t *testing.T) {
|
||||
|
||||
+5
-1
@@ -125,5 +125,9 @@ func read(path string) ([]byte, error) {
|
||||
}
|
||||
|
||||
func DefaultDir() string {
|
||||
return configdir.LocalConfig("coderv2")
|
||||
configDir := configdir.LocalConfig("coderv2")
|
||||
if dir := os.Getenv("CLIDOCGEN_CONFIG_DIRECTORY"); dir != "" {
|
||||
configDir = dir
|
||||
}
|
||||
return configDir
|
||||
}
|
||||
|
||||
+150
-30
@@ -45,7 +45,9 @@ const (
|
||||
// sshConfigOptions represents options that can be stored and read
|
||||
// from the coder config in ~/.ssh/coder.
|
||||
type sshConfigOptions struct {
|
||||
sshOptions []string
|
||||
waitEnum string
|
||||
userHostPrefix string
|
||||
sshOptions []string
|
||||
}
|
||||
|
||||
// addOptions expects options in the form of "option=value" or "option value".
|
||||
@@ -62,7 +64,7 @@ func (o *sshConfigOptions) addOptions(options ...string) error {
|
||||
}
|
||||
|
||||
func (o *sshConfigOptions) addOption(option string) error {
|
||||
key, _, err := codersdk.ParseSSHConfigOption(option)
|
||||
key, value, err := codersdk.ParseSSHConfigOption(option)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -77,11 +79,20 @@ func (o *sshConfigOptions) addOption(option string) error {
|
||||
continue
|
||||
}
|
||||
if strings.EqualFold(existingKey, key) {
|
||||
o.sshOptions[i] = option
|
||||
if value == "" {
|
||||
// Delete existing option.
|
||||
o.sshOptions = append(o.sshOptions[:i], o.sshOptions[i+1:]...)
|
||||
} else {
|
||||
// Override existing option.
|
||||
o.sshOptions[i] = option
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
o.sshOptions = append(o.sshOptions, option)
|
||||
// Only append the option if it is not empty.
|
||||
if value != "" {
|
||||
o.sshOptions = append(o.sshOptions, option)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -91,10 +102,19 @@ func (o sshConfigOptions) equal(other sshConfigOptions) bool {
|
||||
sort.Strings(opt1)
|
||||
opt2 := slices.Clone(other.sshOptions)
|
||||
sort.Strings(opt2)
|
||||
return slices.Equal(opt1, opt2)
|
||||
if !slices.Equal(opt1, opt2) {
|
||||
return false
|
||||
}
|
||||
return o.waitEnum == other.waitEnum && o.userHostPrefix == other.userHostPrefix
|
||||
}
|
||||
|
||||
func (o sshConfigOptions) asList() (list []string) {
|
||||
if o.waitEnum != "auto" {
|
||||
list = append(list, fmt.Sprintf("wait: %s", o.waitEnum))
|
||||
}
|
||||
if o.userHostPrefix != "" {
|
||||
list = append(list, fmt.Sprintf("ssh-host-prefix: %s", o.userHostPrefix))
|
||||
}
|
||||
for _, opt := range o.sshOptions {
|
||||
list = append(list, fmt.Sprintf("ssh-option: %s", opt))
|
||||
}
|
||||
@@ -169,14 +189,16 @@ func sshPrepareWorkspaceConfigs(ctx context.Context, client *codersdk.Client) (r
|
||||
}
|
||||
}
|
||||
|
||||
//nolint:gocyclo
|
||||
func (r *RootCmd) configSSH() *clibase.Cmd {
|
||||
var (
|
||||
sshConfigFile string
|
||||
sshConfigOpts sshConfigOptions
|
||||
usePreviousOpts bool
|
||||
dryRun bool
|
||||
skipProxyCommand bool
|
||||
userHostPrefix string
|
||||
sshConfigFile string
|
||||
sshConfigOpts sshConfigOptions
|
||||
usePreviousOpts bool
|
||||
dryRun bool
|
||||
skipProxyCommand bool
|
||||
forceUnixSeparators bool
|
||||
coderCliPath string
|
||||
)
|
||||
client := new(codersdk.Client)
|
||||
cmd := &clibase.Cmd{
|
||||
@@ -198,6 +220,12 @@ func (r *RootCmd) configSSH() *clibase.Cmd {
|
||||
r.InitClient(client),
|
||||
),
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
if sshConfigOpts.waitEnum != "auto" && skipProxyCommand {
|
||||
// The wait option is applied to the ProxyCommand. If the user
|
||||
// specifies skip-proxy-command, then wait cannot be applied.
|
||||
return xerrors.Errorf("cannot specify both --skip-proxy-command and --wait")
|
||||
}
|
||||
|
||||
recvWorkspaceConfigs := sshPrepareWorkspaceConfigs(inv.Context(), client)
|
||||
|
||||
out := inv.Stdout
|
||||
@@ -206,17 +234,23 @@ func (r *RootCmd) configSSH() *clibase.Cmd {
|
||||
// that it's possible to capture the diff.
|
||||
out = inv.Stderr
|
||||
}
|
||||
coderBinary, err := currentBinPath(out)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
var err error
|
||||
coderBinary := coderCliPath
|
||||
if coderBinary == "" {
|
||||
coderBinary, err = currentBinPath(out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
escapedCoderBinary, err := sshConfigExecEscape(coderBinary)
|
||||
|
||||
escapedCoderBinary, err := sshConfigExecEscape(coderBinary, forceUnixSeparators)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("escape coder binary for ssh failed: %w", err)
|
||||
}
|
||||
|
||||
root := r.createConfig()
|
||||
escapedGlobalConfig, err := sshConfigExecEscape(string(root))
|
||||
escapedGlobalConfig, err := sshConfigExecEscape(string(root), forceUnixSeparators)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("escape global config for ssh failed: %w", err)
|
||||
}
|
||||
@@ -286,7 +320,7 @@ func (r *RootCmd) configSSH() *clibase.Cmd {
|
||||
// Selecting "no" will use the last config.
|
||||
sshConfigOpts = *lastConfig
|
||||
} else {
|
||||
changes = append(changes, "Use new SSH options")
|
||||
changes = append(changes, "Use new options")
|
||||
}
|
||||
// Only print when prompts are shown.
|
||||
if yes, _ := inv.ParsedFlags().GetBool("yes"); !yes {
|
||||
@@ -327,9 +361,9 @@ func (r *RootCmd) configSSH() *clibase.Cmd {
|
||||
coderdConfig.HostnamePrefix = "coder."
|
||||
}
|
||||
|
||||
if userHostPrefix != "" {
|
||||
if sshConfigOpts.userHostPrefix != "" {
|
||||
// Override with user flag.
|
||||
coderdConfig.HostnamePrefix = userHostPrefix
|
||||
coderdConfig.HostnamePrefix = sshConfigOpts.userHostPrefix
|
||||
}
|
||||
|
||||
// Ensure stable sorting of output.
|
||||
@@ -354,13 +388,20 @@ func (r *RootCmd) configSSH() *clibase.Cmd {
|
||||
}
|
||||
|
||||
if !skipProxyCommand {
|
||||
flags := ""
|
||||
if sshConfigOpts.waitEnum != "auto" {
|
||||
flags += " --wait=" + sshConfigOpts.waitEnum
|
||||
}
|
||||
defaultOptions = append(defaultOptions, fmt.Sprintf(
|
||||
"ProxyCommand %s --global-config %s ssh --stdio %s",
|
||||
escapedCoderBinary, escapedGlobalConfig, workspaceHostname,
|
||||
"ProxyCommand %s --global-config %s ssh --stdio%s %s",
|
||||
escapedCoderBinary, escapedGlobalConfig, flags, workspaceHostname,
|
||||
))
|
||||
}
|
||||
|
||||
var configOptions sshConfigOptions
|
||||
// Create a copy of the options so we can modify them.
|
||||
configOptions := sshConfigOpts
|
||||
configOptions.sshOptions = nil
|
||||
|
||||
// Add standard options.
|
||||
err := configOptions.addOptions(defaultOptions...)
|
||||
if err != nil {
|
||||
@@ -467,6 +508,24 @@ func (r *RootCmd) configSSH() *clibase.Cmd {
|
||||
Description: "Specifies the path to an SSH config.",
|
||||
Value: clibase.StringOf(&sshConfigFile),
|
||||
},
|
||||
{
|
||||
Flag: "coder-binary-path",
|
||||
Env: "CODER_SSH_CONFIG_BINARY_PATH",
|
||||
Default: "",
|
||||
Description: "Optionally specify the absolute path to the coder binary used in ProxyCommand. " +
|
||||
"By default, the binary invoking this command ('config ssh') is used.",
|
||||
Value: clibase.Validate(clibase.StringOf(&coderCliPath), func(value *clibase.String) error {
|
||||
if runtime.GOOS == goosWindows {
|
||||
// For some reason filepath.IsAbs() does not work on windows.
|
||||
return nil
|
||||
}
|
||||
absolute := filepath.IsAbs(value.String())
|
||||
if !absolute {
|
||||
return xerrors.Errorf("coder cli path must be an absolute path")
|
||||
}
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
{
|
||||
Flag: "ssh-option",
|
||||
FlagShorthand: "o",
|
||||
@@ -496,9 +555,29 @@ func (r *RootCmd) configSSH() *clibase.Cmd {
|
||||
},
|
||||
{
|
||||
Flag: "ssh-host-prefix",
|
||||
Env: "",
|
||||
Env: "CODER_CONFIGSSH_SSH_HOST_PREFIX",
|
||||
Description: "Override the default host prefix.",
|
||||
Value: clibase.StringOf(&userHostPrefix),
|
||||
Value: clibase.StringOf(&sshConfigOpts.userHostPrefix),
|
||||
},
|
||||
{
|
||||
Flag: "wait",
|
||||
Env: "CODER_CONFIGSSH_WAIT", // Not to be mixed with CODER_SSH_WAIT.
|
||||
Description: "Specifies whether or not to wait for the startup script to finish executing. Auto means that the agent startup script behavior configured in the workspace template is used.",
|
||||
Default: "auto",
|
||||
Value: clibase.EnumOf(&sshConfigOpts.waitEnum, "yes", "no", "auto"),
|
||||
},
|
||||
{
|
||||
Flag: "force-unix-filepaths",
|
||||
Env: "CODER_CONFIGSSH_UNIX_FILEPATHS",
|
||||
Description: "By default, 'config-ssh' uses the os path separator when writing the ssh config. " +
|
||||
"This might be an issue in Windows machine that use a unix-like shell. " +
|
||||
"This flag forces the use of unix file paths (the forward slash '/').",
|
||||
Value: clibase.BoolOf(&forceUnixSeparators),
|
||||
// On non-windows showing this command is useless because it is a noop.
|
||||
// Hide vs disable it though so if a command is copied from a Windows
|
||||
// machine to a unix machine it will still work and not throw an
|
||||
// "unknown flag" error.
|
||||
Hidden: hideForceUnixSlashes,
|
||||
},
|
||||
cliui.SkipPromptOption(),
|
||||
}
|
||||
@@ -515,12 +594,22 @@ func sshConfigWriteSectionHeader(w io.Writer, addNewline bool, o sshConfigOption
|
||||
_, _ = fmt.Fprint(w, nl+sshStartToken+"\n")
|
||||
_, _ = fmt.Fprint(w, sshConfigSectionHeader)
|
||||
_, _ = fmt.Fprint(w, sshConfigDocsHeader)
|
||||
if len(o.sshOptions) > 0 {
|
||||
_, _ = fmt.Fprint(w, sshConfigOptionsHeader)
|
||||
for _, opt := range o.sshOptions {
|
||||
_, _ = fmt.Fprintf(w, "# :%s=%s\n", "ssh-option", opt)
|
||||
}
|
||||
|
||||
var ow strings.Builder
|
||||
if o.waitEnum != "auto" {
|
||||
_, _ = fmt.Fprintf(&ow, "# :%s=%s\n", "wait", o.waitEnum)
|
||||
}
|
||||
if o.userHostPrefix != "" {
|
||||
_, _ = fmt.Fprintf(&ow, "# :%s=%s\n", "ssh-host-prefix", o.userHostPrefix)
|
||||
}
|
||||
for _, opt := range o.sshOptions {
|
||||
_, _ = fmt.Fprintf(&ow, "# :%s=%s\n", "ssh-option", opt)
|
||||
}
|
||||
if ow.Len() > 0 {
|
||||
_, _ = fmt.Fprint(w, sshConfigOptionsHeader)
|
||||
_, _ = fmt.Fprint(w, ow.String())
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprint(w, "#\n")
|
||||
}
|
||||
|
||||
@@ -529,6 +618,9 @@ func sshConfigWriteSectionEnd(w io.Writer) {
|
||||
}
|
||||
|
||||
func sshConfigParseLastOptions(r io.Reader) (o sshConfigOptions) {
|
||||
// Default values.
|
||||
o.waitEnum = "auto"
|
||||
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
line := s.Text()
|
||||
@@ -536,6 +628,10 @@ func sshConfigParseLastOptions(r io.Reader) (o sshConfigOptions) {
|
||||
line = strings.TrimPrefix(line, "# :")
|
||||
parts := strings.SplitN(line, "=", 2)
|
||||
switch parts[0] {
|
||||
case "wait":
|
||||
o.waitEnum = parts[1]
|
||||
case "ssh-host-prefix":
|
||||
o.userHostPrefix = parts[1]
|
||||
case "ssh-option":
|
||||
o.sshOptions = append(o.sshOptions, parts[1])
|
||||
default:
|
||||
@@ -670,7 +766,31 @@ func writeWithTempFileAndMove(path string, r io.Reader) (err error) {
|
||||
// - https://github.com/openssh/openssh-portable/blob/V_9_0_P1/sshconnect.c#L158-L167
|
||||
// - https://github.com/PowerShell/openssh-portable/blob/v8.1.0.0/sshconnect.c#L231-L293
|
||||
// - https://github.com/PowerShell/openssh-portable/blob/v8.1.0.0/contrib/win32/win32compat/w32fd.c#L1075-L1100
|
||||
func sshConfigExecEscape(path string) (string, error) {
|
||||
//
|
||||
// Additional Windows-specific notes:
|
||||
//
|
||||
// In some situations a Windows user could be using a unix-like shell such as
|
||||
// git bash. In these situations the coder.exe is using the windows filepath
|
||||
// separator (\), but the shell wants the unix filepath separator (/).
|
||||
// Trying to determine if the shell is unix-like is difficult, so this function
|
||||
// takes the argument 'forceUnixPath' to force the filepath to be unix-like.
|
||||
//
|
||||
// On actual unix machines, this is **always** a noop. Even if a windows
|
||||
// path is provided.
|
||||
//
|
||||
// Passing a "false" for forceUnixPath will result in the filepath separator
|
||||
// untouched from the original input.
|
||||
// ---
|
||||
// This is a control flag, and that is ok. It is a control flag
|
||||
// based on the OS of the user. Making this a different file is excessive.
|
||||
// nolint:revive
|
||||
func sshConfigExecEscape(path string, forceUnixPath bool) (string, error) {
|
||||
if forceUnixPath {
|
||||
// This is a workaround for #7639, where the filepath separator is
|
||||
// incorrectly the Windows separator (\) instead of the unix separator (/).
|
||||
path = filepath.ToSlash(path)
|
||||
}
|
||||
|
||||
// This is unlikely to ever happen, but newlines are allowed on
|
||||
// certain filesystems, but cannot be used inside ssh config.
|
||||
if strings.ContainsAny(path, "\n") {
|
||||
|
||||
@@ -12,6 +12,11 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// For golden files, always show the flag.
|
||||
hideForceUnixSlashes = false
|
||||
}
|
||||
|
||||
func Test_sshConfigSplitOnCoderSection(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -140,14 +145,14 @@ func Test_sshConfigExecEscape(t *testing.T) {
|
||||
name string
|
||||
path string
|
||||
wantErr bool
|
||||
windows bool
|
||||
}{
|
||||
{"no spaces", "simple", false, true},
|
||||
{"spaces", "path with spaces", false, true},
|
||||
{"quotes", "path with \"quotes\"", false, false},
|
||||
{"backslashes", "path with \\backslashes", false, false},
|
||||
{"tabs", "path with \ttabs", false, false},
|
||||
{"newline fails", "path with \nnewline", true, false},
|
||||
{"windows path", `C:\Program Files\Coder\bin\coder.exe`, false},
|
||||
{"no spaces", "simple", false},
|
||||
{"spaces", "path with spaces", false},
|
||||
{"quotes", "path with \"quotes\"", false},
|
||||
{"backslashes", "path with \\backslashes", false},
|
||||
{"tabs", "path with \ttabs", false},
|
||||
{"newline fails", "path with \nnewline", true},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
@@ -166,7 +171,7 @@ func Test_sshConfigExecEscape(t *testing.T) {
|
||||
err = os.WriteFile(bin, contents, 0o755) //nolint:gosec
|
||||
require.NoError(t, err)
|
||||
|
||||
escaped, err := sshConfigExecEscape(bin)
|
||||
escaped, err := sshConfigExecEscape(bin, false)
|
||||
if tt.wantErr {
|
||||
require.Error(t, err)
|
||||
return
|
||||
@@ -181,6 +186,72 @@ func Test_sshConfigExecEscape(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_sshConfigExecEscapeSeparatorForce(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
path string
|
||||
// Behavior is different on Windows
|
||||
expWindowsPath string
|
||||
expOtherPath string
|
||||
forceUnix bool
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "windows_keep_forward_slashes_with_spaces",
|
||||
// Has a space, expect quotes
|
||||
path: `C:\Program Files\Coder\bin\coder.exe`,
|
||||
expWindowsPath: `"C:\Program Files\Coder\bin\coder.exe"`,
|
||||
expOtherPath: `"C:\Program Files\Coder\bin\coder.exe"`,
|
||||
forceUnix: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "windows_keep_forward_slashes",
|
||||
path: `C:\ProgramFiles\Coder\bin\coder.exe`,
|
||||
expWindowsPath: `C:\ProgramFiles\Coder\bin\coder.exe`,
|
||||
expOtherPath: `C:\ProgramFiles\Coder\bin\coder.exe`,
|
||||
forceUnix: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "windows_force_unix_with_spaces",
|
||||
path: `C:\Program Files\Coder\bin\coder.exe`,
|
||||
expWindowsPath: `"C:/Program Files/Coder/bin/coder.exe"`,
|
||||
expOtherPath: `"C:\Program Files\Coder\bin\coder.exe"`,
|
||||
forceUnix: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "windows_force_unix",
|
||||
path: `C:\ProgramFiles\Coder\bin\coder.exe`,
|
||||
expWindowsPath: `C:/ProgramFiles/Coder/bin/coder.exe`,
|
||||
expOtherPath: `C:\ProgramFiles\Coder\bin\coder.exe`,
|
||||
forceUnix: true,
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
found, err := sshConfigExecEscape(tt.path, tt.forceUnix)
|
||||
if tt.wantErr {
|
||||
require.Error(t, err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
if runtime.GOOS == "windows" {
|
||||
require.Equal(t, tt.expWindowsPath, found, "(Windows) expected path")
|
||||
} else {
|
||||
// this is a noop on non-windows!
|
||||
require.Equal(t, tt.expOtherPath, found, "(Non-Windows) expected path")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_sshConfigOptions_addOption(t *testing.T) {
|
||||
t.Parallel()
|
||||
testCases := []struct {
|
||||
|
||||
@@ -0,0 +1,5 @@
|
||||
//go:build !windows
|
||||
|
||||
package cli
|
||||
|
||||
var hideForceUnixSlashes = true
|
||||
+56
-12
@@ -66,6 +66,7 @@ func TestConfigSSH(t *testing.T) {
|
||||
|
||||
const hostname = "test-coder."
|
||||
const expectedKey = "ConnectionAttempts"
|
||||
const removeKey = "ConnectionTimeout"
|
||||
client := coderdtest.New(t, &coderdtest.Options{
|
||||
IncludeProvisionerDaemon: true,
|
||||
ConfigSSH: codersdk.SSHConfigResponse{
|
||||
@@ -73,6 +74,7 @@ func TestConfigSSH(t *testing.T) {
|
||||
SSHConfigOptions: map[string]string{
|
||||
// Something we can test for
|
||||
expectedKey: "3",
|
||||
removeKey: "",
|
||||
},
|
||||
},
|
||||
})
|
||||
@@ -176,6 +178,7 @@ func TestConfigSSH(t *testing.T) {
|
||||
fileContents, err := os.ReadFile(sshConfigFile)
|
||||
require.NoError(t, err, "read ssh config file")
|
||||
require.Contains(t, string(fileContents), expectedKey, "ssh config file contains expected key")
|
||||
require.NotContains(t, string(fileContents), removeKey, "ssh config file should not have removed key")
|
||||
|
||||
home := filepath.Dir(filepath.Dir(sshConfigFile))
|
||||
// #nosec
|
||||
@@ -213,18 +216,20 @@ func TestConfigSSH_FileWriteAndOptionsFlow(t *testing.T) {
|
||||
ssh string
|
||||
}
|
||||
type wantConfig struct {
|
||||
ssh string
|
||||
ssh string
|
||||
regexMatch string
|
||||
}
|
||||
type match struct {
|
||||
match, write string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args []string
|
||||
matches []match
|
||||
writeConfig writeConfig
|
||||
wantConfig wantConfig
|
||||
wantErr bool
|
||||
name string
|
||||
args []string
|
||||
matches []match
|
||||
writeConfig writeConfig
|
||||
wantConfig wantConfig
|
||||
wantErr bool
|
||||
echoResponse *echo.Responses
|
||||
}{
|
||||
{
|
||||
name: "Config file is created",
|
||||
@@ -478,12 +483,32 @@ func TestConfigSSH_FileWriteAndOptionsFlow(t *testing.T) {
|
||||
},
|
||||
args: []string{"--yes"},
|
||||
},
|
||||
{
|
||||
name: "Serialize supported flags",
|
||||
wantConfig: wantConfig{
|
||||
ssh: strings.Join([]string{
|
||||
headerStart,
|
||||
"# Last config-ssh options:",
|
||||
"# :wait=yes",
|
||||
"# :ssh-host-prefix=coder-test.",
|
||||
"#",
|
||||
headerEnd,
|
||||
"",
|
||||
}, "\n"),
|
||||
},
|
||||
args: []string{
|
||||
"--yes",
|
||||
"--wait=yes",
|
||||
"--ssh-host-prefix", "coder-test.",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Do not prompt for new options when prev opts flag is set",
|
||||
writeConfig: writeConfig{
|
||||
ssh: strings.Join([]string{
|
||||
headerStart,
|
||||
"# Last config-ssh options:",
|
||||
"# :wait=no",
|
||||
"# :ssh-option=ForwardAgent=yes",
|
||||
"#",
|
||||
headerEnd,
|
||||
@@ -494,6 +519,7 @@ func TestConfigSSH_FileWriteAndOptionsFlow(t *testing.T) {
|
||||
ssh: strings.Join([]string{
|
||||
headerStart,
|
||||
"# Last config-ssh options:",
|
||||
"# :wait=no",
|
||||
"# :ssh-option=ForwardAgent=yes",
|
||||
"#",
|
||||
headerEnd,
|
||||
@@ -555,6 +581,20 @@ func TestConfigSSH_FileWriteAndOptionsFlow(t *testing.T) {
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Custom CLI Path",
|
||||
args: []string{
|
||||
"-y", "--coder-binary-path", "/foo/bar/coder",
|
||||
},
|
||||
wantErr: false,
|
||||
echoResponse: &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: echo.ProvisionApplyWithAgent(""),
|
||||
},
|
||||
wantConfig: wantConfig{
|
||||
regexMatch: "ProxyCommand /foo/bar/coder",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
@@ -564,7 +604,7 @@ func TestConfigSSH_FileWriteAndOptionsFlow(t *testing.T) {
|
||||
var (
|
||||
client = coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user = coderdtest.CreateFirstUser(t, client)
|
||||
version = coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
version = coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, tt.echoResponse)
|
||||
_ = coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
project = coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
workspace = coderdtest.CreateWorkspace(t, client, user.OrganizationID, project.ID)
|
||||
@@ -586,8 +626,7 @@ func TestConfigSSH_FileWriteAndOptionsFlow(t *testing.T) {
|
||||
clitest.SetupConfig(t, client, root)
|
||||
|
||||
pty := ptytest.New(t)
|
||||
inv.Stdin = pty.Input()
|
||||
inv.Stdout = pty.Output()
|
||||
pty.Attach(inv)
|
||||
done := tGo(t, func() {
|
||||
err := inv.Run()
|
||||
if !tt.wantErr {
|
||||
@@ -604,9 +643,14 @@ func TestConfigSSH_FileWriteAndOptionsFlow(t *testing.T) {
|
||||
|
||||
<-done
|
||||
|
||||
if tt.wantConfig.ssh != "" {
|
||||
if tt.wantConfig.ssh != "" || tt.wantConfig.regexMatch != "" {
|
||||
got := sshConfigFileRead(t, sshConfigName)
|
||||
assert.Equal(t, tt.wantConfig.ssh, got)
|
||||
if tt.wantConfig.ssh != "" {
|
||||
assert.Equal(t, tt.wantConfig.ssh, got)
|
||||
}
|
||||
if tt.wantConfig.regexMatch != "" {
|
||||
assert.Regexp(t, tt.wantConfig.regexMatch, got, "regex match")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
//go:build windows
|
||||
|
||||
package cli
|
||||
|
||||
// Must be a var for unit tests to conform behavior
|
||||
var hideForceUnixSlashes = false
|
||||
+55
-105
@@ -6,6 +6,7 @@ import (
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/exp/slices"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
@@ -17,7 +18,6 @@ import (
|
||||
|
||||
func (r *RootCmd) create() *clibase.Cmd {
|
||||
var (
|
||||
parameterFile string
|
||||
richParameterFile string
|
||||
templateName string
|
||||
startAt string
|
||||
@@ -29,15 +29,25 @@ func (r *RootCmd) create() *clibase.Cmd {
|
||||
Annotations: workspaceCommand,
|
||||
Use: "create [name]",
|
||||
Short: "Create a workspace",
|
||||
Middleware: clibase.Chain(r.InitClient(client)),
|
||||
Long: formatExamples(
|
||||
example{
|
||||
Description: "Create a workspace for another user (if you have permission)",
|
||||
Command: "coder create <username>/<workspace_name>",
|
||||
},
|
||||
),
|
||||
Middleware: clibase.Chain(r.InitClient(client)),
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
organization, err := CurrentOrganization(inv, client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
workspaceOwner := codersdk.Me
|
||||
if len(inv.Args) >= 1 {
|
||||
workspaceName = inv.Args[0]
|
||||
workspaceOwner, workspaceName, err = splitNamedWorkspace(inv.Args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if workspaceName == "" {
|
||||
@@ -56,14 +66,14 @@ func (r *RootCmd) create() *clibase.Cmd {
|
||||
}
|
||||
}
|
||||
|
||||
_, err = client.WorkspaceByOwnerAndName(inv.Context(), codersdk.Me, workspaceName, codersdk.WorkspaceOptions{})
|
||||
_, err = client.WorkspaceByOwnerAndName(inv.Context(), workspaceOwner, workspaceName, codersdk.WorkspaceOptions{})
|
||||
if err == nil {
|
||||
return xerrors.Errorf("A workspace already exists named %q!", workspaceName)
|
||||
}
|
||||
|
||||
var template codersdk.Template
|
||||
if templateName == "" {
|
||||
_, _ = fmt.Fprintln(inv.Stdout, cliui.Styles.Wrap.Render("Select a template below to preview the provisioned infrastructure:"))
|
||||
_, _ = fmt.Fprintln(inv.Stdout, cliui.DefaultStyles.Wrap.Render("Select a template below to preview the provisioned infrastructure:"))
|
||||
|
||||
templates, err := client.TemplatesByOrganization(inv.Context(), organization.ID)
|
||||
if err != nil {
|
||||
@@ -81,7 +91,7 @@ func (r *RootCmd) create() *clibase.Cmd {
|
||||
templateName := template.Name
|
||||
|
||||
if template.ActiveUserCount > 0 {
|
||||
templateName += cliui.Styles.Placeholder.Render(
|
||||
templateName += cliui.DefaultStyles.Placeholder.Render(
|
||||
fmt.Sprintf(
|
||||
" (used by %s)",
|
||||
formatActiveDevelopers(template.ActiveUserCount),
|
||||
@@ -121,8 +131,6 @@ func (r *RootCmd) create() *clibase.Cmd {
|
||||
|
||||
buildParams, err := prepWorkspaceBuild(inv, client, prepWorkspaceBuildArgs{
|
||||
Template: template,
|
||||
ExistingParams: []codersdk.Parameter{},
|
||||
ParameterFile: parameterFile,
|
||||
RichParameterFile: richParameterFile,
|
||||
NewWorkspaceName: workspaceName,
|
||||
})
|
||||
@@ -141,16 +149,13 @@ func (r *RootCmd) create() *clibase.Cmd {
|
||||
var ttlMillis *int64
|
||||
if stopAfter > 0 {
|
||||
ttlMillis = ptr.Ref(stopAfter.Milliseconds())
|
||||
} else if template.MaxTTLMillis > 0 {
|
||||
ttlMillis = &template.MaxTTLMillis
|
||||
}
|
||||
|
||||
workspace, err := client.CreateWorkspace(inv.Context(), organization.ID, codersdk.Me, codersdk.CreateWorkspaceRequest{
|
||||
workspace, err := client.CreateWorkspace(inv.Context(), organization.ID, workspaceOwner, codersdk.CreateWorkspaceRequest{
|
||||
TemplateID: template.ID,
|
||||
Name: workspaceName,
|
||||
AutostartSchedule: schedSpec,
|
||||
TTLMillis: ttlMillis,
|
||||
ParameterValues: buildParams.parameters,
|
||||
RichParameterValues: buildParams.richParameters,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -162,7 +167,7 @@ func (r *RootCmd) create() *clibase.Cmd {
|
||||
return xerrors.Errorf("watch build: %w", err)
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "\nThe %s workspace has been created at %s!\n", cliui.Styles.Keyword.Render(workspace.Name), cliui.Styles.DateTimeStamp.Render(time.Now().Format(time.Stamp)))
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "\nThe %s workspace has been created at %s!\n", cliui.DefaultStyles.Keyword.Render(workspace.Name), cliui.DefaultStyles.DateTimeStamp.Render(time.Now().Format(time.Stamp)))
|
||||
return nil
|
||||
},
|
||||
}
|
||||
@@ -174,12 +179,6 @@ func (r *RootCmd) create() *clibase.Cmd {
|
||||
Description: "Specify a template name.",
|
||||
Value: clibase.StringOf(&templateName),
|
||||
},
|
||||
clibase.Option{
|
||||
Flag: "parameter-file",
|
||||
Env: "CODER_PARAMETER_FILE",
|
||||
Description: "Specify a file path with parameter values.",
|
||||
Value: clibase.StringOf(¶meterFile),
|
||||
},
|
||||
clibase.Option{
|
||||
Flag: "rich-parameter-file",
|
||||
Env: "CODER_RICH_PARAMETER_FILE",
|
||||
@@ -200,24 +199,21 @@ func (r *RootCmd) create() *clibase.Cmd {
|
||||
},
|
||||
cliui.SkipPromptOption(),
|
||||
)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
type prepWorkspaceBuildArgs struct {
|
||||
Template codersdk.Template
|
||||
ExistingParams []codersdk.Parameter
|
||||
ParameterFile string
|
||||
ExistingRichParams []codersdk.WorkspaceBuildParameter
|
||||
RichParameterFile string
|
||||
NewWorkspaceName string
|
||||
|
||||
UpdateWorkspace bool
|
||||
BuildOptions bool
|
||||
WorkspaceID uuid.UUID
|
||||
}
|
||||
|
||||
type buildParameters struct {
|
||||
// Parameters contains legacy parameters stored in /parameters.
|
||||
parameters []codersdk.CreateParameterRequest
|
||||
// Rich parameters stores values for build parameters annotated with description, icon, type, etc.
|
||||
richParameters []codersdk.WorkspaceBuildParameter
|
||||
}
|
||||
@@ -227,109 +223,42 @@ type buildParameters struct {
|
||||
func prepWorkspaceBuild(inv *clibase.Invocation, client *codersdk.Client, args prepWorkspaceBuildArgs) (*buildParameters, error) {
|
||||
ctx := inv.Context()
|
||||
|
||||
var useRichParameters bool
|
||||
if len(args.ExistingRichParams) > 0 && len(args.RichParameterFile) > 0 {
|
||||
useRichParameters = true
|
||||
}
|
||||
|
||||
var useLegacyParameters bool
|
||||
if len(args.ExistingParams) > 0 || len(args.ParameterFile) > 0 {
|
||||
useLegacyParameters = true
|
||||
}
|
||||
|
||||
if useRichParameters && useLegacyParameters {
|
||||
return nil, xerrors.Errorf("Rich parameters can't be used together with legacy parameters.")
|
||||
}
|
||||
|
||||
templateVersion, err := client.TemplateVersion(ctx, args.Template.ActiveVersionID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Legacy parameters
|
||||
parameterSchemas, err := client.TemplateVersionSchema(ctx, templateVersion.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// parameterMapFromFile can be nil if parameter file is not specified
|
||||
var parameterMapFromFile map[string]string
|
||||
useParamFile := false
|
||||
if args.ParameterFile != "" {
|
||||
useParamFile = true
|
||||
_, _ = fmt.Fprintln(inv.Stdout, cliui.Styles.Paragraph.Render("Attempting to read the variables from the parameter file.")+"\r\n")
|
||||
parameterMapFromFile, err = createParameterMapFromFile(args.ParameterFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
disclaimerPrinted := false
|
||||
legacyParameters := make([]codersdk.CreateParameterRequest, 0)
|
||||
PromptParamLoop:
|
||||
for _, parameterSchema := range parameterSchemas {
|
||||
if !parameterSchema.AllowOverrideSource {
|
||||
continue
|
||||
}
|
||||
if !disclaimerPrinted {
|
||||
_, _ = fmt.Fprintln(inv.Stdout, cliui.Styles.Paragraph.Render("This template has customizable parameters. Values can be changed after create, but may have unintended side effects (like data loss).")+"\r\n")
|
||||
disclaimerPrinted = true
|
||||
}
|
||||
|
||||
// Param file is all or nothing
|
||||
if !useParamFile {
|
||||
for _, e := range args.ExistingParams {
|
||||
if e.Name == parameterSchema.Name {
|
||||
// If the param already exists, we do not need to prompt it again.
|
||||
// The workspace scope will reuse params for each build.
|
||||
continue PromptParamLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
parameterValue, err := getParameterValueFromMapOrInput(inv, parameterMapFromFile, parameterSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
legacyParameters = append(legacyParameters, codersdk.CreateParameterRequest{
|
||||
Name: parameterSchema.Name,
|
||||
SourceValue: parameterValue,
|
||||
SourceScheme: codersdk.ParameterSourceSchemeData,
|
||||
DestinationScheme: parameterSchema.DefaultDestinationScheme,
|
||||
})
|
||||
}
|
||||
|
||||
if disclaimerPrinted {
|
||||
_, _ = fmt.Fprintln(inv.Stdout)
|
||||
}
|
||||
|
||||
// Rich parameters
|
||||
templateVersionParameters, err := client.TemplateVersionRichParameters(inv.Context(), templateVersion.ID)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("get template version rich parameters: %w", err)
|
||||
}
|
||||
|
||||
parameterMapFromFile = map[string]string{}
|
||||
useParamFile = false
|
||||
parameterMapFromFile := map[string]string{}
|
||||
useParamFile := false
|
||||
if args.RichParameterFile != "" {
|
||||
useParamFile = true
|
||||
_, _ = fmt.Fprintln(inv.Stdout, cliui.Styles.Paragraph.Render("Attempting to read the variables from the rich parameter file.")+"\r\n")
|
||||
_, _ = fmt.Fprintln(inv.Stdout, cliui.DefaultStyles.Paragraph.Render("Attempting to read the variables from the rich parameter file.")+"\r\n")
|
||||
parameterMapFromFile, err = createParameterMapFromFile(args.RichParameterFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
disclaimerPrinted = false
|
||||
disclaimerPrinted := false
|
||||
richParameters := make([]codersdk.WorkspaceBuildParameter, 0)
|
||||
PromptRichParamLoop:
|
||||
for _, templateVersionParameter := range templateVersionParameters {
|
||||
if !args.BuildOptions && templateVersionParameter.Ephemeral {
|
||||
continue
|
||||
}
|
||||
|
||||
if !disclaimerPrinted {
|
||||
_, _ = fmt.Fprintln(inv.Stdout, cliui.Styles.Paragraph.Render("This template has customizable parameters. Values can be changed after create, but may have unintended side effects (like data loss).")+"\r\n")
|
||||
_, _ = fmt.Fprintln(inv.Stdout, cliui.DefaultStyles.Paragraph.Render("This template has customizable parameters. Values can be changed after create, but may have unintended side effects (like data loss).")+"\r\n")
|
||||
disclaimerPrinted = true
|
||||
}
|
||||
|
||||
// Param file is all or nothing
|
||||
if !useParamFile {
|
||||
if !useParamFile && !templateVersionParameter.Ephemeral {
|
||||
for _, e := range args.ExistingRichParams {
|
||||
if e.Name == templateVersionParameter.Name {
|
||||
// If the param already exists, we do not need to prompt it again.
|
||||
@@ -340,8 +269,17 @@ PromptRichParamLoop:
|
||||
}
|
||||
|
||||
if args.UpdateWorkspace && !templateVersionParameter.Mutable {
|
||||
_, _ = fmt.Fprintln(inv.Stdout, cliui.Styles.Warn.Render(fmt.Sprintf(`Parameter %q is not mutable, so can't be customized after workspace creation.`, templateVersionParameter.Name)))
|
||||
continue
|
||||
// Check if the immutable parameter was used in the previous build. If so, then it isn't a fresh one
|
||||
// and the user should be warned.
|
||||
exists, err := workspaceBuildParameterExists(ctx, client, args.WorkspaceID, templateVersionParameter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
_, _ = fmt.Fprintln(inv.Stdout, cliui.DefaultStyles.Warn.Render(fmt.Sprintf(`Parameter %q is not mutable, so can't be customized after workspace creation.`, templateVersionParameter.Name)))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
parameterValue, err := getWorkspaceBuildParameterValueFromMapOrInput(inv, parameterMapFromFile, templateVersionParameter)
|
||||
@@ -368,7 +306,6 @@ PromptRichParamLoop:
|
||||
// Run a dry-run with the given parameters to check correctness
|
||||
dryRun, err := client.CreateTemplateVersionDryRun(inv.Context(), templateVersion.ID, codersdk.CreateTemplateVersionDryRunRequest{
|
||||
WorkspaceName: args.NewWorkspaceName,
|
||||
ParameterValues: legacyParameters,
|
||||
RichParameterValues: richParameters,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -410,7 +347,20 @@ PromptRichParamLoop:
|
||||
}
|
||||
|
||||
return &buildParameters{
|
||||
parameters: legacyParameters,
|
||||
richParameters: richParameters,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func workspaceBuildParameterExists(ctx context.Context, client *codersdk.Client, workspaceID uuid.UUID, templateVersionParameter codersdk.TemplateVersionParameter) (bool, error) {
|
||||
lastBuildParameters, err := client.WorkspaceBuildParameters(ctx, workspaceID)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("can't fetch last workspace build parameters: %w", err)
|
||||
}
|
||||
|
||||
for _, p := range lastBuildParameters {
|
||||
if p.Name == templateVersionParameter.Name {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
+67
-216
@@ -2,7 +2,6 @@ package cli_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
@@ -15,6 +14,7 @@ import (
|
||||
"github.com/coder/coder/cli/clitest"
|
||||
"github.com/coder/coder/coderd/coderdtest"
|
||||
"github.com/coder/coder/coderd/gitauth"
|
||||
"github.com/coder/coder/coderd/util/ptr"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/provisioner/echo"
|
||||
"github.com/coder/coder/provisionersdk/proto"
|
||||
@@ -79,6 +79,63 @@ func TestCreate(t *testing.T) {
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("CreateForOtherUser", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
owner := coderdtest.CreateFirstUser(t, client)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, owner.OrganizationID, &echo.Responses{
|
||||
Parse: echo.ParseComplete,
|
||||
ProvisionApply: provisionCompleteWithAgent,
|
||||
ProvisionPlan: provisionCompleteWithAgent,
|
||||
})
|
||||
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, owner.OrganizationID, version.ID)
|
||||
_, user := coderdtest.CreateAnotherUser(t, client, owner.OrganizationID)
|
||||
args := []string{
|
||||
"create",
|
||||
user.Username + "/their-workspace",
|
||||
"--template", template.Name,
|
||||
"--start-at", "9:30AM Mon-Fri US/Central",
|
||||
"--stop-after", "8h",
|
||||
}
|
||||
|
||||
inv, root := clitest.New(t, args...)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
doneChan := make(chan struct{})
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
matches := []struct {
|
||||
match string
|
||||
write string
|
||||
}{
|
||||
{match: "compute.main"},
|
||||
{match: "smith (linux, i386)"},
|
||||
{match: "Confirm create", write: "yes"},
|
||||
}
|
||||
for _, m := range matches {
|
||||
pty.ExpectMatch(m.match)
|
||||
if len(m.write) > 0 {
|
||||
pty.WriteLine(m.write)
|
||||
}
|
||||
}
|
||||
<-doneChan
|
||||
|
||||
ws, err := client.WorkspaceByOwnerAndName(context.Background(), user.Username, "their-workspace", codersdk.WorkspaceOptions{})
|
||||
if assert.NoError(t, err, "expected workspace to be created") {
|
||||
assert.Equal(t, ws.TemplateName, template.Name)
|
||||
if assert.NotNil(t, ws.AutostartSchedule) {
|
||||
assert.Equal(t, *ws.AutostartSchedule, "CRON_TZ=US/Central 30 9 * * Mon-Fri")
|
||||
}
|
||||
if assert.NotNil(t, ws.TTLMillis) {
|
||||
assert.Equal(t, *ws.TTLMillis, 8*time.Hour.Milliseconds())
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("InheritStopAfterFromTemplate", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
@@ -180,181 +237,6 @@ func TestCreate(t *testing.T) {
|
||||
assert.Nil(t, ws.AutostartSchedule, "expected workspace autostart schedule to be nil")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("WithParameter", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
defaultValue := "something"
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: createTestParseResponseWithDefault(defaultValue),
|
||||
ProvisionApply: echo.ProvisionComplete,
|
||||
ProvisionPlan: echo.ProvisionComplete,
|
||||
})
|
||||
|
||||
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
_ = coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
inv, root := clitest.New(t, "create", "")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
doneChan := make(chan struct{})
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
matches := []string{
|
||||
"Specify a name", "my-workspace",
|
||||
fmt.Sprintf("Enter a value (default: %q):", defaultValue), "bingo",
|
||||
"Enter a value:", "boingo",
|
||||
"Confirm create?", "yes",
|
||||
}
|
||||
for i := 0; i < len(matches); i += 2 {
|
||||
match := matches[i]
|
||||
value := matches[i+1]
|
||||
pty.ExpectMatch(match)
|
||||
pty.WriteLine(value)
|
||||
}
|
||||
<-doneChan
|
||||
})
|
||||
|
||||
t.Run("WithParameterFileContainingTheValue", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
defaultValue := "something"
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: createTestParseResponseWithDefault(defaultValue),
|
||||
ProvisionApply: echo.ProvisionComplete,
|
||||
ProvisionPlan: echo.ProvisionComplete,
|
||||
})
|
||||
|
||||
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
_ = coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
tempDir := t.TempDir()
|
||||
removeTmpDirUntilSuccessAfterTest(t, tempDir)
|
||||
parameterFile, _ := os.CreateTemp(tempDir, "testParameterFile*.yaml")
|
||||
_, _ = parameterFile.WriteString("region: \"bingo\"\nusername: \"boingo\"")
|
||||
inv, root := clitest.New(t, "create", "", "--parameter-file", parameterFile.Name())
|
||||
clitest.SetupConfig(t, client, root)
|
||||
doneChan := make(chan struct{})
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
matches := []string{
|
||||
"Specify a name", "my-workspace",
|
||||
"Confirm create?", "yes",
|
||||
}
|
||||
for i := 0; i < len(matches); i += 2 {
|
||||
match := matches[i]
|
||||
value := matches[i+1]
|
||||
pty.ExpectMatch(match)
|
||||
pty.WriteLine(value)
|
||||
}
|
||||
<-doneChan
|
||||
})
|
||||
|
||||
t.Run("WithParameterFileNotContainingTheValue", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
defaultValue := "something"
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: createTestParseResponseWithDefault(defaultValue),
|
||||
ProvisionApply: echo.ProvisionComplete,
|
||||
ProvisionPlan: echo.ProvisionComplete,
|
||||
})
|
||||
|
||||
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
_ = coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
|
||||
tempDir := t.TempDir()
|
||||
removeTmpDirUntilSuccessAfterTest(t, tempDir)
|
||||
parameterFile, _ := os.CreateTemp(tempDir, "testParameterFile*.yaml")
|
||||
_, _ = parameterFile.WriteString("username: \"boingo\"")
|
||||
|
||||
inv, root := clitest.New(t, "create", "", "--parameter-file", parameterFile.Name())
|
||||
clitest.SetupConfig(t, client, root)
|
||||
doneChan := make(chan struct{})
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := inv.Run()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
matches := []struct {
|
||||
match string
|
||||
write string
|
||||
}{
|
||||
{
|
||||
match: "Specify a name",
|
||||
write: "my-workspace",
|
||||
},
|
||||
{
|
||||
match: fmt.Sprintf("Enter a value (default: %q):", defaultValue),
|
||||
write: "bingo",
|
||||
},
|
||||
{
|
||||
match: "Confirm create?",
|
||||
write: "yes",
|
||||
},
|
||||
}
|
||||
|
||||
for _, m := range matches {
|
||||
pty.ExpectMatch(m.match)
|
||||
pty.WriteLine(m.write)
|
||||
}
|
||||
<-doneChan
|
||||
})
|
||||
t.Run("FailedDryRun", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, &echo.Responses{
|
||||
Parse: []*proto.Parse_Response{{
|
||||
Type: &proto.Parse_Response_Complete{
|
||||
Complete: &proto.Parse_Complete{
|
||||
ParameterSchemas: echo.ParameterSuccess,
|
||||
},
|
||||
},
|
||||
}},
|
||||
ProvisionPlan: []*proto.Provision_Response{
|
||||
{
|
||||
Type: &proto.Provision_Response_Complete{
|
||||
Complete: &proto.Provision_Complete{},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
tempDir := t.TempDir()
|
||||
parameterFile, err := os.CreateTemp(tempDir, "testParameterFile*.yaml")
|
||||
require.NoError(t, err)
|
||||
defer parameterFile.Close()
|
||||
_, _ = parameterFile.WriteString(fmt.Sprintf("%s: %q", echo.ParameterExecKey, echo.ParameterError("fail")))
|
||||
|
||||
// The template import job should end up failed, but we need it to be
|
||||
// succeeded so the dry-run can begin.
|
||||
version = coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
require.Equal(t, codersdk.ProvisionerJobSucceeded, version.Job.Status, "job is not failed")
|
||||
|
||||
_ = coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
inv, root := clitest.New(t, "create", "test", "--parameter-file", parameterFile.Name(), "-y")
|
||||
clitest.SetupConfig(t, client, root)
|
||||
ptytest.New(t).Attach(inv)
|
||||
|
||||
err = inv.Run()
|
||||
require.Error(t, err)
|
||||
require.ErrorContains(t, err, "dry-run workspace")
|
||||
})
|
||||
}
|
||||
|
||||
func TestCreateWithRichParameters(t *testing.T) {
|
||||
@@ -366,12 +248,13 @@ func TestCreateWithRichParameters(t *testing.T) {
|
||||
firstParameterValue = "1"
|
||||
|
||||
secondParameterName = "second_parameter"
|
||||
secondParameterDisplayName = "Second Parameter"
|
||||
secondParameterDescription = "This is second parameter"
|
||||
secondParameterValue = "2"
|
||||
|
||||
immutableParameterName = "third_parameter"
|
||||
immutableParameterDescription = "This is not mutable parameter"
|
||||
immutableParameterValue = "3"
|
||||
immutableParameterValue = "4"
|
||||
)
|
||||
|
||||
echoResponses := &echo.Responses{
|
||||
@@ -382,7 +265,7 @@ func TestCreateWithRichParameters(t *testing.T) {
|
||||
Complete: &proto.Provision_Complete{
|
||||
Parameters: []*proto.RichParameter{
|
||||
{Name: firstParameterName, Description: firstParameterDescription, Mutable: true},
|
||||
{Name: secondParameterName, Description: secondParameterDescription, Mutable: true},
|
||||
{Name: secondParameterName, DisplayName: secondParameterDisplayName, Description: secondParameterDescription, Mutable: true},
|
||||
{Name: immutableParameterName, Description: immutableParameterDescription, Mutable: false},
|
||||
},
|
||||
},
|
||||
@@ -418,6 +301,7 @@ func TestCreateWithRichParameters(t *testing.T) {
|
||||
|
||||
matches := []string{
|
||||
firstParameterDescription, firstParameterValue,
|
||||
secondParameterDisplayName, "",
|
||||
secondParameterDescription, secondParameterValue,
|
||||
immutableParameterDescription, immutableParameterValue,
|
||||
"Confirm create?", "yes",
|
||||
@@ -426,7 +310,10 @@ func TestCreateWithRichParameters(t *testing.T) {
|
||||
match := matches[i]
|
||||
value := matches[i+1]
|
||||
pty.ExpectMatch(match)
|
||||
pty.WriteLine(value)
|
||||
|
||||
if value != "" {
|
||||
pty.WriteLine(value)
|
||||
}
|
||||
}
|
||||
<-doneChan
|
||||
})
|
||||
@@ -489,7 +376,7 @@ func TestCreateValidateRichParameters(t *testing.T) {
|
||||
)
|
||||
|
||||
numberRichParameters := []*proto.RichParameter{
|
||||
{Name: numberParameterName, Type: "number", Mutable: true, ValidationMin: 3, ValidationMax: 10},
|
||||
{Name: numberParameterName, Type: "number", Mutable: true, ValidationMin: ptr.Ref(int32(3)), ValidationMax: ptr.Ref(int32(10))},
|
||||
}
|
||||
|
||||
stringRichParameters := []*proto.RichParameter{
|
||||
@@ -745,39 +632,3 @@ func TestCreateWithGitAuth(t *testing.T) {
|
||||
pty.ExpectMatch("Confirm create?")
|
||||
pty.WriteLine("yes")
|
||||
}
|
||||
|
||||
func createTestParseResponseWithDefault(defaultValue string) []*proto.Parse_Response {
|
||||
return []*proto.Parse_Response{{
|
||||
Type: &proto.Parse_Response_Complete{
|
||||
Complete: &proto.Parse_Complete{
|
||||
ParameterSchemas: []*proto.ParameterSchema{
|
||||
{
|
||||
AllowOverrideSource: true,
|
||||
Name: "region",
|
||||
Description: "description 1",
|
||||
DefaultSource: &proto.ParameterSource{
|
||||
Scheme: proto.ParameterSource_DATA,
|
||||
Value: defaultValue,
|
||||
},
|
||||
DefaultDestination: &proto.ParameterDestination{
|
||||
Scheme: proto.ParameterDestination_PROVISIONER_VARIABLE,
|
||||
},
|
||||
},
|
||||
{
|
||||
AllowOverrideSource: true,
|
||||
Name: "username",
|
||||
Description: "description 2",
|
||||
DefaultSource: &proto.ParameterSource{
|
||||
Scheme: proto.ParameterSource_DATA,
|
||||
// No default value
|
||||
Value: "",
|
||||
},
|
||||
DefaultDestination: &proto.ParameterDestination{
|
||||
Scheme: proto.ParameterDestination_PROVISIONER_VARIABLE,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}}
|
||||
}
|
||||
|
||||
+1
-9
@@ -37,14 +37,6 @@ func (r *RootCmd) deleteWorkspace() *clibase.Cmd {
|
||||
}
|
||||
|
||||
var state []byte
|
||||
|
||||
if orphan {
|
||||
cliui.Warn(
|
||||
inv.Stderr,
|
||||
"Orphaning workspace requires template edit permission",
|
||||
)
|
||||
}
|
||||
|
||||
build, err := client.CreateWorkspaceBuild(inv.Context(), workspace.ID, codersdk.CreateWorkspaceBuildRequest{
|
||||
Transition: codersdk.WorkspaceTransitionDelete,
|
||||
ProvisionerState: state,
|
||||
@@ -59,7 +51,7 @@ func (r *RootCmd) deleteWorkspace() *clibase.Cmd {
|
||||
return err
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "\nThe %s workspace has been deleted at %s!\n", cliui.Styles.Keyword.Render(workspace.Name), cliui.Styles.DateTimeStamp.Render(time.Now().Format(time.Stamp)))
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "\nThe %s workspace has been deleted at %s!\n", cliui.DefaultStyles.Keyword.Render(workspace.Name), cliui.DefaultStyles.DateTimeStamp.Render(time.Now().Format(time.Stamp)))
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package cli_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
@@ -10,8 +11,11 @@ import (
|
||||
|
||||
"github.com/coder/coder/cli/clitest"
|
||||
"github.com/coder/coder/coderd/coderdtest"
|
||||
"github.com/coder/coder/coderd/database"
|
||||
"github.com/coder/coder/coderd/database/dbauthz"
|
||||
"github.com/coder/coder/codersdk"
|
||||
"github.com/coder/coder/pty/ptytest"
|
||||
"github.com/coder/coder/testutil"
|
||||
)
|
||||
|
||||
func TestDelete(t *testing.T) {
|
||||
@@ -68,6 +72,51 @@ func TestDelete(t *testing.T) {
|
||||
<-doneChan
|
||||
})
|
||||
|
||||
// Super orphaned, as the workspace doesn't even have a user.
|
||||
// This is not a scenario we should ever get into, as we do not allow users
|
||||
// to be deleted if they have workspaces. However issue #7872 shows that
|
||||
// it is possible to get into this state. An admin should be able to still
|
||||
// force a delete action on the workspace.
|
||||
t.Run("OrphanDeletedUser", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
client, _, api := coderdtest.NewWithAPI(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
user := coderdtest.CreateFirstUser(t, client)
|
||||
deleteMeClient, deleteMeUser := coderdtest.CreateAnotherUser(t, client, user.OrganizationID)
|
||||
version := coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
|
||||
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
|
||||
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
|
||||
|
||||
workspace := coderdtest.CreateWorkspace(t, deleteMeClient, user.OrganizationID, template.ID)
|
||||
coderdtest.AwaitWorkspaceBuildJob(t, deleteMeClient, workspace.LatestBuild.ID)
|
||||
|
||||
// The API checks if the user has any workspaces, so we cannot delete a user
|
||||
// this way.
|
||||
ctx := testutil.Context(t, testutil.WaitShort)
|
||||
// nolint:gocritic // Unit test
|
||||
err := api.Database.UpdateUserDeletedByID(dbauthz.AsSystemRestricted(ctx), database.UpdateUserDeletedByIDParams{
|
||||
ID: deleteMeUser.ID,
|
||||
Deleted: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
inv, root := clitest.New(t, "delete", fmt.Sprintf("%s/%s", deleteMeUser.ID, workspace.Name), "-y", "--orphan")
|
||||
|
||||
clitest.SetupConfig(t, client, root)
|
||||
doneChan := make(chan struct{})
|
||||
pty := ptytest.New(t).Attach(inv)
|
||||
inv.Stderr = pty.Output()
|
||||
go func() {
|
||||
defer close(doneChan)
|
||||
err := inv.Run()
|
||||
// When running with the race detector on, we sometimes get an EOF.
|
||||
if err != nil {
|
||||
assert.ErrorIs(t, err, io.EOF)
|
||||
}
|
||||
}()
|
||||
pty.ExpectMatch("workspace has been deleted")
|
||||
<-doneChan
|
||||
})
|
||||
|
||||
t.Run("DifferentUser", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
adminClient := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
|
||||
|
||||
+88
-1
@@ -1,6 +1,7 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
@@ -18,6 +19,8 @@ import (
|
||||
|
||||
func (r *RootCmd) dotfiles() *clibase.Cmd {
|
||||
var symlinkDir string
|
||||
var gitbranch string
|
||||
|
||||
cmd := &clibase.Cmd{
|
||||
Use: "dotfiles <git_repo_url>",
|
||||
Middleware: clibase.RequireNArgs(1),
|
||||
@@ -102,6 +105,9 @@ func (r *RootCmd) dotfiles() *clibase.Cmd {
|
||||
}
|
||||
gitCmdDir = cfgDir
|
||||
subcommands = []string{"clone", inv.Args[0], dotfilesRepoDir}
|
||||
if gitbranch != "" {
|
||||
subcommands = append(subcommands, "--branch", gitbranch)
|
||||
}
|
||||
promptText = fmt.Sprintf("Cloning %s into directory %s.\n\n Continue?", gitRepo, dotfilesDir)
|
||||
}
|
||||
|
||||
@@ -137,7 +143,24 @@ func (r *RootCmd) dotfiles() *clibase.Cmd {
|
||||
return err
|
||||
}
|
||||
// if the repo exists we soft fail the update operation and try to continue
|
||||
_, _ = fmt.Fprintln(inv.Stdout, cliui.Styles.Error.Render("Failed to update repo, continuing..."))
|
||||
_, _ = fmt.Fprintln(inv.Stdout, cliui.DefaultStyles.Error.Render("Failed to update repo, continuing..."))
|
||||
}
|
||||
|
||||
if dotfilesExists && gitbranch != "" {
|
||||
// If the repo exists and the git-branch is specified, we need to check out the branch. We do this after
|
||||
// git pull to make sure the branch was pulled down locally. If we do this before the pull, we could be
|
||||
// trying to checkout a branch that does not yet exist locally and get a git error.
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "Dotfiles git branch %q specified\n", gitbranch)
|
||||
err := ensureCorrectGitBranch(inv, ensureCorrectGitBranchParams{
|
||||
repoDir: dotfilesDir,
|
||||
gitSSHCommand: gitsshCmd,
|
||||
gitBranch: gitbranch,
|
||||
})
|
||||
if err != nil {
|
||||
// Do not block on this error, just log it and continue
|
||||
_, _ = fmt.Fprintln(inv.Stdout,
|
||||
cliui.DefaultStyles.Error.Render(fmt.Sprintf("Failed to use branch %q (%s), continuing...", err.Error(), gitbranch)))
|
||||
}
|
||||
}
|
||||
|
||||
// save git repo url so we can detect changes next time
|
||||
@@ -170,6 +193,18 @@ func (r *RootCmd) dotfiles() *clibase.Cmd {
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintf(inv.Stdout, "Running %s...\n", script)
|
||||
|
||||
// Check if the script is executable and notify on error
|
||||
scriptPath := filepath.Join(dotfilesDir, script)
|
||||
fi, err := os.Stat(scriptPath)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("stat %s: %w", scriptPath, err)
|
||||
}
|
||||
|
||||
if fi.Mode()&0o111 == 0 {
|
||||
return xerrors.Errorf("script %q is not executable. See https://coder.com/docs/v2/latest/dotfiles for information on how to resolve the issue.", script)
|
||||
}
|
||||
|
||||
// it is safe to use a variable command here because it's from
|
||||
// a filtered list of pre-approved install scripts
|
||||
// nolint:gosec
|
||||
@@ -225,6 +260,10 @@ func (r *RootCmd) dotfiles() *clibase.Cmd {
|
||||
}
|
||||
}
|
||||
|
||||
// attempt to delete the file before creating a new symlink. This overwrites any existing symlinks
|
||||
// which are typically leftover from a previous call to coder dotfiles. We do this best effort and
|
||||
// ignore errors because the symlink may or may not exist. Any regular files are backed up above.
|
||||
_ = os.Remove(to)
|
||||
err = os.Symlink(from, to)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("symlinking %s to %s: %w", from, to, err)
|
||||
@@ -242,11 +281,59 @@ func (r *RootCmd) dotfiles() *clibase.Cmd {
|
||||
Description: "Specifies the directory for the dotfiles symlink destinations. If empty, will use $HOME.",
|
||||
Value: clibase.StringOf(&symlinkDir),
|
||||
},
|
||||
{
|
||||
Flag: "branch",
|
||||
FlagShorthand: "b",
|
||||
Description: "Specifies which branch to clone. " +
|
||||
"If empty, will default to cloning the default branch or using the existing branch in the cloned repo on disk.",
|
||||
Value: clibase.StringOf(&gitbranch),
|
||||
},
|
||||
cliui.SkipPromptOption(),
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
type ensureCorrectGitBranchParams struct {
|
||||
repoDir string
|
||||
gitSSHCommand string
|
||||
gitBranch string
|
||||
}
|
||||
|
||||
func ensureCorrectGitBranch(baseInv *clibase.Invocation, params ensureCorrectGitBranchParams) error {
|
||||
dotfileCmd := func(cmd string, args ...string) *exec.Cmd {
|
||||
c := exec.CommandContext(baseInv.Context(), cmd, args...)
|
||||
c.Dir = params.repoDir
|
||||
c.Env = append(baseInv.Environ.ToOS(), fmt.Sprintf(`GIT_SSH_COMMAND=%s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no`, params.gitSSHCommand))
|
||||
c.Stdout = baseInv.Stdout
|
||||
c.Stderr = baseInv.Stderr
|
||||
return c
|
||||
}
|
||||
c := dotfileCmd("git", "branch", "--show-current")
|
||||
// Save the output
|
||||
var out bytes.Buffer
|
||||
c.Stdout = &out
|
||||
err := c.Run()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting current git branch: %w", err)
|
||||
}
|
||||
|
||||
if strings.TrimSpace(out.String()) != params.gitBranch {
|
||||
// Checkout and pull the branch
|
||||
c := dotfileCmd("git", "checkout", params.gitBranch)
|
||||
err := c.Run()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("checkout git branch %q: %w", params.gitBranch, err)
|
||||
}
|
||||
|
||||
c = dotfileCmd("git", "pull", "--ff-only")
|
||||
err = c.Run()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("pull git branch %q: %w", params.gitBranch, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// dirExists checks if the path exists and is a directory.
|
||||
func dirExists(name string) (bool, error) {
|
||||
fi, err := os.Stat(name)
|
||||
|
||||
@@ -80,6 +80,52 @@ func TestDotfiles(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(b), "wow\n")
|
||||
})
|
||||
t.Run("InstallScriptChangeBranch", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("install scripts on windows require sh and aren't very practical")
|
||||
}
|
||||
_, root := clitest.New(t)
|
||||
testRepo := testGitRepo(t, root)
|
||||
|
||||
// We need an initial commit to start the `main` branch
|
||||
c := exec.Command("git", "commit", "--allow-empty", "-m", `"initial commit"`)
|
||||
c.Dir = testRepo
|
||||
err := c.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
// nolint:gosec
|
||||
err = os.WriteFile(filepath.Join(testRepo, "install.sh"), []byte("#!/bin/bash\necho wow > "+filepath.Join(string(root), ".bashrc")), 0o750)
|
||||
require.NoError(t, err)
|
||||
|
||||
c = exec.Command("git", "checkout", "-b", "other_branch")
|
||||
c.Dir = testRepo
|
||||
err = c.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
c = exec.Command("git", "add", "install.sh")
|
||||
c.Dir = testRepo
|
||||
err = c.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
c = exec.Command("git", "commit", "-m", `"add install.sh"`)
|
||||
c.Dir = testRepo
|
||||
err = c.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
c = exec.Command("git", "checkout", "main")
|
||||
c.Dir = testRepo
|
||||
err = c.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
inv, _ := clitest.New(t, "dotfiles", "--global-config", string(root), "--symlink-dir", string(root), "-y", testRepo, "-b", "other_branch")
|
||||
err = inv.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
b, err := os.ReadFile(filepath.Join(string(root), ".bashrc"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(b), "wow\n")
|
||||
})
|
||||
t.Run("SymlinkBackup", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
_, root := clitest.New(t)
|
||||
@@ -116,6 +162,17 @@ func TestDotfiles(t *testing.T) {
|
||||
b, err = os.ReadFile(filepath.Join(string(root), ".bashrc.bak"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(b), "backup")
|
||||
|
||||
// check for idempotency
|
||||
inv, _ = clitest.New(t, "dotfiles", "--global-config", string(root), "--symlink-dir", string(root), "-y", testRepo)
|
||||
err = inv.Run()
|
||||
require.NoError(t, err)
|
||||
b, err = os.ReadFile(filepath.Join(string(root), ".bashrc"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(b), "wow")
|
||||
b, err = os.ReadFile(filepath.Join(string(root), ".bashrc.bak"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(b), "backup")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -141,5 +198,10 @@ func testGitRepo(t *testing.T, root config.Root) string {
|
||||
err = c.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
c = exec.Command("git", "checkout", "-b", "main")
|
||||
c.Dir = dir
|
||||
err = c.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
return dir
|
||||
}
|
||||
|
||||
+18
@@ -0,0 +1,18 @@
|
||||
package cli
|
||||
|
||||
import "github.com/coder/coder/cli/clibase"
|
||||
|
||||
func (r *RootCmd) expCmd() *clibase.Cmd {
|
||||
cmd := &clibase.Cmd{
|
||||
Use: "exp",
|
||||
Short: "Internal commands for testing and experimentation. These are prone to breaking changes with no notice.",
|
||||
Handler: func(i *clibase.Invocation) error {
|
||||
return i.Command.HelpHandler(i)
|
||||
},
|
||||
Hidden: true,
|
||||
Children: []*clibase.Cmd{
|
||||
r.scaletestCmd(),
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
@@ -14,9 +14,14 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"cdr.dev/slog"
|
||||
"cdr.dev/slog/sloggers/sloghuman"
|
||||
|
||||
"github.com/coder/coder/cli/clibase"
|
||||
"github.com/coder/coder/cli/cliui"
|
||||
"github.com/coder/coder/coderd/httpapi"
|
||||
@@ -25,14 +30,16 @@ import (
|
||||
"github.com/coder/coder/cryptorand"
|
||||
"github.com/coder/coder/scaletest/agentconn"
|
||||
"github.com/coder/coder/scaletest/createworkspaces"
|
||||
"github.com/coder/coder/scaletest/dashboard"
|
||||
"github.com/coder/coder/scaletest/harness"
|
||||
"github.com/coder/coder/scaletest/reconnectingpty"
|
||||
"github.com/coder/coder/scaletest/workspacebuild"
|
||||
"github.com/coder/coder/scaletest/workspacetraffic"
|
||||
)
|
||||
|
||||
const scaletestTracerName = "coder_scaletest"
|
||||
|
||||
func (r *RootCmd) scaletest() *clibase.Cmd {
|
||||
func (r *RootCmd) scaletestCmd() *clibase.Cmd {
|
||||
cmd := &clibase.Cmd{
|
||||
Use: "scaletest",
|
||||
Short: "Run a scale test against the Coder API",
|
||||
@@ -41,7 +48,9 @@ func (r *RootCmd) scaletest() *clibase.Cmd {
|
||||
},
|
||||
Children: []*clibase.Cmd{
|
||||
r.scaletestCleanup(),
|
||||
r.scaletestDashboard(),
|
||||
r.scaletestCreateWorkspaces(),
|
||||
r.scaletestWorkspaceTraffic(),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -107,7 +116,10 @@ func (s *scaletestTracingFlags) provider(ctx context.Context) (trace.TracerProvi
|
||||
return tracerProvider, func(ctx context.Context) error {
|
||||
var err error
|
||||
closeTracingOnce.Do(func() {
|
||||
err = closeTracing(ctx)
|
||||
// Allow time to upload traces even if ctx is canceled
|
||||
traceCtx, traceCancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer traceCancel()
|
||||
err = closeTracing(traceCtx)
|
||||
})
|
||||
|
||||
return err
|
||||
@@ -307,6 +319,30 @@ func (s *scaletestOutputFlags) parse() ([]scaleTestOutput, error) {
|
||||
return out, nil
|
||||
}
|
||||
|
||||
type scaletestPrometheusFlags struct {
|
||||
Address string
|
||||
Wait time.Duration
|
||||
}
|
||||
|
||||
func (s *scaletestPrometheusFlags) attach(opts *clibase.OptionSet) {
|
||||
*opts = append(*opts,
|
||||
clibase.Option{
|
||||
Flag: "scaletest-prometheus-address",
|
||||
Env: "CODER_SCALETEST_PROMETHEUS_ADDRESS",
|
||||
Default: "0.0.0.0:21112",
|
||||
Description: "Address on which to expose scaletest Prometheus metrics.",
|
||||
Value: clibase.StringOf(&s.Address),
|
||||
},
|
||||
clibase.Option{
|
||||
Flag: "scaletest-prometheus-wait",
|
||||
Env: "CODER_SCALETEST_PROMETHEUS_WAIT",
|
||||
Default: "15s",
|
||||
Description: "How long to wait before exiting in order to allow Prometheus metrics to be scraped.",
|
||||
Value: clibase.DurationOf(&s.Wait),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func requireAdmin(ctx context.Context, client *codersdk.Client) (codersdk.User, error) {
|
||||
me, err := client.User(ctx, codersdk.Me)
|
||||
if err != nil {
|
||||
@@ -384,33 +420,9 @@ func (r *RootCmd) scaletestCleanup() *clibase.Cmd {
|
||||
}
|
||||
|
||||
cliui.Infof(inv.Stdout, "Fetching scaletest workspaces...")
|
||||
var (
|
||||
pageNumber = 0
|
||||
limit = 100
|
||||
workspaces []codersdk.Workspace
|
||||
)
|
||||
for {
|
||||
page, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{
|
||||
Name: "scaletest-",
|
||||
Offset: pageNumber * limit,
|
||||
Limit: limit,
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fetch scaletest workspaces page %d: %w", pageNumber, err)
|
||||
}
|
||||
|
||||
pageNumber++
|
||||
if len(page.Workspaces) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
pageWorkspaces := make([]codersdk.Workspace, 0, len(page.Workspaces))
|
||||
for _, w := range page.Workspaces {
|
||||
if isScaleTestWorkspace(w) {
|
||||
pageWorkspaces = append(pageWorkspaces, w)
|
||||
}
|
||||
}
|
||||
workspaces = append(workspaces, pageWorkspaces...)
|
||||
workspaces, err := getScaletestWorkspaces(ctx, client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cliui.Errorf(inv.Stderr, "Found %d scaletest workspaces\n", len(workspaces))
|
||||
@@ -441,37 +453,13 @@ func (r *RootCmd) scaletestCleanup() *clibase.Cmd {
|
||||
}
|
||||
|
||||
cliui.Infof(inv.Stdout, "Fetching scaletest users...")
|
||||
pageNumber = 0
|
||||
limit = 100
|
||||
var users []codersdk.User
|
||||
for {
|
||||
page, err := client.Users(ctx, codersdk.UsersRequest{
|
||||
Search: "scaletest-",
|
||||
Pagination: codersdk.Pagination{
|
||||
Offset: pageNumber * limit,
|
||||
Limit: limit,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fetch scaletest users page %d: %w", pageNumber, err)
|
||||
}
|
||||
|
||||
pageNumber++
|
||||
if len(page.Users) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
pageUsers := make([]codersdk.User, 0, len(page.Users))
|
||||
for _, u := range page.Users {
|
||||
if isScaleTestUser(u) {
|
||||
pageUsers = append(pageUsers, u)
|
||||
}
|
||||
}
|
||||
users = append(users, pageUsers...)
|
||||
users, err := getScaletestUsers(ctx, client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cliui.Errorf(inv.Stderr, "Found %d scaletest users\n", len(users))
|
||||
if len(workspaces) != 0 {
|
||||
if len(users) != 0 {
|
||||
cliui.Infof(inv.Stdout, "Deleting scaletest users..."+"\n")
|
||||
harness := harness.NewTestHarness(cleanupStrategy.toStrategy(), harness.ConcurrentExecutionStrategy{})
|
||||
|
||||
@@ -510,10 +498,8 @@ func (r *RootCmd) scaletestCleanup() *clibase.Cmd {
|
||||
|
||||
func (r *RootCmd) scaletestCreateWorkspaces() *clibase.Cmd {
|
||||
var (
|
||||
count int64
|
||||
template string
|
||||
parametersFile string
|
||||
parameters []string // key=value
|
||||
count int64
|
||||
template string
|
||||
|
||||
noPlan bool
|
||||
noCleanup bool
|
||||
@@ -535,6 +521,8 @@ func (r *RootCmd) scaletestCreateWorkspaces() *clibase.Cmd {
|
||||
connectInterval time.Duration
|
||||
connectTimeout time.Duration
|
||||
|
||||
useHostUser bool
|
||||
|
||||
tracingFlags = &scaletestTracingFlags{}
|
||||
strategy = &scaletestStrategyFlags{}
|
||||
cleanupStrategy = &scaletestStrategyFlags{cleanup: true}
|
||||
@@ -607,51 +595,11 @@ func (r *RootCmd) scaletestCreateWorkspaces() *clibase.Cmd {
|
||||
return xerrors.Errorf("get template version %q: %w", tpl.ActiveVersionID, err)
|
||||
}
|
||||
|
||||
parameterSchemas, err := client.TemplateVersionSchema(ctx, templateVersion.ID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get template version schema %q: %w", templateVersion.ID, err)
|
||||
}
|
||||
|
||||
paramsMap := map[string]string{}
|
||||
if parametersFile != "" {
|
||||
fileMap, err := createParameterMapFromFile(parametersFile)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("read parameters file %q: %w", parametersFile, err)
|
||||
}
|
||||
|
||||
paramsMap = fileMap
|
||||
}
|
||||
|
||||
for _, p := range parameters {
|
||||
parts := strings.SplitN(p, "=", 2)
|
||||
if len(parts) != 2 {
|
||||
return xerrors.Errorf("invalid parameter %q", p)
|
||||
}
|
||||
|
||||
paramsMap[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
|
||||
}
|
||||
|
||||
params := []codersdk.CreateParameterRequest{}
|
||||
for _, p := range parameterSchemas {
|
||||
value, ok := paramsMap[p.Name]
|
||||
if !ok {
|
||||
value = ""
|
||||
}
|
||||
|
||||
params = append(params, codersdk.CreateParameterRequest{
|
||||
Name: p.Name,
|
||||
SourceValue: value,
|
||||
SourceScheme: codersdk.ParameterSourceSchemeData,
|
||||
DestinationScheme: p.DefaultDestinationScheme,
|
||||
})
|
||||
}
|
||||
|
||||
// Do a dry-run to ensure the template and parameters are valid
|
||||
// before we start creating users and workspaces.
|
||||
if !noPlan {
|
||||
dryRun, err := client.CreateTemplateVersionDryRun(ctx, templateVersion.ID, codersdk.CreateTemplateVersionDryRunRequest{
|
||||
WorkspaceName: "scaletest",
|
||||
ParameterValues: params,
|
||||
WorkspaceName: "scaletest",
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("start dry run workspace creation: %w", err)
|
||||
@@ -681,10 +629,11 @@ func (r *RootCmd) scaletestCreateWorkspaces() *clibase.Cmd {
|
||||
}
|
||||
defer func() {
|
||||
// Allow time for traces to flush even if command context is
|
||||
// canceled.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
_ = closeTracing(ctx)
|
||||
// canceled. This is a no-op if tracing is not enabled.
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "\nUploading traces...")
|
||||
if err := closeTracing(ctx); err != nil {
|
||||
_, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err)
|
||||
}
|
||||
}()
|
||||
tracer := tracerProvider.Tracer(scaletestTracerName)
|
||||
|
||||
@@ -693,35 +642,36 @@ func (r *RootCmd) scaletestCreateWorkspaces() *clibase.Cmd {
|
||||
const name = "workspacebuild"
|
||||
id := strconv.Itoa(i)
|
||||
|
||||
username, email, err := newScaleTestUser(id)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create scaletest username and email: %w", err)
|
||||
}
|
||||
workspaceName, err := newScaleTestWorkspace(id)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create scaletest workspace name: %w", err)
|
||||
}
|
||||
|
||||
config := createworkspaces.Config{
|
||||
User: createworkspaces.UserConfig{
|
||||
// TODO: configurable org
|
||||
OrganizationID: me.OrganizationIDs[0],
|
||||
Username: username,
|
||||
Email: email,
|
||||
},
|
||||
Workspace: workspacebuild.Config{
|
||||
OrganizationID: me.OrganizationIDs[0],
|
||||
// UserID is set by the test automatically.
|
||||
Request: codersdk.CreateWorkspaceRequest{
|
||||
TemplateID: tpl.ID,
|
||||
Name: workspaceName,
|
||||
ParameterValues: params,
|
||||
TemplateID: tpl.ID,
|
||||
},
|
||||
NoWaitForAgents: noWaitForAgents,
|
||||
},
|
||||
NoCleanup: noCleanup,
|
||||
}
|
||||
|
||||
if useHostUser {
|
||||
config.User.SessionToken = client.SessionToken()
|
||||
} else {
|
||||
config.User.Username, config.User.Email, err = newScaleTestUser(id)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create scaletest username and email: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
config.Workspace.Request.Name, err = newScaleTestWorkspace(id)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create scaletest workspace name: %w", err)
|
||||
}
|
||||
|
||||
if runCommand != "" {
|
||||
config.ReconnectingPTY = &reconnectingpty.Config{
|
||||
// AgentID is set by the test automatically.
|
||||
@@ -796,17 +746,6 @@ func (r *RootCmd) scaletestCreateWorkspaces() *clibase.Cmd {
|
||||
return xerrors.Errorf("cleanup tests: %w", err)
|
||||
}
|
||||
|
||||
// Upload traces.
|
||||
if tracingEnabled {
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "\nUploading traces...")
|
||||
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
|
||||
defer cancel()
|
||||
err := closeTracing(ctx)
|
||||
if err != nil {
|
||||
_, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
if res.TotalFail > 0 {
|
||||
return xerrors.New("load test failed, see above for more details")
|
||||
}
|
||||
@@ -831,18 +770,6 @@ func (r *RootCmd) scaletestCreateWorkspaces() *clibase.Cmd {
|
||||
Description: "Required: Name or ID of the template to use for workspaces.",
|
||||
Value: clibase.StringOf(&template),
|
||||
},
|
||||
{
|
||||
Flag: "parameters-file",
|
||||
Env: "CODER_SCALETEST_PARAMETERS_FILE",
|
||||
Description: "Path to a YAML file containing the parameters to use for each workspace.",
|
||||
Value: clibase.StringOf(¶metersFile),
|
||||
},
|
||||
{
|
||||
Flag: "parameter",
|
||||
Env: "CODER_SCALETEST_PARAMETERS",
|
||||
Description: "Parameters to use for each workspace. Can be specified multiple times. Overrides any existing parameters with the same name from --parameters-file. Format: key=value.",
|
||||
Value: clibase.StringArrayOf(¶meters),
|
||||
},
|
||||
{
|
||||
Flag: "no-plan",
|
||||
Env: "CODER_SCALETEST_NO_PLAN",
|
||||
@@ -927,6 +854,13 @@ func (r *RootCmd) scaletestCreateWorkspaces() *clibase.Cmd {
|
||||
Description: "Timeout for each request to the --connect-url.",
|
||||
Value: clibase.DurationOf(&connectTimeout),
|
||||
},
|
||||
{
|
||||
Flag: "use-host-login",
|
||||
Env: "CODER_SCALETEST_USE_HOST_LOGIN",
|
||||
Default: "false",
|
||||
Description: "Use the use logged in on the host machine, instead of creating users.",
|
||||
Value: clibase.BoolOf(&useHostUser),
|
||||
},
|
||||
}
|
||||
|
||||
tracingFlags.attach(&cmd.Options)
|
||||
@@ -936,6 +870,311 @@ func (r *RootCmd) scaletestCreateWorkspaces() *clibase.Cmd {
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (r *RootCmd) scaletestWorkspaceTraffic() *clibase.Cmd {
|
||||
var (
|
||||
tickInterval time.Duration
|
||||
bytesPerTick int64
|
||||
ssh bool
|
||||
|
||||
client = &codersdk.Client{}
|
||||
tracingFlags = &scaletestTracingFlags{}
|
||||
strategy = &scaletestStrategyFlags{}
|
||||
cleanupStrategy = &scaletestStrategyFlags{cleanup: true}
|
||||
output = &scaletestOutputFlags{}
|
||||
prometheusFlags = &scaletestPrometheusFlags{}
|
||||
)
|
||||
|
||||
cmd := &clibase.Cmd{
|
||||
Use: "workspace-traffic",
|
||||
Short: "Generate traffic to scaletest workspaces through coderd",
|
||||
Middleware: clibase.Chain(
|
||||
r.InitClient(client),
|
||||
),
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
ctx := inv.Context()
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics := workspacetraffic.NewMetrics(reg, "username", "workspace_name", "agent_name")
|
||||
|
||||
logger := slog.Make(sloghuman.Sink(io.Discard))
|
||||
prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus")
|
||||
defer prometheusSrvClose()
|
||||
|
||||
// Bypass rate limiting
|
||||
client.HTTPClient = &http.Client{
|
||||
Transport: &headerTransport{
|
||||
transport: http.DefaultTransport,
|
||||
header: map[string][]string{
|
||||
codersdk.BypassRatelimitHeader: {"true"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
workspaces, err := getScaletestWorkspaces(inv.Context(), client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(workspaces) == 0 {
|
||||
return xerrors.Errorf("no scaletest workspaces exist")
|
||||
}
|
||||
|
||||
tracerProvider, closeTracing, tracingEnabled, err := tracingFlags.provider(ctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create tracer provider: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
// Allow time for traces to flush even if command context is
|
||||
// canceled. This is a no-op if tracing is not enabled.
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "\nUploading traces...")
|
||||
if err := closeTracing(ctx); err != nil {
|
||||
_, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err)
|
||||
}
|
||||
// Wait for prometheus metrics to be scraped
|
||||
_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", prometheusFlags.Wait)
|
||||
<-time.After(prometheusFlags.Wait)
|
||||
}()
|
||||
tracer := tracerProvider.Tracer(scaletestTracerName)
|
||||
|
||||
outputs, err := output.parse()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("could not parse --output flags")
|
||||
}
|
||||
|
||||
th := harness.NewTestHarness(strategy.toStrategy(), cleanupStrategy.toStrategy())
|
||||
for idx, ws := range workspaces {
|
||||
var (
|
||||
agentID uuid.UUID
|
||||
agentName string
|
||||
name = "workspace-traffic"
|
||||
id = strconv.Itoa(idx)
|
||||
)
|
||||
|
||||
for _, res := range ws.LatestBuild.Resources {
|
||||
if len(res.Agents) == 0 {
|
||||
continue
|
||||
}
|
||||
agentID = res.Agents[0].ID
|
||||
agentName = res.Agents[0].Name
|
||||
}
|
||||
|
||||
if agentID == uuid.Nil {
|
||||
_, _ = fmt.Fprintf(inv.Stderr, "WARN: skipping workspace %s: no agent\n", ws.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
// Setup our workspace agent connection.
|
||||
config := workspacetraffic.Config{
|
||||
AgentID: agentID,
|
||||
BytesPerTick: bytesPerTick,
|
||||
Duration: strategy.timeout,
|
||||
TickInterval: tickInterval,
|
||||
ReadMetrics: metrics.ReadMetrics(ws.OwnerName, ws.Name, agentName),
|
||||
WriteMetrics: metrics.WriteMetrics(ws.OwnerName, ws.Name, agentName),
|
||||
SSH: ssh,
|
||||
}
|
||||
|
||||
if err := config.Validate(); err != nil {
|
||||
return xerrors.Errorf("validate config: %w", err)
|
||||
}
|
||||
var runner harness.Runnable = workspacetraffic.NewRunner(client, config)
|
||||
if tracingEnabled {
|
||||
runner = &runnableTraceWrapper{
|
||||
tracer: tracer,
|
||||
spanName: fmt.Sprintf("%s/%s", name, id),
|
||||
runner: runner,
|
||||
}
|
||||
}
|
||||
|
||||
th.AddRun(name, id, runner)
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "Running load test...")
|
||||
testCtx, testCancel := strategy.toContext(ctx)
|
||||
defer testCancel()
|
||||
err = th.Run(testCtx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("run test harness (harness failure, not a test failure): %w", err)
|
||||
}
|
||||
|
||||
res := th.Results()
|
||||
for _, o := range outputs {
|
||||
err = o.write(res, inv.Stdout)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("write output %q to %q: %w", o.format, o.path, err)
|
||||
}
|
||||
}
|
||||
|
||||
if res.TotalFail > 0 {
|
||||
return xerrors.New("load test failed, see above for more details")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Options = []clibase.Option{
|
||||
{
|
||||
Flag: "bytes-per-tick",
|
||||
Env: "CODER_SCALETEST_WORKSPACE_TRAFFIC_BYTES_PER_TICK",
|
||||
Default: "1024",
|
||||
Description: "How much traffic to generate per tick.",
|
||||
Value: clibase.Int64Of(&bytesPerTick),
|
||||
},
|
||||
{
|
||||
Flag: "tick-interval",
|
||||
Env: "CODER_SCALETEST_WORKSPACE_TRAFFIC_TICK_INTERVAL",
|
||||
Default: "100ms",
|
||||
Description: "How often to send traffic.",
|
||||
Value: clibase.DurationOf(&tickInterval),
|
||||
},
|
||||
{
|
||||
Flag: "ssh",
|
||||
Env: "CODER_SCALETEST_WORKSPACE_TRAFFIC_SSH",
|
||||
Default: "",
|
||||
Description: "Send traffic over SSH.",
|
||||
Value: clibase.BoolOf(&ssh),
|
||||
},
|
||||
}
|
||||
|
||||
tracingFlags.attach(&cmd.Options)
|
||||
strategy.attach(&cmd.Options)
|
||||
cleanupStrategy.attach(&cmd.Options)
|
||||
output.attach(&cmd.Options)
|
||||
prometheusFlags.attach(&cmd.Options)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (r *RootCmd) scaletestDashboard() *clibase.Cmd {
|
||||
var (
|
||||
count int64
|
||||
minWait time.Duration
|
||||
maxWait time.Duration
|
||||
|
||||
client = &codersdk.Client{}
|
||||
tracingFlags = &scaletestTracingFlags{}
|
||||
strategy = &scaletestStrategyFlags{}
|
||||
cleanupStrategy = &scaletestStrategyFlags{cleanup: true}
|
||||
output = &scaletestOutputFlags{}
|
||||
prometheusFlags = &scaletestPrometheusFlags{}
|
||||
)
|
||||
|
||||
cmd := &clibase.Cmd{
|
||||
Use: "dashboard",
|
||||
Short: "Generate traffic to the HTTP API to simulate use of the dashboard.",
|
||||
Middleware: clibase.Chain(
|
||||
r.InitClient(client),
|
||||
),
|
||||
Handler: func(inv *clibase.Invocation) error {
|
||||
ctx := inv.Context()
|
||||
logger := slog.Make(sloghuman.Sink(inv.Stdout)).Leveled(slog.LevelInfo)
|
||||
tracerProvider, closeTracing, tracingEnabled, err := tracingFlags.provider(ctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create tracer provider: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
// Allow time for traces to flush even if command context is
|
||||
// canceled. This is a no-op if tracing is not enabled.
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "\nUploading traces...")
|
||||
if err := closeTracing(ctx); err != nil {
|
||||
_, _ = fmt.Fprintf(inv.Stderr, "\nError uploading traces: %+v\n", err)
|
||||
}
|
||||
// Wait for prometheus metrics to be scraped
|
||||
_, _ = fmt.Fprintf(inv.Stderr, "Waiting %s for prometheus metrics to be scraped\n", prometheusFlags.Wait)
|
||||
<-time.After(prometheusFlags.Wait)
|
||||
}()
|
||||
tracer := tracerProvider.Tracer(scaletestTracerName)
|
||||
outputs, err := output.parse()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("could not parse --output flags")
|
||||
}
|
||||
reg := prometheus.NewRegistry()
|
||||
prometheusSrvClose := ServeHandler(ctx, logger, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), prometheusFlags.Address, "prometheus")
|
||||
defer prometheusSrvClose()
|
||||
metrics := dashboard.NewMetrics(reg)
|
||||
|
||||
th := harness.NewTestHarness(strategy.toStrategy(), cleanupStrategy.toStrategy())
|
||||
|
||||
for i := int64(0); i < count; i++ {
|
||||
name := fmt.Sprintf("dashboard-%d", i)
|
||||
config := dashboard.Config{
|
||||
MinWait: minWait,
|
||||
MaxWait: maxWait,
|
||||
Trace: tracingEnabled,
|
||||
Logger: logger.Named(name),
|
||||
RollTable: dashboard.DefaultActions,
|
||||
}
|
||||
if err := config.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
var runner harness.Runnable = dashboard.NewRunner(client, metrics, config)
|
||||
if tracingEnabled {
|
||||
runner = &runnableTraceWrapper{
|
||||
tracer: tracer,
|
||||
spanName: name,
|
||||
runner: runner,
|
||||
}
|
||||
}
|
||||
th.AddRun("dashboard", name, runner)
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintln(inv.Stderr, "Running load test...")
|
||||
testCtx, testCancel := strategy.toContext(ctx)
|
||||
defer testCancel()
|
||||
err = th.Run(testCtx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("run test harness (harness failure, not a test failure): %w", err)
|
||||
}
|
||||
|
||||
res := th.Results()
|
||||
for _, o := range outputs {
|
||||
err = o.write(res, inv.Stdout)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("write output %q to %q: %w", o.format, o.path, err)
|
||||
}
|
||||
}
|
||||
|
||||
if res.TotalFail > 0 {
|
||||
return xerrors.New("load test failed, see above for more details")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Options = []clibase.Option{
|
||||
{
|
||||
Flag: "count",
|
||||
Env: "CODER_SCALETEST_DASHBOARD_COUNT",
|
||||
Default: "1",
|
||||
Description: "Number of concurrent workers.",
|
||||
Value: clibase.Int64Of(&count),
|
||||
},
|
||||
{
|
||||
Flag: "min-wait",
|
||||
Env: "CODER_SCALETEST_DASHBOARD_MIN_WAIT",
|
||||
Default: "100ms",
|
||||
Description: "Minimum wait between fetches.",
|
||||
Value: clibase.DurationOf(&minWait),
|
||||
},
|
||||
{
|
||||
Flag: "max-wait",
|
||||
Env: "CODER_SCALETEST_DASHBOARD_MAX_WAIT",
|
||||
Default: "1s",
|
||||
Description: "Maximum wait between fetches.",
|
||||
Value: clibase.DurationOf(&maxWait),
|
||||
},
|
||||
}
|
||||
|
||||
tracingFlags.attach(&cmd.Options)
|
||||
strategy.attach(&cmd.Options)
|
||||
cleanupStrategy.attach(&cmd.Options)
|
||||
output.attach(&cmd.Options)
|
||||
prometheusFlags.attach(&cmd.Options)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
type runnableTraceWrapper struct {
|
||||
tracer trace.Tracer
|
||||
spanName string
|
||||
@@ -1009,9 +1248,75 @@ func isScaleTestUser(user codersdk.User) bool {
|
||||
}
|
||||
|
||||
func isScaleTestWorkspace(workspace codersdk.Workspace) bool {
|
||||
if !strings.HasPrefix(workspace.OwnerName, "scaletest-") {
|
||||
return false
|
||||
return strings.HasPrefix(workspace.OwnerName, "scaletest-") ||
|
||||
strings.HasPrefix(workspace.Name, "scaletest-")
|
||||
}
|
||||
|
||||
func getScaletestWorkspaces(ctx context.Context, client *codersdk.Client) ([]codersdk.Workspace, error) {
|
||||
var (
|
||||
pageNumber = 0
|
||||
limit = 100
|
||||
workspaces []codersdk.Workspace
|
||||
)
|
||||
|
||||
for {
|
||||
page, err := client.Workspaces(ctx, codersdk.WorkspaceFilter{
|
||||
Name: "scaletest-",
|
||||
Offset: pageNumber * limit,
|
||||
Limit: limit,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("fetch scaletest workspaces page %d: %w", pageNumber, err)
|
||||
}
|
||||
|
||||
pageNumber++
|
||||
if len(page.Workspaces) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
pageWorkspaces := make([]codersdk.Workspace, 0, len(page.Workspaces))
|
||||
for _, w := range page.Workspaces {
|
||||
if isScaleTestWorkspace(w) {
|
||||
pageWorkspaces = append(pageWorkspaces, w)
|
||||
}
|
||||
}
|
||||
workspaces = append(workspaces, pageWorkspaces...)
|
||||
}
|
||||
return workspaces, nil
|
||||
}
|
||||
|
||||
func getScaletestUsers(ctx context.Context, client *codersdk.Client) ([]codersdk.User, error) {
|
||||
var (
|
||||
pageNumber = 0
|
||||
limit = 100
|
||||
users []codersdk.User
|
||||
)
|
||||
|
||||
for {
|
||||
page, err := client.Users(ctx, codersdk.UsersRequest{
|
||||
Search: "scaletest-",
|
||||
Pagination: codersdk.Pagination{
|
||||
Offset: pageNumber * limit,
|
||||
Limit: limit,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("fetch scaletest users page %d: %w", pageNumber, err)
|
||||
}
|
||||
|
||||
pageNumber++
|
||||
if len(page.Users) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
pageUsers := make([]codersdk.User, 0, len(page.Users))
|
||||
for _, u := range page.Users {
|
||||
if isScaleTestUser(u) {
|
||||
pageUsers = append(pageUsers, u)
|
||||
}
|
||||
}
|
||||
users = append(users, pageUsers...)
|
||||
}
|
||||
|
||||
return strings.HasPrefix(workspace.Name, "scaletest-")
|
||||
return users, nil
|
||||
}
|
||||
@@ -0,0 +1,106 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coder/coder/cli/clitest"
|
||||
"github.com/coder/coder/coderd/coderdtest"
|
||||
"github.com/coder/coder/pty/ptytest"
|
||||
"github.com/coder/coder/testutil"
|
||||
)
|
||||
|
||||
// TestScaleTestCreateWorkspaces validates that the create-workspaces CLI
// command accepts its known flags. It targets a nonexistent template so the
// run fails fast after flag parsing without building any workspaces.
func TestScaleTestCreateWorkspaces(t *testing.T) {
	t.Parallel()

	// This test only validates that the CLI command accepts known arguments.
	// More thorough testing is done in scaletest/createworkspaces/run_test.go.
	ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitLong)
	defer cancelFunc()

	client := coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
	_ = coderdtest.CreateFirstUser(t, client)

	// Reserve a path in a temp dir for the JSON results written via --output.
	tDir := t.TempDir()
	outputFile := filepath.Join(tDir, "output.json")

	inv, root := clitest.New(t, "exp", "scaletest", "create-workspaces",
		"--count", "2",
		"--template", "doesnotexist",
		"--no-cleanup",
		"--no-wait-for-agents",
		"--concurrency", "2",
		"--timeout", "30s",
		"--job-timeout", "15s",
		"--cleanup-concurrency", "1",
		"--cleanup-timeout", "30s",
		"--cleanup-job-timeout", "15s",
		"--output", "text",
		"--output", "json:"+outputFile,
	)
	clitest.SetupConfig(t, client, root)
	pty := ptytest.New(t)
	inv.Stdout = pty.Output()
	inv.Stderr = pty.Output()

	// Failing on template lookup proves all flags above were accepted.
	err := inv.WithContext(ctx).Run()
	require.ErrorContains(t, err, "could not find template \"doesnotexist\" in any organization")
}
|
||||
|
||||
// This test just validates that the CLI command accepts its known arguments.
|
||||
// A more comprehensive test is performed in workspacetraffic/run_test.go
|
||||
func TestScaleTestWorkspaceTraffic(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitMedium)
|
||||
defer cancelFunc()
|
||||
|
||||
client := coderdtest.New(t, nil)
|
||||
_ = coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
inv, root := clitest.New(t, "exp", "scaletest", "workspace-traffic",
|
||||
"--timeout", "1s",
|
||||
"--bytes-per-tick", "1024",
|
||||
"--tick-interval", "100ms",
|
||||
"--scaletest-prometheus-address", "127.0.0.1:0",
|
||||
"--scaletest-prometheus-wait", "0s",
|
||||
"--ssh",
|
||||
)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
var stdout, stderr bytes.Buffer
|
||||
inv.Stdout = &stdout
|
||||
inv.Stderr = &stderr
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.ErrorContains(t, err, "no scaletest workspaces exist")
|
||||
}
|
||||
|
||||
// This test just validates that the CLI command accepts its known arguments.
|
||||
func TestScaleTestDashboard(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx, cancelFunc := context.WithTimeout(context.Background(), testutil.WaitMedium)
|
||||
defer cancelFunc()
|
||||
|
||||
client := coderdtest.New(t, nil)
|
||||
_ = coderdtest.CreateFirstUser(t, client)
|
||||
|
||||
inv, root := clitest.New(t, "exp", "scaletest", "dashboard",
|
||||
"--count", "1",
|
||||
"--min-wait", "100ms",
|
||||
"--max-wait", "1s",
|
||||
"--timeout", "1s",
|
||||
"--scaletest-prometheus-address", "127.0.0.1:0",
|
||||
"--scaletest-prometheus-wait", "0s",
|
||||
)
|
||||
clitest.SetupConfig(t, client, root)
|
||||
var stdout, stderr bytes.Buffer
|
||||
inv.Stdout = &stdout
|
||||
inv.Stderr = &stderr
|
||||
err := inv.WithContext(ctx).Run()
|
||||
require.NoError(t, err, "")
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user