Compare commits
61 Commits
control-ma
...
Qubasa-rep
| Author | SHA1 | Date |
|---|---|---|
| | e60c3d984d | |
| | f8723ab897 | |
| | b877df4c6e | |
| | bf04eabc21 | |
| | 5149ed9318 | |
| | 4d84180dd9 | |
| | 7571fdef74 | |
| | 7d55511d6f | |
| | cbd7157cfc | |
| | 25faba4795 | |
| | 0f0bab7976 | |
| | f0e18bbdfb | |
| | 6d4db71ea3 | |
| | b21c98db7f | |
| | df3fe00b8a | |
| | 7371085c05 | |
| | 6804327bca | |
| | ec76d5f8e5 | |
| | 864cdf33a7 | |
| | e6dbccd8e5 | |
| | 30ac51b313 | |
| | 00b12c2c51 | |
| | 085d726217 | |
| | 3e65a76dfe | |
| | 929632049e | |
| | 0d1e642dfd | |
| | 8af68cbd9d | |
| | a44fee9eab | |
| | b27f34aed3 | |
| | cd23c9ff41 | |
| | 852a673839 | |
| | 763a403e9f | |
| | 78f8a080a8 | |
| | af2a00bde3 | |
| | 47c44ec7ec | |
| | 1f66e90db1 | |
| | 89fbf723ca | |
| | 548fdfb877 | |
| | e44d34ef99 | |
| | acc6797c22 | |
| | 9fbd943f44 | |
| | d42d9ad943 | |
| | 3c16ccdb38 | |
| | 7b9e431da7 | |
| | 05388c9c7e | |
| | fe36aa4161 | |
| | 6829c7f2dd | |
| | aa2e8eef88 | |
| | 9e07064ec5 | |
| | 305a361f56 | |
| | db32e99082 | |
| | 50ac0266f5 | |
| | b01691cb64 | |
| | 44b237d9be | |
| | f8bbd91c4a | |
| | 401de330f8 | |
| | 51da7ed5e8 | |
| | e170cc2641 | |
| | 8434f0fc35 | |
| | d6bbb42dda | |
| | 6539a6a24f | |
1 .gitignore (vendored)
@@ -14,6 +14,7 @@ example_clan
nixos.qcow2
**/*.glade~
/docs/out
/pkgs/clan-cli/clan_cli/select
**/.local.env

# MacOS stuff

2 CODEOWNERS (new file)
@@ -0,0 +1,2 @@
nixosModules/clanCore/vars/.* @lopter
pkgs/clan-cli/clan_cli/(secrets|vars)/.* @lopter
@@ -26,6 +26,7 @@ clanLib.test.makeTestClan {
        roles.admin.machines = [ "admin1" ];
      };
    };

    instances."test" = {
      module.name = "new-service";
      roles.peer.machines.peer1 = { };
@@ -40,15 +41,23 @@ clanLib.test.makeTestClan {
    perMachine = {
      nixosModule = {
        # This should be generated by:
        # ./pkgs/scripts/update-vars.py
        # nix run .#generate-test-vars -- checks/dummy-inventory-test dummy-inventory-test
        clan.core.vars.generators.new-service = {
          files.hello = {
          files.not-a-secret = {
            secret = false;
            deploy = true;
          };
          files.a-secret = {
            secret = true;
            deploy = true;
            owner = "nobody";
            group = "users";
            mode = "0644";
          };
          script = ''
            # This is a dummy script that does nothing
            echo "This is a dummy script" > $out/hello
            echo -n "not-a-secret" > $out/not-a-secret
            echo -n "a-secret" > $out/a-secret
          '';
        };
      };
@@ -69,7 +78,15 @@ clanLib.test.makeTestClan {
      print(peer1.succeed("systemctl status dummy-service"))

      # peer1 should have the 'hello' file
      peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.hello.path}")
      peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}")

      ls_out = peer1.succeed("ls -la ${nodes.peer1.clan.core.vars.generators.new-service.files.a-secret.path}")
      # Check that the file is owned by 'nobody'
      assert "nobody" in ls_out, f"File is not owned by 'nobody': {ls_out}"
      # Check that the file is in the 'users' group
      assert "users" in ls_out, f"File is not in the 'users' group: {ls_out}"
      # Check that the file is in the '0644' mode
      assert "-rw-r--r--" in ls_out, f"File is not in the '0644' mode: {ls_out}"
    '';
  }
);

@@ -1,6 +1,6 @@
[
  {
    "publickey": "age1hd2exjq88h7538y6mvjvexx3u5gp6a03yfn5nj32h2667yyksyaqcuk5qs",
    "publickey": "age12yt078p9ewxy2sh0a36nxdpgglv8wqqftmj4dkj9rgy5fuyn4p0q5nje9m",
    "type": "age"
  }
]

@@ -1,6 +1,6 @@
[
  {
    "publickey": "age19urkt89q45a2wk6a4yaramzufjtnw6nq2snls0v7hmf7tqf73axsfx50tk",
    "publickey": "age12w2ld4vxfyf3hdq2d8la4cu0tye4pq97egvv3me4wary7xkdnq2snh0zx2",
    "type": "age"
  }
]

@@ -1,15 +1,15 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:hhuFgZcPqht0h3tKxGtheS4GlrVDo4TxH0a9lxgPYj2i12QUmE04rB07A+hu4Z8WNWLYvdM5069mEOZYm3lSeTzBHQPxYZRuVj0=,iv:sA1srRFQqsMlJTAjFcb09tI/Jg2WjOVJL5NZkPwiLoU=,tag:6xXo9FZpmAJw6hCBsWzf8Q==,type:str]",
|
||||
"data": "ENC[AES256_GCM,data:GPpsUhSzWPtTP8EUNKsobFXjYqDldhkkIH6hBk11RsDLAGWdhVrwcISGbhsWpYhvAdPKA84DB6Zqyh9lL2bLM9//ybC1kzY20BQ=,iv:NrxMLdedT2FCkUAD00SwsAHchIsxWvqe7BQekWuJcxw=,tag:pMDXcMyHnLF2t3Qhb1KolA==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBGaGVHeTgrN3dJQ2VITFBM\neWVzbDhjb0pwNUhBUjdUc0p5OTVta1dvSno4ClJxeUc4Z0hiaFRkVlJ1YTA4Lyta\neWdwV005WGYvMUNRVG1qOVdicTk0NUkKLS0tIFQvaDNFS1JMSFlHRXlhc3lsZm03\nYVhDaHNsam5wN1VqdzA3WTZwM1JwV2sKZk/SiZJgjllADdfHLSWuQcU4+LttDpt/\nqqDUATEuqYaALljC/y3COT+grTM2bwGjj6fsfsfiO/EL9iwzD3+7oA==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBzb2tWb1ExKzdmUTRzaGVj\nK3cyYTBHZTJwVjM1SzUvbHFiMnVhY05iKzFZCnJTSE1VSVdpcUFLSEJuaE1CZzJD\nWjZxYzN2cUltdThNMVRKU3FIb20vUXMKLS0tIFlHQXRIdnMybDZFUVEzWlQrc1dw\nbUxhZURXblhHd0pka0JIK1FTZEVqdUEKI/rfxQRBc+xGRelhswkJQ9GcZs6lzfgy\nuCxS5JI9npdPLQ/131F3b21+sP5YWqks41uZG+vslM1zQ+BlENNhDw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-04-09T15:10:16Z",
|
||||
"mac": "ENC[AES256_GCM,data:xuXj4833G6nhvcRo2ekDxz8G5phltmU8h1GgGofH9WndzrqLKeRSqm/n03IHRW0f4F68XxnyAkfvokVh6vW3LRQAFkqIlXz5U4+zFNcaVaPobS5gHTgxsCoTUoalWPvHWtXd50hUVXeAt8rPfTfeveVGja8bOERk8mvwUPxb6h4=,iv:yP1usA9m8tKl6Z/UK9PaVMJlZlF5qpY4EiM4+ByVlik=,tag:8DgoIhLstp3MRki90VfEvw==,type:str]",
|
||||
"lastmodified": "2025-05-04T12:44:13Z",
|
||||
"mac": "ENC[AES256_GCM,data:fWxLHXBWolHVxv6Q7utcy6OVLV13ziswrIYyNKiwy1vsU8i7xvvuGO1HlnE+q43D2WuHR53liKq1UHuf1JMrWzTwZ0PYe+CVugtoEtbR2qu3rK/jAkOyMyhmmHzmf6Rp4ZMCzKgZeC/X2bDKY/z0firHAvjWydEyogutHpvtznM=,iv:OQI3FfkLneqbdztAXVQB3UkHwDPK+0hWu5hZ9m8Oczg=,tag:em6GfS2QHsXs391QKPxfmA==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:rwPhbayGf6mE1E9NCN+LuL7VfWWOfhoJW6H2tNSoyebtyTpM3GO2jWca1+N7hI0juhNkUk+rIsYQYbCa/5DZQiV0/2Jgu4US1XY=,iv:B5mcaQsDjb6BacxGB4Kk88/qLCpVOjQNRvGN+fgUiEo=,tag:Uz0A8kAF5NzFetbv9yHIjQ==,type:str]",
|
||||
"data": "ENC[AES256_GCM,data:W3cOkUYL5/YulW2pEISyTlMaA/t7/WBE7BoCdFlqrqgaCL7tG4IV2HgjiPWzIVMs0zvDSaghdEvAIoB4wOf470d1nSWs0/E8SDk=,iv:wXXaZIw3sPY8L/wxsu7+C5v+d3RQRuwxZRP4YLkS8K4=,tag:HeK4okj7O7XDA9JDz2KULw==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBWY0hKQ1dnV0tMYytDMCtj\nTDV4Zk5NeVN0bCtqaWRQV3d4M0VlcGVZMkhZCm02dHZyOGVlYzJ5Z3FlUWNXMVQ0\nb2ZrTXZQRzRNdzFDeWZCVGhlTS9rMm8KLS0tIEJkY1QwOENRYWw3cjIwd3I0bzdz\nOEtQNm1saE5wNWt2UUVnYlN4NWtGdFkKmWHU5ttZoQ3NZu/zkX5VxfC2sMpSOyod\neb7LRhFqPfo5N1XphJcCqr5QUoZOfnH0xFhZ2lxWUS3ItiRpU4VDwg==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAxRC83b3dtSVpXcGovNnVs\nTzFka2J2MEFhYkF1ajVrdjMrNUtPWGRObjM4Cm5zSUR5OGw0T0FaL3BaWmR6L29W\nU2syMFIyMUhFRUZpWFpCT28vWko2ZU0KLS0tIFpHK3BjU1V1L0FrMGtwTGFuU3Mz\nRkV5VjI2Vndod202bUR3RWQwNXpmVzQKNk8/y7M62wTIIKqY4r3ZRk5aUCRUfine\n1LUSHMKa2bRe+hR7nS7AF4BGXp03h2UPY0FP5+U5q8XuIj1jfMX8kg==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-04-09T15:10:41Z",
|
||||
"mac": "ENC[AES256_GCM,data:pab0G2GPjgs59sbiZ8XIV5SdRtq5NPU0yq18FcqiMV8noAL94fyVAY7fb+9HILQWQsEjcykgk9mA2MQ0KpK/XG8+tDQKcBH+F+2aQnw5GJevXmfi7KLTU0P224SNo7EnKlfFruB/+NZ0WBtkbbg1OzekrbplchpSI6BxWz/jASE=,iv:TCj9FCxgfMF2+PJejr67zgGnF+CFS+YeJiejnHbf7j0=,tag:s7r9SqxeqpAkncohYvIQ2Q==,type:str]",
|
||||
"lastmodified": "2025-05-04T12:44:16Z",
|
||||
"mac": "ENC[AES256_GCM,data:yTkQeFvKrN1+5FP+yInsaRWSAG+ZGG0uWF3+gVRvzJTFxab8kT2XkAMc+4D7SKgcjsmwBBb77GNoAKaKByhZ92UaCfZ2X66i7ZmYUwLM1NVVmm+xiwwjsh7PJXlZO/70anTzd1evtlZse0jEmRnV5Y0F0M6YqXmuwU+qGUJU2F8=,iv:sy6ozhXonWVruaQfa7pdEoV5GkNZR/UbbINKAPbgWeg=,tag:VMruQ1KExmlMR7TsGNgMlg==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,19 +1,19 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:bxM9aYMK,iv:SMNYtk9FSyZ1PIfEzayTKKdCnZWdhcyUEiTwFUNb988=,tag:qJYW4+VQyhF1tGPQPTKlOQ==,type:str]",
|
||||
"data": "ENC[AES256_GCM,data:T8edCvw=,iv:7/G5xt5fv38I9uFzk7WMIr9xQdz/6lFxqOC+18HBg8Q=,tag:F39Cxbgmzml+lZLsZ59Kmg==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1hd2exjq88h7538y6mvjvexx3u5gp6a03yfn5nj32h2667yyksyaqcuk5qs",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAvZDZYYXdpcXVqRFRnQ2Jx\nTFhFWEJTR290cHZhTXZadFFvcHM4MHVIN3lFCmJhOEZrL3g4TFBZVllxdDFZakJn\nR3NxdXo0eE8vTDh3QlhWOFpVZ0lNUHcKLS0tIEE4dkpCalNzaXJ0Qks3VHJSUzZF\nb2N3NGdjNHJnSUN6bW8welZ1VDdJakEKGKZ7nn1p11IyJB6DMxu2HJMvZ+0+5WpE\nPLWh2NlGJO3XrrL4Fw7xetwbqE+QUZPNl/JbEbu4KLIUGLjqk9JDhQ==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
"recipient": "age12yt078p9ewxy2sh0a36nxdpgglv8wqqftmj4dkj9rgy5fuyn4p0q5nje9m",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBPNUhiYkZWK3dPMHNiRTVM\nRHNvaHFsOFp1c0UxQitwVG0zY01MNDZRV1E4CjEybENoTVIzN29vQ3FtUTRSYmFU\nNXIzQllVSllXRGN2M1B6WXJLdHZSajgKLS0tIDllZ0ZmZUcxMHhDQUpUOEdWbmkv\neUQweHArYTdFSmNteVpuQ3BKdnh0Y0UKs8Hm3D+rXRRfpUVSZM3zYjs6b9z8g10D\nGTkvreUMim4CS22pjdQ3eNA9TGeDXfWXE7XzwXLCb+wVcf7KwbDmvg==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
},
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBHckJCQVFyb21aT1R0d2Rr\nMWxNMHVqcGxabHBmS0RibW9sN0gyZDI1b1dFCnRWUk5LSWdxV3c4RWVZdUtEN1Fv\nRk4xVmwwT2xrdWVERkJXUVVlVXJjTVUKLS0tIC9ERG9KMGxTNEsrbzFHUGRiVUlm\nRi9qakxoc1FOVVV1TkUrckwxRUVnajQKE8ms/np2NMswden3xkjdC8cXccASLOoN\nu+EaEk69UvBvnOg9VBjyPAraIKgNrTc4WWwz+DOBj1pCwVbu9XxUlA==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBKSDhpT3cvck9PenZYVEZH\ndFQreVRBdG93L1dBUGlvYjFWcDlHWUJsZUVBCm9DMTJ4UytiYzlEVHNWdUcwS1ds\nT0dhbzAzNDdmbDBCU0dvL2xNeHpXcGsKLS0tIFArbmpsbzU3WnpJdUt1MGN0L1d0\nV1JkTDJYWUxsbmhTQVNOeVRaSUhTODQKk9Vph2eldS5nwuvVX0SCsxEm4B+sO76Z\ndIjJ3OQxzoZmXMaOOuKHC5U0Y75Qn7eXC43w5KHsl2CMIUYsBGJOZw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-04-09T15:10:30Z",
|
||||
"mac": "ENC[AES256_GCM,data:cIwWctUbAFI8TRMxYWy5xqlKDVLMqBIxVv4LInnLqi3AauL0rJ3Z7AxK/wb2dCQM07E1N7YaORNqgUpFC1xo0hObAA8mrPaToPotKDkjua0zuyTUNS1COoraYjZpI/LKwmik/qtk399LMhiC7aHs+IliT9Dd41B8LSMBXwdMldY=,iv:sZ+//BrYH5Ay2JJAGs7K+WfO2ASK82syDlilQjGmgFs=,tag:nY+Af9eQRLwkiHZe85dQ9A==,type:str]",
|
||||
"lastmodified": "2025-05-04T12:44:14Z",
|
||||
"mac": "ENC[AES256_GCM,data:6fKrS1eLLUWlHkQpxLFXBRk6f2wa5ADLMViVvYXXGU24ayl9UuNSKrCRHp9cbzhqhti3HdwyNt6TM+2X6qhiiAQanKEB2PF7JRYX74NfNKil9BEDjt5AqqtpSgVv5l7Ku/uSHaPkd2sDmzHsy5Q4bSGxJQokStk1kidrwle+mbc=,iv:I/Aad82L/TCxStM8d8IZICUrwdjRbGx2fuGWqexr21o=,tag:BfgRbGUxhPZzK2fLik1kxA==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1 +1 @@
|
||||
13898
|
||||
18650
|
||||
|
||||
@@ -1,19 +1,19 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:ImlGIKxE,iv:UUWxjLNRKJCD2WHNpw8lfvCc8rnXPCqc2pni1ODckjE=,tag:HFCqiv31E9bShIIaAEjF0A==,type:str]",
|
||||
"data": "ENC[AES256_GCM,data:vp0yW0Gt,iv:FO2cy+UpEl5aRay/LUGu//c82QiVxuKuGSaVh0rGJvc=,tag:vf2RAOPpcRW0HwxHoGy17A==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age19urkt89q45a2wk6a4yaramzufjtnw6nq2snls0v7hmf7tqf73axsfx50tk",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBpTEROZjh6NjBhSlJSc1Av\nSHhjdkhwVUd3VzBZemhQb3dhMlJXalBmZlFjCkZPYkhZZGVOVTNjUWdFU0s4cWFn\nL2NXbkRCdUlMdElnK2lGbG5iV0w1cHMKLS0tIFREcmxDdHlUNVBFVGRVZSt0c0E5\nbnpHaW1Vb3R3ZFFnZVMxY3djSjJmOU0KIwqCSQf5S9oA59BXu7yC/V6yqvCh88pa\nYgmNyBjulytPh1aAfOuNWIGdIxBpcEf+gFjz3EiJY9Kft3fTmhp2bw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
"recipient": "age12w2ld4vxfyf3hdq2d8la4cu0tye4pq97egvv3me4wary7xkdnq2snh0zx2",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBjaFVNMEd2YUxpSm5XVVRi\nY2ZUc3NTOStJUFNMWWVPQTgxZ2tCK1QrMW1ZCjYwMlA4dkIzSlc0TGtvZjcyK3Bi\nM3pob2JOOFUyeVJ6M2JpaTRCZlc1R0kKLS0tIDJMb1dFcVRWckhwYWNCQng0RlFO\nTkw3OGt4dkFIZVY5aVEzZE5mMzJSM0EKUv8bUqg48L2FfYVUVlpXvyZvPye699of\nG6PcjLh1ZMbNCfnsCzr+P8Vdk/F4J/ifxL66lRGfu2xOLxwciwQ+5Q==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
},
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSArN3R4TThibjdYbE9TMDE1\naUhuNDlscExjaktIR2VmTk1OMWtVM0NpTUJZClJUNEcwVDlibExWQk84TTNEWFhp\nMjYyZStHc1N0ZTh1S3VTVk45WGxlWWMKLS0tIHFab25LY1R1d1l6NE5XbHJvQ3lj\nNGsxUldFVHQ5RVJERDlGbi9NY29hNWsKENBTcAS/R/dTGRYdaWv5Mc/YG4bkah5w\nb421ZMQF+r4CYnzUqnwivTG8TMRMqJLavfkutE6ZUfJbbLufrTk5Lw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBnZ2dDbVhoQngxM3lTSmZF\nUTAwS1lCTGhEMU1GVXpFUzlIUFdqZy9LajF3Ck9mdVpBRjlyVUNhZXZIUFZjUzF1\nNlhFN28vNmwzcUVkNmlzUnpkWjJuZE0KLS0tIHpXVHVlNk9vU1ZPTGRrYStWbmRO\nbDM4U2o1SlEwYWtqOXBqd3BFUTAvMHcKkI8UVd0v+x+ELZ5CoGq9DzlA6DnVNU2r\nrV9wLfbFd7RHxS0/TYZh5tmU42nO3iMYA9FqERQXCtZgXS9KvfqHwQ==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-04-09T15:11:04Z",
|
||||
"mac": "ENC[AES256_GCM,data:JdJzocQZWVprOmZ4Ni04k1tpD1TpFcK5neKy3+0/c3+uPBwjwaMayISKRaa/ILUXlalg60oTqxB4fUFoYVm8KGQVhDwPhO/T1hyYVQqidonrcYfJfCYg00mVSREV/AWqXb7RTnaEBfrdnRJvaAQF9g2qDXGVgzp3eACdlItclv4=,iv:nOw1jQjIWHWwU3SiKpuQgMKXyu8MZYI+zI9UYYd9fCI=,tag:ewUkemIPm/5PkmuUD0EcAQ==,type:str]",
|
||||
"lastmodified": "2025-05-04T12:44:18Z",
|
||||
"mac": "ENC[AES256_GCM,data:1ZZ+ZI1JsHmxTov1bRijfol3kTkyheg2o3ivLsMHRhCmScsUry97hQJchF78+y2Izt7avaQEHYn6pVbYt/0rLrSYD7Ru7ITVxXoYHOiN5Qb98masUzpibZjrdyg5nO+LW5/Hmmwsc3yn/+o3IH1AUYpsxlJRdnHHCmoSOFaiFFM=,iv:OQlgmpOTw4ljujNzqwQ5/0Mz8pQpCSUtqRvj3FJAxDs=,tag:foZvdeW7gK9ZVKkWqnlxGA==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1 +1 @@
|
||||
30661
|
||||
6745
|
||||
|
||||
@@ -0,0 +1 @@
../../../../../../sops/machines/peer1
@@ -0,0 +1,19 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:prFl0EJy8bM=,iv:zITWxf+6Ebk0iB5vhhd7SBQa1HFrIJXm8xpSM+D9I0M=,tag:NZCRMCs1SzNKLBu/KUDKMQ==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age12w2ld4vxfyf3hdq2d8la4cu0tye4pq97egvv3me4wary7xkdnq2snh0zx2",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB0S0RZRWxaZVZvTUhjdWVL\naU9WZmtEcm1qa2JsRmdvdmZmNENMaWFEVUFRCmdoVnRXSGlpRlFjNmVVbDJ5VnFT\nMnVJUlVnM3lxNmZCRTdoRVJ4NW1oYWcKLS0tIFFNbXBFUk1RWnlUTW1SeG1vYzlM\nVVpEclFVOE9PWWQxVkZ0eEgwWndoRWcKDAOHe+FIxqGsc6LhxMy164qjwG6t2Ei2\nP0FSs+bcKMDpudxeuxCjnDm/VoLxOWeuqkB+9K2vSm2W/c/fHTSbrA==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
},
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB2VU5jOEpwYUtDVEVFcVpU\nQkExTVZ3ejZHcGo5TG8zdUQwNktoV09WdUZvCmQ0dE1TOWRFbTlxdVd4WWRxd3VF\nQUNTTkNNT3NKYjQ5dEJDY0xVZ3pZVUUKLS0tIDFjajRZNFJZUTdNeS8yN05FMFZU\ncEtjRjhRbGE0MnRLdk10NkFLMkxqencKGzJ66dHluIghH04RV/FccfEQP07yqnfb\n25Hi0XIVJfXBwje4UEyszrWTxPPwVXdQDQmoNKf76Qy2jYqJ56uksw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-05-04T12:44:20Z",
|
||||
"mac": "ENC[AES256_GCM,data:FIkilsni5kOdNlVwDuLsQ/zExypHRWdqIBQDNWMLTwe8OrsNPkX+KYutUvt9GaSoGv4iDULaMRoizO/OZUNfc2d8XYSdj0cxOG1Joov4GPUcC/UGyNuQneAejZBKolvlnidKZArofnuK9g+lOTANEUtEXUTnx8L+VahqPZayQas=,iv:NAo6sT3L8OOB3wv1pjr3RY2FwXgVmZ4N0F4BEX4YPUY=,tag:zHwmXygyvkdpASZCodQT9Q==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1 @@
../../../../../../sops/users/admin
@@ -1 +0,0 @@
This is a dummy script
@@ -0,0 +1 @@
not-a-secret
@@ -21,6 +21,7 @@ in
    pkgs,
    lib,
    self',
    system,
    ...
  }:
  {
@@ -83,7 +84,10 @@ in
        schema =
          (self.clanLib.inventory.evalClanService {
            modules = [ m ];
            key = "checks";
            prefix = [
              "checks"
              system
            ];
          }).config.result.api.schema;
      in
      schema

@@ -4,7 +4,15 @@
  _class = "clan.service";
  manifest.name = "clan-core/hello-word";

  roles.peer = { };
  roles.peer = {
    interface =
      { lib, ... }:
      {
        options.foo = lib.mkOption {
          type = lib.types.str;
        };
      };
  };

  perMachine =
    { machine, ... }:

@@ -50,6 +50,7 @@ in
  hello-service = import ./tests/vm/default.nix {
    inherit module;
    inherit self inputs pkgs;
    # clanLib is exposed from inputs.clan-core
    clanLib = self.clanLib;
  };
};

@@ -19,7 +19,7 @@ We might not be sure whether all of those will exist but the architecture should
## Decision

This leads to the conclusion that we should do `library` centric development.
With the current `clan` python code beeing a library that can be imported to create various tools ontop of it.
With the current `clan` python code being a library that can be imported to create various tools ontop of it.
All **CLI** or **UI** related parts should be moved out of the main library.

*Note: The next person who wants implement any new frontend should do this first. Currently it looks like the TUI is the next one.*

47 decisions/03-adr-numbering-process.md (new file)
@@ -0,0 +1,47 @@
# ADR Numbering process

## Status

Proposed after some conversation between @lassulus, @Mic92, & @lopter.

## Context

It can be useful to refer to ADRs by their numbers, rather than their full title. To that end, short and sequential numbers are useful.

The issue is that an ADR number is effectively assigned when the ADR is merged; before it is merged, its number is provisional. Because multiple ADRs can be written at the same time, you end up with multiple provisional ADRs with the same number. For example, this is the third ADR-3:

1. ADR-3-clan-compat: see [#3212];
2. ADR-3-fetching-nix-from-python: see [#3452];
3. ADR-3-numbering-process: this ADR.

This situation makes it impossible to refer to an ADR by its number, and it is why I (@lopter) went with the arbitrary number 7 in [#3196].

We could solve this problem by using the PR number as the ADR number (@lassulus). The issue is that PR numbers are getting big in clan-core, which does not make them easy to remember, or use in conversation and code (@lopter).

Another approach would be to move the ADRs into a different repository. This would reset the counter back to 1 and make it straightforward to keep ADR and PR numbers in sync (@lopter). The issue then is that ADRs are not in context with their changes, which makes them more difficult to review (@Mic92).

## Decision

A third approach would be to:

1. Commit ADRs before they are approved, so that the next ADR number gets assigned;
1. Open a PR for the proposed ADR;
1. Update the ADR file committed in step 1, so that its markdown contents point to the PR that tracks it.

## Consequences

### ADRs have unique and memorable numbers through their entire life cycle

This makes it easier to refer to them in conversation or in code.

### You need to have commit access to get an ADR number assigned

This makes it more difficult for someone external to the project to contribute an ADR.

### Creating a new ADR requires multiple commits

Maybe a script or CI flow could help with that if it becomes painful.

[#3212]: https://git.clan.lol/clan/clan-core/pulls/3212/
[#3452]: https://git.clan.lol/clan/clan-core/pulls/3452/
[#3196]: https://git.clan.lol/clan/clan-core/pulls/3196/
26 flake.lock (generated)
@@ -16,11 +16,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1746334246,
|
||||
"narHash": "sha256-YU4wtH9Y5yRjqbMwczOdDakOjSiTkOUP/JAYd1f3jBc=",
|
||||
"rev": "607ce65fbfe20bb38170b76826a11006f526c05d",
|
||||
"lastModified": 1746459034,
|
||||
"narHash": "sha256-VHHc8EFPu2uk8mf4ItTHwxgrQxFixNHkclPQMXZfYig=",
|
||||
"rev": "d63db1621463918966e8e0ec2eb7ddbe8aae332e",
|
||||
"type": "tarball",
|
||||
"url": "https://git.clan.lol/api/v1/repos/clan/data-mesher/archive/607ce65fbfe20bb38170b76826a11006f526c05d.tar.gz"
|
||||
"url": "https://git.clan.lol/api/v1/repos/clan/data-mesher/archive/d63db1621463918966e8e0ec2eb7ddbe8aae332e.tar.gz"
|
||||
},
|
||||
"original": {
|
||||
"type": "tarball",
|
||||
@@ -34,11 +34,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1745812220,
|
||||
"narHash": "sha256-hotBG0EJ9VmAHJYF0yhWuTVZpENHvwcJ2SxvIPrXm+g=",
|
||||
"lastModified": 1746411114,
|
||||
"narHash": "sha256-mLlkVX1kKbAa/Ns5u26wDYw4YW4ziMFM21fhtRmfirU=",
|
||||
"owner": "nix-community",
|
||||
"repo": "disko",
|
||||
"rev": "d0c543d740fad42fe2c035b43c9d41127e073c78",
|
||||
"rev": "b5d1320ebc2f34dbea4655f95167f55e2130cdb3",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -118,10 +118,10 @@
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 315532800,
|
||||
"narHash": "sha256-pxwYhAgOyComW58BCfboADZWr4b5oS8hP9E9fQ489HM=",
|
||||
"rev": "f21e4546e3ede7ae34d12a84602a22246b31f7e0",
|
||||
"narHash": "sha256-EbVl0wIdDYZWrxpQoxPlXfliaR4KHA9xP5dVjG1CZxI=",
|
||||
"rev": "ed30f8aba41605e3ab46421e3dcb4510ec560ff8",
|
||||
"type": "tarball",
|
||||
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre793694.f21e4546e3ed/nixexprs.tar.xz"
|
||||
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre794180.ed30f8aba416/nixexprs.tar.xz"
|
||||
},
|
||||
"original": {
|
||||
"type": "tarball",
|
||||
@@ -149,11 +149,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1745310711,
|
||||
"narHash": "sha256-ePyTpKEJTgX0gvgNQWd7tQYQ3glIkbqcW778RpHlqgA=",
|
||||
"lastModified": 1746485181,
|
||||
"narHash": "sha256-PxrrSFLaC7YuItShxmYbMgSuFFuwxBB+qsl9BZUnRvg=",
|
||||
"owner": "Mic92",
|
||||
"repo": "sops-nix",
|
||||
"rev": "5e3e92b16d6fdf9923425a8d4df7496b2434f39c",
|
||||
"rev": "e93ee1d900ad264d65e9701a5c6f895683433386",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
@@ -10,6 +10,11 @@ let
|
||||
in
|
||||
{
|
||||
options = {
|
||||
_prefix = lib.mkOption {
|
||||
type = types.listOf types.str;
|
||||
internal = true;
|
||||
default = [ ];
|
||||
};
|
||||
self = lib.mkOption {
|
||||
type = types.raw;
|
||||
default = self;
|
||||
@@ -168,6 +173,7 @@ in
|
||||
inventoryFile = lib.mkOption { type = lib.types.raw; };
|
||||
# The machine 'imports' generated by the inventory per machine
|
||||
inventoryClass = lib.mkOption { type = lib.types.raw; };
|
||||
evalServiceSchema = lib.mkOption { };
|
||||
# clan-core's modules
|
||||
clanModules = lib.mkOption { type = lib.types.raw; };
|
||||
source = lib.mkOption { type = lib.types.raw; };
|
||||
|
||||
@@ -44,6 +44,8 @@ let
|
||||
buildInventory {
|
||||
inherit inventory directory;
|
||||
flakeInputs = config.self.inputs;
|
||||
prefix = config._prefix ++ [ "inventoryClass" ];
|
||||
localModuleSet = config.self.clan.modules;
|
||||
}
|
||||
);
|
||||
|
||||
@@ -204,6 +206,9 @@ in
|
||||
|
||||
inherit inventoryClass;
|
||||
|
||||
# Endpoint that can be called to get a service schema
|
||||
evalServiceSchema = clan-core.clanLib.evalServiceSchema config.self;
|
||||
|
||||
# TODO: unify this interface
|
||||
# We should have only clan.modules. (consistent with clan.templates)
|
||||
inherit (clan-core) clanModules clanLib;
|
||||
|
||||
@@ -15,10 +15,27 @@ lib.fix (clanLib: {
  */
  callLib = file: args: import file ({ inherit lib clanLib; } // args);

  # ------------------------------------
  buildClan = clanLib.buildClanModule.buildClanWith {
    clan-core = self;
    inherit nixpkgs nix-darwin;
  };
  evalServiceSchema =
    self:
    {
      moduleSpec,
      flakeInputs ? self.inputs,
      localModuleSet ? self.clan.modules,
    }:
    let
      resolvedModule = clanLib.inventory.resolveModule {
        inherit moduleSpec flakeInputs localModuleSet;
      };
    in
    (clanLib.inventory.evalClanService {
      modules = [ resolvedModule ];
      prefix = [ ];
    }).config.result.api.schema;
  # ------------------------------------
  # ClanLib functions
  evalClan = clanLib.callLib ./inventory/eval-clan-modules { };

@@ -12,25 +12,34 @@ let
|
||||
inventory,
|
||||
directory,
|
||||
flakeInputs,
|
||||
prefix ? [ ],
|
||||
localModuleSet ? { },
|
||||
}:
|
||||
(lib.evalModules {
|
||||
# TODO: remove clanLib from specialArgs
|
||||
specialArgs = {
|
||||
inherit clanLib;
|
||||
};
|
||||
modules = [
|
||||
./builder
|
||||
(lib.modules.importApply ./service-list-from-inputs.nix {
|
||||
inherit flakeInputs clanLib localModuleSet;
|
||||
})
|
||||
{ inherit directory inventory; }
|
||||
(
|
||||
# config.distributedServices.allMachines.${name} or [ ];
|
||||
{ config, ... }:
|
||||
{
|
||||
|
||||
distributedServices = clanLib.inventory.mapInstances {
|
||||
inherit (config) inventory;
|
||||
inherit flakeInputs;
|
||||
prefix = prefix ++ [ "distributedServices" ];
|
||||
};
|
||||
machines = lib.mapAttrs (_machineName: v: {
|
||||
machineImports = v;
|
||||
}) config.distributedServices.allMachines;
|
||||
|
||||
}
|
||||
)
|
||||
(lib.modules.importApply ./inventory-introspection.nix { inherit clanLib; })
|
||||
|
||||
43 lib/inventory/build-inventory/service-list-from-inputs.nix (new file)
@@ -0,0 +1,43 @@
|
||||
{
|
||||
flakeInputs,
|
||||
clanLib,
|
||||
localModuleSet,
|
||||
}:
|
||||
{ lib, config, ... }:
|
||||
|
||||
let
|
||||
|
||||
inspectModule =
|
||||
inputName: moduleName: module:
|
||||
let
|
||||
eval = clanLib.inventory.evalClanService {
|
||||
modules = [ module ];
|
||||
prefix = [
|
||||
inputName
|
||||
"clan"
|
||||
"modules"
|
||||
moduleName
|
||||
];
|
||||
};
|
||||
in
|
||||
{
|
||||
manifest = eval.config.manifest;
|
||||
roles = lib.mapAttrs (_n: _v: { }) eval.config.roles;
|
||||
};
|
||||
in
|
||||
{
|
||||
options.modulesPerSource = lib.mkOption {
|
||||
# { sourceName :: { moduleName :: {} }}
|
||||
default =
|
||||
let
|
||||
inputsWithModules = lib.filterAttrs (_inputName: v: v ? clan.modules) flakeInputs;
|
||||
|
||||
in
|
||||
lib.mapAttrs (
|
||||
inputName: v: lib.mapAttrs (inspectModule inputName) v.clan.modules
|
||||
) inputsWithModules;
|
||||
};
|
||||
options.localModules = lib.mkOption {
|
||||
default = lib.mapAttrs (inspectModule "self") localModuleSet;
|
||||
};
|
||||
}
|
||||
@@ -3,7 +3,7 @@ let
|
||||
services = clanLib.callLib ./distributed-service/inventory-adapter.nix { };
|
||||
in
|
||||
{
|
||||
inherit (services) evalClanService mapInstances;
|
||||
inherit (services) evalClanService mapInstances resolveModule;
|
||||
inherit (import ./build-inventory { inherit lib clanLib; }) buildInventory;
|
||||
interface = ./build-inventory/interface.nix;
|
||||
# Returns the list of machine names
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# This module enables itself if
|
||||
# manifest.features.API = true
|
||||
# It converts the roles.interface to a json-schema
|
||||
{ clanLib, attrName }:
|
||||
{ clanLib, prefix }:
|
||||
let
|
||||
converter = clanLib.jsonschema {
|
||||
includeDefaults = true;
|
||||
@@ -45,7 +45,7 @@ in
|
||||
|
||||
To see the evaluation problem run
|
||||
|
||||
nix eval .#clanInternals.inventoryClass.distributedServices.importedModulesEvaluated.${attrName}.config.result.api.schema.${roleName}
|
||||
nix eval .#${lib.concatStringsSep "." prefix}.config.result.api.schema.${roleName}
|
||||
'';
|
||||
assertion = (builtins.tryEval (lib.deepSeq config.result.api.schema.${roleName} true)).success;
|
||||
};
|
||||
|
||||
@@ -16,27 +16,72 @@
|
||||
}:
|
||||
let
|
||||
evalClanService =
|
||||
{ modules, key }:
|
||||
{ modules, prefix }:
|
||||
(lib.evalModules {
|
||||
class = "clan.service";
|
||||
modules = [
|
||||
./service-module.nix
|
||||
# feature modules
|
||||
(lib.modules.importApply ./api-feature.nix {
|
||||
inherit clanLib;
|
||||
attrName = key;
|
||||
inherit clanLib prefix;
|
||||
})
|
||||
] ++ modules;
|
||||
});
|
||||
|
||||
resolveModule =
|
||||
{
|
||||
moduleSpec,
|
||||
flakeInputs,
|
||||
localModuleSet,
|
||||
}:
|
||||
let
|
||||
# TODO:
|
||||
resolvedModuleSet =
|
||||
# If the module.name is self then take the modules defined in the flake
|
||||
# Otherwise its an external input which provides the modules via 'clan.modules' attribute
|
||||
if moduleSpec.input == null then
|
||||
localModuleSet
|
||||
else
|
||||
let
|
||||
input =
|
||||
flakeInputs.${moduleSpec.input} or (throw ''
|
||||
Flake doesn't provide input with name '${moduleSpec.input}'
|
||||
|
||||
Choose one of the following inputs:
|
||||
- ${
|
||||
builtins.concatStringsSep "\n- " (
|
||||
lib.attrNames (lib.filterAttrs (_name: input: input ? clan) flakeInputs)
|
||||
)
|
||||
}
|
||||
|
||||
To import a local module from 'inventory.modules' remove the 'input' attribute from the module definition
|
||||
Remove the following line from the module definition:
|
||||
|
||||
...
|
||||
- module.input = "${moduleSpec.input}"
|
||||
|
||||
'');
|
||||
clanAttrs =
|
||||
input.clan
|
||||
or (throw "It seems the flake input ${moduleSpec.input} doesn't export any clan resources");
|
||||
in
|
||||
clanAttrs.modules;
|
||||
|
||||
resolvedModule =
|
||||
resolvedModuleSet.${moduleSpec.name}
|
||||
or (throw "flake doesn't provide clan-module with name ${moduleSpec.name}");
|
||||
in
|
||||
resolvedModule;
|
||||
in
|
||||
{
|
||||
inherit evalClanService;
|
||||
inherit evalClanService resolveModule;
|
||||
mapInstances =
|
||||
{
|
||||
# This is used to resolve the module imports from 'flake.inputs'
|
||||
flakeInputs,
|
||||
# The clan inventory
|
||||
inventory,
|
||||
prefix ? [ ],
|
||||
}:
|
||||
let
|
||||
# machineHasTag = machineName: tagName: lib.elem tagName inventory.machines.${machineName}.tags;
|
||||
@@ -45,42 +90,11 @@ in
|
||||
importedModuleWithInstances = lib.mapAttrs (
|
||||
instanceName: instance:
|
||||
let
|
||||
# TODO:
|
||||
resolvedModuleSet =
|
||||
# If the module.name is self then take the modules defined in the flake
|
||||
# Otherwise its an external input which provides the modules via 'clan.modules' attribute
|
||||
if instance.module.input == null then
|
||||
inventory.modules
|
||||
else
|
||||
let
|
||||
input =
|
||||
flakeInputs.${instance.module.input} or (throw ''
|
||||
Flake doesn't provide input with name '${instance.module.input}'
|
||||
|
||||
Choose one of the following inputs:
|
||||
- ${
|
||||
builtins.concatStringsSep "\n- " (
|
||||
lib.attrNames (lib.filterAttrs (_name: input: input ? clan) flakeInputs)
|
||||
)
|
||||
}
|
||||
|
||||
To import a local module from 'inventory.modules' remove the 'input' attribute from the module definition
|
||||
Remove the following line from the module definition:
|
||||
|
||||
...
|
||||
- module.input = "${instance.module.input}"
|
||||
|
||||
|
||||
'');
|
||||
clanAttrs =
|
||||
input.clan
|
||||
or (throw "It seems the flake input ${instance.module.input} doesn't export any clan resources");
|
||||
in
|
||||
clanAttrs.modules;
|
||||
|
||||
resolvedModule =
|
||||
resolvedModuleSet.${instance.module.name}
|
||||
or (throw "flake doesn't provide clan-module with name ${instance.module.name}");
|
||||
resolvedModule = resolveModule {
|
||||
moduleSpec = instance.module;
|
||||
localModuleSet = inventory.modules;
|
||||
inherit flakeInputs;
|
||||
};
|
||||
|
||||
# Every instance includes machines via roles
|
||||
# :: { client :: ... }
|
||||
@@ -138,7 +152,7 @@ in
|
||||
importedModulesEvaluated = lib.mapAttrs (
|
||||
module_ident: instances:
|
||||
evalClanService {
|
||||
key = module_ident;
|
||||
prefix = prefix ++ [ module_ident ];
|
||||
modules =
|
||||
[
|
||||
# Import the resolved module.
|
||||
|
||||
@@ -255,7 +255,9 @@ in
|
||||
{
|
||||
options.API = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
# This is read only, because we don't support turning it off yet
|
||||
readOnly = true;
|
||||
default = true;
|
||||
description = ''
|
||||
Enables automatic API schema conversion for the interface of this module.
|
||||
'';
|
||||
|
||||
@@ -92,7 +92,7 @@ in
|
||||
lib.lazyDerivation {
|
||||
# lazyDerivation improves performance when only passthru items and/or meta are used.
|
||||
derivation = hostPkgs.stdenv.mkDerivation {
|
||||
name = "vm-test-run-${config.name}";
|
||||
name = "container-test-run-${config.name}";
|
||||
|
||||
requiredSystemFeatures = [ "uid-range" ];
|
||||
|
||||
|
||||
@@ -187,6 +187,22 @@ class Machine:
|
||||
if line_pattern.match(line)
|
||||
)
|
||||
|
||||
def nsenter_command(self, command: str) -> list[str]:
|
||||
return [
|
||||
"nsenter",
|
||||
"--target",
|
||||
str(self.container_pid),
|
||||
"--mount",
|
||||
"--uts",
|
||||
"--ipc",
|
||||
"--net",
|
||||
"--pid",
|
||||
"--cgroup",
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
command,
|
||||
]
|
||||
|
||||
def execute(
|
||||
self,
|
||||
command: str,
|
||||
@@ -231,20 +247,7 @@ class Machine:
|
||||
command = f"set -eo pipefail; source /etc/profile; set -u; {command}"
|
||||
|
||||
proc = subprocess.run(
|
||||
[
|
||||
"nsenter",
|
||||
"--target",
|
||||
str(self.container_pid),
|
||||
"--mount",
|
||||
"--uts",
|
||||
"--ipc",
|
||||
"--net",
|
||||
"--pid",
|
||||
"--cgroup",
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
command,
|
||||
],
|
||||
self.nsenter_command(command),
|
||||
timeout=timeout,
|
||||
check=False,
|
||||
stdout=subprocess.PIPE,
|
||||
@@ -465,6 +468,9 @@ class Driver:
|
||||
print(f"Starting {machine.name}")
|
||||
machine.start()
|
||||
|
||||
for machine in self.machines:
|
||||
print(" ".join(machine.nsenter_command("bash")))
|
||||
|
||||
def test_symbols(self) -> dict[str, Any]:
|
||||
general_symbols = {
|
||||
"start_all": self.start_all,
|
||||
|
||||
@@ -22,6 +22,9 @@ in
|
||||
pkgs,
|
||||
self,
|
||||
useContainers ? true,
|
||||
# Displayed for better error messages, otherwise the placeholder
|
||||
system ? "<system>",
|
||||
attrName ? "<check_name>",
|
||||
...
|
||||
}:
|
||||
let
|
||||
@@ -35,7 +38,7 @@ in
|
||||
{
|
||||
imports = [
|
||||
nixosTest
|
||||
] ++ lib.optionals (useContainers) [ ./container-test-driver/driver-module.nix ];
|
||||
] ++ lib.optionals useContainers [ ./container-test-driver/driver-module.nix ];
|
||||
options = {
|
||||
clanSettings = mkOption {
|
||||
default = { };
|
||||
@@ -60,6 +63,15 @@ in
|
||||
};
|
||||
modules = [
|
||||
clanLib.buildClanModule.flakePartsModule
|
||||
{
|
||||
_prefix = [
|
||||
"checks"
|
||||
system
|
||||
attrName
|
||||
"config"
|
||||
"clan"
|
||||
];
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
@@ -39,9 +39,35 @@ in
|
||||
type = submodule { imports = [ ./interface.nix ]; };
|
||||
};
|
||||
|
||||
config.system.clan.deployment.data = {
|
||||
vars = config.clan.core.vars._serialized;
|
||||
inherit (config.clan.core.networking) targetHost buildHost;
|
||||
inherit (config.clan.core.deployment) requireExplicitUpdate;
|
||||
config = {
|
||||
# check all that all non-secret files have no owner/group/mode set
|
||||
warnings = lib.foldl' (
|
||||
warnings: generator:
|
||||
warnings
|
||||
++ lib.foldl' (
|
||||
warnings: file:
|
||||
warnings
|
||||
++
|
||||
lib.optional
|
||||
(
|
||||
!file.secret
|
||||
&& (
|
||||
file.owner != "root"
|
||||
|| file.group != (if _class == "darwin" then "wheel" else "root")
|
||||
|| file.mode != "0400"
|
||||
)
|
||||
)
|
||||
''
|
||||
The config.clan.core.vars.generators.${generator.name}.files.${file.name} is not secret, but has non-default owner/group/mode set.
|
||||
This doesn't work because the file will be added to the nix store
|
||||
''
|
||||
) [ ] (lib.attrValues generator.files)
|
||||
) [ ] (lib.attrValues config.clan.core.vars.generators);
|
||||
|
||||
system.clan.deployment.data = {
|
||||
vars = config.clan.core.vars._serialized;
|
||||
inherit (config.clan.core.networking) targetHost buildHost;
|
||||
inherit (config.clan.core.deployment) requireExplicitUpdate;
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
@@ -39,7 +39,7 @@ in
|
||||
internal = true;
|
||||
description = ''
|
||||
JSON serialization of the generators.
|
||||
This is read from the python client to generate the specified ressources.
|
||||
This is read from the python client to generate the specified resources.
|
||||
'';
|
||||
default = {
|
||||
# TODO: We don't support per-machine choice of backends
|
||||
@@ -258,7 +258,7 @@ in
|
||||
defaultText = ''
|
||||
builtins.path {
|
||||
name = "$${generator.config._module.args.name}_$${file.config._module.args.name}";
|
||||
path = file.config.inRepoPath;
|
||||
path = file.config.flakePath;
|
||||
}
|
||||
'';
|
||||
default = builtins.path {
|
||||
|
||||
@@ -29,6 +29,8 @@ mkShell {
|
||||
export GIT_ROOT=$(git rev-parse --show-toplevel)
|
||||
export PKG_ROOT=$GIT_ROOT/pkgs/clan-app
|
||||
|
||||
export CLAN_CORE_PATH="$GIT_ROOT"
|
||||
|
||||
# Add current package to PYTHONPATH
|
||||
export PYTHONPATH="$PKG_ROOT''${PYTHONPATH:+:$PYTHONPATH:}"
|
||||
|
||||
|
||||
@@ -6,13 +6,13 @@ from pathlib import Path
|
||||
from types import ModuleType
|
||||
|
||||
# These imports are unused, but necessary for @API.register to run once.
|
||||
from clan_lib.api import admin, directory, disk, iwd, mdns_discovery, modules
|
||||
from clan_lib.api import directory, disk, iwd, mdns_discovery, modules
|
||||
|
||||
from .arg_actions import AppendOptionAction
|
||||
from .clan import show, update
|
||||
|
||||
# API endpoints that are not used in the cli.
|
||||
__all__ = ["admin", "directory", "disk", "iwd", "mdns_discovery", "modules", "update"]
|
||||
__all__ = ["directory", "disk", "iwd", "mdns_discovery", "modules", "update"]
|
||||
|
||||
from . import (
|
||||
backups,
|
||||
|
||||
@@ -19,21 +19,23 @@ def create_backup(machine: Machine, provider: str | None = None) -> None:
|
||||
if not backup_scripts["providers"]:
|
||||
msg = "No providers specified"
|
||||
raise ClanError(msg)
|
||||
for provider in backup_scripts["providers"]:
|
||||
proc = machine.target_host.run(
|
||||
[backup_scripts["providers"][provider]["create"]],
|
||||
)
|
||||
if proc.returncode != 0:
|
||||
msg = "failed to start backup"
|
||||
raise ClanError(msg)
|
||||
print("successfully started backup")
|
||||
with machine.target_host() as host:
|
||||
for provider in backup_scripts["providers"]:
|
||||
proc = host.run(
|
||||
[backup_scripts["providers"][provider]["create"]],
|
||||
)
|
||||
if proc.returncode != 0:
|
||||
msg = "failed to start backup"
|
||||
raise ClanError(msg)
|
||||
print("successfully started backup")
|
||||
else:
|
||||
if provider not in backup_scripts["providers"]:
|
||||
msg = f"provider {provider} not found"
|
||||
raise ClanError(msg)
|
||||
proc = machine.target_host.run(
|
||||
[backup_scripts["providers"][provider]["create"]],
|
||||
)
|
||||
with machine.target_host() as host:
|
||||
proc = host.run(
|
||||
[backup_scripts["providers"][provider]["create"]],
|
||||
)
|
||||
if proc.returncode != 0:
|
||||
msg = "failed to start backup"
|
||||
raise ClanError(msg)
|
||||
|
||||
@@ -10,6 +10,7 @@ from clan_cli.completions import (
|
||||
)
|
||||
from clan_cli.errors import ClanError
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.ssh.host import Host
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -18,11 +19,11 @@ class Backup:
|
||||
job_name: str | None = None
|
||||
|
||||
|
||||
def list_provider(machine: Machine, provider: str) -> list[Backup]:
|
||||
def list_provider(machine: Machine, host: Host, provider: str) -> list[Backup]:
|
||||
results = []
|
||||
backup_metadata = machine.eval_nix("config.clan.core.backups")
|
||||
list_command = backup_metadata["providers"][provider]["list"]
|
||||
proc = machine.target_host.run(
|
||||
proc = host.run(
|
||||
[list_command],
|
||||
RunOpts(log=Log.NONE, check=False),
|
||||
)
|
||||
@@ -48,12 +49,13 @@ def list_provider(machine: Machine, provider: str) -> list[Backup]:
|
||||
def list_backups(machine: Machine, provider: str | None = None) -> list[Backup]:
|
||||
backup_metadata = machine.eval_nix("config.clan.core.backups")
|
||||
results = []
|
||||
if provider is None:
|
||||
for _provider in backup_metadata["providers"]:
|
||||
results += list_provider(machine, _provider)
|
||||
with machine.target_host() as host:
|
||||
if provider is None:
|
||||
for _provider in backup_metadata["providers"]:
|
||||
results += list_provider(machine, host, _provider)
|
||||
|
||||
else:
|
||||
results += list_provider(machine, provider)
|
||||
else:
|
||||
results += list_provider(machine, host, provider)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
@@ -8,9 +8,12 @@ from clan_cli.completions import (
|
||||
)
|
||||
from clan_cli.errors import ClanError
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.ssh.host import Host
|
||||
|
||||
|
||||
def restore_service(machine: Machine, name: str, provider: str, service: str) -> None:
|
||||
def restore_service(
|
||||
machine: Machine, host: Host, name: str, provider: str, service: str
|
||||
) -> None:
|
||||
backup_metadata = machine.eval_nix("config.clan.core.backups")
|
||||
backup_folders = machine.eval_nix("config.clan.core.state")
|
||||
|
||||
@@ -25,7 +28,7 @@ def restore_service(machine: Machine, name: str, provider: str, service: str) ->
|
||||
env["FOLDERS"] = ":".join(set(folders))
|
||||
|
||||
if pre_restore := backup_folders[service]["preRestoreCommand"]:
|
||||
proc = machine.target_host.run(
|
||||
proc = host.run(
|
||||
[pre_restore],
|
||||
RunOpts(log=Log.STDERR),
|
||||
extra_env=env,
|
||||
@@ -34,7 +37,7 @@ def restore_service(machine: Machine, name: str, provider: str, service: str) ->
|
||||
msg = f"failed to run preRestoreCommand: {pre_restore}, error was: {proc.stdout}"
|
||||
raise ClanError(msg)
|
||||
|
||||
proc = machine.target_host.run(
|
||||
proc = host.run(
|
||||
[backup_metadata["providers"][provider]["restore"]],
|
||||
RunOpts(log=Log.STDERR),
|
||||
extra_env=env,
|
||||
@@ -44,7 +47,7 @@ def restore_service(machine: Machine, name: str, provider: str, service: str) ->
|
||||
raise ClanError(msg)
|
||||
|
||||
if post_restore := backup_folders[service]["postRestoreCommand"]:
|
||||
proc = machine.target_host.run(
|
||||
proc = host.run(
|
||||
[post_restore],
|
||||
RunOpts(log=Log.STDERR),
|
||||
extra_env=env,
|
||||
@@ -61,18 +64,19 @@ def restore_backup(
|
||||
service: str | None = None,
|
||||
) -> None:
|
||||
errors = []
|
||||
if service is None:
|
||||
backup_folders = machine.eval_nix("config.clan.core.state")
|
||||
for _service in backup_folders:
|
||||
with machine.target_host() as host:
|
||||
if service is None:
|
||||
backup_folders = machine.eval_nix("config.clan.core.state")
|
||||
for _service in backup_folders:
|
||||
try:
|
||||
restore_service(machine, host, name, provider, _service)
|
||||
except ClanError as e:
|
||||
errors.append(f"{_service}: {e}")
|
||||
else:
|
||||
try:
|
||||
restore_service(machine, name, provider, _service)
|
||||
restore_service(machine, host, name, provider, service)
|
||||
except ClanError as e:
|
||||
errors.append(f"{_service}: {e}")
|
||||
else:
|
||||
try:
|
||||
restore_service(machine, name, provider, service)
|
||||
except ClanError as e:
|
||||
errors.append(f"{service}: {e}")
|
||||
errors.append(f"{service}: {e}")
|
||||
if errors:
|
||||
raise ClanError(
|
||||
"Restore failed for the following services:\n" + "\n".join(errors)
|
||||
|
||||
@@ -1,4 +1,8 @@
|
||||
from clan_cli.cmd import run
|
||||
import os
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
|
||||
from clan_cli.cmd import Log, RunOpts, run
|
||||
from clan_cli.nix import nix_shell
|
||||
|
||||
_works: bool | None = None
|
||||
@@ -12,6 +16,11 @@ def bubblewrap_works() -> bool:
|
||||
|
||||
|
||||
def _bubblewrap_works() -> bool:
|
||||
real_bash_path = Path("bash")
|
||||
if os.environ.get("IN_NIX_SANDBOX"):
|
||||
bash_executable_path = Path(str(shutil.which("bash")))
|
||||
real_bash_path = bash_executable_path.resolve()
|
||||
|
||||
# fmt: off
|
||||
cmd = nix_shell(
|
||||
[
|
||||
@@ -30,13 +39,10 @@ def _bubblewrap_works() -> bool:
|
||||
"--gid", "1000",
|
||||
"--",
|
||||
# do nothing, just test if bash executes
|
||||
"bash", "-c", ":"
|
||||
str(real_bash_path), "-c", ":"
|
||||
],
|
||||
)
|
||||
|
||||
# fmt: on
|
||||
try:
|
||||
run(cmd)
|
||||
except Exception:
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
res = run(cmd, RunOpts(log=Log.BOTH, check=False))
|
||||
return res.returncode == 0
|
||||
|
||||
@@ -107,7 +107,7 @@ def create_clan(opts: CreateOptions) -> CreateClanResponse:
|
||||
response.flake_update = flake_update
|
||||
|
||||
if opts.initial:
|
||||
init_inventory(str(opts.dest), init=opts.initial)
|
||||
init_inventory(Flake(str(opts.dest)), init=opts.initial)
|
||||
|
||||
return response
|
||||
|
||||
|
||||
@@ -2,20 +2,21 @@ from dataclasses import dataclass
|
||||
|
||||
from clan_lib.api import API
|
||||
|
||||
from clan_cli.flake import Flake
|
||||
from clan_cli.inventory import Inventory, Meta, load_inventory_json, set_inventory
|
||||
|
||||
|
||||
@dataclass
|
||||
class UpdateOptions:
|
||||
directory: str
|
||||
flake: Flake
|
||||
meta: Meta
|
||||
|
||||
|
||||
@API.register
|
||||
def update_clan_meta(options: UpdateOptions) -> Inventory:
|
||||
inventory = load_inventory_json(options.directory)
|
||||
inventory = load_inventory_json(options.flake)
|
||||
inventory["meta"] = options.meta
|
||||
|
||||
set_inventory(inventory, options.directory, "Update clan metadata")
|
||||
set_inventory(inventory, options.flake, "Update clan metadata")
|
||||
|
||||
return inventory
|
||||
|
||||
@@ -1,9 +1,3 @@
|
||||
import pytest
|
||||
|
||||
from clan_cli.custom_logger import setup_logging
|
||||
|
||||
# collect_ignore = ["./nixpkgs"]
|
||||
|
||||
pytest_plugins = [
|
||||
"clan_cli.tests.temporary_dir",
|
||||
"clan_cli.tests.root",
|
||||
@@ -19,13 +13,3 @@ pytest_plugins = [
|
||||
"clan_cli.tests.stdout",
|
||||
"clan_cli.tests.nix_config",
|
||||
]
|
||||
|
||||
|
||||
# Executed on pytest session start
|
||||
def pytest_sessionstart(session: pytest.Session) -> None:
|
||||
# This function will be called once at the beginning of the test session
|
||||
print("Starting pytest session")
|
||||
# You can access the session config, items, testsfailed, etc.
|
||||
print(f"Session config: {session.config}")
|
||||
|
||||
setup_logging(level="INFO")
|
||||
|
||||
@@ -2,7 +2,6 @@ import inspect
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import termios
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
@@ -75,16 +74,7 @@ class PrefixFormatter(logging.Formatter):
|
||||
if self.trace_prints:
|
||||
format_str += f"\nSource: {filepath}:%(lineno)d::%(funcName)s\n"
|
||||
|
||||
line = logging.Formatter(format_str).format(record)
|
||||
try:
|
||||
# Programs like sudo can set the terminal into raw mode.
|
||||
# This means newlines are no longer translated to include a carriage return to the end.
|
||||
# https://unix.stackexchange.com/questions/151916/why-is-this-binary-file-transferred-over-ssh-t-being-changed/151963#15196
|
||||
if not termios.tcgetattr(sys.stdout.fileno())[3] & termios.ECHO:
|
||||
line += "\r"
|
||||
except Exception: # not a tty or mocked sys.stdout
|
||||
pass
|
||||
return line
|
||||
return logging.Formatter(format_str).format(record)
|
||||
|
||||
def hostname_colorcode(self, hostname: str) -> tuple[int, int, int]:
|
||||
colorcodes = RgbColor.list_values()
|
||||
|
||||
@@ -4,9 +4,13 @@ import sys
|
||||
import urllib
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from .errors import ClanError
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from clan_cli.flake import Flake
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -131,12 +135,17 @@ def vm_state_dir(flake_url: str, vm_name: str) -> Path:
|
||||
return user_data_dir() / "clan" / "vmstate" / clan_key / vm_name
|
||||
|
||||
|
||||
def machines_dir(flake_dir: Path) -> Path:
|
||||
return flake_dir / "machines"
|
||||
def machines_dir(flake: "Flake") -> Path:
|
||||
if flake.is_local:
|
||||
return flake.path / "machines"
|
||||
|
||||
store_path = flake.store_path
|
||||
assert store_path is not None, "Invalid flake object. Doesn't have a store path"
|
||||
return Path(store_path) / "machines"
|
||||
|
||||
|
||||
def specific_machine_dir(flake_dir: Path, machine: str) -> Path:
|
||||
return machines_dir(flake_dir) / machine
|
||||
def specific_machine_dir(flake: "Flake", machine: str) -> Path:
|
||||
return machines_dir(flake) / machine
|
||||
|
||||
|
||||
def module_root() -> Path:
|
||||
|
||||
@@ -4,6 +4,7 @@ from abc import ABC, abstractmethod
|
||||
from pathlib import Path
|
||||
|
||||
import clan_cli.machines.machines as machines
|
||||
from clan_cli.ssh.host import Host
|
||||
|
||||
|
||||
class SecretStoreBase(ABC):
|
||||
@@ -25,7 +26,7 @@ class SecretStoreBase(ABC):
|
||||
def exists(self, service: str, name: str) -> bool:
|
||||
pass
|
||||
|
||||
def needs_upload(self) -> bool:
|
||||
def needs_upload(self, host: Host) -> bool:
|
||||
return True
|
||||
|
||||
@abstractmethod
|
||||
|
||||
@@ -6,6 +6,7 @@ from typing import override
|
||||
from clan_cli.cmd import Log, RunOpts
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.nix import nix_shell
|
||||
from clan_cli.ssh.host import Host
|
||||
|
||||
from . import SecretStoreBase
|
||||
|
||||
@@ -93,9 +94,9 @@ class SecretStore(SecretStoreBase):
|
||||
return b"\n".join(hashes)
|
||||
|
||||
@override
|
||||
def needs_upload(self) -> bool:
|
||||
def needs_upload(self, host: Host) -> bool:
|
||||
local_hash = self.generate_hash()
|
||||
remote_hash = self.machine.target_host.run(
|
||||
remote_hash = host.run(
|
||||
# TODO get the path to the secrets from the machine
|
||||
["cat", f"{self.machine.secrets_upload_directory}/.pass_info"],
|
||||
RunOpts(log=Log.STDERR, check=False),
|
||||
|
||||
@@ -6,6 +6,7 @@ from clan_cli.secrets.folders import sops_secrets_folder
|
||||
from clan_cli.secrets.machines import add_machine, has_machine
|
||||
from clan_cli.secrets.secrets import decrypt_secret, encrypt_secret, has_secret
|
||||
from clan_cli.secrets.sops import generate_private_key
|
||||
from clan_cli.ssh.host import Host
|
||||
|
||||
from . import SecretStoreBase
|
||||
|
||||
@@ -60,7 +61,7 @@ class SecretStore(SecretStoreBase):
|
||||
)
|
||||
|
||||
@override
|
||||
def needs_upload(self) -> bool:
|
||||
def needs_upload(self, host: Host) -> bool:
|
||||
return False
|
||||
|
||||
# We rely now on the vars backend to upload the age key
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from typing import override
|
||||
|
||||
from clan_cli.dirs import vm_state_dir
|
||||
from clan_cli.machines.machines import Machine
|
||||
@@ -28,6 +29,7 @@ class SecretStore(SecretStoreBase):
|
||||
def exists(self, service: str, name: str) -> bool:
|
||||
return (self.dir / service / name).exists()
|
||||
|
||||
@override
|
||||
def upload(self, output_dir: Path) -> None:
|
||||
if output_dir.exists():
|
||||
shutil.rmtree(output_dir)
|
||||
|
||||
@@ -5,13 +5,14 @@ from tempfile import TemporaryDirectory
|
||||
|
||||
from clan_cli.completions import add_dynamic_completer, complete_machines
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.ssh.host import Host
|
||||
from clan_cli.ssh.upload import upload
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def upload_secrets(machine: Machine) -> None:
|
||||
if not machine.secret_facts_store.needs_upload():
|
||||
def upload_secrets(machine: Machine, host: Host) -> None:
|
||||
if not machine.secret_facts_store.needs_upload(host):
|
||||
machine.info("Secrets already uploaded")
|
||||
return
|
||||
|
||||
@@ -19,13 +20,13 @@ def upload_secrets(machine: Machine) -> None:
|
||||
local_secret_dir = Path(_tempdir).resolve()
|
||||
machine.secret_facts_store.upload(local_secret_dir)
|
||||
remote_secret_dir = Path(machine.secrets_upload_directory)
|
||||
|
||||
upload(machine.target_host, local_secret_dir, remote_secret_dir)
|
||||
upload(host, local_secret_dir, remote_secret_dir)
|
||||
|
||||
|
||||
def upload_command(args: argparse.Namespace) -> None:
|
||||
machine = Machine(name=args.machine, flake=args.flake)
|
||||
upload_secrets(machine)
|
||||
with machine.target_host() as host:
|
||||
upload_secrets(machine, host)
|
||||
|
||||
|
||||
def register_upload_parser(parser: argparse.ArgumentParser) -> None:
|
||||
|
||||
@@ -14,6 +14,7 @@ from clan_cli.nix import (
|
||||
nix_build,
|
||||
nix_command,
|
||||
nix_config,
|
||||
nix_eval,
|
||||
nix_metadata,
|
||||
nix_test_store,
|
||||
)
|
||||
@@ -619,11 +620,9 @@ class Flake:
|
||||
except Exception as e:
|
||||
log.warning(f"Failed load eval cache: {e}. Continue without cache")
|
||||
|
||||
def invalidate_cache(self) -> None:
|
||||
def prefetch(self) -> None:
|
||||
"""
|
||||
Invalidate the cache and reload it.
|
||||
|
||||
This method is used to refresh the cache by reloading it from the flake.
|
||||
Loads the flake into the store and populates self.store_path and self.hash such that the flake can evaluate locally and offline
|
||||
"""
|
||||
cmd = [
|
||||
"flake",
|
||||
@@ -642,6 +641,15 @@ class Flake:
|
||||
flake_metadata = json.loads(flake_prefetch.stdout)
|
||||
self.store_path = flake_metadata["storePath"]
|
||||
self.hash = flake_metadata["hash"]
|
||||
self.flake_metadata = flake_metadata
|
||||
|
||||
def invalidate_cache(self) -> None:
|
||||
"""
|
||||
Invalidate the cache and reload it.
|
||||
|
||||
This method is used to refresh the cache by reloading it from the flake.
|
||||
"""
|
||||
self.prefetch()
|
||||
|
||||
self._cache = FlakeCache()
|
||||
assert self.hash is not None
|
||||
@@ -651,17 +659,17 @@ class Flake:
|
||||
)
|
||||
self.load_cache()
|
||||
|
||||
if "original" not in flake_metadata:
|
||||
flake_metadata = nix_metadata(self.identifier)
|
||||
if "original" not in self.flake_metadata:
|
||||
self.flake_metadata = nix_metadata(self.identifier)
|
||||
|
||||
if flake_metadata["original"].get("url", "").startswith("file:"):
|
||||
if self.flake_metadata["original"].get("url", "").startswith("file:"):
|
||||
self._is_local = True
|
||||
path = flake_metadata["original"]["url"].removeprefix("file://")
|
||||
path = self.flake_metadata["original"]["url"].removeprefix("file://")
|
||||
path = path.removeprefix("file:")
|
||||
self._path = Path(path)
|
||||
elif flake_metadata["original"].get("path"):
|
||||
elif self.flake_metadata["original"].get("path"):
|
||||
self._is_local = True
|
||||
self._path = Path(flake_metadata["original"]["path"])
|
||||
self._path = Path(self.flake_metadata["original"]["path"])
|
||||
else:
|
||||
self._is_local = False
|
||||
assert self.store_path is not None
|
||||
@@ -755,6 +763,56 @@ class Flake:
|
||||
if self.flake_cache_path:
|
||||
self._cache.save_to_file(self.flake_cache_path)
|
||||
|
||||
def uncached_nix_eval_with_args(
|
||||
self,
|
||||
attr_path: str,
|
||||
f_args: dict[str, str],
|
||||
nix_options: list[str] | None = None,
|
||||
) -> str:
|
||||
"""
|
||||
Calls a nix function with the provided arguments 'f_args'
|
||||
The argument must be an attribute set.
|
||||
|
||||
Args:
|
||||
attr_path (str): The attribute path to the nix function
|
||||
f_args (dict[str, nix_expr]): A python dictionary mapping from the name of the argument to a raw nix expression.
|
||||
|
||||
Example
|
||||
|
||||
flake.uncached_nix_eval_with_args(
|
||||
"clanInternals.evalServiceSchema",
|
||||
{ "moduleSpec": "{ name = \"hello-world\"; input = null; }" }
|
||||
)
|
||||
> '{ ...JSONSchema... }'
|
||||
|
||||
"""
|
||||
# Always prefetch, so we don't get any stale information
|
||||
self.prefetch()
|
||||
|
||||
if nix_options is None:
|
||||
nix_options = []
|
||||
|
||||
arg_expr = "{"
|
||||
for arg_name, arg_value in f_args.items():
|
||||
arg_expr += f" {arg_name} = {arg_value}; "
|
||||
arg_expr += "}"
|
||||
|
||||
nix_code = f"""
|
||||
let
|
||||
flake = builtins.getFlake "path:{self.store_path}?narHash={self.hash}";
|
||||
in
|
||||
flake.{attr_path} {arg_expr}
|
||||
"""
|
||||
if tmp_store := nix_test_store():
|
||||
nix_options += ["--store", str(tmp_store)]
|
||||
nix_options.append("--impure")
|
||||
|
||||
output = run(
|
||||
nix_eval(["--expr", nix_code, *nix_options]), RunOpts(log=Log.NONE)
|
||||
).stdout.strip()
|
||||
|
||||
return output
|
||||
|
||||
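The docstring above already gives the intended call shape for uncached_nix_eval_with_args; expanded into a runnable sketch (the moduleSpec value is taken from that example, everything else is a placeholder):

from clan_cli.flake import Flake

flake = Flake("/path/to/clan")  # placeholder
schema_json = flake.uncached_nix_eval_with_args(
    "clanInternals.evalServiceSchema",
    # each value is a raw nix expression, spliced into `{ name = value; ... }`
    {"moduleSpec": '{ name = "hello-world"; input = null; }'},
)
# The result is the raw stdout of the nix eval call, e.g. a JSON schema string.
print(schema_json)
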
def precache(
|
||||
self,
|
||||
selectors: list[str],
|
||||
|
||||
@@ -14,7 +14,7 @@ from clan_cli.facts.generate import generate_facts
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.nix import nix_shell
|
||||
from clan_cli.vars.generate import generate_vars
|
||||
from clan_cli.vars.upload import upload_secret_vars
|
||||
from clan_cli.vars.upload import populate_secret_vars
|
||||
|
||||
from .automount import pause_automounting
|
||||
from .list import list_possible_keymaps, list_possible_languages
|
||||
@@ -35,6 +35,7 @@ class Disk:
|
||||
device: str
|
||||
|
||||
|
||||
# TODO: unify this with machine install
|
||||
@API.register
|
||||
def flash_machine(
|
||||
machine: Machine,
|
||||
@@ -107,7 +108,7 @@ def flash_machine(
|
||||
|
||||
local_dir.mkdir(parents=True)
|
||||
machine.secret_facts_store.upload(local_dir)
|
||||
upload_secret_vars(machine, local_dir)
|
||||
populate_secret_vars(machine, local_dir)
|
||||
disko_install = []
|
||||
|
||||
if os.geteuid() != 0:
|
||||
|
||||
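flash_machine now uses populate_secret_vars to materialise secrets into the local directory that is copied onto the target disk, instead of going through the store's upload path. A sketch of that step in isolation (the directory layout and machine name are assumptions):

from pathlib import Path
from tempfile import TemporaryDirectory

from clan_cli.flake import Flake
from clan_cli.machines.machines import Machine
from clan_cli.vars.upload import populate_secret_vars

machine = Machine(name="flash-target", flake=Flake("/path/to/clan"))  # placeholders
with TemporaryDirectory() as tmp:
    local_dir = Path(tmp) / "secrets"
    local_dir.mkdir(parents=True)
    # Writes the activation/users/services phases into local_dir rather than
    # uploading them over SSH; the flashing step then copies the tree to the disk.
    populate_secret_vars(machine, local_dir)
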
@@ -23,6 +23,7 @@ from clan_lib.api import API, dataclass_to_dict, from_dict
|
||||
|
||||
from clan_cli.cmd import run_no_stdout
|
||||
from clan_cli.errors import ClanCmdError, ClanError
|
||||
from clan_cli.flake import Flake
|
||||
from clan_cli.git import commit_file
|
||||
from clan_cli.nix import nix_eval
|
||||
|
||||
@@ -49,11 +50,11 @@ __all__ = [
|
||||
]
|
||||
|
||||
|
||||
def get_inventory_path(flake_dir: str | Path) -> Path:
|
||||
def get_inventory_path(flake: Flake) -> Path:
|
||||
"""
|
||||
Get the path to the inventory file in the flake directory
|
||||
"""
|
||||
inventory_file = (Path(flake_dir) / "inventory.json").resolve()
|
||||
inventory_file = (flake.path / "inventory.json").resolve()
|
||||
return inventory_file
|
||||
|
||||
|
||||
@@ -61,8 +62,7 @@ def get_inventory_path(flake_dir: str | Path) -> Path:
|
||||
default_inventory: Inventory = {"meta": {"name": "New Clan"}}
|
||||
|
||||
|
||||
@API.register
|
||||
def load_inventory_eval(flake_dir: str | Path) -> Inventory:
|
||||
def load_inventory_eval(flake_dir: Flake) -> Inventory:
|
||||
"""
|
||||
Loads the evaluated inventory.
|
||||
After all merge operations with eventual nix code in buildClan.
|
||||
@@ -355,7 +355,7 @@ def determine_writeability(
|
||||
return results
|
||||
|
||||
|
||||
def get_inventory_current_priority(flake_dir: str | Path) -> dict:
|
||||
def get_inventory_current_priority(flake: Flake) -> dict:
|
||||
"""
|
||||
Returns the current priority of the inventory values
|
||||
|
||||
@@ -375,7 +375,7 @@ def get_inventory_current_priority(flake_dir: str | Path) -> dict:
|
||||
"""
|
||||
cmd = nix_eval(
|
||||
[
|
||||
f"{flake_dir}#clanInternals.inventoryClass.introspection",
|
||||
f"{flake}#clanInternals.inventoryClass.introspection",
|
||||
"--json",
|
||||
]
|
||||
)
|
||||
@@ -393,7 +393,7 @@ def get_inventory_current_priority(flake_dir: str | Path) -> dict:
|
||||
|
||||
|
||||
@API.register
|
||||
def load_inventory_json(flake_dir: str | Path) -> Inventory:
|
||||
def load_inventory_json(flake: Flake) -> Inventory:
|
||||
"""
|
||||
Load the inventory FILE from the flake directory
|
||||
If no file is found, returns an empty dictionary
|
||||
@@ -403,7 +403,7 @@ def load_inventory_json(flake_dir: str | Path) -> Inventory:
|
||||
Use load_inventory_eval instead
|
||||
"""
|
||||
|
||||
inventory_file = get_inventory_path(flake_dir)
|
||||
inventory_file = get_inventory_path(flake)
|
||||
|
||||
if not inventory_file.exists():
|
||||
return {}
|
||||
@@ -473,14 +473,14 @@ def patch(d: dict[str, Any], path: str, content: Any) -> None:
|
||||
|
||||
|
||||
@API.register
|
||||
def patch_inventory_with(base_dir: Path, section: str, content: dict[str, Any]) -> None:
|
||||
def patch_inventory_with(flake: Flake, section: str, content: dict[str, Any]) -> None:
|
||||
"""
|
||||
Pass only the section to update and the content to update with.
|
||||
Make sure you pass only attributes that you would like to persist.
|
||||
ATTENTION: Don't pass nix eval values unintentionally.
|
||||
"""
|
||||
|
||||
inventory_file = get_inventory_path(base_dir)
|
||||
inventory_file = get_inventory_path(flake)
|
||||
|
||||
curr_inventory = {}
|
||||
if inventory_file.exists():
|
||||
@@ -492,7 +492,9 @@ def patch_inventory_with(base_dir: Path, section: str, content: dict[str, Any])
|
||||
with inventory_file.open("w") as f:
|
||||
json.dump(curr_inventory, f, indent=2)
|
||||
|
||||
commit_file(inventory_file, base_dir, commit_message=f"inventory.{section}: Update")
|
||||
commit_file(
|
||||
inventory_file, flake.path, commit_message=f"inventory.{section}: Update"
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -504,16 +506,16 @@ class WriteInfo:
|
||||
|
||||
@API.register
|
||||
def get_inventory_with_writeable_keys(
|
||||
flake_dir: str | Path,
|
||||
flake: Flake,
|
||||
) -> WriteInfo:
|
||||
"""
|
||||
Load the inventory and determine the writeable keys
|
||||
Performs 2 nix evaluations to get the current priority and the inventory
|
||||
"""
|
||||
current_priority = get_inventory_current_priority(flake_dir)
|
||||
current_priority = get_inventory_current_priority(flake)
|
||||
|
||||
data_eval: Inventory = load_inventory_eval(flake_dir)
|
||||
data_disk: Inventory = load_inventory_json(flake_dir)
|
||||
data_eval: Inventory = load_inventory_eval(flake)
|
||||
data_disk: Inventory = load_inventory_json(flake)
|
||||
|
||||
writeables = determine_writeability(
|
||||
current_priority, dict(data_eval), dict(data_disk)
|
||||
@@ -522,16 +524,17 @@ def get_inventory_with_writeable_keys(
|
||||
return WriteInfo(writeables, data_eval, data_disk)
|
||||
|
||||
|
||||
# TODO: remove this function in favor of a proper read/write API
|
||||
@API.register
|
||||
def set_inventory(
|
||||
inventory: Inventory, flake_dir: str | Path, message: str, commit: bool = True
|
||||
inventory: Inventory, flake: Flake, message: str, commit: bool = True
|
||||
) -> None:
|
||||
"""
|
||||
Write the inventory to the flake directory
|
||||
and commit it to git with the given message
|
||||
"""
|
||||
|
||||
write_info = get_inventory_with_writeable_keys(flake_dir)
|
||||
write_info = get_inventory_with_writeable_keys(flake)
|
||||
|
||||
# Remove internals from the inventory
|
||||
inventory.pop("tags", None) # type: ignore
|
||||
@@ -552,43 +555,43 @@ def set_inventory(
|
||||
for delete_path in delete_set:
|
||||
delete_by_path(persisted, delete_path)
|
||||
|
||||
inventory_file = get_inventory_path(flake_dir)
|
||||
inventory_file = get_inventory_path(flake)
|
||||
with inventory_file.open("w") as f:
|
||||
json.dump(persisted, f, indent=2)
|
||||
|
||||
if commit:
|
||||
commit_file(inventory_file, Path(flake_dir), commit_message=message)
|
||||
commit_file(inventory_file, flake.path, commit_message=message)
|
||||
|
||||
|
||||
@API.register
|
||||
def delete(directory: str | Path, delete_set: set[str]) -> None:
|
||||
# TODO: wrap this in a proper persistence API
|
||||
def delete(flake: Flake, delete_set: set[str]) -> None:
|
||||
"""
|
||||
Delete keys from the inventory
|
||||
"""
|
||||
write_info = get_inventory_with_writeable_keys(directory)
|
||||
write_info = get_inventory_with_writeable_keys(flake)
|
||||
|
||||
data_disk = dict(write_info.data_disk)
|
||||
|
||||
for delete_path in delete_set:
|
||||
delete_by_path(data_disk, delete_path)
|
||||
|
||||
inventory_file = get_inventory_path(directory)
|
||||
inventory_file = get_inventory_path(flake)
|
||||
with inventory_file.open("w") as f:
|
||||
json.dump(data_disk, f, indent=2)
|
||||
|
||||
commit_file(
|
||||
inventory_file,
|
||||
Path(directory),
|
||||
flake.path,
|
||||
commit_message=f"Delete inventory keys {delete_set}",
|
||||
)
|
||||
|
||||
|
||||
def init_inventory(directory: str, init: Inventory | None = None) -> None:
|
||||
def init_inventory(flake: Flake, init: Inventory | None = None) -> None:
|
||||
inventory = None
|
||||
# Try reading the current flake
|
||||
if init is None:
|
||||
with contextlib.suppress(ClanCmdError):
|
||||
inventory = load_inventory_eval(directory)
|
||||
inventory = load_inventory_eval(flake)
|
||||
|
||||
if init is not None:
|
||||
inventory = init
|
||||
@@ -596,9 +599,9 @@ def init_inventory(directory: str, init: Inventory | None = None) -> None:
|
||||
# Write inventory.json file
|
||||
if inventory is not None:
|
||||
# Persist creates a commit message for each change
|
||||
set_inventory(inventory, directory, "Init inventory")
|
||||
set_inventory(inventory, flake, "Init inventory")
|
||||
|
||||
|
||||
@API.register
|
||||
def get_inventory(base_path: str | Path) -> Inventory:
|
||||
return load_inventory_eval(base_path)
|
||||
def get_inventory(flake: Flake) -> Inventory:
|
||||
return load_inventory_eval(flake)
|
||||
|
||||
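All inventory helpers above now take a Flake instead of a raw flake_dir, and commits resolve paths through flake.path. A small sketch of the updated call sites, with a hypothetical section payload:

from clan_cli.flake import Flake
from clan_cli.inventory import load_inventory_json, patch_inventory_with

flake = Flake("/path/to/clan")  # placeholder

# Read the persisted inventory.json (empty dict if the file does not exist yet).
inventory = load_inventory_json(flake)

# Persist a single section; the helper writes inventory.json under flake.path
# and commits it with an "inventory.<section>: Update" message.
patch_inventory_with(
    flake,
    "machines.machine1",                          # hypothetical section
    {"deploy": {"targetHost": "root@example"}},   # hypothetical content
)
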
@@ -110,7 +110,7 @@ def create_machine(opts: CreateOptions, commit: bool = True) -> None:
|
||||
new_machine["deploy"] = {"targetHost": target_host}
|
||||
|
||||
patch_inventory_with(
|
||||
clan_dir, f"machines.{machine_name}", dataclass_to_dict(new_machine)
|
||||
Flake(str(clan_dir)), f"machines.{machine_name}", dataclass_to_dict(new_machine)
|
||||
)
|
||||
|
||||
# Commit at the end in that order to avoid committing half-baked machines
|
||||
|
||||
@@ -23,7 +23,7 @@ log = logging.getLogger(__name__)
|
||||
@API.register
|
||||
def delete_machine(flake: Flake, name: str) -> None:
|
||||
try:
|
||||
inventory.delete(str(flake.path), {f"machines.{name}"})
|
||||
inventory.delete(flake, {f"machines.{name}"})
|
||||
except KeyError as exc:
|
||||
# louis@(2025-03-09): test infrastructure does not seem to set the
|
||||
# inventory properly, but more importantly only one machine in my
|
||||
@@ -35,7 +35,7 @@ def delete_machine(flake: Flake, name: str) -> None:
|
||||
|
||||
changed_paths: list[Path] = []
|
||||
|
||||
folder = specific_machine_dir(flake.path, name)
|
||||
folder = specific_machine_dir(flake, name)
|
||||
if folder.exists():
|
||||
changed_paths.append(folder)
|
||||
shutil.rmtree(folder)
|
||||
|
||||
@@ -26,61 +26,39 @@ class HardwareConfig(Enum):
NIXOS_GENERATE_CONFIG = "nixos-generate-config"
NONE = "none"

def config_path(self, clan_dir: Path, machine_name: str) -> Path:
machine_dir = specific_machine_dir(clan_dir, machine_name)
def config_path(self, flake: Flake, machine_name: str) -> Path:
machine_dir = specific_machine_dir(flake, machine_name)
if self == HardwareConfig.NIXOS_FACTER:
return machine_dir / "facter.json"
return machine_dir / "hardware-configuration.nix"

@classmethod
def detect_type(
cls: type["HardwareConfig"], clan_dir: Path, machine_name: str
cls: type["HardwareConfig"], flake: Flake, machine_name: str
) -> "HardwareConfig":
hardware_config = HardwareConfig.NIXOS_GENERATE_CONFIG.config_path(
clan_dir, machine_name
flake, machine_name
)

if hardware_config.exists() and "throw" not in hardware_config.read_text():
return HardwareConfig.NIXOS_GENERATE_CONFIG

if HardwareConfig.NIXOS_FACTER.config_path(clan_dir, machine_name).exists():
if HardwareConfig.NIXOS_FACTER.config_path(flake, machine_name).exists():
return HardwareConfig.NIXOS_FACTER

return HardwareConfig.NONE


@API.register
def show_machine_hardware_config(clan_dir: Path, machine_name: str) -> HardwareConfig:
def show_machine_hardware_config(flake: Flake, machine_name: str) -> HardwareConfig:
"""
Show hardware information for a machine returns None if none exist.
"""
return HardwareConfig.detect_type(clan_dir, machine_name)
return HardwareConfig.detect_type(flake, machine_name)


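HardwareConfig.config_path and detect_type now resolve the machine directory through a Flake. A usage sketch based on the signatures above (machine name and path are placeholders):

from clan_cli.flake import Flake
from clan_cli.machines.hardware import HardwareConfig

flake = Flake("/path/to/clan")  # placeholder
backend = HardwareConfig.detect_type(flake, "machine1")  # hypothetical machine name

if backend == HardwareConfig.NIXOS_FACTER:
    print(backend.config_path(flake, "machine1"))  # .../machines/machine1/facter.json
elif backend == HardwareConfig.NIXOS_GENERATE_CONFIG:
    print(backend.config_path(flake, "machine1"))  # .../hardware-configuration.nix
else:
    print("no hardware config recorded yet")
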
@API.register
|
||||
def show_machine_deployment_target(clan_dir: Path, machine_name: str) -> str | None:
|
||||
"""
|
||||
Show deployment target for a machine returns None if none exist.
|
||||
"""
|
||||
config = nix_config()
|
||||
system = config["system"]
|
||||
cmd = nix_eval(
|
||||
[
|
||||
f"{clan_dir}#clanInternals.machines.{system}.{machine_name}",
|
||||
"--apply",
|
||||
"machine: { inherit (machine.config.clan.core.networking) targetHost; }",
|
||||
"--json",
|
||||
]
|
||||
)
|
||||
proc = run_no_stdout(cmd, RunOpts(prefix=machine_name))
|
||||
res = proc.stdout.strip()
|
||||
|
||||
target_host = json.loads(res)
|
||||
return target_host.get("targetHost", None)
|
||||
|
||||
|
||||
@API.register
|
||||
def show_machine_hardware_platform(clan_dir: Path, machine_name: str) -> str | None:
|
||||
def show_machine_hardware_platform(flake: Flake, machine_name: str) -> str | None:
|
||||
"""
|
||||
Show hardware information for a machine returns None if none exist.
|
||||
"""
|
||||
@@ -88,7 +66,7 @@ def show_machine_hardware_platform(clan_dir: Path, machine_name: str) -> str | N
|
||||
system = config["system"]
|
||||
cmd = nix_eval(
|
||||
[
|
||||
f"{clan_dir}#clanInternals.machines.{system}.{machine_name}",
|
||||
f"{flake}#clanInternals.machines.{system}.{machine_name}",
|
||||
"--apply",
|
||||
"machine: { inherit (machine.pkgs) system; }",
|
||||
"--json",
|
||||
@@ -118,15 +96,14 @@ def generate_machine_hardware_info(opts: HardwareGenerateOptions) -> HardwareCon
|
||||
and place the resulting *.nix file in the machine's directory.
|
||||
"""
|
||||
|
||||
machine = Machine(opts.machine, flake=opts.flake)
|
||||
machine = Machine(
|
||||
opts.machine,
|
||||
flake=opts.flake,
|
||||
private_key=Path(opts.keyfile) if opts.keyfile else None,
|
||||
override_target_host=opts.target_host,
|
||||
)
|
||||
|
||||
if opts.keyfile is not None:
|
||||
machine.private_key = Path(opts.keyfile)
|
||||
|
||||
if opts.target_host is not None:
|
||||
machine.override_target_host = opts.target_host
|
||||
|
||||
hw_file = opts.backend.config_path(opts.flake.path, opts.machine)
|
||||
hw_file = opts.backend.config_path(opts.flake, opts.machine)
|
||||
hw_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if opts.backend == HardwareConfig.NIXOS_FACTER:
|
||||
@@ -139,26 +116,26 @@ def generate_machine_hardware_info(opts: HardwareGenerateOptions) -> HardwareCon
|
||||
"--show-hardware-config",
|
||||
]
|
||||
|
||||
host = machine.target_host
|
||||
host.ssh_options["StrictHostKeyChecking"] = "accept-new"
|
||||
host.ssh_options["UserKnownHostsFile"] = "/dev/null"
|
||||
if opts.password:
|
||||
host.password = opts.password
|
||||
with machine.target_host() as host:
|
||||
host.ssh_options["StrictHostKeyChecking"] = "accept-new"
|
||||
host.ssh_options["UserKnownHostsFile"] = "/dev/null"
|
||||
if opts.password:
|
||||
host.password = opts.password
|
||||
|
||||
out = host.run(config_command, become_root=True, opts=RunOpts(check=False))
|
||||
if out.returncode != 0:
|
||||
if "nixos-facter" in out.stderr and "not found" in out.stderr:
|
||||
machine.error(str(out.stderr))
|
||||
msg = (
|
||||
"Please use our custom nixos install images from https://github.com/nix-community/nixos-images/releases/tag/nixos-unstable. "
|
||||
"nixos-factor only works on nixos / clan systems currently."
|
||||
)
|
||||
out = host.run(config_command, become_root=True, opts=RunOpts(check=False))
|
||||
if out.returncode != 0:
|
||||
if "nixos-facter" in out.stderr and "not found" in out.stderr:
|
||||
machine.error(str(out.stderr))
|
||||
msg = (
|
||||
"Please use our custom nixos install images from https://github.com/nix-community/nixos-images/releases/tag/nixos-unstable. "
|
||||
"nixos-factor only works on nixos / clan systems currently."
|
||||
)
|
||||
raise ClanError(msg)
|
||||
|
||||
machine.error(str(out))
|
||||
msg = f"Failed to inspect {opts.machine}. Address: {host.target}"
|
||||
raise ClanError(msg)
|
||||
|
||||
machine.error(str(out))
|
||||
msg = f"Failed to inspect {opts.machine}. Address: {host.target}"
|
||||
raise ClanError(msg)
|
||||
|
||||
backup_file = None
|
||||
if hw_file.exists():
|
||||
backup_file = hw_file.with_suffix(".bak")
|
||||
@@ -175,7 +152,7 @@ def generate_machine_hardware_info(opts: HardwareGenerateOptions) -> HardwareCon
|
||||
f"machines/{opts.machine}/{hw_file.name}: update hardware configuration",
|
||||
)
|
||||
try:
|
||||
show_machine_hardware_platform(opts.flake.path, opts.machine)
|
||||
show_machine_hardware_platform(opts.flake, opts.machine)
|
||||
if backup_file:
|
||||
backup_file.unlink(missing_ok=True)
|
||||
except ClanCmdError as e:
|
||||
|
||||
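generate_machine_hardware_info now passes the key file and target override straight into the Machine constructor and opens the connection with the target_host() context manager. A condensed sketch of that flow (host address, paths and the probe command are placeholders or assumptions):

from pathlib import Path

from clan_cli.cmd import RunOpts
from clan_cli.flake import Flake
from clan_cli.machines.machines import Machine

machine = Machine(
    "machine1",                               # placeholder
    flake=Flake("/path/to/clan"),
    private_key=Path("/path/to/ssh-key"),     # optional, from --keyfile
    override_target_host="root@192.0.2.10",   # optional, from --target-host
)
with machine.target_host() as host:
    host.ssh_options["StrictHostKeyChecking"] = "accept-new"
    host.ssh_options["UserKnownHostsFile"] = "/dev/null"
    out = host.run(
        ["nixos-facter"],   # assumed probe command; the diff elides config_command
        become_root=True,
        opts=RunOpts(check=False),
    )
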
@@ -36,7 +36,6 @@ class BuildOn(Enum):
|
||||
@dataclass
|
||||
class InstallOptions:
|
||||
machine: Machine
|
||||
target_host: str
|
||||
kexec: str | None = None
|
||||
debug: bool = False
|
||||
no_reboot: bool = False
|
||||
@@ -52,17 +51,16 @@ class InstallOptions:
|
||||
@API.register
|
||||
def install_machine(opts: InstallOptions) -> None:
|
||||
machine = opts.machine
|
||||
machine.override_target_host = opts.target_host
|
||||
|
||||
machine.info(f"installing {machine.name}")
|
||||
|
||||
h = machine.target_host
|
||||
machine.info(f"target host: {h.target}")
|
||||
machine.debug(f"installing {machine.name}")
|
||||
|
||||
generate_facts([machine])
|
||||
generate_vars([machine])
|
||||
|
||||
with TemporaryDirectory(prefix="nixos-install-") as _base_directory:
|
||||
with (
|
||||
TemporaryDirectory(prefix="nixos-install-") as _base_directory,
|
||||
machine.target_host() as host,
|
||||
):
|
||||
base_directory = Path(_base_directory).resolve()
|
||||
activation_secrets = base_directory / "activation_secrets"
|
||||
upload_dir = activation_secrets / machine.secrets_upload_directory.lstrip("/")
|
||||
@@ -115,7 +113,7 @@ def install_machine(opts: InstallOptions) -> None:
|
||||
str(opts.update_hardware_config.value),
|
||||
str(
|
||||
opts.update_hardware_config.config_path(
|
||||
machine.flake.path, machine.name
|
||||
machine.flake, machine.name
|
||||
)
|
||||
),
|
||||
]
|
||||
@@ -134,14 +132,14 @@ def install_machine(opts: InstallOptions) -> None:
|
||||
if opts.build_on:
|
||||
cmd += ["--build-on", opts.build_on.value]
|
||||
|
||||
if h.port:
|
||||
cmd += ["--ssh-port", str(h.port)]
|
||||
if host.port:
|
||||
cmd += ["--ssh-port", str(host.port)]
|
||||
if opts.kexec:
|
||||
cmd += ["--kexec", opts.kexec]
|
||||
|
||||
if opts.debug:
|
||||
cmd.append("--debug")
|
||||
cmd.append(h.target)
|
||||
cmd.append(host.target)
|
||||
if opts.use_tor:
|
||||
# nix copy does not support tor socks proxy
|
||||
# cmd.append("--ssh-option")
|
||||
@@ -164,7 +162,32 @@ def install_machine(opts: InstallOptions) -> None:
|
||||
def install_command(args: argparse.Namespace) -> None:
|
||||
host_key_check = HostKeyCheck.from_str(args.host_key_check)
|
||||
try:
|
||||
machine = Machine(name=args.machine, flake=args.flake, nix_options=args.option)
|
||||
# Only if the caller did not specify a target_host via args.target_host
|
||||
# Find a suitable target_host that is reachable
|
||||
target_host = args.target_host
|
||||
deploy_info: DeployInfo | None = ssh_command_parse(args)
|
||||
|
||||
if deploy_info and not args.target_host:
|
||||
host = find_reachable_host(deploy_info, host_key_check)
|
||||
if host is None:
|
||||
use_tor = True
|
||||
target_host = f"root@{deploy_info.tor}"
|
||||
else:
|
||||
target_host = host.target
|
||||
|
||||
if args.password:
|
||||
password = args.password
|
||||
elif deploy_info and deploy_info.pwd:
|
||||
password = deploy_info.pwd
|
||||
else:
|
||||
password = None
|
||||
|
||||
machine = Machine(
|
||||
name=args.machine,
|
||||
flake=args.flake,
|
||||
nix_options=args.option,
|
||||
override_target_host=target_host,
|
||||
)
|
||||
use_tor = False
|
||||
|
||||
if machine._class_ == "darwin":
|
||||
@@ -175,41 +198,16 @@ def install_command(args: argparse.Namespace) -> None:
|
||||
msg = "Could not find clan flake toplevel directory"
|
||||
raise ClanError(msg)
|
||||
|
||||
deploy_info: DeployInfo | None = ssh_command_parse(args)
|
||||
|
||||
if args.target_host:
|
||||
target_host = args.target_host
|
||||
elif deploy_info:
|
||||
host = find_reachable_host(deploy_info, host_key_check)
|
||||
if host is None:
|
||||
use_tor = True
|
||||
target_host = f"root@{deploy_info.tor}"
|
||||
else:
|
||||
target_host = host.target
|
||||
password = deploy_info.pwd
|
||||
else:
|
||||
target_host = machine.target_host.target
|
||||
|
||||
if args.password:
|
||||
password = args.password
|
||||
elif deploy_info and deploy_info.pwd:
|
||||
password = deploy_info.pwd
|
||||
else:
|
||||
password = None
|
||||
|
||||
if not target_host:
|
||||
msg = "No target host provided, please provide a target host."
|
||||
raise ClanError(msg)
|
||||
|
||||
if not args.yes:
|
||||
ask = input(f"Install {args.machine} to {target_host}? [y/N] ")
|
||||
ask = input(
|
||||
f"Install {args.machine} to {machine.target_host_address}? [y/N] "
|
||||
)
|
||||
if ask != "y":
|
||||
return None
|
||||
|
||||
return install_machine(
|
||||
InstallOptions(
|
||||
machine=machine,
|
||||
target_host=target_host,
|
||||
kexec=args.kexec,
|
||||
phases=args.phases,
|
||||
debug=args.debug,
|
||||
|
||||
@@ -2,6 +2,7 @@ import argparse
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Literal
|
||||
@@ -11,38 +12,38 @@ from clan_lib.api.disk import MachineDiskMatter
|
||||
from clan_lib.api.modules import parse_frontmatter
|
||||
from clan_lib.api.serde import dataclass_to_dict
|
||||
|
||||
from clan_cli.cmd import RunOpts, run_no_stdout
|
||||
from clan_cli.cmd import RunOpts, run
|
||||
from clan_cli.completions import add_dynamic_completer, complete_tags
|
||||
from clan_cli.dirs import specific_machine_dir
|
||||
from clan_cli.errors import ClanCmdError, ClanError
|
||||
from clan_cli.errors import ClanError
|
||||
from clan_cli.flake import Flake
|
||||
from clan_cli.inventory import (
|
||||
Machine,
|
||||
load_inventory_eval,
|
||||
patch_inventory_with,
|
||||
)
|
||||
from clan_cli.inventory.classes import Machine as InventoryMachine
|
||||
from clan_cli.machines.hardware import HardwareConfig
|
||||
from clan_cli.nix import nix_eval, nix_shell
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.nix import nix_eval
|
||||
from clan_cli.tags import list_nixos_machines_by_tags
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@API.register
|
||||
def set_machine(flake_url: Path, machine_name: str, machine: Machine) -> None:
|
||||
patch_inventory_with(
|
||||
flake_url, f"machines.{machine_name}", dataclass_to_dict(machine)
|
||||
)
|
||||
def set_machine(flake: Flake, machine_name: str, machine: InventoryMachine) -> None:
|
||||
patch_inventory_with(flake, f"machines.{machine_name}", dataclass_to_dict(machine))
|
||||
|
||||
|
||||
@API.register
|
||||
def list_inventory_machines(flake_url: str | Path) -> dict[str, Machine]:
|
||||
inventory = load_inventory_eval(flake_url)
|
||||
def list_machines(flake: Flake) -> dict[str, InventoryMachine]:
|
||||
inventory = load_inventory_eval(flake)
|
||||
return inventory.get("machines", {})
|
||||
|
||||
|
||||
@dataclass
|
||||
class MachineDetails:
|
||||
machine: Machine
|
||||
machine: InventoryMachine
|
||||
hw_config: HardwareConfig | None = None
|
||||
disk_schema: MachineDiskMatter | None = None
|
||||
|
||||
@@ -59,16 +60,16 @@ def extract_header(c: str) -> str:
|
||||
|
||||
|
||||
@API.register
|
||||
def get_inventory_machine_details(flake_url: Path, machine_name: str) -> MachineDetails:
|
||||
inventory = load_inventory_eval(flake_url)
|
||||
machine = inventory.get("machines", {}).get(machine_name)
|
||||
if machine is None:
|
||||
msg = f"Machine {machine_name} not found in inventory"
|
||||
def get_machine_details(machine: Machine) -> MachineDetails:
|
||||
inventory = load_inventory_eval(machine.flake)
|
||||
machine_inv = inventory.get("machines", {}).get(machine.name)
|
||||
if machine_inv is None:
|
||||
msg = f"Machine {machine.name} not found in inventory"
|
||||
raise ClanError(msg)
|
||||
|
||||
hw_config = HardwareConfig.detect_type(flake_url, machine_name)
|
||||
hw_config = HardwareConfig.detect_type(machine.flake, machine.name)
|
||||
|
||||
machine_dir = specific_machine_dir(flake_url, machine_name)
|
||||
machine_dir = specific_machine_dir(machine.flake, machine.name)
|
||||
disk_schema: MachineDiskMatter | None = None
|
||||
disk_path = machine_dir / "disko.nix"
|
||||
if disk_path.exists():
|
||||
@@ -79,7 +80,9 @@ def get_inventory_machine_details(flake_url: Path, machine_name: str) -> Machine
|
||||
if data:
|
||||
disk_schema = data # type: ignore
|
||||
|
||||
return MachineDetails(machine=machine, hw_config=hw_config, disk_schema=disk_schema)
|
||||
return MachineDetails(
|
||||
machine=machine_inv, hw_config=hw_config, disk_schema=disk_schema
|
||||
)
|
||||
|
||||
|
||||
def list_nixos_machines(flake_url: str | Path) -> list[str]:
|
||||
@@ -92,7 +95,7 @@ def list_nixos_machines(flake_url: str | Path) -> list[str]:
|
||||
]
|
||||
)
|
||||
|
||||
proc = run_no_stdout(cmd)
|
||||
proc = run(cmd)
|
||||
|
||||
try:
|
||||
res = proc.stdout.strip()
|
||||
@@ -106,53 +109,36 @@ def list_nixos_machines(flake_url: str | Path) -> list[str]:
|
||||
|
||||
@dataclass
|
||||
class ConnectionOptions:
|
||||
keyfile: str | None = None
|
||||
timeout: int = 2
|
||||
retries: int = 10
|
||||
|
||||
|
||||
from clan_cli.machines.machines import Machine
|
||||
|
||||
|
||||
@API.register
|
||||
def check_machine_online(
|
||||
flake_url: str | Path, machine_name: str, opts: ConnectionOptions | None
|
||||
machine: Machine, opts: ConnectionOptions | None = None
|
||||
) -> Literal["Online", "Offline"]:
|
||||
machine = load_inventory_eval(flake_url).get("machines", {}).get(machine_name)
|
||||
if not machine:
|
||||
msg = f"Machine {machine_name} not found in inventory"
|
||||
raise ClanError(msg)
|
||||
|
||||
hostname = machine.get("deploy", {}).get("targetHost")
|
||||
|
||||
hostname = machine.target_host_address
|
||||
if not hostname:
|
||||
msg = f"Machine {machine_name} does not specify a targetHost"
|
||||
msg = f"Machine {machine.name} does not specify a targetHost"
|
||||
raise ClanError(msg)
|
||||
|
||||
timeout = opts.timeout if opts and opts.timeout else 20
|
||||
timeout = opts.timeout if opts and opts.timeout else 2
|
||||
|
||||
cmd = nix_shell(
|
||||
["util-linux", *(["openssh"] if hostname else [])],
|
||||
[
|
||||
"ssh",
|
||||
*(["-i", f"{opts.keyfile}"] if opts and opts.keyfile else []),
|
||||
# Disable strict host key checking
|
||||
"-o",
|
||||
"StrictHostKeyChecking=accept-new",
|
||||
# Disable known hosts file
|
||||
"-o",
|
||||
"UserKnownHostsFile=/dev/null",
|
||||
"-o",
|
||||
f"ConnectTimeout={timeout}",
|
||||
f"{hostname}",
|
||||
"true",
|
||||
"&> /dev/null",
|
||||
],
|
||||
)
|
||||
try:
|
||||
proc = run_no_stdout(cmd, RunOpts(needs_user_terminal=True))
|
||||
if proc.returncode != 0:
|
||||
return "Offline"
|
||||
except ClanCmdError:
|
||||
return "Offline"
|
||||
else:
return "Online"
for _ in range(opts.retries if opts and opts.retries else 10):
with machine.target_host() as target:
res = target.run(
["true"],
RunOpts(timeout=timeout, check=False, needs_user_terminal=True),
)

if res.returncode == 0:
return "Online"
time.sleep(timeout)

return "Offline"


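check_machine_online now probes the machine through the Host abstraction instead of shelling out to ssh directly, retrying a configurable number of times. A call sketch with explicit options (the import path is inferred from the module shown above; machine name and flake path are placeholders):

from clan_cli.flake import Flake
from clan_cli.machines.list import ConnectionOptions, check_machine_online
from clan_cli.machines.machines import Machine

machine = Machine(name="machine1", flake=Flake("/path/to/clan"))  # placeholders
status = check_machine_online(machine, ConnectionOptions(timeout=2, retries=3))
print(status)  # "Online" or "Offline"
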
def list_command(args: argparse.Namespace) -> None:
|
||||
|
||||
@@ -2,6 +2,8 @@ import importlib
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
from collections.abc import Iterator
|
||||
from contextlib import contextmanager
|
||||
from dataclasses import dataclass, field
|
||||
from functools import cached_property
|
||||
from pathlib import Path
|
||||
@@ -24,7 +26,7 @@ if TYPE_CHECKING:
|
||||
from clan_cli.vars.generate import Generator
|
||||
|
||||
|
||||
@dataclass
|
||||
@dataclass(frozen=True)
|
||||
class Machine:
|
||||
name: str
|
||||
flake: Flake
|
||||
@@ -145,34 +147,37 @@ class Machine:
def flake_dir(self) -> Path:
return self.flake.path

@property
def target_host(self) -> Host:
return parse_deployment_address(
@contextmanager
def target_host(self) -> Iterator[Host]:
with parse_deployment_address(
self.name,
self.target_host_address,
self.host_key_check,
private_key=self.private_key,
meta={"machine": self},
)
) as target_host:
yield target_host

@property
def build_host(self) -> Host:
@contextmanager
def build_host(self) -> Iterator[Host | None]:
"""
The host where the machine is built and deployed from.
Can be the same as the target host.
"""
build_host = self.override_build_host or self.deployment.get("buildHost")
if build_host is None:
return self.target_host
yield None
return
# enable ssh agent forwarding to allow the build host to access the target host
return parse_deployment_address(
with parse_deployment_address(
self.name,
build_host,
self.host_key_check,
forward_agent=True,
private_key=self.private_key,
meta={"machine": self, "target_host": self.target_host},
)
meta={"machine": self},
) as build_host:
yield build_host

@cached_property
def deploy_as_root(self) -> bool:

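With target_host() and build_host() both turned into context managers (build_host yielding None when no separate build host is configured), callers combine them the way deploy_machine does. A minimal sketch, with placeholder names:

from contextlib import ExitStack

from clan_cli.flake import Flake
from clan_cli.machines.machines import Machine

machine = Machine(name="machine1", flake=Flake("/path/to/clan"))  # placeholders
with ExitStack() as stack:
    target_host = stack.enter_context(machine.target_host())
    build_host = stack.enter_context(machine.build_host())  # may be None
    host = build_host or target_host
    # build on `host`, deploy to `target_host`
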
@@ -5,11 +5,12 @@ import os
|
||||
import re
|
||||
import shlex
|
||||
import sys
|
||||
from contextlib import ExitStack
|
||||
|
||||
from clan_lib.api import API
|
||||
|
||||
from clan_cli.async_run import AsyncContext, AsyncOpts, AsyncRuntime, is_async_cancelled
|
||||
from clan_cli.cmd import Log, MsgColor, RunOpts, run
|
||||
from clan_cli.cmd import MsgColor, RunOpts, run
|
||||
from clan_cli.colors import AnsiColor
|
||||
from clan_cli.completions import (
|
||||
add_dynamic_completer,
|
||||
@@ -20,14 +21,13 @@ from clan_cli.facts.generate import generate_facts
|
||||
from clan_cli.facts.upload import upload_secrets
|
||||
from clan_cli.flake import Flake
|
||||
from clan_cli.inventory import Machine as InventoryMachine
|
||||
from clan_cli.machines.list import list_machines
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.nix import nix_command, nix_config, nix_metadata
|
||||
from clan_cli.ssh.host import Host, HostKeyCheck
|
||||
from clan_cli.vars.generate import generate_vars
|
||||
from clan_cli.vars.upload import upload_secret_vars
|
||||
|
||||
from .inventory import get_all_machines, get_selected_machines
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -43,8 +43,7 @@ def is_local_input(node: dict[str, dict[str, str]]) -> bool:
|
||||
)
|
||||
|
||||
|
||||
def upload_sources(machine: Machine) -> str:
|
||||
host = machine.build_host
|
||||
def upload_sources(machine: Machine, host: Host) -> str:
|
||||
env = host.nix_ssh_env(os.environ.copy())
|
||||
|
||||
flake_url = (
|
||||
@@ -111,37 +110,41 @@ def update_machines(base_path: str, machines: list[InventoryMachine]) -> None:
|
||||
flake = Flake(base_path)
|
||||
for machine in machines:
|
||||
name = machine.get("name")
|
||||
# prefer target host set via inventory, but fallback to the one set in the machine
|
||||
target_host = machine.get("deploy", {}).get("targetHost")
|
||||
|
||||
if not name:
|
||||
msg = "Machine name is not set"
|
||||
raise ClanError(msg)
|
||||
m = Machine(
|
||||
name,
|
||||
flake=flake,
|
||||
override_target_host=target_host,
|
||||
)
|
||||
# prefer target host set via inventory, but fallback to the one set in the machine
|
||||
if target_host := machine.get("deploy", {}).get("targetHost"):
|
||||
m.override_target_host = target_host
|
||||
group_machines.append(m)
|
||||
|
||||
deploy_machines(group_machines)
|
||||
|
||||
|
||||
def deploy_machines(machines: list[Machine]) -> None:
|
||||
"""
|
||||
Deploy to all hosts in parallel
|
||||
"""
|
||||
def deploy_machine(machine: Machine) -> None:
|
||||
with ExitStack() as stack:
|
||||
target_host = stack.enter_context(machine.target_host())
|
||||
build_host = stack.enter_context(machine.build_host())
|
||||
|
||||
if machine._class_ == "darwin":
|
||||
if not machine.deploy_as_root and target_host.user == "root":
|
||||
msg = f"'targetHost' should be set to a non-root user for deploying to nix-darwin on machine '{machine.name}'"
|
||||
raise ClanError(msg)
|
||||
|
||||
host = build_host or target_host
|
||||
|
||||
def deploy(machine: Machine) -> None:
|
||||
host = machine.build_host
|
||||
generate_facts([machine], service=None, regenerate=False)
|
||||
generate_vars([machine], generator_name=None, regenerate=False)
|
||||
|
||||
upload_secrets(machine)
|
||||
upload_secret_vars(machine)
|
||||
upload_secrets(machine, target_host)
|
||||
upload_secret_vars(machine, target_host)
|
||||
|
||||
path = upload_sources(
|
||||
machine=machine,
|
||||
)
|
||||
path = upload_sources(machine, host)
|
||||
|
||||
nix_options = [
|
||||
"--show-trace",
|
||||
@@ -158,7 +161,6 @@ def deploy_machines(machines: list[Machine]) -> None:
|
||||
]
|
||||
|
||||
become_root = machine.deploy_as_root
|
||||
needs_tty_for_sudo = become_root or machine._class_ == "darwin"
|
||||
|
||||
if machine._class_ == "nixos":
|
||||
nix_options += [
|
||||
@@ -167,26 +169,21 @@ def deploy_machines(machines: list[Machine]) -> None:
|
||||
"",
|
||||
]
|
||||
|
||||
target_host: Host | None = host.meta.get("target_host")
|
||||
if target_host:
|
||||
if build_host:
|
||||
become_root = False
|
||||
nix_options += ["--target-host", target_host.target]
|
||||
|
||||
if target_host.user != "root":
|
||||
nix_options += ["--use-remote-sudo"]
|
||||
needs_tty_for_sudo = become_root
|
||||
|
||||
switch_cmd = [f"{machine._class_}-rebuild", "switch", *nix_options]
|
||||
test_cmd = [f"{machine._class_}-rebuild", "test", *nix_options]
|
||||
|
||||
env = host.nix_ssh_env(None)
|
||||
remote_env = host.nix_ssh_env(None, local_ssh=False)
|
||||
ret = host.run(
|
||||
switch_cmd,
|
||||
RunOpts(
|
||||
check=False, msg_color=MsgColor(stderr=AnsiColor.DEFAULT), log=Log.BOTH
|
||||
),
|
||||
tty=needs_tty_for_sudo,
|
||||
extra_env=env,
|
||||
RunOpts(check=False, msg_color=MsgColor(stderr=AnsiColor.DEFAULT)),
|
||||
extra_env=remote_env,
|
||||
become_root=become_root,
|
||||
)
|
||||
|
||||
@@ -212,23 +209,23 @@ def deploy_machines(machines: list[Machine]) -> None:
|
||||
msg_color=MsgColor(stderr=AnsiColor.DEFAULT),
|
||||
needs_user_terminal=True,
|
||||
),
|
||||
extra_env=env,
|
||||
extra_env=remote_env,
|
||||
become_root=become_root,
|
||||
)
|
||||
|
||||
|
||||
def deploy_machines(machines: list[Machine]) -> None:
|
||||
"""
|
||||
Deploy to all hosts in parallel
|
||||
"""
|
||||
|
||||
with AsyncRuntime() as runtime:
|
||||
for machine in machines:
|
||||
if machine._class_ == "darwin":
|
||||
if not machine.deploy_as_root and machine.target_host.user == "root":
|
||||
msg = f"'targetHost' should be set to a non-root user for deploying to nix-darwin on machine '{machine.name}'"
|
||||
raise ClanError(msg)
|
||||
|
||||
machine.info(f"Updating {machine.name}")
|
||||
runtime.async_run(
|
||||
AsyncOpts(
|
||||
tid=machine.name, async_ctx=AsyncContext(prefix=machine.name)
|
||||
),
|
||||
deploy,
|
||||
deploy_machine,
|
||||
machine,
|
||||
)
|
||||
runtime.join_all()
|
||||
@@ -240,61 +237,73 @@ def update_command(args: argparse.Namespace) -> None:
|
||||
if args.flake is None:
|
||||
msg = "Could not find clan flake toplevel directory"
|
||||
raise ClanError(msg)
|
||||
machines = []
|
||||
if len(args.machines) == 1 and args.target_host is not None:
|
||||
machine = Machine(
|
||||
name=args.machines[0], flake=args.flake, nix_options=args.option
|
||||
)
|
||||
machine.override_target_host = args.target_host
|
||||
machine.override_build_host = args.build_host
|
||||
machine.host_key_check = HostKeyCheck.from_str(args.host_key_check)
|
||||
machines.append(machine)
|
||||
|
||||
elif args.target_host is not None:
|
||||
print("target host can only be specified for a single machine")
|
||||
exit(1)
|
||||
else:
|
||||
if len(args.machines) == 0:
|
||||
ignored_machines = []
|
||||
for machine in get_all_machines(args.flake, args.option):
|
||||
if machine.deployment.get("requireExplicitUpdate", False):
|
||||
continue
|
||||
try:
|
||||
machine.build_host # noqa: B018
|
||||
except ClanError: # check if we have a build host set
|
||||
ignored_machines.append(machine)
|
||||
continue
|
||||
machine.host_key_check = HostKeyCheck.from_str(args.host_key_check)
|
||||
machine.override_build_host = args.build_host
|
||||
machines.append(machine)
|
||||
|
||||
if not machines and ignored_machines != []:
|
||||
print(
|
||||
"WARNING: No machines to update."
|
||||
"The following defined machines were ignored because they"
|
||||
"do not have the `clan.core.networking.targetHost` nixos option set:",
|
||||
file=sys.stderr,
|
||||
)
|
||||
for machine in ignored_machines:
|
||||
print(machine, file=sys.stderr)
|
||||
|
||||
else:
|
||||
machines = get_selected_machines(args.flake, args.option, args.machines)
|
||||
for machine in machines:
|
||||
machine.override_build_host = args.build_host
|
||||
machine.host_key_check = HostKeyCheck.from_str(args.host_key_check)
|
||||
|
||||
config = nix_config()
|
||||
system = config["system"]
|
||||
machine_names = [machine.name for machine in machines]
|
||||
args.flake.precache(
|
||||
[
|
||||
f"clanInternals.machines.{system}.{{{','.join(machine_names)}}}.config.clan.core.vars.generators.*.validationHash",
|
||||
f"clanInternals.machines.{system}.{{{','.join(machine_names)}}}.config.system.clan.deployment.file",
|
||||
]
|
||||
machines: list[Machine] = []
|
||||
# if no machines are passed, we will update all machines
|
||||
selected_machines = (
|
||||
args.machines if args.machines else list_machines(args.flake).keys()
|
||||
)
|
||||
|
||||
deploy_machines(machines)
|
||||
if args.target_host is not None and len(args.machines) > 1:
|
||||
msg = "Target Host can only be set for one machines"
|
||||
raise ClanError(msg)
|
||||
|
||||
for machine_name in selected_machines:
|
||||
machine = Machine(
|
||||
name=machine_name,
|
||||
flake=args.flake,
|
||||
nix_options=args.option,
|
||||
override_target_host=args.target_host,
|
||||
override_build_host=args.build_host,
|
||||
host_key_check=HostKeyCheck.from_str(args.host_key_check),
|
||||
)
|
||||
machines.append(machine)
|
||||
|
||||
def filter_machine(m: Machine) -> bool:
|
||||
if m.deployment.get("requireExplicitUpdate", False):
|
||||
return False
|
||||
|
||||
try:
|
||||
# check if the machine has a target host set
|
||||
m.target_host # noqa: B018
|
||||
except ClanError:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
machines_to_update = machines
|
||||
implicit_all: bool = len(args.machines) == 0
|
||||
if implicit_all:
|
||||
machines_to_update = list(filter(filter_machine, machines))
|
||||
|
||||
# machines that are in the list but not included in the update list
|
||||
ignored_machines = {m.name for m in machines if m not in machines_to_update}
|
||||
|
||||
if not machines_to_update and ignored_machines:
|
||||
print(
|
||||
"WARNING: No machines to update.\n"
|
||||
"The following defined machines were ignored because they\n"
|
||||
"- Require explicit update (see 'requireExplicitUpdate')\n",
|
||||
"- Might not have the `clan.core.networking.targetHost` nixos option set:\n",
|
||||
file=sys.stderr,
|
||||
)
|
||||
for m in ignored_machines:
|
||||
print(m, file=sys.stderr)
|
||||
|
||||
if machines_to_update:
|
||||
# Prepopulate the cache
|
||||
config = nix_config()
|
||||
system = config["system"]
|
||||
machine_names = [machine.name for machine in machines_to_update]
|
||||
args.flake.precache(
|
||||
[
|
||||
f"clanInternals.machines.{system}.{{{','.join(machine_names)}}}.config.clan.core.vars.generators.*.validationHash",
|
||||
f"clanInternals.machines.{system}.{{{','.join(machine_names)}}}.config.system.clan.deployment.file",
|
||||
]
|
||||
)
|
||||
# Run the deployment
|
||||
deploy_machines(machines_to_update)
|
||||
|
||||
except KeyboardInterrupt:
|
||||
log.warning("Interrupted by user")
|
||||
sys.exit(1)
|
||||
|
||||
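The rewritten update_command builds the Machine list first, filters implicit updates, pre-populates the flake cache for the selectors it is about to read, and only then deploys. A compressed sketch of that core flow (machine names and the flake path are placeholders):

from clan_cli.flake import Flake
from clan_cli.machines.machines import Machine
from clan_cli.nix import nix_config

flake = Flake("/path/to/clan")                   # placeholder
names = ["machine1", "machine2"]                 # placeholders
machines = [Machine(name=n, flake=flake) for n in names]

system = nix_config()["system"]
flake.precache(
    [
        f"clanInternals.machines.{system}.{{{','.join(names)}}}.config.clan.core.vars.generators.*.validationHash",
        f"clanInternals.machines.{system}.{{{','.join(names)}}}.config.system.clan.deployment.file",
    ]
)
# deploy_machines(machines) then runs the per-machine deploys in parallel.
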
@@ -5,9 +5,11 @@ import os
|
||||
import shlex
|
||||
import socket
|
||||
import subprocess
|
||||
import types
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from shlex import quote
|
||||
from tempfile import TemporaryDirectory
|
||||
from typing import Any
|
||||
|
||||
from clan_cli.cmd import CmdOut, RunOpts, run
|
||||
@@ -38,27 +40,29 @@ class Host:
|
||||
ssh_options: dict[str, str] = field(default_factory=dict)
|
||||
tor_socks: bool = False
|
||||
|
||||
_temp_dir: TemporaryDirectory | None = None
|
||||
|
||||
def __enter__(self) -> "Host":
|
||||
self._temp_dir = TemporaryDirectory(prefix="clan-ssh-")
|
||||
return self
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
exc_type: type[BaseException] | None,
|
||||
exc_value: BaseException | None,
|
||||
traceback: types.TracebackType | None,
|
||||
) -> None:
|
||||
try:
|
||||
if self._temp_dir:
|
||||
self._temp_dir.cleanup()
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
def __post_init__(self) -> None:
|
||||
if not self.command_prefix:
|
||||
self.command_prefix = self.host
|
||||
if not self.user:
|
||||
self.user = "root"
|
||||
home = Path.home()
|
||||
if home.exists() and os.access(home, os.W_OK):
|
||||
control_path = home / ".ssh"
|
||||
if not control_path.exists():
|
||||
try:
|
||||
control_path.mkdir(exist_ok=True)
|
||||
except OSError:
|
||||
pass
|
||||
else:
|
||||
self.ssh_options["ControlMaster"] = "auto"
|
||||
# Can we make this a temporary directory?
|
||||
self.ssh_options["ControlPath"] = str(
|
||||
control_path / "clan-%h-%p-%r"
|
||||
)
|
||||
# We use a short ttl because we want to mainly re-use the connection during the cli run
|
||||
self.ssh_options["ControlPersist"] = "1m"
|
||||
|
||||
def __str__(self) -> str:
|
||||
return self.target
|
||||
@@ -178,15 +182,17 @@ class Host:
|
||||
# Run the ssh command
|
||||
return run(ssh_cmd, opts)
|
||||
|
||||
def nix_ssh_env(self, env: dict[str, str] | None) -> dict[str, str]:
|
||||
def nix_ssh_env(
|
||||
self, env: dict[str, str] | None, local_ssh: bool = True
|
||||
) -> dict[str, str]:
|
||||
if env is None:
|
||||
env = {}
|
||||
env["NIX_SSHOPTS"] = " ".join(self.ssh_cmd_opts)
|
||||
env["NIX_SSHOPTS"] = " ".join(self.ssh_cmd_opts(local_ssh=local_ssh))
|
||||
return env
|
||||
|
||||
@property
|
||||
def ssh_cmd_opts(
|
||||
self,
|
||||
local_ssh: bool = True,
|
||||
) -> list[str]:
|
||||
ssh_opts = ["-A"] if self.forward_agent else []
|
||||
if self.port:
|
||||
@@ -200,6 +206,16 @@ class Host:
|
||||
if self.private_key:
|
||||
ssh_opts.extend(["-i", str(self.private_key)])
|
||||
|
||||
if local_ssh and self._temp_dir:
|
||||
ssh_opts.extend(["-o", "ControlPersist=30m"])
|
||||
ssh_opts.extend(
|
||||
[
|
||||
"-o",
|
||||
f"ControlPath={Path(self._temp_dir.name) / 'clan-%h-%p-%r'}",
|
||||
]
|
||||
)
|
||||
ssh_opts.extend(["-o", "ControlMaster=auto"])
|
||||
|
||||
return ssh_opts
|
||||
|
||||
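Host is now a context manager: entering it creates a temporary directory whose path backs the ssh ControlPath, so every command in one CLI run shares a multiplexed connection, while nix_ssh_env(..., local_ssh=False) leaves those local-only options out of NIX_SSHOPTS. A sketch; the constructor arguments are assumptions based on the fields this hunk uses:

from clan_cli.ssh.host import Host

with Host(host="192.0.2.10", user="root", command_prefix="machine1") as host:  # placeholders
    local_opts = host.ssh_cmd_opts()                       # includes ControlMaster/ControlPath
    remote_env = host.nix_ssh_env(None, local_ssh=False)   # NIX_SSHOPTS without the control socket
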
def ssh_cmd(
|
||||
@@ -217,7 +233,7 @@ class Host:
|
||||
self.password,
|
||||
]
|
||||
|
||||
ssh_opts = self.ssh_cmd_opts
|
||||
ssh_opts = self.ssh_cmd_opts()
|
||||
if verbose_ssh or self.verbose_ssh:
|
||||
ssh_opts.extend(["-v"])
|
||||
if tty:
|
||||
|
||||
@@ -2,84 +2,13 @@ import tarfile
|
||||
from pathlib import Path
|
||||
from shlex import quote
|
||||
from tempfile import TemporaryDirectory
|
||||
from typing import IO
|
||||
|
||||
from clan_cli.cmd import Log, RunOpts
|
||||
from clan_cli.cmd import run as run_local
|
||||
from clan_cli.errors import ClanError
|
||||
from clan_cli.ssh.host import Host
|
||||
|
||||
|
||||
def unpack_archive_as_root(
|
||||
host: Host, f: IO[bytes], local_src: Path, remote_dest: Path, dir_mode: int = 0o700
|
||||
) -> None:
|
||||
if local_src.is_dir():
|
||||
cmd = 'rm -rf "$0" && mkdir -m "$1" -p "$0" && tar -C "$0" -xzf -'
|
||||
elif local_src.is_file():
|
||||
cmd = 'rm -f "$0" && tar -C "$(dirname "$0")" -xzf -'
|
||||
else:
|
||||
msg = f"Unsupported source file type: {local_src}"
|
||||
raise ClanError(msg)
|
||||
|
||||
host.run(
|
||||
[
|
||||
"sudo",
|
||||
"-p",
|
||||
f"Enter sudo password for {quote(host.host)}: ",
|
||||
"--",
|
||||
"bash",
|
||||
"-c",
|
||||
cmd,
|
||||
str(remote_dest),
|
||||
f"{dir_mode:o}",
|
||||
],
|
||||
RunOpts(
|
||||
input=f,
|
||||
log=Log.BOTH,
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def unpack_archive_as_user(
|
||||
host: Host, f: IO[bytes], local_src: Path, remote_dest: Path, dir_mode: int = 0o700
|
||||
) -> None:
|
||||
archive = host.run(
|
||||
["bash", "-c", "f=$(mktemp); echo $f; cat > $f"],
|
||||
RunOpts(
|
||||
input=f,
|
||||
log=Log.BOTH,
|
||||
),
|
||||
).stdout.strip()
|
||||
|
||||
if local_src.is_dir():
|
||||
cmd = 'trap "rm -f $0" EXIT; rm -rf "$1" && mkdir -m "$2" -p "$1" && tar -C "$1" -xzf "$0"'
|
||||
elif local_src.is_file():
|
||||
cmd = 'trap "rm -f $0" EXIT; rm -f "$1" && tar -C "$(dirname "$1")" -xzf "$0"'
|
||||
else:
|
||||
msg = f"Unsupported source type: {local_src}"
|
||||
raise ClanError(msg)
|
||||
|
||||
# We also need some sort of locks in case we have multiple prompts
|
||||
host.run(
|
||||
[
|
||||
"sudo",
|
||||
"-p",
|
||||
f"Enter sudo password for {host.host}:\n",
|
||||
"--",
|
||||
"bash",
|
||||
"-c",
|
||||
cmd,
|
||||
archive,
|
||||
str(remote_dest),
|
||||
f"{dir_mode:o}",
|
||||
],
|
||||
tty=True,
|
||||
opts=RunOpts(
|
||||
log=Log.BOTH,
|
||||
prefix="",
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def upload(
|
||||
host: Host,
|
||||
local_src: Path,
|
||||
@@ -160,22 +89,33 @@ def upload(
|
||||
with local_src.open("rb") as f:
|
||||
tar.addfile(tarinfo, f)
|
||||
|
||||
sudo = ""
|
||||
if host.user != "root":
|
||||
sudo = "sudo -- "
|
||||
|
||||
cmd = None
|
||||
if local_src.is_dir():
|
||||
cmd = 'rm -rf "$0" && mkdir -m "$1" -p "$0" && tar -C "$0" -xzf -'
|
||||
elif local_src.is_file():
|
||||
cmd = 'rm -f "$0" && tar -C "$(dirname "$0")" -xzf -'
|
||||
else:
|
||||
msg = f"Unsupported source type: {local_src}"
|
||||
raise ClanError(msg)
|
||||
|
||||
# TODO accept `input` to be an IO object instead of bytes so that we don't have to read the tarfile into memory.
|
||||
with tar_path.open("rb") as f:
|
||||
if host.user == "root":
|
||||
unpack_archive_as_root(
|
||||
host,
|
||||
f,
|
||||
local_src,
|
||||
remote_dest,
|
||||
dir_mode=dir_mode,
|
||||
)
|
||||
else:
|
||||
# For sudo we need to split the upload into two steps
|
||||
unpack_archive_as_user(
|
||||
host,
|
||||
f,
|
||||
local_src,
|
||||
remote_dest,
|
||||
dir_mode=dir_mode,
|
||||
)
|
||||
run_local(
|
||||
[
|
||||
*host.ssh_cmd(),
|
||||
"--",
|
||||
f"{sudo}bash -c {quote(cmd)}",
|
||||
str(remote_dest),
|
||||
f"{dir_mode:o}",
|
||||
],
|
||||
RunOpts(
|
||||
input=f.read(),
|
||||
log=Log.BOTH,
|
||||
prefix=host.command_prefix,
|
||||
needs_user_terminal=True,
|
||||
),
|
||||
)
|
||||
|
||||
@@ -27,6 +27,14 @@ def test_root() -> Path:
|
||||
return TEST_ROOT
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def test_lib_root() -> Path:
|
||||
"""
|
||||
Root directory of the clan-lib tests
|
||||
"""
|
||||
return PROJECT_ROOT.parent / "clan_lib" / "tests"
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def clan_core() -> Path:
|
||||
"""
|
||||
|
||||
@@ -80,16 +80,6 @@ exec {bash} -l "${{@}}"
|
||||
|
||||
fake_sudo.write_text(
|
||||
f"""#!{bash}
|
||||
# skip over every sudo option
|
||||
for arg in "${{@}}"; do
|
||||
if [[ "$arg" == "-p" ]]; then
|
||||
shift
|
||||
shift
|
||||
continue
|
||||
fi
|
||||
break
|
||||
done
|
||||
|
||||
exec "${{@}}"
|
||||
"""
|
||||
)
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import pytest
|
||||
from clan_cli.flake import Flake
|
||||
from clan_cli.inventory import load_inventory_json
|
||||
from clan_cli.secrets.folders import sops_machines_folder
|
||||
from clan_cli.tests import fixtures_flakes
|
||||
@@ -24,7 +25,7 @@ def test_machine_subcommands(
|
||||
]
|
||||
)
|
||||
|
||||
inventory: dict = dict(load_inventory_json(str(test_flake_with_core.path)))
|
||||
inventory: dict = dict(load_inventory_json(Flake(str(test_flake_with_core.path))))
|
||||
assert "machine1" in inventory["machines"]
|
||||
assert "service" not in inventory
|
||||
|
||||
@@ -40,7 +41,7 @@ def test_machine_subcommands(
|
||||
["machines", "delete", "--flake", str(test_flake_with_core.path), "machine1"]
|
||||
)
|
||||
|
||||
inventory_2: dict = dict(load_inventory_json(str(test_flake_with_core.path)))
|
||||
inventory_2: dict = dict(load_inventory_json(Flake(str(test_flake_with_core.path))))
|
||||
assert "machine1" not in inventory_2["machines"]
|
||||
assert "service" not in inventory_2
|
||||
|
||||
|
||||
@@ -88,7 +88,7 @@ def test_add_module_to_inventory(
|
||||
}
|
||||
}
|
||||
|
||||
set_inventory(inventory, base_path, "Add borgbackup service")
|
||||
set_inventory(inventory, Flake(str(base_path)), "Add borgbackup service")
|
||||
|
||||
# cmd = ["facts", "generate", "--flake", str(test_flake_with_core.path), "machine1"]
|
||||
cmd = [
|
||||
|
||||
@@ -982,10 +982,7 @@ def test_secrets_key_generate_gpg(
|
||||
]
|
||||
)
|
||||
assert "age private key" not in output.out
|
||||
|
||||
assert re.match(r"PGP key.+is already set", output.err), (
|
||||
f"expected /PGP key.+is already set/ =~ {output.err}"
|
||||
)
|
||||
assert re.match(r"PGP key.+is already set", output.err) is not None
|
||||
|
||||
with capture_output as output:
|
||||
cli.run(
|
||||
|
||||
@@ -7,6 +7,7 @@ from typing import TYPE_CHECKING
|
||||
|
||||
from clan_cli.errors import ClanError
|
||||
from clan_cli.machines import machines
|
||||
from clan_cli.ssh.host import Host
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .generate import Generator, Var
|
||||
@@ -183,5 +184,5 @@ class StoreBase(ABC):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def upload(self, phases: list[str]) -> None:
|
||||
def upload(self, host: Host, phases: list[str]) -> None:
|
||||
pass
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import argparse
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
from dataclasses import dataclass, field
|
||||
from functools import cached_property
|
||||
@@ -86,6 +87,11 @@ class Generator:
|
||||
def bubblewrap_cmd(generator: str, tmpdir: Path) -> list[str]:
|
||||
test_store = nix_test_store()
|
||||
|
||||
real_bash_path = Path("bash")
|
||||
if os.environ.get("IN_NIX_SANDBOX"):
|
||||
bash_executable_path = Path(str(shutil.which("bash")))
|
||||
real_bash_path = bash_executable_path.resolve()
|
||||
|
||||
# fmt: off
|
||||
return nix_shell(
|
||||
[
|
||||
@@ -109,8 +115,8 @@ def bubblewrap_cmd(generator: str, tmpdir: Path) -> list[str]:
|
||||
"--uid", "1000",
|
||||
"--gid", "1000",
|
||||
"--",
|
||||
"bash", "-c", generator
|
||||
],
|
||||
str(real_bash_path), "-c", generator
|
||||
]
|
||||
)
|
||||
# fmt: on
|
||||
|
||||
@@ -353,19 +359,20 @@ def _generate_vars_for_machine(
|
||||
@API.register
|
||||
def generate_vars_for_machine(
|
||||
machine_name: str,
|
||||
generators: list[Generator],
|
||||
generators: list[str],
|
||||
all_prompt_values: dict[str, dict[str, str]],
|
||||
base_dir: Path,
|
||||
no_sandbox: bool = False,
|
||||
) -> bool:
|
||||
from clan_cli.machines.machines import Machine
|
||||
|
||||
machine = Machine(name=machine_name, flake=Flake(str(base_dir)))
|
||||
generators_set = set(generators)
|
||||
generators_ = [g for g in machine.vars_generators if g.name in generators_set]
|
||||
|
||||
return _generate_vars_for_machine(
|
||||
machine=Machine(
|
||||
name=machine_name,
|
||||
flake=Flake(str(base_dir)),
|
||||
),
|
||||
generators=generators,
|
||||
machine=machine,
|
||||
generators=generators_,
|
||||
all_prompt_values=all_prompt_values,
|
||||
no_sandbox=no_sandbox,
|
||||
)
|
||||
|
||||
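generate_vars_for_machine now receives plain generator names and resolves them against machine.vars_generators itself. A call sketch with hypothetical values (the import path follows the vars generate module shown above):

from pathlib import Path

from clan_cli.vars.generate import generate_vars_for_machine

ok = generate_vars_for_machine(
    machine_name="machine1",                 # placeholder
    generators=["new-service"],              # names, not Generator objects
    all_prompt_values={"new-service": {}},   # per-generator prompt answers
    base_dir=Path("/path/to/clan"),          # flake directory
    no_sandbox=False,
)
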
@@ -4,6 +4,7 @@ from pathlib import Path
|
||||
|
||||
from clan_cli.errors import ClanError
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.ssh.host import Host
|
||||
from clan_cli.vars._types import StoreBase
|
||||
from clan_cli.vars.generate import Generator, Var
|
||||
|
||||
@@ -72,6 +73,6 @@ class FactStore(StoreBase):
|
||||
msg = "populate_dir is not implemented for public vars stores"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
def upload(self, phases: list[str]) -> None:
|
||||
def upload(self, host: Host, phases: list[str]) -> None:
|
||||
msg = "upload is not implemented for public vars stores"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
@@ -6,6 +6,7 @@ from pathlib import Path
|
||||
from clan_cli.dirs import vm_state_dir
|
||||
from clan_cli.errors import ClanError
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.ssh.host import Host
|
||||
from clan_cli.vars._types import StoreBase
|
||||
from clan_cli.vars.generate import Generator, Var
|
||||
|
||||
@@ -69,6 +70,6 @@ class FactStore(StoreBase):
|
||||
msg = "populate_dir is not implemented for public vars stores"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
def upload(self, phases: list[str]) -> None:
|
||||
def upload(self, host: Host, phases: list[str]) -> None:
|
||||
msg = "upload is not implemented for public vars stores"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
@@ -3,6 +3,7 @@ import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.ssh.host import Host
|
||||
from clan_cli.vars._types import StoreBase
|
||||
from clan_cli.vars.generate import Generator, Var
|
||||
|
||||
@@ -45,6 +46,6 @@ class SecretStore(StoreBase):
|
||||
shutil.copytree(self.dir, output_dir)
|
||||
shutil.rmtree(self.dir)
|
||||
|
||||
def upload(self, phases: list[str]) -> None:
|
||||
def upload(self, host: Host, phases: list[str]) -> None:
|
||||
msg = "Cannot upload secrets with FS backend"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
@@ -10,6 +10,7 @@ from tempfile import TemporaryDirectory
from clan_cli.cmd import CmdOut, Log, RunOpts, run
from clan_cli.machines.machines import Machine
from clan_cli.nix import nix_shell
from clan_cli.ssh.host import Host
from clan_cli.ssh.upload import upload
from clan_cli.vars._types import StoreBase
from clan_cli.vars.generate import Generator, Var
@@ -146,9 +147,9 @@ class SecretStore(StoreBase):
manifest += hashes
return b"\n".join(manifest)

def needs_upload(self) -> bool:
def needs_upload(self, host: Host) -> bool:
local_hash = self.generate_hash()
remote_hash = self.machine.target_host.run(
remote_hash = host.run(
# TODO get the path to the secrets from the machine
[
"cat",
@@ -224,11 +225,11 @@ class SecretStore(StoreBase):

(output_dir / f".{self._store_backend}_info").write_bytes(self.generate_hash())

def upload(self, phases: list[str]) -> None:
def upload(self, host: Host, phases: list[str]) -> None:
if "partitioning" in phases:
msg = "Cannot upload partitioning secrets"
raise NotImplementedError(msg)
if not self.needs_upload():
if not self.needs_upload(host):
log.info("Secrets already uploaded")
return
with TemporaryDirectory(prefix="vars-upload-") as _tempdir:
@@ -237,4 +238,4 @@ class SecretStore(StoreBase):
upload_dir = Path(
self.machine.deployment["password-store"]["secretLocation"]
)
upload(self.machine.target_host, pass_dir, upload_dir)
upload(host, pass_dir, upload_dir)
@@ -23,6 +23,7 @@ from clan_cli.secrets.secrets import (
groups_folder,
has_secret,
)
from clan_cli.ssh.host import Host
from clan_cli.ssh.upload import upload
from clan_cli.vars._types import StoreBase
from clan_cli.vars.generate import Generator
@@ -220,14 +221,15 @@ class SecretStore(StoreBase):
target_path.write_bytes(self.get(generator, file.name))
target_path.chmod(file.mode)

def upload(self, phases: list[str]) -> None:
@override
def upload(self, host: Host, phases: list[str]) -> None:
if "partitioning" in phases:
msg = "Cannot upload partitioning secrets"
raise NotImplementedError(msg)
with TemporaryDirectory(prefix="sops-upload-") as _tempdir:
sops_upload_dir = Path(_tempdir).resolve()
self.populate_dir(sops_upload_dir, phases)
upload(self.machine.target_host, sops_upload_dir, Path("/var/lib/sops-nix"))
upload(host, sops_upload_dir, Path("/var/lib/sops-nix"))

def exists(self, generator: Generator, name: str) -> bool:
secret_folder = self.secret_path(generator, name)
@@ -260,7 +262,6 @@ class SecretStore(StoreBase):

return keys

# }
def needs_fix(self, generator: Generator, name: str) -> tuple[bool, str | None]:
secret_path = self.secret_path(generator, name)
current_recipients = sops.get_recipients(secret_path)
@@ -4,6 +4,7 @@ from pathlib import Path

from clan_cli.dirs import vm_state_dir
from clan_cli.machines.machines import Machine
from clan_cli.ssh.host import Host
from clan_cli.vars._types import StoreBase
from clan_cli.vars.generate import Generator, Var

@@ -60,6 +61,6 @@ class SecretStore(StoreBase):
shutil.rmtree(output_dir)
shutil.copytree(self.dir, output_dir)

def upload(self, phases: list[str]) -> None:
def upload(self, host: Host, phases: list[str]) -> None:
msg = "Cannot upload secrets to VMs"
raise NotImplementedError(msg)
@@ -4,17 +4,19 @@ from pathlib import Path

from clan_cli.completions import add_dynamic_completer, complete_machines
from clan_cli.machines.machines import Machine
from clan_cli.ssh.host import Host

log = logging.getLogger(__name__)


def upload_secret_vars(machine: Machine, directory: Path | None = None) -> None:
if directory:
machine.secret_vars_store.populate_dir(
directory, phases=["activation", "users", "services"]
)
else:
machine.secret_vars_store.upload(phases=["activation", "users", "services"])
def upload_secret_vars(machine: Machine, host: Host) -> None:
machine.secret_vars_store.upload(host, phases=["activation", "users", "services"])


def populate_secret_vars(machine: Machine, directory: Path) -> None:
machine.secret_vars_store.populate_dir(
directory, phases=["activation", "users", "services"]
)


def upload_command(args: argparse.Namespace) -> None:
@@ -22,7 +24,11 @@ def upload_command(args: argparse.Namespace) -> None:
directory = None
if args.directory:
directory = Path(args.directory)
upload_secret_vars(machine, directory)
populate_secret_vars(machine, directory)
return

with machine.target_host() as host:
upload_secret_vars(machine, host)


def register_upload_parser(parser: argparse.ArgumentParser) -> None:
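
Taken together, the hunks above move the vars machinery from reaching into machine.target_host inside each store backend to receiving an explicit Host, and split the old directory mode of upload_secret_vars into a separate populate_secret_vars. A minimal sketch of the resulting call sites, roughly mirroring upload_command above; the flake path "./my-clan" and machine name "server01" are made-up placeholders, not part of the diff:

# Illustrative sketch only, not part of the commits above.
from pathlib import Path

from clan_cli.flake import Flake
from clan_cli.machines.machines import Machine
from clan_cli.vars.upload import populate_secret_vars, upload_secret_vars

machine = Machine(name="server01", flake=Flake("./my-clan"))

# Render the secrets into a local directory (the old `--directory` behaviour) ...
populate_secret_vars(machine, Path("/tmp/secret-staging"))

# ... or open an SSH connection and push them to the deployment target.
with machine.target_host() as host:
    upload_secret_vars(machine, host)
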
@@ -22,7 +22,7 @@ from clan_cli.nix import nix_shell
from clan_cli.qemu.qga import QgaSession
from clan_cli.qemu.qmp import QEMUMonitorProtocol
from clan_cli.vars.generate import generate_vars
from clan_cli.vars.upload import upload_secret_vars
from clan_cli.vars.upload import populate_secret_vars

from .inspect import VmConfig, inspect_vm
from .qemu import qemu_command
@@ -84,7 +84,7 @@ def get_secrets(
generate_vars([machine])

machine.secret_facts_store.upload(secrets_dir)
upload_secret_vars(machine, secrets_dir)
populate_secret_vars(machine, secrets_dir)
return secrets_dir
@@ -1,37 +0,0 @@
# @API.register
# def set_admin_service(
# base_url: str,
# allowed_keys: dict[str, str],
# instance_name: str = "admin",
# extra_machines: list[str] | None = None,
# ) -> None:
# """
# Set the admin service of a clan
# Every machine is by default part of the admin service via the 'all' tag
# """
# if extra_machines is None:
# extra_machines = []
# inventory = load_inventory_eval(base_url)

# if not allowed_keys:
# msg = "At least one key must be provided to ensure access"
# raise ClanError(msg)

# instance = ServiceAdmin(
# meta=ServiceMeta(name=instance_name),
# roles=ServiceAdminRole(
# default=ServiceAdminRoleDefault(
# machines=extra_machines,
# tags=["all"],
# )
# ),
# config=AdminConfig(allowedKeys=allowed_keys),
# )

# inventory.services.admin[instance_name] = instance

# save_inventory(
# inventory,
# base_url,
# f"Set admin service: '{instance_name}'",
# )
@@ -2,12 +2,12 @@ import json
import logging
from collections.abc import Callable
from dataclasses import dataclass
from pathlib import Path
from typing import Any, TypedDict
from uuid import uuid4

from clan_cli.dirs import TemplateType, clan_templates
from clan_cli.errors import ClanError
from clan_cli.flake import Flake
from clan_cli.git import commit_file
from clan_cli.machines.hardware import HardwareConfig, show_machine_hardware_config

@@ -75,7 +75,7 @@ templates: dict[str, dict[str, Callable[[dict[str, Any]], Placeholder]]] = {

@API.register
def get_disk_schemas(
base_path: Path, machine_name: str | None = None
flake: Flake, machine_name: str | None = None
) -> dict[str, DiskSchema]:
"""
Get the available disk schemas
@@ -85,9 +85,7 @@ def get_disk_schemas(
hw_report = {}

if machine_name is not None:
hw_report_path = HardwareConfig.NIXOS_FACTER.config_path(
base_path, machine_name
)
hw_report_path = HardwareConfig.NIXOS_FACTER.config_path(flake, machine_name)
if not hw_report_path.exists():
msg = "Hardware configuration missing"
raise ClanError(msg)
@@ -132,7 +130,7 @@ class MachineDiskMatter(TypedDict):

@API.register
def set_machine_disk_schema(
base_path: Path,
flake: Flake,
machine_name: str,
schema_name: str,
# Placeholders are used to fill in the disk schema
@@ -144,8 +142,8 @@ def set_machine_disk_schema(
Set the disk placeholders of the template
"""
# Assert the hw-config must exist before setting the disk
hw_config = show_machine_hardware_config(base_path, machine_name)
hw_config_path = hw_config.config_path(base_path, machine_name)
hw_config = show_machine_hardware_config(flake, machine_name)
hw_config_path = hw_config.config_path(flake, machine_name)

if not hw_config_path.exists():
msg = "Hardware configuration must exist before applying disk schema"
@@ -162,7 +160,7 @@ def set_machine_disk_schema(
raise ClanError(msg)

# Check that the placeholders are valid
disk_schema = get_disk_schemas(base_path, machine_name)[schema_name]
disk_schema = get_disk_schemas(flake, machine_name)[schema_name]
# check that all required placeholders are present
for placeholder_name, schema_placeholder in disk_schema.placeholders.items():
if schema_placeholder.required and placeholder_name not in placeholders:
@@ -223,6 +221,6 @@ def set_machine_disk_schema(

commit_file(
disko_file_path,
base_path,
flake.path,
commit_message=f"Set disk schema of machine: {machine_name} to {schema_name}",
)
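
The hunks above are part of the wider migration in this commit range from a bare base_path: Path argument to passing a Flake object through the API. A short sketch of the migrated call shape, assuming these functions live in clan_lib.api.disk (the test file below imports set_machine_disk_schema from there); the flake path "./my-clan", machine name "server01" and device "/dev/sda" are placeholders:

# Illustrative sketch only; names and paths are assumptions, not part of the diff.
from clan_cli.flake import Flake
from clan_lib.api.disk import get_disk_schemas, set_machine_disk_schema

flake = Flake("./my-clan")

# Disk schemas are now resolved through the Flake object instead of a Path.
schemas = get_disk_schemas(flake, "server01")
print(sorted(schemas))  # e.g. includes "single-disk", as used by the test below

# Fill in the schema's placeholders; the generated disko file is committed into the flake.
set_machine_disk_schema(flake, "server01", "single-disk", {"mainDisk": "/dev/sda"})
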
0
pkgs/clan-cli/clan_lib/tests/__init__.py
Normal file
3397
pkgs/clan-cli/clan_lib/tests/assets/facter.json
Normal file
File diff suppressed because it is too large
273
pkgs/clan-cli/clan_lib/tests/test_create.py
Normal file
@@ -0,0 +1,273 @@
import json
import logging
import shutil
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import Any

import clan_cli.clan.create
import pytest
from clan_cli.cmd import RunOpts, run
from clan_cli.dirs import specific_machine_dir
from clan_cli.errors import ClanError
from clan_cli.flake import Flake
from clan_cli.inventory import patch_inventory_with
from clan_cli.inventory.classes import Machine as InventoryMachine
from clan_cli.inventory.classes import MachineDeploy
from clan_cli.machines.create import CreateOptions as ClanCreateOptions
from clan_cli.machines.create import create_machine
from clan_cli.machines.list import check_machine_online
from clan_cli.machines.machines import Machine
from clan_cli.nix import nix_command
from clan_cli.secrets.key import generate_key
from clan_cli.secrets.sops import maybe_get_admin_public_key
from clan_cli.secrets.users import add_user
from clan_cli.ssh.host import Host
from clan_cli.ssh.host_key import HostKeyCheck
from clan_cli.vars.generate import generate_vars_for_machine, get_generators_closure

from clan_lib.api.disk import hw_main_disk_options, set_machine_disk_schema

log = logging.getLogger(__name__)


@dataclass
class InventoryWrapper:
services: dict[str, Any]


@dataclass
class InvSSHKeyEntry:
username: str
ssh_pubkey_txt: str


@dataclass
class SSHKeyPair:
private: Path
public: Path


def create_base_inventory(ssh_keys_pairs: list[SSHKeyPair]) -> InventoryWrapper:
ssh_keys = [
InvSSHKeyEntry("nixos-anywhere", ssh_keys_pairs[0].public.read_text()),
]
for num, ssh_key in enumerate(ssh_keys_pairs[1:]):
ssh_keys.append(InvSSHKeyEntry(f"user_{num}", ssh_key.public.read_text()))

"""Create the base inventory structure."""
inventory: dict[str, Any] = {
"sshd": {
"someid": {
"roles": {
"server": {
"tags": ["all"],
"config": {},
}
}
}
},
"state-version": {
"someid": {
"roles": {
"default": {
"tags": ["all"],
}
}
}
},
"admin": {
"someid": {
"roles": {
"default": {
"tags": ["all"],
"config": {
"allowedKeys": {
key.username: key.ssh_pubkey_txt for key in ssh_keys
},
},
},
}
}
},
}

return InventoryWrapper(services=inventory)


# TODO: We need a way to calculate the narHash of the current clan-core
# and substitute it in a pregenerated flake.lock
def fix_flake_inputs(clan_dir: Path, clan_core_dir: Path) -> None:
flake_nix = clan_dir / "flake.nix"
assert flake_nix.exists()
clan_dir_flake = Flake(str(clan_dir))
clan_dir_flake.invalidate_cache()
content = flake_nix.read_text()
content = content.replace(
"https://git.clan.lol/clan/clan-core/archive/main.tar.gz",
f"path://{clan_core_dir}",
)
flake_nix.write_text(content)

run(nix_command(["flake", "update"]), RunOpts(cwd=clan_dir))


@pytest.mark.with_core
@pytest.mark.skipif(sys.platform == "darwin", reason="sshd fails to start on darwin")
def test_clan_create_api(
temporary_home: Path, test_lib_root: Path, clan_core: Path, hosts: list[Host]
) -> None:
host_ip = hosts[0].host
host_user = hosts[0].user
vm_name = "test-clan"
clan_core_dir_var = str(clan_core)
priv_key_var = hosts[0].private_key
ssh_port_var = str(hosts[0].port)

assert priv_key_var is not None
private_key = Path(priv_key_var).expanduser()

assert host_user is not None

assert private_key.exists()
assert private_key.is_file()

public_key = Path(f"{private_key}.pub")
assert public_key.exists()
assert public_key.is_file()

dest_clan_dir = Path("~/new-clan").expanduser()

# ===== CREATE CLAN ======
# TODO: We need to generate a lock file for the templates
clan_cli.clan.create.create_clan(
clan_cli.clan.create.CreateOptions(
template_name="minimal", dest=dest_clan_dir, update_clan=False
)
)
assert dest_clan_dir.is_dir()
assert (dest_clan_dir / "flake.nix").is_file()

clan_core_dir = Path(clan_core_dir_var)
# TODO: We need a way to generate the lock file for the templates
fix_flake_inputs(dest_clan_dir, clan_core_dir)

# ===== CREATE SOPS KEY ======
sops_key = maybe_get_admin_public_key()
if sops_key is None:
# TODO: In the UI we need a view for this
sops_key = generate_key()
else:
msg = "SOPS key already exists, please remove it before running this test"
raise ClanError(msg)

# TODO: This needs to be exposed in the UI and we need a view for this
add_user(
dest_clan_dir,
name="testuser",
keys=[sops_key],
force=False,
)

# ===== CREATE MACHINE/s ======
clan_dir_flake = Flake(str(dest_clan_dir))
machines: list[Machine] = []

host = Host(user=host_user, host=host_ip, port=int(ssh_port_var))
# TODO: We need to merge Host and Machine class these duplicate targetHost stuff is a nightmare
inv_machine = InventoryMachine(
name=vm_name, deploy=MachineDeploy(targetHost=f"{host.target}:{ssh_port_var}")
)
create_machine(
ClanCreateOptions(
clan_dir_flake, inv_machine, target_host=f"{host.target}:{ssh_port_var}"
)
)

machine = Machine(
name=vm_name,
flake=clan_dir_flake,
host_key_check=HostKeyCheck.NONE,
private_key=private_key,
)
machines.append(machine)
assert len(machines) == 1

# Invalidate cache because of new machine creation
clan_dir_flake.invalidate_cache()

result = check_machine_online(machine)
assert result == "Online", f"Machine {machine.name} is not online"

ssh_keys = [
SSHKeyPair(
private=private_key,
public=public_key,
)
]

# ===== CREATE BASE INVENTORY ======
inventory = create_base_inventory(ssh_keys)
patch_inventory_with(Flake(str(dest_clan_dir)), "services", inventory.services)

# Invalidate cache because of new inventory
clan_dir_flake.invalidate_cache()

generators = get_generators_closure(machine.name, dest_clan_dir)
all_prompt_values = {}
for generator in generators:
prompt_values = {}
for prompt in generator.prompts:
var_id = f"{generator.name}/{prompt.name}"
if generator.name == "root-password" and prompt.name == "password":
prompt_values[prompt.name] = "terraform"
else:
msg = f"Prompt {var_id} not handled in test, please fix it"
raise ClanError(msg)
all_prompt_values[generator.name] = prompt_values

generate_vars_for_machine(
machine.name,
generators=[gen.name for gen in generators],
base_dir=dest_clan_dir,
all_prompt_values=all_prompt_values,
)

clan_dir_flake.invalidate_cache()

# ===== Select Disko Config ======
facter_json = test_lib_root / "assets" / "facter.json"
assert facter_json.exists(), f"Source facter file not found: {facter_json}"

dest_dir = specific_machine_dir(Flake(str(clan_dir_flake.path)), machine.name)
# specific_machine_dir should create the directory, but ensure it exists just in case
dest_dir.mkdir(parents=True, exist_ok=True)

dest_facter_path = dest_dir / "facter.json"

# Copy the file
shutil.copy(facter_json, dest_facter_path)
assert dest_facter_path.exists(), (
f"Failed to copy facter file to {dest_facter_path}"
)

# ===== Create Disko Config ======
facter_path = (
specific_machine_dir(Flake(str(clan_dir_flake.path)), machine.name)
/ "facter.json"
)
with facter_path.open("r") as f:
facter_report = json.load(f)

disk_devs = hw_main_disk_options(facter_report)

assert disk_devs is not None

placeholders = {"mainDisk": disk_devs[0]}
set_machine_disk_schema(clan_dir_flake, machine.name, "single-disk", placeholders)
clan_dir_flake.invalidate_cache()

with pytest.raises(ClanError) as exc_info:
machine.build_nix("config.system.build.toplevel")
assert "nixos-system-test-clan" in str(exc_info.value)
22
pkgs/clan-cli/conftest.py
Normal file
@@ -0,0 +1,22 @@
import pytest
from clan_cli.custom_logger import setup_logging

# Every fixture registered here will be available in clan_cli and clan_lib
pytest_plugins = [
"clan_cli.tests.temporary_dir",
"clan_cli.tests.root",
"clan_cli.tests.sshd",
"clan_cli.tests.hosts",
"clan_cli.tests.command",
"clan_cli.tests.ports",
]


# Executed on pytest session start
def pytest_sessionstart(session: pytest.Session) -> None:
# This function will be called once at the beginning of the test session
print("Starting pytest session")
# You can access the session config, items, testsfailed, etc.
print(f"Session config: {session.config}")

setup_logging(level="INFO")
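
The new shared conftest.py registers the clan_cli test fixture modules as pytest plugins, which is what lets the clan_lib tests above (e.g. test_clan_create_api) use fixtures such as temporary_home and hosts without a local conftest or import. A minimal sketch of a clan_lib test relying on that; the fixture names are assumptions taken from test_clan_create_api, not a documented API:

# Illustrative sketch only, not part of the commits above.
from pathlib import Path

from clan_cli.ssh.host import Host


def test_shared_fixtures_are_available(temporary_home: Path, hosts: list[Host]) -> None:
    # Both fixtures come from the pytest_plugins entries in pkgs/clan-cli/conftest.py,
    # so no per-package conftest is needed here.
    assert temporary_home.is_dir()
    assert len(hosts) >= 1
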
@@ -48,7 +48,7 @@ let
&& !(stdenv.hostPlatform.system == "aarch64-linux" && attr == "age-plugin-se")
) (lib.genAttrs deps (name: pkgs.${name}));
testRuntimeDependenciesMap = generateRuntimeDependenciesMap allDependencies;
testRuntimeDependencies = lib.attrValues testRuntimeDependenciesMap;
testRuntimeDependencies = (lib.attrValues testRuntimeDependenciesMap);
bundledRuntimeDependenciesMap = generateRuntimeDependenciesMap includedRuntimeDeps;
bundledRuntimeDependencies = lib.attrValues bundledRuntimeDependenciesMap;
@@ -215,6 +215,59 @@ pythonRuntime.pkgs.buildPythonApplication {
python -m pytest -m "not impure and with_core" ./clan_cli -n $jobs
touch $out
'';
}
// {
# disabled on macOS until we fix all remaining issues
clan-lib-pytest =
runCommand "clan-lib-pytest"
{
nativeBuildInputs = testDependencies;
buildInputs = [
pkgs.bash
pkgs.coreutils
pkgs.nix
];
closureInfo = pkgs.closureInfo {
rootPaths = [
templateDerivation
pkgs.bash
pkgs.coreutils
pkgs.jq.dev
pkgs.stdenv
pkgs.stdenvNoCC
pkgs.openssh
pkgs.shellcheck-minimal
pkgs.mkpasswd
pkgs.xkcdpass
nix-select
];
};
}
''
set -euo pipefail
cp -r ${source} ./src
chmod +w -R ./src
cd ./src

export CLAN_CORE_PATH=${clan-core-path}
export NIX_STATE_DIR=$TMPDIR/nix
export IN_NIX_SANDBOX=1
export PYTHONWARNINGS=error
export CLAN_TEST_STORE=$TMPDIR/store
# required to prevent concurrent 'nix flake lock' operations
export LOCK_NIX=$TMPDIR/nix_lock
mkdir -p "$CLAN_TEST_STORE/nix/store"
mkdir -p "$CLAN_TEST_STORE/nix/var/nix/gcroots"
xargs cp --recursive --target "$CLAN_TEST_STORE/nix/store" < "$closureInfo/store-paths"
nix-store --load-db --store "$CLAN_TEST_STORE" < "$closureInfo/registration"

# limit build cores to 4
jobs="$((NIX_BUILD_CORES>4 ? 4 : NIX_BUILD_CORES))"

python -m pytest -m "with_core" ./clan_lib -n $jobs
touch $out
'';
};

passthru.nixpkgs = nixpkgs';
@@ -29,7 +29,7 @@ clan_cli = [
]

[tool.pytest.ini_options]
testpaths = ["tests", "clan_cli"]
testpaths = ["tests", "clan_cli", "clan_lib"]
faulthandler_timeout = 240
log_level = "DEBUG"
log_format = "%(message)s"
@@ -13,9 +13,9 @@ export async function get_inventory(client: QueryClient, base_path: string) {
queryKey: [base_path, "inventory"],
queryFn: () => {
console.log("Refreshing inventory");
return callApi("get_inventory", { base_path }) as Promise<
ApiEnvelope<Inventory>
>;
return callApi("get_inventory", {
flake: { identifier: base_path },
}) as Promise<ApiEnvelope<Inventory>>;
},
revalidateIfStale: true,
staleTime: 60 * 1000,
@@ -6,7 +6,7 @@ export const instance_name = (machine_name: string) =>

export async function get_iwd_service(base_path: string, machine_name: string) {
const r = await callApi("get_inventory", {
base_path,
flake: { identifier: base_path },
});
if (r.status == "error") {
return null;
@@ -10,7 +10,7 @@ import { Filter } from "../routes/machines";
import { Typography } from "./Typography";
import { Button } from "./button";

type MachineDetails = SuccessQuery<"list_inventory_machines">["data"][string];
type MachineDetails = SuccessQuery<"list_machines">["data"][string];

interface MachineListItemProps {
name: string;
@@ -55,9 +55,9 @@ export const MachineListItem = (props: MachineListItemProps) => {
flake: {
identifier: active_clan,
},
override_target_host: info?.deploy.targetHost,
},
no_reboot: true,
target_host: info?.deploy.targetHost,
debug: true,
nix_options: [],
password: null,
@@ -43,7 +43,7 @@ export const tagsQuery = (uri: string | null) =>
if (!uri) return [];

const response = await callApi("get_inventory", {
base_path: uri,
flake: { identifier: uri },
});
if (response.status === "error") {
toast.error("Failed to fetch data");
@@ -64,7 +64,7 @@ export const machinesQuery = (uri: string | null) =>
if (!uri) return [];

const response = await callApi("get_inventory", {
base_path: uri,
flake: { identifier: uri },
});
if (response.status === "error") {
toast.error("Failed to fetch data");
@@ -42,7 +42,7 @@ const EditClanForm = (props: EditClanFormProps) => {
(async () => {
await callApi("update_clan_meta", {
options: {
directory: props.directory,
flake: { identifier: props.directory },
meta: values,
},
});
@@ -10,7 +10,9 @@ export function DiskView() {
const currUri = activeURI();
if (currUri) {
// Example of calling an API
const result = await callApi("get_inventory", { base_path: currUri });
const result = await callApi("get_inventory", {
flake: { identifier: currUri },
});
if (result.status === "error") throw new Error("Failed to fetch data");
return result.data;
}
@@ -32,7 +32,7 @@ type MachineFormInterface = MachineData & {
disk?: string;
};

type MachineData = SuccessData<"get_inventory_machine_details">;
type MachineData = SuccessData<"get_machine_details">;

const steps: Record<StepIdx, string> = {
"1": "Hardware detection",
@@ -115,7 +115,7 @@ const InstallMachine = (props: InstallMachineProps) => {
if (shouldRunDisk) {
setProgressText("Setting up disk ... (1/5)");
const disk_response = await callApi("set_machine_disk_schema", {
base_path: curr_uri,
flake: { identifier: curr_uri },
machine_name: props.name,
placeholders: diskValues.placeholders,
schema_name: diskValues.schema,
@@ -142,8 +142,8 @@ const InstallMachine = (props: InstallMachineProps) => {
flake: {
identifier: curr_uri,
},
override_target_host: target,
},
target_host: target,
password: "",
},
});
@@ -425,7 +425,9 @@ const MachineForm = (props: MachineDetailsProps) => {
}

const machine_response = await callApi("set_machine", {
flake_url: curr_uri,
flake: {
identifier: curr_uri,
},
machine_name: props.initialData.machine.name || "My machine",
machine: {
...values.machine,
@@ -641,18 +643,17 @@ const MachineForm = (props: MachineDetailsProps) => {
export const MachineDetails = () => {
const params = useParams();
const genericQuery = createQuery(() => ({
queryKey: [
activeURI(),
"machine",
params.id,
"get_inventory_machine_details",
],
queryKey: [activeURI(), "machine", params.id, "get_machine_details"],
queryFn: async () => {
const curr = activeURI();
if (curr) {
const result = await callApi("get_inventory_machine_details", {
flake_url: curr,
machine_name: params.id,
const result = await callApi("get_machine_details", {
machine: {
flake: {
identifier: curr,
},
name: params.id,
},
});
if (result.status === "error") throw new Error("Failed to fetch data");
return result.data;
@@ -37,7 +37,9 @@ export const DiskStep = (props: StepProps<DiskValues>) => {
queryKey: [props.dir, props.machine_id, "disk_schemas"],
queryFn: async () => {
const result = await callApi("get_disk_schemas", {
base_path: props.dir,
flake: {
identifier: props.dir,
},
machine_name: props.machine_id,
});
if (result.status === "error") throw new Error("Failed to fetch data");
@@ -52,7 +52,9 @@ export const HWStep = (props: StepProps<HardwareValues>) => {
queryKey: [props.dir, props.machine_id, "hw_report"],
queryFn: async () => {
const result = await callApi("show_machine_hardware_config", {
clan_dir: props.dir,
flake: {
identifier: props.dir,
},
machine_name: props.machine_id,
});
if (result.status === "error") throw new Error("Failed to fetch data");
@@ -18,7 +18,7 @@ import { Header } from "@/src/layout/header";
import { makePersisted } from "@solid-primitives/storage";

type MachinesModel = Extract<
OperationResponse<"list_inventory_machines">,
OperationResponse<"list_machines">,
{ status: "success" }
>["data"];

@@ -32,14 +32,16 @@ export const MachineListView: Component = () => {
const [filter, setFilter] = createSignal<Filter>({ tags: [] });

const inventoryQuery = createQuery<MachinesModel>(() => ({
queryKey: [activeURI(), "list_inventory_machines"],
queryKey: [activeURI(), "list_machines"],
placeholderData: {},
enabled: !!activeURI(),
queryFn: async () => {
const uri = activeURI();
if (uri) {
const response = await callApi("list_inventory_machines", {
flake_url: uri,
const response = await callApi("list_machines", {
flake: {
identifier: uri,
},
});
if (response.status === "error") {
toast.error("Failed to fetch data");