Compare commits


1 Commit

Author: DavHau
SHA1: 588eeabf39
Message: ci performance: add check to ensure nothing depends on the whole repo

Since this project is an ever-growing monorepo, having derivations that depend on the whole repo leads to bad CI performance, as the cache is busted on every commit.

-> We never want any derivation to depend on the whole repo

...except: the test that checks that nothing depends on the whole repo, which is added by this commit.

Date: 2025-04-30 11:55:31 +07:00
144 changed files with 1193 additions and 5654 deletions
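To make the motivation above concrete, here is a minimal sketch of how such a guard can be expressed in Nix. This is not the code added by the commit; it assumes a flake source `self`, a nixpkgs instance `pkgs`, and a list `checksOutPaths` of check output paths (the latter name is borrowed from the diffs below purely for illustration).

```nix
{ pkgs, self, checksOutPaths }:

# Sketch of a "dont-depend-on-repo-root" guard: fail if the closure of any
# check output contains the flake's own source path (i.e. the whole repo).
pkgs.runCommand "dont-depend-on-repo-root"
  {
    # file listing the full runtime closure of every check output
    closure = pkgs.closureInfo { rootPaths = checksOutPaths; };
    # the repo root store path; referencing it means only this one check is
    # rebuilt on every commit, matching the "...except" note in the message above
    repoRoot = "${self}";
  }
  ''
    if grep -qx "$repoRoot" "$closure/store-paths"; then
      echo "error: a derivation depends on the repo root: $repoRoot" >&2
      exit 1
    fi
    touch "$out"
  ''
```

Keeping the repo reference confined to this single check is what preserves the binary cache for everything else across commits.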

.gitignore (vendored): 7 lines changed

@@ -14,12 +14,8 @@ example_clan
nixos.qcow2
**/*.glade~
/docs/out
/pkgs/clan-cli/clan_cli/select
**/.local.env
# MacOS stuff
**/.DS_store
# dream2nix
.dream2nix
@@ -43,6 +39,3 @@ repo
node_modules
dist
.webui
# TODO: remove after bug in select is fixed
select


@@ -1,2 +0,0 @@
nixosModules/clanCore/vars/.* @lopter
pkgs/clan-cli/clan_cli/(secrets|vars)/.* @lopter


@@ -26,7 +26,6 @@ clanLib.test.makeTestClan {
roles.admin.machines = [ "admin1" ];
};
};
instances."test" = {
module.name = "new-service";
roles.peer.machines.peer1 = { };
@@ -41,23 +40,15 @@ clanLib.test.makeTestClan {
perMachine = {
nixosModule = {
# This should be generated by:
# nix run .#generate-test-vars -- checks/dummy-inventory-test dummy-inventory-test
# ./pkgs/scripts/update-vars.py
clan.core.vars.generators.new-service = {
files.not-a-secret = {
files.hello = {
secret = false;
deploy = true;
};
files.a-secret = {
secret = true;
deploy = true;
owner = "nobody";
group = "users";
mode = "0644";
};
script = ''
# This is a dummy script that does nothing
echo -n "not-a-secret" > $out/not-a-secret
echo -n "a-secret" > $out/a-secret
echo "This is a dummy script" > $out/hello
'';
};
};
@@ -78,15 +69,7 @@ clanLib.test.makeTestClan {
print(peer1.succeed("systemctl status dummy-service"))
# peer1 should have the 'hello' file
peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}")
ls_out = peer1.succeed("ls -la ${nodes.peer1.clan.core.vars.generators.new-service.files.a-secret.path}")
# Check that the file is owned by 'nobody'
assert "nobody" in ls_out, f"File is not owned by 'nobody': {ls_out}"
# Check that the file is in the 'users' group
assert "users" in ls_out, f"File is not in the 'users' group: {ls_out}"
# Check that the file is in the '0644' mode
assert "-rw-r--r--" in ls_out, f"File is not in the '0644' mode: {ls_out}"
peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.hello.path}")
'';
}
);


@@ -1,6 +1,6 @@
[
{
"publickey": "age12yt078p9ewxy2sh0a36nxdpgglv8wqqftmj4dkj9rgy5fuyn4p0q5nje9m",
"publickey": "age1hd2exjq88h7538y6mvjvexx3u5gp6a03yfn5nj32h2667yyksyaqcuk5qs",
"type": "age"
}
]


@@ -1,6 +1,6 @@
[
{
"publickey": "age12w2ld4vxfyf3hdq2d8la4cu0tye4pq97egvv3me4wary7xkdnq2snh0zx2",
"publickey": "age19urkt89q45a2wk6a4yaramzufjtnw6nq2snls0v7hmf7tqf73axsfx50tk",
"type": "age"
}
]


@@ -1,15 +1,15 @@
{
"data": "ENC[AES256_GCM,data:GPpsUhSzWPtTP8EUNKsobFXjYqDldhkkIH6hBk11RsDLAGWdhVrwcISGbhsWpYhvAdPKA84DB6Zqyh9lL2bLM9//ybC1kzY20BQ=,iv:NrxMLdedT2FCkUAD00SwsAHchIsxWvqe7BQekWuJcxw=,tag:pMDXcMyHnLF2t3Qhb1KolA==,type:str]",
"data": "ENC[AES256_GCM,data:hhuFgZcPqht0h3tKxGtheS4GlrVDo4TxH0a9lxgPYj2i12QUmE04rB07A+hu4Z8WNWLYvdM5069mEOZYm3lSeTzBHQPxYZRuVj0=,iv:sA1srRFQqsMlJTAjFcb09tI/Jg2WjOVJL5NZkPwiLoU=,tag:6xXo9FZpmAJw6hCBsWzf8Q==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBzb2tWb1ExKzdmUTRzaGVj\nK3cyYTBHZTJwVjM1SzUvbHFiMnVhY05iKzFZCnJTSE1VSVdpcUFLSEJuaE1CZzJD\nWjZxYzN2cUltdThNMVRKU3FIb20vUXMKLS0tIFlHQXRIdnMybDZFUVEzWlQrc1dw\nbUxhZURXblhHd0pka0JIK1FTZEVqdUEKI/rfxQRBc+xGRelhswkJQ9GcZs6lzfgy\nuCxS5JI9npdPLQ/131F3b21+sP5YWqks41uZG+vslM1zQ+BlENNhDw==\n-----END AGE ENCRYPTED FILE-----\n"
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBGaGVHeTgrN3dJQ2VITFBM\neWVzbDhjb0pwNUhBUjdUc0p5OTVta1dvSno4ClJxeUc4Z0hiaFRkVlJ1YTA4Lyta\neWdwV005WGYvMUNRVG1qOVdicTk0NUkKLS0tIFQvaDNFS1JMSFlHRXlhc3lsZm03\nYVhDaHNsam5wN1VqdzA3WTZwM1JwV2sKZk/SiZJgjllADdfHLSWuQcU4+LttDpt/\nqqDUATEuqYaALljC/y3COT+grTM2bwGjj6fsfsfiO/EL9iwzD3+7oA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-05-04T12:44:13Z",
"mac": "ENC[AES256_GCM,data:fWxLHXBWolHVxv6Q7utcy6OVLV13ziswrIYyNKiwy1vsU8i7xvvuGO1HlnE+q43D2WuHR53liKq1UHuf1JMrWzTwZ0PYe+CVugtoEtbR2qu3rK/jAkOyMyhmmHzmf6Rp4ZMCzKgZeC/X2bDKY/z0firHAvjWydEyogutHpvtznM=,iv:OQI3FfkLneqbdztAXVQB3UkHwDPK+0hWu5hZ9m8Oczg=,tag:em6GfS2QHsXs391QKPxfmA==,type:str]",
"lastmodified": "2025-04-09T15:10:16Z",
"mac": "ENC[AES256_GCM,data:xuXj4833G6nhvcRo2ekDxz8G5phltmU8h1GgGofH9WndzrqLKeRSqm/n03IHRW0f4F68XxnyAkfvokVh6vW3LRQAFkqIlXz5U4+zFNcaVaPobS5gHTgxsCoTUoalWPvHWtXd50hUVXeAt8rPfTfeveVGja8bOERk8mvwUPxb6h4=,iv:yP1usA9m8tKl6Z/UK9PaVMJlZlF5qpY4EiM4+ByVlik=,tag:8DgoIhLstp3MRki90VfEvw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
"version": "3.10.1"
}
}


@@ -1,15 +1,15 @@
{
"data": "ENC[AES256_GCM,data:W3cOkUYL5/YulW2pEISyTlMaA/t7/WBE7BoCdFlqrqgaCL7tG4IV2HgjiPWzIVMs0zvDSaghdEvAIoB4wOf470d1nSWs0/E8SDk=,iv:wXXaZIw3sPY8L/wxsu7+C5v+d3RQRuwxZRP4YLkS8K4=,tag:HeK4okj7O7XDA9JDz2KULw==,type:str]",
"data": "ENC[AES256_GCM,data:rwPhbayGf6mE1E9NCN+LuL7VfWWOfhoJW6H2tNSoyebtyTpM3GO2jWca1+N7hI0juhNkUk+rIsYQYbCa/5DZQiV0/2Jgu4US1XY=,iv:B5mcaQsDjb6BacxGB4Kk88/qLCpVOjQNRvGN+fgUiEo=,tag:Uz0A8kAF5NzFetbv9yHIjQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAxRC83b3dtSVpXcGovNnVs\nTzFka2J2MEFhYkF1ajVrdjMrNUtPWGRObjM4Cm5zSUR5OGw0T0FaL3BaWmR6L29W\nU2syMFIyMUhFRUZpWFpCT28vWko2ZU0KLS0tIFpHK3BjU1V1L0FrMGtwTGFuU3Mz\nRkV5VjI2Vndod202bUR3RWQwNXpmVzQKNk8/y7M62wTIIKqY4r3ZRk5aUCRUfine\n1LUSHMKa2bRe+hR7nS7AF4BGXp03h2UPY0FP5+U5q8XuIj1jfMX8kg==\n-----END AGE ENCRYPTED FILE-----\n"
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBWY0hKQ1dnV0tMYytDMCtj\nTDV4Zk5NeVN0bCtqaWRQV3d4M0VlcGVZMkhZCm02dHZyOGVlYzJ5Z3FlUWNXMVQ0\nb2ZrTXZQRzRNdzFDeWZCVGhlTS9rMm8KLS0tIEJkY1QwOENRYWw3cjIwd3I0bzdz\nOEtQNm1saE5wNWt2UUVnYlN4NWtGdFkKmWHU5ttZoQ3NZu/zkX5VxfC2sMpSOyod\neb7LRhFqPfo5N1XphJcCqr5QUoZOfnH0xFhZ2lxWUS3ItiRpU4VDwg==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-05-04T12:44:16Z",
"mac": "ENC[AES256_GCM,data:yTkQeFvKrN1+5FP+yInsaRWSAG+ZGG0uWF3+gVRvzJTFxab8kT2XkAMc+4D7SKgcjsmwBBb77GNoAKaKByhZ92UaCfZ2X66i7ZmYUwLM1NVVmm+xiwwjsh7PJXlZO/70anTzd1evtlZse0jEmRnV5Y0F0M6YqXmuwU+qGUJU2F8=,iv:sy6ozhXonWVruaQfa7pdEoV5GkNZR/UbbINKAPbgWeg=,tag:VMruQ1KExmlMR7TsGNgMlg==,type:str]",
"lastmodified": "2025-04-09T15:10:41Z",
"mac": "ENC[AES256_GCM,data:pab0G2GPjgs59sbiZ8XIV5SdRtq5NPU0yq18FcqiMV8noAL94fyVAY7fb+9HILQWQsEjcykgk9mA2MQ0KpK/XG8+tDQKcBH+F+2aQnw5GJevXmfi7KLTU0P224SNo7EnKlfFruB/+NZ0WBtkbbg1OzekrbplchpSI6BxWz/jASE=,iv:TCj9FCxgfMF2+PJejr67zgGnF+CFS+YeJiejnHbf7j0=,tag:s7r9SqxeqpAkncohYvIQ2Q==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
"version": "3.10.1"
}
}


@@ -1,19 +1,19 @@
{
"data": "ENC[AES256_GCM,data:T8edCvw=,iv:7/G5xt5fv38I9uFzk7WMIr9xQdz/6lFxqOC+18HBg8Q=,tag:F39Cxbgmzml+lZLsZ59Kmg==,type:str]",
"data": "ENC[AES256_GCM,data:bxM9aYMK,iv:SMNYtk9FSyZ1PIfEzayTKKdCnZWdhcyUEiTwFUNb988=,tag:qJYW4+VQyhF1tGPQPTKlOQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age12yt078p9ewxy2sh0a36nxdpgglv8wqqftmj4dkj9rgy5fuyn4p0q5nje9m",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBPNUhiYkZWK3dPMHNiRTVM\nRHNvaHFsOFp1c0UxQitwVG0zY01MNDZRV1E4CjEybENoTVIzN29vQ3FtUTRSYmFU\nNXIzQllVSllXRGN2M1B6WXJLdHZSajgKLS0tIDllZ0ZmZUcxMHhDQUpUOEdWbmkv\neUQweHArYTdFSmNteVpuQ3BKdnh0Y0UKs8Hm3D+rXRRfpUVSZM3zYjs6b9z8g10D\nGTkvreUMim4CS22pjdQ3eNA9TGeDXfWXE7XzwXLCb+wVcf7KwbDmvg==\n-----END AGE ENCRYPTED FILE-----\n"
"recipient": "age1hd2exjq88h7538y6mvjvexx3u5gp6a03yfn5nj32h2667yyksyaqcuk5qs",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAvZDZYYXdpcXVqRFRnQ2Jx\nTFhFWEJTR290cHZhTXZadFFvcHM4MHVIN3lFCmJhOEZrL3g4TFBZVllxdDFZakJn\nR3NxdXo0eE8vTDh3QlhWOFpVZ0lNUHcKLS0tIEE4dkpCalNzaXJ0Qks3VHJSUzZF\nb2N3NGdjNHJnSUN6bW8welZ1VDdJakEKGKZ7nn1p11IyJB6DMxu2HJMvZ+0+5WpE\nPLWh2NlGJO3XrrL4Fw7xetwbqE+QUZPNl/JbEbu4KLIUGLjqk9JDhQ==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBKSDhpT3cvck9PenZYVEZH\ndFQreVRBdG93L1dBUGlvYjFWcDlHWUJsZUVBCm9DMTJ4UytiYzlEVHNWdUcwS1ds\nT0dhbzAzNDdmbDBCU0dvL2xNeHpXcGsKLS0tIFArbmpsbzU3WnpJdUt1MGN0L1d0\nV1JkTDJYWUxsbmhTQVNOeVRaSUhTODQKk9Vph2eldS5nwuvVX0SCsxEm4B+sO76Z\ndIjJ3OQxzoZmXMaOOuKHC5U0Y75Qn7eXC43w5KHsl2CMIUYsBGJOZw==\n-----END AGE ENCRYPTED FILE-----\n"
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBHckJCQVFyb21aT1R0d2Rr\nMWxNMHVqcGxabHBmS0RibW9sN0gyZDI1b1dFCnRWUk5LSWdxV3c4RWVZdUtEN1Fv\nRk4xVmwwT2xrdWVERkJXUVVlVXJjTVUKLS0tIC9ERG9KMGxTNEsrbzFHUGRiVUlm\nRi9qakxoc1FOVVV1TkUrckwxRUVnajQKE8ms/np2NMswden3xkjdC8cXccASLOoN\nu+EaEk69UvBvnOg9VBjyPAraIKgNrTc4WWwz+DOBj1pCwVbu9XxUlA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-05-04T12:44:14Z",
"mac": "ENC[AES256_GCM,data:6fKrS1eLLUWlHkQpxLFXBRk6f2wa5ADLMViVvYXXGU24ayl9UuNSKrCRHp9cbzhqhti3HdwyNt6TM+2X6qhiiAQanKEB2PF7JRYX74NfNKil9BEDjt5AqqtpSgVv5l7Ku/uSHaPkd2sDmzHsy5Q4bSGxJQokStk1kidrwle+mbc=,iv:I/Aad82L/TCxStM8d8IZICUrwdjRbGx2fuGWqexr21o=,tag:BfgRbGUxhPZzK2fLik1kxA==,type:str]",
"lastmodified": "2025-04-09T15:10:30Z",
"mac": "ENC[AES256_GCM,data:cIwWctUbAFI8TRMxYWy5xqlKDVLMqBIxVv4LInnLqi3AauL0rJ3Z7AxK/wb2dCQM07E1N7YaORNqgUpFC1xo0hObAA8mrPaToPotKDkjua0zuyTUNS1COoraYjZpI/LKwmik/qtk399LMhiC7aHs+IliT9Dd41B8LSMBXwdMldY=,iv:sZ+//BrYH5Ay2JJAGs7K+WfO2ASK82syDlilQjGmgFs=,tag:nY+Af9eQRLwkiHZe85dQ9A==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
"version": "3.10.1"
}
}


@@ -1,19 +1,19 @@
{
"data": "ENC[AES256_GCM,data:vp0yW0Gt,iv:FO2cy+UpEl5aRay/LUGu//c82QiVxuKuGSaVh0rGJvc=,tag:vf2RAOPpcRW0HwxHoGy17A==,type:str]",
"data": "ENC[AES256_GCM,data:ImlGIKxE,iv:UUWxjLNRKJCD2WHNpw8lfvCc8rnXPCqc2pni1ODckjE=,tag:HFCqiv31E9bShIIaAEjF0A==,type:str]",
"sops": {
"age": [
{
"recipient": "age12w2ld4vxfyf3hdq2d8la4cu0tye4pq97egvv3me4wary7xkdnq2snh0zx2",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBjaFVNMEd2YUxpSm5XVVRi\nY2ZUc3NTOStJUFNMWWVPQTgxZ2tCK1QrMW1ZCjYwMlA4dkIzSlc0TGtvZjcyK3Bi\nM3pob2JOOFUyeVJ6M2JpaTRCZlc1R0kKLS0tIDJMb1dFcVRWckhwYWNCQng0RlFO\nTkw3OGt4dkFIZVY5aVEzZE5mMzJSM0EKUv8bUqg48L2FfYVUVlpXvyZvPye699of\nG6PcjLh1ZMbNCfnsCzr+P8Vdk/F4J/ifxL66lRGfu2xOLxwciwQ+5Q==\n-----END AGE ENCRYPTED FILE-----\n"
"recipient": "age19urkt89q45a2wk6a4yaramzufjtnw6nq2snls0v7hmf7tqf73axsfx50tk",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBpTEROZjh6NjBhSlJSc1Av\nSHhjdkhwVUd3VzBZemhQb3dhMlJXalBmZlFjCkZPYkhZZGVOVTNjUWdFU0s4cWFn\nL2NXbkRCdUlMdElnK2lGbG5iV0w1cHMKLS0tIFREcmxDdHlUNVBFVGRVZSt0c0E5\nbnpHaW1Vb3R3ZFFnZVMxY3djSjJmOU0KIwqCSQf5S9oA59BXu7yC/V6yqvCh88pa\nYgmNyBjulytPh1aAfOuNWIGdIxBpcEf+gFjz3EiJY9Kft3fTmhp2bw==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBnZ2dDbVhoQngxM3lTSmZF\nUTAwS1lCTGhEMU1GVXpFUzlIUFdqZy9LajF3Ck9mdVpBRjlyVUNhZXZIUFZjUzF1\nNlhFN28vNmwzcUVkNmlzUnpkWjJuZE0KLS0tIHpXVHVlNk9vU1ZPTGRrYStWbmRO\nbDM4U2o1SlEwYWtqOXBqd3BFUTAvMHcKkI8UVd0v+x+ELZ5CoGq9DzlA6DnVNU2r\nrV9wLfbFd7RHxS0/TYZh5tmU42nO3iMYA9FqERQXCtZgXS9KvfqHwQ==\n-----END AGE ENCRYPTED FILE-----\n"
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSArN3R4TThibjdYbE9TMDE1\naUhuNDlscExjaktIR2VmTk1OMWtVM0NpTUJZClJUNEcwVDlibExWQk84TTNEWFhp\nMjYyZStHc1N0ZTh1S3VTVk45WGxlWWMKLS0tIHFab25LY1R1d1l6NE5XbHJvQ3lj\nNGsxUldFVHQ5RVJERDlGbi9NY29hNWsKENBTcAS/R/dTGRYdaWv5Mc/YG4bkah5w\nb421ZMQF+r4CYnzUqnwivTG8TMRMqJLavfkutE6ZUfJbbLufrTk5Lw==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-05-04T12:44:18Z",
"mac": "ENC[AES256_GCM,data:1ZZ+ZI1JsHmxTov1bRijfol3kTkyheg2o3ivLsMHRhCmScsUry97hQJchF78+y2Izt7avaQEHYn6pVbYt/0rLrSYD7Ru7ITVxXoYHOiN5Qb98masUzpibZjrdyg5nO+LW5/Hmmwsc3yn/+o3IH1AUYpsxlJRdnHHCmoSOFaiFFM=,iv:OQlgmpOTw4ljujNzqwQ5/0Mz8pQpCSUtqRvj3FJAxDs=,tag:foZvdeW7gK9ZVKkWqnlxGA==,type:str]",
"lastmodified": "2025-04-09T15:11:04Z",
"mac": "ENC[AES256_GCM,data:JdJzocQZWVprOmZ4Ni04k1tpD1TpFcK5neKy3+0/c3+uPBwjwaMayISKRaa/ILUXlalg60oTqxB4fUFoYVm8KGQVhDwPhO/T1hyYVQqidonrcYfJfCYg00mVSREV/AWqXb7RTnaEBfrdnRJvaAQF9g2qDXGVgzp3eACdlItclv4=,iv:nOw1jQjIWHWwU3SiKpuQgMKXyu8MZYI+zI9UYYd9fCI=,tag:ewUkemIPm/5PkmuUD0EcAQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
"version": "3.10.1"
}
}


@@ -1 +0,0 @@
../../../../../../sops/machines/peer1


@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:prFl0EJy8bM=,iv:zITWxf+6Ebk0iB5vhhd7SBQa1HFrIJXm8xpSM+D9I0M=,tag:NZCRMCs1SzNKLBu/KUDKMQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age12w2ld4vxfyf3hdq2d8la4cu0tye4pq97egvv3me4wary7xkdnq2snh0zx2",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB0S0RZRWxaZVZvTUhjdWVL\naU9WZmtEcm1qa2JsRmdvdmZmNENMaWFEVUFRCmdoVnRXSGlpRlFjNmVVbDJ5VnFT\nMnVJUlVnM3lxNmZCRTdoRVJ4NW1oYWcKLS0tIFFNbXBFUk1RWnlUTW1SeG1vYzlM\nVVpEclFVOE9PWWQxVkZ0eEgwWndoRWcKDAOHe+FIxqGsc6LhxMy164qjwG6t2Ei2\nP0FSs+bcKMDpudxeuxCjnDm/VoLxOWeuqkB+9K2vSm2W/c/fHTSbrA==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB2VU5jOEpwYUtDVEVFcVpU\nQkExTVZ3ejZHcGo5TG8zdUQwNktoV09WdUZvCmQ0dE1TOWRFbTlxdVd4WWRxd3VF\nQUNTTkNNT3NKYjQ5dEJDY0xVZ3pZVUUKLS0tIDFjajRZNFJZUTdNeS8yN05FMFZU\ncEtjRjhRbGE0MnRLdk10NkFLMkxqencKGzJ66dHluIghH04RV/FccfEQP07yqnfb\n25Hi0XIVJfXBwje4UEyszrWTxPPwVXdQDQmoNKf76Qy2jYqJ56uksw==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-05-04T12:44:20Z",
"mac": "ENC[AES256_GCM,data:FIkilsni5kOdNlVwDuLsQ/zExypHRWdqIBQDNWMLTwe8OrsNPkX+KYutUvt9GaSoGv4iDULaMRoizO/OZUNfc2d8XYSdj0cxOG1Joov4GPUcC/UGyNuQneAejZBKolvlnidKZArofnuK9g+lOTANEUtEXUTnx8L+VahqPZayQas=,iv:NAo6sT3L8OOB3wv1pjr3RY2FwXgVmZ4N0F4BEX4YPUY=,tag:zHwmXygyvkdpASZCodQT9Q==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}


@@ -1 +0,0 @@
../../../../../../sops/users/admin


@@ -0,0 +1 @@
This is a dummy script


@@ -21,7 +21,6 @@ in
pkgs,
lib,
self',
system,
...
}:
{
@@ -56,17 +55,11 @@ in
syncthing = import ./syncthing nixosTestArgs;
};
packagesToBuild = lib.removeAttrs self'.packages [
# exclude the check that checks that nothing depends on the repo root
# We might want to include this later once everything is fixed
"dont-depend-on-repo-root"
];
flakeOutputs =
lib.mapAttrs' (
name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel
) (lib.filterAttrs (n: _: !lib.hasPrefix "test-" n) self.nixosConfigurations)
// lib.mapAttrs' (n: lib.nameValuePair "package-${n}") packagesToBuild
// lib.mapAttrs' (n: lib.nameValuePair "package-${n}") self'.packages
// lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells
// lib.mapAttrs' (name: config: lib.nameValuePair "home-manager-${name}" config.activation-script) (
self'.legacyPackages.homeConfigurations or { }
@@ -84,10 +77,7 @@ in
schema =
(self.clanLib.inventory.evalClanService {
modules = [ m ];
prefix = [
"checks"
system
];
key = "checks";
}).config.result.api.schema;
in
schema


@@ -8,6 +8,7 @@ let
{ modulesPath, pkgs, ... }:
let
dependencies = [
self
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.clan.deployment.file


@@ -44,11 +44,7 @@
{
environment.etc."install-closure".source = "${closureInfo}/store-paths";
system.extraDependencies = dependencies;
virtualisation.memorySize = 2048;
virtualisation.useNixStoreImage = true;
virtualisation.writableStore = true;
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli-full ];
};
};


@@ -8,8 +8,5 @@
(modulesPath + "/profiles/minimal.nix")
];
virtualisation.useNixStoreImage = true;
virtualisation.writableStore = true;
clan.core.enableRecommendedDefaults = false;
}


@@ -27,7 +27,6 @@
clan-core.checks.${system}
[
"dont-depend-on-repo-root"
"package-dont-depend-on-repo-root"
"package-clan-core-flake"
];
checksOutPaths = map (x: "''${x}") (builtins.attrValues checks);
@@ -48,7 +47,7 @@
'';
in
lib.optionalAttrs (system == "x86_64-linux") {
packages.dont-depend-on-repo-root =
checks.dont-depend-on-repo-root =
pkgs.runCommand
# append repo hash to this tests name to ensure it gets invalidated on each chain
# This is needed because this test is an FOD (due to networking) and would get cached indefinitely.


@@ -210,18 +210,14 @@ in
data_dir = Path('data')
data_dir.mkdir(mode=0o770, exist_ok=True)
# Create a temporary config file
# with appropriate permissions
tmp_config_path = data_dir / '.config.json'
tmp_config_path.touch(mode=0o660, exist_ok=False)
# Write the config with secrets back
with open(tmp_config_path, 'w') as f:
config_path = data_dir / 'config.json'
with open(config_path, 'w') as f:
f.write(json.dumps(config, indent=4))
# Move config into place
config_path = data_dir / 'config.json'
tmp_config_path.rename(config_path)
# Set file permissions to read and write
# only by the user and group
config_path.chmod(0o660)
# Set file permissions to read
# and write only by the user and group


@@ -7,12 +7,8 @@ features = [ "inventory" ]
After the system was installed/deployed the following command can be used to display the root-password:
```bash
clan vars get [machine_name] root-password/root-password
clan secrets get {machine_name}-password
```
See also: [Vars](../../manual/vars-backend.md)
To regenerate the password run:
```
clan vars generate --regenerate [machine_name] --generator root-password
```
See also: [Facts / Secrets](../../getting-started/secrets.md)


@@ -13,12 +13,9 @@ If setting the option prompt to true, the user will be prompted to type in their
After the system was installed/deployed the following command can be used to display the user-password:
```bash
clan vars get [machine_name] root-password/root-password
clan secrets get {machine_name}-user-password
```
See also: [Vars](../../manual/vars-backend.md)
See also: [Facts / Secrets](../../getting-started/secrets.md)
To regenerate the password run:
```
clan vars generate --regenerate [machine_name] --generator user-password
```
To regenerate the password, delete the password files in the clan directory and redeploy the machine.


@@ -4,15 +4,7 @@
_class = "clan.service";
manifest.name = "clan-core/hello-word";
roles.peer = {
interface =
{ lib, ... }:
{
options.foo = lib.mkOption {
type = lib.types.str;
};
};
};
roles.peer = { };
perMachine =
{ machine, ... }:


@@ -50,7 +50,6 @@ in
hello-service = import ./tests/vm/default.nix {
inherit module;
inherit self inputs pkgs;
# clanLib is exposed from inputs.clan-core
clanLib = self.clanLib;
};
};


@@ -19,7 +19,7 @@ We might not be sure whether all of those will exist but the architecture should
## Decision
This leads to the conclusion that we should do `library` centric development.
With the current `clan` python code being a library that can be imported to create various tools ontop of it.
With the current `clan` python code beeing a library that can be imported to create various tools ontop of it.
All **CLI** or **UI** related parts should be moved out of the main library.
*Note: The next person who wants implement any new frontend should do this first. Currently it looks like the TUI is the next one.*


@@ -1,47 +0,0 @@
# ADR Numbering process
## Status
Proposed after some conversation between @lassulus, @Mic92, & @lopter.
## Context
It can be useful to refer to ADRs by their numbers, rather than their full title. To that end, short and sequential numbers are useful.
The issue is that an ADR number is effectively assigned when the ADR is merged, before being merged its number is provisional. Because multiple ADRs can be written at the same time, you end-up with multiple provisional ADRs with the same number, for example this is the third ADR-3:
1. ADR-3-clan-compat: see [#3212];
2. ADR-3-fetching-nix-from-python: see [#3452];
3. ADR-3-numbering-process: this ADR.
This situation makes it impossible to refer to an ADR by its number, and why I (@lopter) went with the arbitrary number 7 in [#3196].
We could solve this problem by using the PR number as the ADR number (@lassulus). The issue is that PR numbers are getting big in clan-core which does not make them easy to remember, or use in conversation and code (@lopter).
Another approach would be to move the ADRs in a different repository, this would reset the counter back to 1, and make it straightforward to keep ADR and PR numbers in sync (@lopter). The issue then is that ADR are not in context with their changes which makes them more difficult to review (@Mic92).
## Decision
A third approach would be to:
1. Commit ADRs before they are approved, so that the next ADR number gets assigned;
1. Open a PR for the proposed ADR;
1. Update the ADR file committed in step 1, so that its markdown contents point to the PR that tracks it.
## Consequences
### ADR have unique and memorable numbers trough their entire life cycle
This makes it easier to refer to them in conversation or in code.
### You need to have commit access to get an ADR number assigned
This makes it more difficult for someone external to the project to contribute an ADR.
### Creating a new ADR requires multiple commits
Maybe a script or CI flow could help with that if it becomes painful.
[#3212]: https://git.clan.lol/clan/clan-core/pulls/3212/
[#3452]: https://git.clan.lol/clan/clan-core/pulls/3452/
[#3196]: https://git.clan.lol/clan/clan-core/pulls/3196/


@@ -26,7 +26,8 @@ writeShellScriptBin "deploy-docs" ''
trap "rm -rf $tmpdir" EXIT
if [ -n "''${SSH_HOMEPAGE_KEY-}" ]; then
( umask 0177 && echo "$SSH_HOMEPAGE_KEY" > "$tmpdir/ssh_key" )
echo "$SSH_HOMEPAGE_KEY" > "$tmpdir/ssh_key"
chmod 600 "$tmpdir/ssh_key"
sshExtraArgs="-i $tmpdir/ssh_key"
else
sshExtraArgs=


@@ -1,6 +1,6 @@
# :material-api: Overview
This section of the site provides an overview of available options and commands within the Clan Framework.
This section of the site provides an **automatically extracted** overview of the available options and commands within the Clan Framework.
---

flake.lock (generated): 56 lines changed

@@ -16,15 +16,17 @@
]
},
"locked": {
"lastModified": 1746459034,
"narHash": "sha256-VHHc8EFPu2uk8mf4ItTHwxgrQxFixNHkclPQMXZfYig=",
"rev": "d63db1621463918966e8e0ec2eb7ddbe8aae332e",
"type": "tarball",
"url": "https://git.clan.lol/api/v1/repos/clan/data-mesher/archive/d63db1621463918966e8e0ec2eb7ddbe8aae332e.tar.gz"
"lastModified": 1745889637,
"narHash": "sha256-+BW9rppchFYIiJldD+fZA3MS2OtPNrb8l27SC3GyoSk=",
"ref": "refs/heads/main",
"rev": "11b5673d9c7290a6b96c2b6c6c5be600304f310f",
"revCount": 415,
"type": "git",
"url": "https://git.clan.lol/clan/data-mesher"
},
"original": {
"type": "tarball",
"url": "https://git.clan.lol/clan/data-mesher/archive/main.tar.gz"
"type": "git",
"url": "https://git.clan.lol/clan/data-mesher"
}
},
"disko": {
@@ -34,11 +36,11 @@
]
},
"locked": {
"lastModified": 1746411114,
"narHash": "sha256-mLlkVX1kKbAa/Ns5u26wDYw4YW4ziMFM21fhtRmfirU=",
"lastModified": 1745812220,
"narHash": "sha256-hotBG0EJ9VmAHJYF0yhWuTVZpENHvwcJ2SxvIPrXm+g=",
"owner": "nix-community",
"repo": "disko",
"rev": "b5d1320ebc2f34dbea4655f95167f55e2130cdb3",
"rev": "d0c543d740fad42fe2c035b43c9d41127e073c78",
"type": "github"
},
"original": {
@@ -74,11 +76,11 @@
]
},
"locked": {
"lastModified": 1746254942,
"narHash": "sha256-Y062AuRx6l+TJNX8wxZcT59SSLsqD9EedAY0mqgTtQE=",
"lastModified": 1745816321,
"narHash": "sha256-Gyh/fkCDqVNGM0BWvk+4UAS17w2UI6iwnbQQCmc1TDI=",
"owner": "nix-darwin",
"repo": "nix-darwin",
"rev": "760a11c87009155afa0140d55c40e7c336d62d7a",
"rev": "4515dacafb0ccd42e5395aacc49fd58a43027e01",
"type": "github"
},
"original": {
@@ -91,13 +93,15 @@
"locked": {
"lastModified": 1745005516,
"narHash": "sha256-IVaoOGDIvAa/8I0sdiiZuKptDldrkDWUNf/+ezIRhyc=",
"ref": "refs/heads/main",
"rev": "69d8bf596194c5c35a4e90dd02c52aa530caddf8",
"type": "tarball",
"url": "https://git.clan.lol/api/v1/repos/clan/nix-select/archive/69d8bf596194c5c35a4e90dd02c52aa530caddf8.tar.gz"
"revCount": 40,
"type": "git",
"url": "https://git.clan.lol/clan/nix-select"
},
"original": {
"type": "tarball",
"url": "https://git.clan.lol/clan/nix-select/archive/main.tar.gz"
"type": "git",
"url": "https://git.clan.lol/clan/nix-select"
}
},
"nixos-facter-modules": {
@@ -118,10 +122,10 @@
"nixpkgs": {
"locked": {
"lastModified": 315532800,
"narHash": "sha256-EbVl0wIdDYZWrxpQoxPlXfliaR4KHA9xP5dVjG1CZxI=",
"rev": "ed30f8aba41605e3ab46421e3dcb4510ec560ff8",
"narHash": "sha256-+Elxpf3FLkgKfh81xrEjVolpJEn8+fKWqEJ3ZXbAbS4=",
"rev": "29335f23bea5e34228349ea739f31ee79e267b88",
"type": "tarball",
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre794180.ed30f8aba416/nixexprs.tar.xz"
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre791229.29335f23bea5/nixexprs.tar.xz"
},
"original": {
"type": "tarball",
@@ -149,11 +153,11 @@
]
},
"locked": {
"lastModified": 1746485181,
"narHash": "sha256-PxrrSFLaC7YuItShxmYbMgSuFFuwxBB+qsl9BZUnRvg=",
"lastModified": 1745310711,
"narHash": "sha256-ePyTpKEJTgX0gvgNQWd7tQYQ3glIkbqcW778RpHlqgA=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "e93ee1d900ad264d65e9701a5c6f895683433386",
"rev": "5e3e92b16d6fdf9923425a8d4df7496b2434f39c",
"type": "github"
},
"original": {
@@ -184,11 +188,11 @@
]
},
"locked": {
"lastModified": 1746216483,
"narHash": "sha256-4h3s1L/kKqt3gMDcVfN8/4v2jqHrgLIe4qok4ApH5x4=",
"lastModified": 1745929750,
"narHash": "sha256-k5ELLpTwRP/OElcLpNaFWLNf8GRDq4/eHBmFy06gGko=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "29ec5026372e0dec56f890e50dbe4f45930320fd",
"rev": "82bf32e541b30080d94e46af13d46da0708609ea",
"type": "github"
},
"original": {


@@ -23,10 +23,10 @@
treefmt-nix.url = "github:numtide/treefmt-nix";
treefmt-nix.inputs.nixpkgs.follows = "nixpkgs";
nix-select.url = "https://git.clan.lol/clan/nix-select/archive/main.tar.gz";
nix-select.url = "git+https://git.clan.lol/clan/nix-select";
data-mesher = {
url = "https://git.clan.lol/clan/data-mesher/archive/main.tar.gz";
url = "git+https://git.clan.lol/clan/data-mesher";
inputs = {
flake-parts.follows = "flake-parts";
nixpkgs.follows = "nixpkgs";
@@ -40,6 +40,7 @@
inputs@{
flake-parts,
nixpkgs,
self,
systems,
...
}:


@@ -10,11 +10,6 @@ let
in
{
options = {
_prefix = lib.mkOption {
type = types.listOf types.str;
internal = true;
default = [ ];
};
self = lib.mkOption {
type = types.raw;
default = self;
@@ -165,6 +160,7 @@ in
# Those options are interfaced by the CLI
# We don't specify the type here, for better performance.
inventory = lib.mkOption { type = lib.types.raw; };
inventoryValuesPrios = lib.mkOption { type = lib.types.raw; };
# all exported clan templates from this clan
templates = lib.mkOption { type = lib.types.raw; };
# all exported clan modules from this clan
@@ -173,7 +169,6 @@ in
inventoryFile = lib.mkOption { type = lib.types.raw; };
# The machine 'imports' generated by the inventory per machine
inventoryClass = lib.mkOption { type = lib.types.raw; };
evalServiceSchema = lib.mkOption { };
# clan-core's modules
clanModules = lib.mkOption { type = lib.types.raw; };
source = lib.mkOption { type = lib.types.raw; };


@@ -44,8 +44,6 @@ let
buildInventory {
inherit inventory directory;
flakeInputs = config.self.inputs;
prefix = config._prefix ++ [ "inventoryClass" ];
localModuleSet = config.self.clan.modules;
}
);
@@ -206,21 +204,20 @@ in
inherit inventoryClass;
# Endpoint that can be called to get a service schema
evalServiceSchema = clan-core.clanLib.evalServiceSchema config.self;
# TODO: unify this interface
# We should have only clan.modules. (consistent with clan.templates)
inherit (clan-core) clanModules clanLib;
modules = config.modules;
inherit inventoryFile;
inventoryValuesPrios =
# Temporary workaround
builtins.removeAttrs (clan-core.clanLib.introspection.getPrios { options = inventory.options; })
# tags are freeformType which is not supported yet.
[ "tags" ];
templates = config.templates;
inventory = config.inventory;
# TODO: Remove this in about a month
# It is only here for backwards compatibility for people with older CLI versions
inventoryValuesPrios = inventoryClass.introspection;
meta = config.inventory.meta;
secrets = config.secrets;


@@ -15,27 +15,10 @@ lib.fix (clanLib: {
*/
callLib = file: args: import file ({ inherit lib clanLib; } // args);
# ------------------------------------
buildClan = clanLib.buildClanModule.buildClanWith {
clan-core = self;
inherit nixpkgs nix-darwin;
};
evalServiceSchema =
self:
{
moduleSpec,
flakeInputs ? self.inputs,
localModuleSet ? self.clan.modules,
}:
let
resolvedModule = clanLib.inventory.resolveModule {
inherit moduleSpec flakeInputs localModuleSet;
};
in
(clanLib.inventory.evalClanService {
modules = [ resolvedModule ];
prefix = [ ];
}).config.result.api.schema;
# ------------------------------------
# ClanLib functions
evalClan = clanLib.callLib ./inventory/eval-clan-modules { };


@@ -12,37 +12,27 @@ let
inventory,
directory,
flakeInputs,
prefix ? [ ],
localModuleSet ? { },
}:
(lib.evalModules {
# TODO: remove clanLib from specialArgs
specialArgs = {
inherit clanLib;
};
modules = [
./builder
(lib.modules.importApply ./service-list-from-inputs.nix {
inherit flakeInputs clanLib localModuleSet;
})
{ inherit directory inventory; }
(
# config.distributedServices.allMachines.${name} or [ ];
{ config, ... }:
{
distributedServices = clanLib.inventory.mapInstances {
inherit (config) inventory;
inherit flakeInputs;
prefix = prefix ++ [ "distributedServices" ];
};
machines = lib.mapAttrs (_machineName: v: {
machineImports = v;
}) config.distributedServices.allMachines;
}
)
(lib.modules.importApply ./inventory-introspection.nix { inherit clanLib; })
];
}).config;
in


@@ -1,17 +0,0 @@
{ clanLib }:
{
config,
options,
lib,
...
}:
{
options.introspection = lib.mkOption {
readOnly = true;
# TODO: use options.inventory instead of the evaluate config attribute
default =
builtins.removeAttrs (clanLib.introspection.getPrios { options = config.inventory.options; })
# tags are freeformType which is not supported yet.
[ "tags" ];
};
}


@@ -1,43 +0,0 @@
{
flakeInputs,
clanLib,
localModuleSet,
}:
{ lib, config, ... }:
let
inspectModule =
inputName: moduleName: module:
let
eval = clanLib.inventory.evalClanService {
modules = [ module ];
prefix = [
inputName
"clan"
"modules"
moduleName
];
};
in
{
manifest = eval.config.manifest;
roles = lib.mapAttrs (_n: _v: { }) eval.config.roles;
};
in
{
options.modulesPerSource = lib.mkOption {
# { sourceName :: { moduleName :: {} }}
default =
let
inputsWithModules = lib.filterAttrs (_inputName: v: v ? clan.modules) flakeInputs;
in
lib.mapAttrs (
inputName: v: lib.mapAttrs (inspectModule inputName) v.clan.modules
) inputsWithModules;
};
options.localModules = lib.mkOption {
default = lib.mapAttrs (inspectModule "self") localModuleSet;
};
}


@@ -3,7 +3,7 @@ let
services = clanLib.callLib ./distributed-service/inventory-adapter.nix { };
in
{
inherit (services) evalClanService mapInstances resolveModule;
inherit (services) evalClanService mapInstances;
inherit (import ./build-inventory { inherit lib clanLib; }) buildInventory;
interface = ./build-inventory/interface.nix;
# Returns the list of machine names


@@ -1,7 +1,7 @@
# This module enables itself if
# manifest.features.API = true
# It converts the roles.interface to a json-schema
{ clanLib, prefix }:
{ clanLib, attrName }:
let
converter = clanLib.jsonschema {
includeDefaults = true;
@@ -45,7 +45,7 @@ in
To see the evaluation problem run
nix eval .#${lib.concatStringsSep "." prefix}.config.result.api.schema.${roleName}
nix eval .#clanInternals.inventoryClass.distributedServices.importedModulesEvaluated.${attrName}.config.result.api.schema.${roleName}
'';
assertion = (builtins.tryEval (lib.deepSeq config.result.api.schema.${roleName} true)).success;
};


@@ -16,72 +16,27 @@
}:
let
evalClanService =
{ modules, prefix }:
{ modules, key }:
(lib.evalModules {
class = "clan.service";
modules = [
./service-module.nix
# feature modules
(lib.modules.importApply ./api-feature.nix {
inherit clanLib prefix;
inherit clanLib;
attrName = key;
})
] ++ modules;
});
resolveModule =
{
moduleSpec,
flakeInputs,
localModuleSet,
}:
let
# TODO:
resolvedModuleSet =
# If the module.name is self then take the modules defined in the flake
# Otherwise its an external input which provides the modules via 'clan.modules' attribute
if moduleSpec.input == null then
localModuleSet
else
let
input =
flakeInputs.${moduleSpec.input} or (throw ''
Flake doesn't provide input with name '${moduleSpec.input}'
Choose one of the following inputs:
- ${
builtins.concatStringsSep "\n- " (
lib.attrNames (lib.filterAttrs (_name: input: input ? clan) flakeInputs)
)
}
To import a local module from 'inventory.modules' remove the 'input' attribute from the module definition
Remove the following line from the module definition:
...
- module.input = "${moduleSpec.input}"
'');
clanAttrs =
input.clan
or (throw "It seems the flake input ${moduleSpec.input} doesn't export any clan resources");
in
clanAttrs.modules;
resolvedModule =
resolvedModuleSet.${moduleSpec.name}
or (throw "flake doesn't provide clan-module with name ${moduleSpec.name}");
in
resolvedModule;
in
{
inherit evalClanService resolveModule;
inherit evalClanService;
mapInstances =
{
# This is used to resolve the module imports from 'flake.inputs'
flakeInputs,
# The clan inventory
inventory,
prefix ? [ ],
}:
let
# machineHasTag = machineName: tagName: lib.elem tagName inventory.machines.${machineName}.tags;
@@ -90,11 +45,42 @@ in
importedModuleWithInstances = lib.mapAttrs (
instanceName: instance:
let
resolvedModule = resolveModule {
moduleSpec = instance.module;
localModuleSet = inventory.modules;
inherit flakeInputs;
};
# TODO:
resolvedModuleSet =
# If the module.name is self then take the modules defined in the flake
# Otherwise its an external input which provides the modules via 'clan.modules' attribute
if instance.module.input == null then
inventory.modules
else
let
input =
flakeInputs.${instance.module.input} or (throw ''
Flake doesn't provide input with name '${instance.module.input}'
Choose one of the following inputs:
- ${
builtins.concatStringsSep "\n- " (
lib.attrNames (lib.filterAttrs (_name: input: input ? clan) flakeInputs)
)
}
To import a local module from 'inventory.modules' remove the 'input' attribute from the module definition
Remove the following line from the module definition:
...
- module.input = "${instance.module.input}"
'');
clanAttrs =
input.clan
or (throw "It seems the flake input ${instance.module.input} doesn't export any clan resources");
in
clanAttrs.modules;
resolvedModule =
resolvedModuleSet.${instance.module.name}
or (throw "flake doesn't provide clan-module with name ${instance.module.name}");
# Every instance includes machines via roles
# :: { client :: ... }
@@ -152,7 +138,7 @@ in
importedModulesEvaluated = lib.mapAttrs (
module_ident: instances:
evalClanService {
prefix = prefix ++ [ module_ident ];
key = module_ident;
modules =
[
# Import the resolved module.


@@ -255,9 +255,7 @@ in
{
options.API = mkOption {
type = types.bool;
# This is read only, because we don't support turning it off yet
readOnly = true;
default = true;
default = false;
description = ''
Enables automatic API schema conversion for the interface of this module.
'';


@@ -92,7 +92,7 @@ in
lib.lazyDerivation {
# lazyDerivation improves performance when only passthru items and/or meta are used.
derivation = hostPkgs.stdenv.mkDerivation {
name = "container-test-run-${config.name}";
name = "vm-test-run-${config.name}";
requiredSystemFeatures = [ "uid-range" ];


@@ -187,22 +187,6 @@ class Machine:
if line_pattern.match(line)
)
def nsenter_command(self, command: str) -> list[str]:
return [
"nsenter",
"--target",
str(self.container_pid),
"--mount",
"--uts",
"--ipc",
"--net",
"--pid",
"--cgroup",
"/bin/sh",
"-c",
command,
]
def execute(
self,
command: str,
@@ -247,7 +231,20 @@ class Machine:
command = f"set -eo pipefail; source /etc/profile; set -u; {command}"
proc = subprocess.run(
self.nsenter_command(command),
[
"nsenter",
"--target",
str(self.container_pid),
"--mount",
"--uts",
"--ipc",
"--net",
"--pid",
"--cgroup",
"/bin/sh",
"-c",
command,
],
timeout=timeout,
check=False,
stdout=subprocess.PIPE,
@@ -468,9 +465,6 @@ class Driver:
print(f"Starting {machine.name}")
machine.start()
for machine in self.machines:
print(" ".join(machine.nsenter_command("bash")))
def test_symbols(self) -> dict[str, Any]:
general_symbols = {
"start_all": self.start_all,


@@ -22,9 +22,6 @@ in
pkgs,
self,
useContainers ? true,
# Displayed for better error messages, otherwise the placeholder
system ? "<system>",
attrName ? "<check_name>",
...
}:
let
@@ -38,7 +35,7 @@ in
{
imports = [
nixosTest
] ++ lib.optionals useContainers [ ./container-test-driver/driver-module.nix ];
] ++ lib.optionals (useContainers) [ ./container-test-driver/driver-module.nix ];
options = {
clanSettings = mkOption {
default = { };
@@ -63,15 +60,6 @@ in
};
modules = [
clanLib.buildClanModule.flakePartsModule
{
_prefix = [
"checks"
system
attrName
"config"
"clan"
];
}
];
};
};


@@ -39,35 +39,9 @@ in
type = submodule { imports = [ ./interface.nix ]; };
};
config = {
# check all that all non-secret files have no owner/group/mode set
warnings = lib.foldl' (
warnings: generator:
warnings
++ lib.foldl' (
warnings: file:
warnings
++
lib.optional
(
!file.secret
&& (
file.owner != "root"
|| file.group != (if _class == "darwin" then "wheel" else "root")
|| file.mode != "0400"
)
)
''
The config.clan.core.vars.generators.${generator.name}.files.${file.name} is not secret, but has non-default owner/group/mode set.
This doesn't work because the file will be added to the nix store
''
) [ ] (lib.attrValues generator.files)
) [ ] (lib.attrValues config.clan.core.vars.generators);
system.clan.deployment.data = {
vars = config.clan.core.vars._serialized;
inherit (config.clan.core.networking) targetHost buildHost;
inherit (config.clan.core.deployment) requireExplicitUpdate;
};
config.system.clan.deployment.data = {
vars = config.clan.core.vars._serialized;
inherit (config.clan.core.networking) targetHost buildHost;
inherit (config.clan.core.deployment) requireExplicitUpdate;
};
}


@@ -39,7 +39,7 @@ in
internal = true;
description = ''
JSON serialization of the generators.
This is read from the python client to generate the specified resources.
This is read from the python client to generate the specified ressources.
'';
default = {
# TODO: We don't support per-machine choice of backends
@@ -241,30 +241,12 @@ in
type = bool;
default = true;
};
flakePath = lib.mkOption {
description = ''
The path to the file containing the content of the generated value.
This will be set automatically
'';
type = nullOr str;
default = null;
};
path = lib.mkOption {
description = ''
The path to the file containing the content of the generated value.
This will be set automatically
'';
type = str;
defaultText = ''
builtins.path {
name = "$${generator.config._module.args.name}_$${file.config._module.args.name}";
path = file.config.flakePath;
}
'';
default = builtins.path {
name = "${generator.config._module.args.name}_${file.config._module.args.name}";
path = file.config.flakePath;
};
};
neededFor = lib.mkOption {
description = ''


@@ -11,7 +11,7 @@ in
config.clan.core.vars.settings = mkIf (config.clan.core.vars.settings.publicStore == "in_repo") {
publicModule = "clan_cli.vars.public_modules.in_repo";
fileModule = file: {
flakePath = mkIf (file.config.secret == false) (
path = mkIf (file.config.secret == false) (
if file.config.share then
(
config.clan.core.settings.directory
@@ -25,9 +25,9 @@ in
);
value = mkIf (file.config.secret == false) (
# dynamically adjust priority to allow overriding with mkDefault in case the file is not found
if (pathExists file.config.flakePath) then
if (pathExists file.config.path) then
# if the file is found it should have normal priority
readFile file.config.flakePath
readFile file.config.path
else
# if the file is not found, we want to downgrade the priority, to allow overriding via mkDefault
mkOptionDefault (


@@ -49,10 +49,7 @@ in
mode
neededForUsers
;
sopsFile = builtins.path {
name = "${secret.generator}_${secret.name}";
path = secretPath secret;
};
sopsFile = secretPath secret;
format = "binary";
};
}) (builtins.filter (x: builtins.pathExists (secretPath x)) vars)


@@ -29,8 +29,6 @@ mkShell {
export GIT_ROOT=$(git rev-parse --show-toplevel)
export PKG_ROOT=$GIT_ROOT/pkgs/clan-app
export CLAN_CORE_PATH="$GIT_ROOT"
# Add current package to PYTHONPATH
export PYTHONPATH="$PKG_ROOT''${PYTHONPATH:+:$PYTHONPATH:}"


@@ -6,13 +6,13 @@ from pathlib import Path
from types import ModuleType
# These imports are unused, but necessary for @API.register to run once.
from clan_lib.api import directory, disk, iwd, mdns_discovery, modules
from clan_lib.api import admin, directory, disk, iwd, mdns_discovery, modules
from .arg_actions import AppendOptionAction
from .clan import show, update
# API endpoints that are not used in the cli.
__all__ = ["directory", "disk", "iwd", "mdns_discovery", "modules", "update"]
__all__ = ["admin", "directory", "disk", "iwd", "mdns_discovery", "modules", "update"]
from . import (
backups,


@@ -19,23 +19,21 @@ def create_backup(machine: Machine, provider: str | None = None) -> None:
if not backup_scripts["providers"]:
msg = "No providers specified"
raise ClanError(msg)
with machine.target_host() as host:
for provider in backup_scripts["providers"]:
proc = host.run(
[backup_scripts["providers"][provider]["create"]],
)
if proc.returncode != 0:
msg = "failed to start backup"
raise ClanError(msg)
print("successfully started backup")
for provider in backup_scripts["providers"]:
proc = machine.target_host.run(
[backup_scripts["providers"][provider]["create"]],
)
if proc.returncode != 0:
msg = "failed to start backup"
raise ClanError(msg)
print("successfully started backup")
else:
if provider not in backup_scripts["providers"]:
msg = f"provider {provider} not found"
raise ClanError(msg)
with machine.target_host() as host:
proc = host.run(
[backup_scripts["providers"][provider]["create"]],
)
proc = machine.target_host.run(
[backup_scripts["providers"][provider]["create"]],
)
if proc.returncode != 0:
msg = "failed to start backup"
raise ClanError(msg)


@@ -10,7 +10,6 @@ from clan_cli.completions import (
)
from clan_cli.errors import ClanError
from clan_cli.machines.machines import Machine
from clan_cli.ssh.host import Host
@dataclass
@@ -19,11 +18,11 @@ class Backup:
job_name: str | None = None
def list_provider(machine: Machine, host: Host, provider: str) -> list[Backup]:
def list_provider(machine: Machine, provider: str) -> list[Backup]:
results = []
backup_metadata = machine.eval_nix("config.clan.core.backups")
list_command = backup_metadata["providers"][provider]["list"]
proc = host.run(
proc = machine.target_host.run(
[list_command],
RunOpts(log=Log.NONE, check=False),
)
@@ -49,13 +48,12 @@ def list_provider(machine: Machine, host: Host, provider: str) -> list[Backup]:
def list_backups(machine: Machine, provider: str | None = None) -> list[Backup]:
backup_metadata = machine.eval_nix("config.clan.core.backups")
results = []
with machine.target_host() as host:
if provider is None:
for _provider in backup_metadata["providers"]:
results += list_provider(machine, host, _provider)
if provider is None:
for _provider in backup_metadata["providers"]:
results += list_provider(machine, _provider)
else:
results += list_provider(machine, host, provider)
else:
results += list_provider(machine, provider)
return results


@@ -8,12 +8,9 @@ from clan_cli.completions import (
)
from clan_cli.errors import ClanError
from clan_cli.machines.machines import Machine
from clan_cli.ssh.host import Host
def restore_service(
machine: Machine, host: Host, name: str, provider: str, service: str
) -> None:
def restore_service(machine: Machine, name: str, provider: str, service: str) -> None:
backup_metadata = machine.eval_nix("config.clan.core.backups")
backup_folders = machine.eval_nix("config.clan.core.state")
@@ -28,7 +25,7 @@ def restore_service(
env["FOLDERS"] = ":".join(set(folders))
if pre_restore := backup_folders[service]["preRestoreCommand"]:
proc = host.run(
proc = machine.target_host.run(
[pre_restore],
RunOpts(log=Log.STDERR),
extra_env=env,
@@ -37,7 +34,7 @@ def restore_service(
msg = f"failed to run preRestoreCommand: {pre_restore}, error was: {proc.stdout}"
raise ClanError(msg)
proc = host.run(
proc = machine.target_host.run(
[backup_metadata["providers"][provider]["restore"]],
RunOpts(log=Log.STDERR),
extra_env=env,
@@ -47,7 +44,7 @@ def restore_service(
raise ClanError(msg)
if post_restore := backup_folders[service]["postRestoreCommand"]:
proc = host.run(
proc = machine.target_host.run(
[post_restore],
RunOpts(log=Log.STDERR),
extra_env=env,
@@ -64,19 +61,18 @@ def restore_backup(
service: str | None = None,
) -> None:
errors = []
with machine.target_host() as host:
if service is None:
backup_folders = machine.eval_nix("config.clan.core.state")
for _service in backup_folders:
try:
restore_service(machine, host, name, provider, _service)
except ClanError as e:
errors.append(f"{_service}: {e}")
else:
if service is None:
backup_folders = machine.eval_nix("config.clan.core.state")
for _service in backup_folders:
try:
restore_service(machine, host, name, provider, service)
restore_service(machine, name, provider, _service)
except ClanError as e:
errors.append(f"{service}: {e}")
errors.append(f"{_service}: {e}")
else:
try:
restore_service(machine, name, provider, service)
except ClanError as e:
errors.append(f"{service}: {e}")
if errors:
raise ClanError(
"Restore failed for the following services:\n" + "\n".join(errors)


@@ -1,8 +1,4 @@
import os
import shutil
from pathlib import Path
from clan_cli.cmd import Log, RunOpts, run
from clan_cli.cmd import run
from clan_cli.nix import nix_shell
_works: bool | None = None
@@ -16,11 +12,6 @@ def bubblewrap_works() -> bool:
def _bubblewrap_works() -> bool:
real_bash_path = Path("bash")
if os.environ.get("IN_NIX_SANDBOX"):
bash_executable_path = Path(str(shutil.which("bash")))
real_bash_path = bash_executable_path.resolve()
# fmt: off
cmd = nix_shell(
[
@@ -39,10 +30,13 @@ def _bubblewrap_works() -> bool:
"--gid", "1000",
"--",
# do nothing, just test if bash executes
str(real_bash_path), "-c", ":"
"bash", "-c", ":"
],
)
# fmt: on
res = run(cmd, RunOpts(log=Log.BOTH, check=False))
return res.returncode == 0
try:
run(cmd)
except Exception:
return False
else:
return True


@@ -107,7 +107,7 @@ def create_clan(opts: CreateOptions) -> CreateClanResponse:
response.flake_update = flake_update
if opts.initial:
init_inventory(Flake(str(opts.dest)), init=opts.initial)
init_inventory(str(opts.dest), init=opts.initial)
return response


@@ -15,10 +15,7 @@ log = logging.getLogger(__name__)
@API.register
def show_clan_meta(uri: str) -> Meta:
if uri.startswith("/") and not Path(uri).exists():
msg = f"Path {uri} does not exist"
raise ClanError(msg, description="clan directory does not exist")
def show_clan_meta(uri: str | Path) -> Meta:
cmd = nix_eval(
[
f"{uri}#clanInternals.inventory.meta",


@@ -2,21 +2,20 @@ from dataclasses import dataclass
from clan_lib.api import API
from clan_cli.flake import Flake
from clan_cli.inventory import Inventory, Meta, load_inventory_json, set_inventory
@dataclass
class UpdateOptions:
flake: Flake
directory: str
meta: Meta
@API.register
def update_clan_meta(options: UpdateOptions) -> Inventory:
inventory = load_inventory_json(options.flake)
inventory = load_inventory_json(options.directory)
inventory["meta"] = options.meta
set_inventory(inventory, options.flake, "Update clan metadata")
set_inventory(inventory, options.directory, "Update clan metadata")
return inventory


@@ -244,12 +244,12 @@ class TimeTable:
# Print in default color
print(f"Took {v} for command: '{k}'")
def add(self, cmd: str, duration: float) -> None:
def add(self, cmd: str, time: float) -> None:
with self.lock:
if cmd in self.table:
self.table[cmd] += duration
self.table[cmd] += time
else:
self.table[cmd] = duration
self.table[cmd] = time
TIME_TABLE = None
@@ -259,7 +259,7 @@ if os.environ.get("CLAN_CLI_PERF"):
@dataclass
class RunOpts:
input: IO[bytes] | bytes | None = None
input: bytes | None = None
stdout: IO[bytes] | None = None
stderr: IO[bytes] | None = None
env: dict[str, str] | None = None
@@ -329,7 +329,7 @@ def run(
if options.requires_root_perm:
cmd = cmd_with_root(cmd, options.graphical_perm)
if options.input and isinstance(options.input, bytes):
if options.input:
if any(not ch.isprintable() for ch in options.input.decode("ascii", "replace")):
filtered_input = "<<binary_blob>>"
else:
@@ -344,7 +344,7 @@ def run(
start = timeit.default_timer()
with ExitStack() as stack:
stdin = subprocess.PIPE if isinstance(options.input, bytes) else options.input
stdin = subprocess.PIPE if options.input is not None else None
process = stack.enter_context(
subprocess.Popen(
cmd,
@@ -364,18 +364,13 @@ def run(
else:
stack.enter_context(terminate_process_group(process))
if isinstance(options.input, bytes):
input_bytes = options.input
else:
input_bytes = None
stdout_buf, stderr_buf = handle_io(
process,
options.log,
prefix=options.prefix,
msg_color=options.msg_color,
timeout=options.timeout,
input_bytes=input_bytes,
input_bytes=options.input,
stdout=options.stdout,
stderr=options.stderr,
)
@@ -423,3 +418,6 @@ def run_no_stdout(
cmd,
opts,
)
# type: ignore


@@ -1,3 +1,9 @@
import pytest
from clan_cli.custom_logger import setup_logging
# collect_ignore = ["./nixpkgs"]
pytest_plugins = [
"clan_cli.tests.temporary_dir",
"clan_cli.tests.root",
@@ -13,3 +19,13 @@ pytest_plugins = [
"clan_cli.tests.stdout",
"clan_cli.tests.nix_config",
]
# Executed on pytest session start
def pytest_sessionstart(session: pytest.Session) -> None:
# This function will be called once at the beginning of the test session
print("Starting pytest session")
# You can access the session config, items, testsfailed, etc.
print(f"Session config: {session.config}")
setup_logging(level="INFO")


@@ -4,13 +4,9 @@ import sys
import urllib
from enum import Enum
from pathlib import Path
from typing import TYPE_CHECKING
from .errors import ClanError
if TYPE_CHECKING:
from clan_cli.flake import Flake
log = logging.getLogger(__name__)
@@ -135,17 +131,12 @@ def vm_state_dir(flake_url: str, vm_name: str) -> Path:
return user_data_dir() / "clan" / "vmstate" / clan_key / vm_name
def machines_dir(flake: "Flake") -> Path:
if flake.is_local:
return flake.path / "machines"
store_path = flake.store_path
assert store_path is not None, "Invalid flake object. Doesn't have a store path"
return Path(store_path) / "machines"
def machines_dir(flake_dir: Path) -> Path:
return flake_dir / "machines"
def specific_machine_dir(flake: "Flake", machine: str) -> Path:
return machines_dir(flake) / machine
def specific_machine_dir(flake_dir: Path, machine: str) -> Path:
return machines_dir(flake_dir) / machine
def module_root() -> Path:


@@ -4,7 +4,6 @@ from abc import ABC, abstractmethod
from pathlib import Path
import clan_cli.machines.machines as machines
from clan_cli.ssh.host import Host
class SecretStoreBase(ABC):
@@ -26,7 +25,7 @@ class SecretStoreBase(ABC):
def exists(self, service: str, name: str) -> bool:
pass
def needs_upload(self, host: Host) -> bool:
def needs_upload(self) -> bool:
return True
@abstractmethod


@@ -6,7 +6,6 @@ from typing import override
from clan_cli.cmd import Log, RunOpts
from clan_cli.machines.machines import Machine
from clan_cli.nix import nix_shell
from clan_cli.ssh.host import Host
from . import SecretStoreBase
@@ -94,9 +93,9 @@ class SecretStore(SecretStoreBase):
return b"\n".join(hashes)
@override
def needs_upload(self, host: Host) -> bool:
def needs_upload(self) -> bool:
local_hash = self.generate_hash()
remote_hash = host.run(
remote_hash = self.machine.target_host.run(
# TODO get the path to the secrets from the machine
["cat", f"{self.machine.secrets_upload_directory}/.pass_info"],
RunOpts(log=Log.STDERR, check=False),


@@ -1,12 +1,10 @@
from pathlib import Path
from typing import override
from clan_cli.machines.machines import Machine
from clan_cli.secrets.folders import sops_secrets_folder
from clan_cli.secrets.machines import add_machine, has_machine
from clan_cli.secrets.secrets import decrypt_secret, encrypt_secret, has_secret
from clan_cli.secrets.sops import generate_private_key
from clan_cli.ssh.host import Host
from . import SecretStoreBase
@@ -60,10 +58,13 @@ class SecretStore(SecretStoreBase):
sops_secrets_folder(self.machine.flake_dir) / f"{self.machine.name}-{name}",
)
@override
def needs_upload(self, host: Host) -> bool:
return False
# We rely now on the vars backend to upload the age key
def upload(self, output_dir: Path) -> None:
pass
key_name = f"{self.machine.name}-age.key"
if not has_secret(sops_secrets_folder(self.machine.flake_dir) / key_name):
# skip uploading the secret, not managed by us
return
key = decrypt_secret(
self.machine.flake_dir,
sops_secrets_folder(self.machine.flake_dir) / key_name,
)
(output_dir / "key.txt").write_text(key)


@@ -1,6 +1,5 @@
import shutil
from pathlib import Path
from typing import override
from clan_cli.dirs import vm_state_dir
from clan_cli.machines.machines import Machine
@@ -29,7 +28,6 @@ class SecretStore(SecretStoreBase):
def exists(self, service: str, name: str) -> bool:
return (self.dir / service / name).exists()
@override
def upload(self, output_dir: Path) -> None:
if output_dir.exists():
shutil.rmtree(output_dir)


@@ -5,14 +5,13 @@ from tempfile import TemporaryDirectory
from clan_cli.completions import add_dynamic_completer, complete_machines
from clan_cli.machines.machines import Machine
from clan_cli.ssh.host import Host
from clan_cli.ssh.upload import upload
log = logging.getLogger(__name__)
def upload_secrets(machine: Machine, host: Host) -> None:
if not machine.secret_facts_store.needs_upload(host):
def upload_secrets(machine: Machine) -> None:
if not machine.secret_facts_store.needs_upload():
machine.info("Secrets already uploaded")
return
@@ -20,13 +19,13 @@ def upload_secrets(machine: Machine, host: Host) -> None:
local_secret_dir = Path(_tempdir).resolve()
machine.secret_facts_store.upload(local_secret_dir)
remote_secret_dir = Path(machine.secrets_upload_directory)
upload(host, local_secret_dir, remote_secret_dir)
upload(machine.target_host, local_secret_dir, remote_secret_dir)
def upload_command(args: argparse.Namespace) -> None:
machine = Machine(name=args.machine, flake=args.flake)
with machine.target_host() as host:
upload_secrets(machine, host)
upload_secrets(machine)
def register_upload_parser(parser: argparse.ArgumentParser) -> None:


@@ -14,7 +14,6 @@ from clan_cli.nix import (
nix_build,
nix_command,
nix_config,
nix_eval,
nix_metadata,
nix_test_store,
)
@@ -345,6 +344,9 @@ class FlakeCacheEntry:
def is_cached(self, selectors: list[Selector]) -> bool:
selector: Selector
if selectors == []:
return self.fetched_all
selector = selectors[0]
# for store paths we have to check if they still exist, otherwise they have to be rebuild and are thus not cached
if isinstance(self.value, str) and self.value.startswith("/nix/store/"):
@@ -354,10 +356,6 @@ class FlakeCacheEntry:
if isinstance(self.value, str | float | int | None):
return True
if selectors == []:
return self.fetched_all
selector = selectors[0]
# we just fetch all subkeys, so we need to check of we inserted all keys at this level before
if selector.type == SelectorType.ALL:
assert isinstance(self.value, dict)
@@ -460,7 +458,7 @@ class FlakeCacheEntry:
result = []
for index in keys_to_select:
result.append(self.value[index].select(selectors[1:]))
return result
return result
# otherwise return a dict
return {k: self.value[k].select(selectors[1:]) for k in keys_to_select}
@@ -620,9 +618,11 @@ class Flake:
except Exception as e:
log.warning(f"Failed load eval cache: {e}. Continue without cache")
def prefetch(self) -> None:
def invalidate_cache(self) -> None:
"""
Loads the flake into the store and populates self.store_path and self.hash so that the flake can be evaluated locally and offline
Invalidate the cache and reload it.
This method is used to refresh the cache by reloading it from the flake.
"""
cmd = [
"flake",
@@ -641,15 +641,6 @@ class Flake:
flake_metadata = json.loads(flake_prefetch.stdout)
self.store_path = flake_metadata["storePath"]
self.hash = flake_metadata["hash"]
self.flake_metadata = flake_metadata
def invalidate_cache(self) -> None:
"""
Invalidate the cache and reload it.
This method is used to refresh the cache by reloading it from the flake.
"""
self.prefetch()
self._cache = FlakeCache()
assert self.hash is not None
@@ -659,17 +650,17 @@ class Flake:
)
self.load_cache()
if "original" not in self.flake_metadata:
self.flake_metadata = nix_metadata(self.identifier)
if "original" not in flake_metadata:
flake_metadata = nix_metadata(self.identifier)
if self.flake_metadata["original"].get("url", "").startswith("file:"):
if flake_metadata["original"].get("url", "").startswith("file:"):
self._is_local = True
path = self.flake_metadata["original"]["url"].removeprefix("file://")
path = flake_metadata["original"]["url"].removeprefix("file://")
path = path.removeprefix("file:")
self._path = Path(path)
elif self.flake_metadata["original"].get("path"):
elif flake_metadata["original"].get("path"):
self._is_local = True
self._path = Path(self.flake_metadata["original"]["path"])
self._path = Path(flake_metadata["original"]["path"])
else:
self._is_local = False
assert self.store_path is not None
@@ -763,56 +754,6 @@ class Flake:
if self.flake_cache_path:
self._cache.save_to_file(self.flake_cache_path)
def uncached_nix_eval_with_args(
self,
attr_path: str,
f_args: dict[str, str],
nix_options: list[str] | None = None,
) -> str:
"""
Calls a nix function with the provided arguments 'f_args'
The argument must be an attribute set.
Args:
attr_path (str): The attribute path to the nix function
f_args (dict[str, nix_expr]): A python dictionary mapping from the name of the argument to a raw nix expression.
Example
flake.uncached_nix_eval_with_args(
"clanInternals.evalServiceSchema",
{ "moduleSpec": "{ name = \"hello-world\"; input = null; }" }
)
> '{ ...JSONSchema... }'
"""
# Always prefetch, so we don't get any stale information
self.prefetch()
if nix_options is None:
nix_options = []
arg_expr = "{"
for arg_name, arg_value in f_args.items():
arg_expr += f" {arg_name} = {arg_value}; "
arg_expr += "}"
nix_code = f"""
let
flake = builtins.getFlake "path:{self.store_path}?narHash={self.hash}";
in
flake.{attr_path} {arg_expr}
"""
if tmp_store := nix_test_store():
nix_options += ["--store", str(tmp_store)]
nix_options.append("--impure")
output = run(
nix_eval(["--expr", nix_code, *nix_options]), RunOpts(log=Log.NONE)
).stdout.strip()
return output
def precache(
self,
selectors: list[str],

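For orientation, a minimal, hypothetical sketch of how a caller might drive the cache API touched in the hunks above; the flake path is illustrative, and only methods that appear in this diff (invalidate_cache, precache, select) are used:

# Hypothetical caller code, not part of this diff.
from clan_cli.flake import Flake

flake = Flake("/path/to/clan")          # illustrative path
flake.invalidate_cache()                # prefetch the flake and rebuild the eval cache
# Warm the cache for a set of selectors in one evaluation, then read from it.
flake.precache(["clanInternals.inventoryClass.introspection"])
plugins = flake.select("clanInternals.?secrets.?age.?plugins")
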
View File

@@ -14,7 +14,7 @@ from clan_cli.facts.generate import generate_facts
from clan_cli.machines.machines import Machine
from clan_cli.nix import nix_shell
from clan_cli.vars.generate import generate_vars
from clan_cli.vars.upload import populate_secret_vars
from clan_cli.vars.upload import upload_secret_vars
from .automount import pause_automounting
from .list import list_possible_keymaps, list_possible_languages
@@ -35,7 +35,6 @@ class Disk:
device: str
# TODO: unify this with machine install
@API.register
def flash_machine(
machine: Machine,
@@ -108,7 +107,7 @@ def flash_machine(
local_dir.mkdir(parents=True)
machine.secret_facts_store.upload(local_dir)
populate_secret_vars(machine, local_dir)
upload_secret_vars(machine, local_dir)
disko_install = []
if os.geteuid() != 0:

View File

@@ -23,7 +23,6 @@ from clan_lib.api import API, dataclass_to_dict, from_dict
from clan_cli.cmd import run_no_stdout
from clan_cli.errors import ClanCmdError, ClanError
from clan_cli.flake import Flake
from clan_cli.git import commit_file
from clan_cli.nix import nix_eval
@@ -50,11 +49,11 @@ __all__ = [
]
def get_inventory_path(flake: Flake) -> Path:
def get_inventory_path(flake_dir: str | Path) -> Path:
"""
Get the path to the inventory file in the flake directory
"""
inventory_file = (flake.path / "inventory.json").resolve()
inventory_file = (Path(flake_dir) / "inventory.json").resolve()
return inventory_file
@@ -62,7 +61,8 @@ def get_inventory_path(flake: Flake) -> Path:
default_inventory: Inventory = {"meta": {"name": "New Clan"}}
def load_inventory_eval(flake_dir: Flake) -> Inventory:
@API.register
def load_inventory_eval(flake_dir: str | Path) -> Inventory:
"""
Loads the evaluated inventory.
After all merge operations with any Nix code in buildClan.
@@ -355,7 +355,7 @@ def determine_writeability(
return results
def get_inventory_current_priority(flake: Flake) -> dict:
def get_inventory_current_priority(flake_dir: str | Path) -> dict:
"""
Returns the current priority of the inventory values
@@ -375,7 +375,7 @@ def get_inventory_current_priority(flake: Flake) -> dict:
"""
cmd = nix_eval(
[
f"{flake}#clanInternals.inventoryClass.introspection",
f"{flake_dir}#clanInternals.inventoryValuesPrios",
"--json",
]
)
@@ -393,7 +393,7 @@ def get_inventory_current_priority(flake: Flake) -> dict:
@API.register
def load_inventory_json(flake: Flake) -> Inventory:
def load_inventory_json(flake_dir: str | Path) -> Inventory:
"""
Load the inventory FILE from the flake directory
If no file is found, returns an empty dictionary
@@ -403,7 +403,7 @@ def load_inventory_json(flake: Flake) -> Inventory:
Use load_inventory_eval instead
"""
inventory_file = get_inventory_path(flake)
inventory_file = get_inventory_path(flake_dir)
if not inventory_file.exists():
return {}
@@ -473,14 +473,14 @@ def patch(d: dict[str, Any], path: str, content: Any) -> None:
@API.register
def patch_inventory_with(flake: Flake, section: str, content: dict[str, Any]) -> None:
def patch_inventory_with(base_dir: Path, section: str, content: dict[str, Any]) -> None:
"""
Pass only the section to update and the content to update with.
Make sure you pass only attributes that you would like to persist.
ATTENTION: Don't pass nix eval values unintentionally.
"""
inventory_file = get_inventory_path(flake)
inventory_file = get_inventory_path(base_dir)
curr_inventory = {}
if inventory_file.exists():
@@ -492,9 +492,7 @@ def patch_inventory_with(flake: Flake, section: str, content: dict[str, Any]) ->
with inventory_file.open("w") as f:
json.dump(curr_inventory, f, indent=2)
commit_file(
inventory_file, flake.path, commit_message=f"inventory.{section}: Update"
)
commit_file(inventory_file, base_dir, commit_message=f"inventory.{section}: Update")
@dataclass
@@ -506,16 +504,16 @@ class WriteInfo:
@API.register
def get_inventory_with_writeable_keys(
flake: Flake,
flake_dir: str | Path,
) -> WriteInfo:
"""
Load the inventory and determine the writeable keys.
Performs two Nix evaluations to get the current priority and the inventory.
"""
current_priority = get_inventory_current_priority(flake)
current_priority = get_inventory_current_priority(flake_dir)
data_eval: Inventory = load_inventory_eval(flake)
data_disk: Inventory = load_inventory_json(flake)
data_eval: Inventory = load_inventory_eval(flake_dir)
data_disk: Inventory = load_inventory_json(flake_dir)
writeables = determine_writeability(
current_priority, dict(data_eval), dict(data_disk)
@@ -524,17 +522,16 @@ def get_inventory_with_writeable_keys(
return WriteInfo(writeables, data_eval, data_disk)
# TODO: remove this function in favor of a proper read/write API
@API.register
def set_inventory(
inventory: Inventory, flake: Flake, message: str, commit: bool = True
inventory: Inventory, flake_dir: str | Path, message: str, commit: bool = True
) -> None:
"""
Write the inventory to the flake directory
and commit it to git with the given message
"""
write_info = get_inventory_with_writeable_keys(flake)
write_info = get_inventory_with_writeable_keys(flake_dir)
# Remove internals from the inventory
inventory.pop("tags", None) # type: ignore
@@ -555,43 +552,43 @@ def set_inventory(
for delete_path in delete_set:
delete_by_path(persisted, delete_path)
inventory_file = get_inventory_path(flake)
inventory_file = get_inventory_path(flake_dir)
with inventory_file.open("w") as f:
json.dump(persisted, f, indent=2)
if commit:
commit_file(inventory_file, flake.path, commit_message=message)
commit_file(inventory_file, Path(flake_dir), commit_message=message)
# TODO: wrap this in a proper persistence API
def delete(flake: Flake, delete_set: set[str]) -> None:
@API.register
def delete(directory: str | Path, delete_set: set[str]) -> None:
"""
Delete keys from the inventory
"""
write_info = get_inventory_with_writeable_keys(flake)
write_info = get_inventory_with_writeable_keys(directory)
data_disk = dict(write_info.data_disk)
for delete_path in delete_set:
delete_by_path(data_disk, delete_path)
inventory_file = get_inventory_path(flake)
inventory_file = get_inventory_path(directory)
with inventory_file.open("w") as f:
json.dump(data_disk, f, indent=2)
commit_file(
inventory_file,
flake.path,
Path(directory),
commit_message=f"Delete inventory keys {delete_set}",
)
def init_inventory(flake: Flake, init: Inventory | None = None) -> None:
def init_inventory(directory: str, init: Inventory | None = None) -> None:
inventory = None
# Try reading the current flake
if init is None:
with contextlib.suppress(ClanCmdError):
inventory = load_inventory_eval(flake)
inventory = load_inventory_eval(directory)
if init is not None:
inventory = init
@@ -599,9 +596,9 @@ def init_inventory(flake: Flake, init: Inventory | None = None) -> None:
# Write inventory.json file
if inventory is not None:
# Persist creates a commit message for each change
set_inventory(inventory, flake, "Init inventory")
set_inventory(inventory, directory, "Init inventory")
@API.register
def get_inventory(flake: Flake) -> Inventory:
return load_inventory_eval(flake)
def get_inventory(base_path: str | Path) -> Inventory:
return load_inventory_eval(base_path)

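As a quick illustration of the inventory helpers changed above, a hedged usage sketch assuming the Path-based signatures shown in this hunk (directory, machine name, and section content are made up):

from pathlib import Path

from clan_cli.inventory import load_inventory_json, patch_inventory_with, set_inventory

clan_dir = Path("/path/to/clan")  # illustrative flake directory
# Persist a single section; pass only attributes that should end up in inventory.json.
patch_inventory_with(clan_dir, "machines.server1", {"tags": ["backup"]})
inventory = load_inventory_json(clan_dir)  # returns {} if no inventory.json exists yet
set_inventory(inventory, clan_dir, "machines.server1: update tags")
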
View File

@@ -110,7 +110,7 @@ def create_machine(opts: CreateOptions, commit: bool = True) -> None:
new_machine["deploy"] = {"targetHost": target_host}
patch_inventory_with(
Flake(str(clan_dir)), f"machines.{machine_name}", dataclass_to_dict(new_machine)
clan_dir, f"machines.{machine_name}", dataclass_to_dict(new_machine)
)
# Commit at the end in that order to avoid committing half-baked machines

View File

@@ -23,7 +23,7 @@ log = logging.getLogger(__name__)
@API.register
def delete_machine(flake: Flake, name: str) -> None:
try:
inventory.delete(flake, {f"machines.{name}"})
inventory.delete(str(flake.path), {f"machines.{name}"})
except KeyError as exc:
# louis@(2025-03-09): test infrastructure does not seem to set the
# inventory properly, but more importantly only one machine in my
@@ -35,7 +35,7 @@ def delete_machine(flake: Flake, name: str) -> None:
changed_paths: list[Path] = []
folder = specific_machine_dir(flake, name)
folder = specific_machine_dir(flake.path, name)
if folder.exists():
changed_paths.append(folder)
shutil.rmtree(folder)

View File

@@ -7,14 +7,14 @@ from pathlib import Path
from clan_lib.api import API
from clan_cli.cmd import RunOpts, run_no_stdout
from clan_cli.cmd import RunOpts, run, run_no_stdout
from clan_cli.completions import add_dynamic_completer, complete_machines
from clan_cli.dirs import specific_machine_dir
from clan_cli.errors import ClanCmdError, ClanError
from clan_cli.flake import Flake
from clan_cli.git import commit_file
from clan_cli.machines.machines import Machine
from clan_cli.nix import nix_config, nix_eval
from clan_cli.nix import nix_config, nix_eval, nix_shell
from .types import machine_name_type
@@ -26,39 +26,61 @@ class HardwareConfig(Enum):
NIXOS_GENERATE_CONFIG = "nixos-generate-config"
NONE = "none"
def config_path(self, flake: Flake, machine_name: str) -> Path:
machine_dir = specific_machine_dir(flake, machine_name)
def config_path(self, clan_dir: Path, machine_name: str) -> Path:
machine_dir = specific_machine_dir(clan_dir, machine_name)
if self == HardwareConfig.NIXOS_FACTER:
return machine_dir / "facter.json"
return machine_dir / "hardware-configuration.nix"
@classmethod
def detect_type(
cls: type["HardwareConfig"], flake: Flake, machine_name: str
cls: type["HardwareConfig"], clan_dir: Path, machine_name: str
) -> "HardwareConfig":
hardware_config = HardwareConfig.NIXOS_GENERATE_CONFIG.config_path(
flake, machine_name
clan_dir, machine_name
)
if hardware_config.exists() and "throw" not in hardware_config.read_text():
return HardwareConfig.NIXOS_GENERATE_CONFIG
if HardwareConfig.NIXOS_FACTER.config_path(flake, machine_name).exists():
if HardwareConfig.NIXOS_FACTER.config_path(clan_dir, machine_name).exists():
return HardwareConfig.NIXOS_FACTER
return HardwareConfig.NONE
@API.register
def show_machine_hardware_config(flake: Flake, machine_name: str) -> HardwareConfig:
def show_machine_hardware_config(clan_dir: Path, machine_name: str) -> HardwareConfig:
"""
Show hardware information for a machine; returns None if none exists.
"""
return HardwareConfig.detect_type(flake, machine_name)
return HardwareConfig.detect_type(clan_dir, machine_name)
@API.register
def show_machine_hardware_platform(flake: Flake, machine_name: str) -> str | None:
def show_machine_deployment_target(clan_dir: Path, machine_name: str) -> str | None:
"""
Show the deployment target for a machine; returns None if none exists.
"""
config = nix_config()
system = config["system"]
cmd = nix_eval(
[
f"{clan_dir}#clanInternals.machines.{system}.{machine_name}",
"--apply",
"machine: { inherit (machine.config.clan.core.networking) targetHost; }",
"--json",
]
)
proc = run_no_stdout(cmd, RunOpts(prefix=machine_name))
res = proc.stdout.strip()
target_host = json.loads(res)
return target_host.get("targetHost", None)
@API.register
def show_machine_hardware_platform(clan_dir: Path, machine_name: str) -> str | None:
"""
Show hardware information for a machine; returns None if none exists.
"""
@@ -66,7 +88,7 @@ def show_machine_hardware_platform(flake: Flake, machine_name: str) -> str | Non
system = config["system"]
cmd = nix_eval(
[
f"{flake}#clanInternals.machines.{system}.{machine_name}",
f"{clan_dir}#clanInternals.machines.{system}.{machine_name}",
"--apply",
"machine: { inherit (machine.pkgs) system; }",
"--json",
@@ -96,14 +118,11 @@ def generate_machine_hardware_info(opts: HardwareGenerateOptions) -> HardwareCon
and place the resulting *.nix file in the machine's directory.
"""
machine = Machine(
opts.machine,
flake=opts.flake,
private_key=Path(opts.keyfile) if opts.keyfile else None,
override_target_host=opts.target_host,
)
machine = Machine(opts.machine, flake=opts.flake)
if opts.target_host is not None:
machine.override_target_host = opts.target_host
hw_file = opts.backend.config_path(opts.flake, opts.machine)
hw_file = opts.backend.config_path(opts.flake.path, opts.machine)
hw_file.parent.mkdir(parents=True, exist_ok=True)
if opts.backend == HardwareConfig.NIXOS_FACTER:
@@ -116,26 +135,48 @@ def generate_machine_hardware_info(opts: HardwareGenerateOptions) -> HardwareCon
"--show-hardware-config",
]
with machine.target_host() as host:
host.ssh_options["StrictHostKeyChecking"] = "accept-new"
host.ssh_options["UserKnownHostsFile"] = "/dev/null"
if opts.password:
host.password = opts.password
host = machine.target_host
out = host.run(config_command, become_root=True, opts=RunOpts(check=False))
if out.returncode != 0:
if "nixos-facter" in out.stderr and "not found" in out.stderr:
machine.error(str(out.stderr))
msg = (
"Please use our custom nixos install images from https://github.com/nix-community/nixos-images/releases/tag/nixos-unstable. "
"nixos-facter only works on nixos / clan systems currently."
)
raise ClanError(msg)
# HACK: to make non-root user work
if host.user != "root":
config_command.insert(0, "sudo")
machine.error(str(out))
msg = f"Failed to inspect {opts.machine}. Address: {host.target}"
deps = ["openssh"]
if opts.password:
deps += ["sshpass"]
cmd = nix_shell(
deps,
[
*(["sshpass", "-p", opts.password] if opts.password else []),
"ssh",
*(["-i", f"{opts.keyfile}"] if opts.keyfile else []),
# Disable known hosts file
"-o",
"UserKnownHostsFile=/dev/null",
# Disable strict host key checking. The GUI user cannot type "yes" into the ssh terminal.
"-o",
"StrictHostKeyChecking=accept-new",
*(
["-p", str(machine.target_host.port)]
if machine.target_host.port
else []
),
host.target,
*config_command,
],
)
out = run(cmd, RunOpts(needs_user_terminal=True, prefix=machine.name, check=False))
if out.returncode != 0:
if "nixos-facter" in out.stderr and "not found" in out.stderr:
machine.error(str(out.stderr))
msg = "Please use our custom nixos install images. nixos-facter only works on nixos / clan systems currently."
raise ClanError(msg)
machine.error(str(out))
msg = f"Failed to inspect {opts.machine}. Address: {host.target}"
raise ClanError(msg)
backup_file = None
if hw_file.exists():
backup_file = hw_file.with_suffix(".bak")
@@ -152,7 +193,7 @@ def generate_machine_hardware_info(opts: HardwareGenerateOptions) -> HardwareCon
f"machines/{opts.machine}/{hw_file.name}: update hardware configuration",
)
try:
show_machine_hardware_platform(opts.flake, opts.machine)
show_machine_hardware_platform(opts.flake.path, opts.machine)
if backup_file:
backup_file.unlink(missing_ok=True)
except ClanCmdError as e:

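To make the hardware helpers above easier to follow, a hypothetical call site assuming the Path-based signatures from this hunk (clan directory and machine name are placeholders):

from pathlib import Path

from clan_cli.machines.hardware import HardwareConfig, show_machine_hardware_platform

clan_dir = Path("/path/to/clan")  # placeholder
backend = HardwareConfig.detect_type(clan_dir, "server1")  # NIXOS_FACTER, NIXOS_GENERATE_CONFIG or NONE
platform = show_machine_hardware_platform(clan_dir, "server1")  # e.g. "x86_64-linux", or None
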
View File

@@ -36,6 +36,7 @@ class BuildOn(Enum):
@dataclass
class InstallOptions:
machine: Machine
target_host: str
kexec: str | None = None
debug: bool = False
no_reboot: bool = False
@@ -51,16 +52,17 @@ class InstallOptions:
@API.register
def install_machine(opts: InstallOptions) -> None:
machine = opts.machine
machine.override_target_host = opts.target_host
machine.debug(f"installing {machine.name}")
machine.info(f"installing {machine.name}")
h = machine.target_host
machine.info(f"target host: {h.target}")
generate_facts([machine])
generate_vars([machine])
with (
TemporaryDirectory(prefix="nixos-install-") as _base_directory,
machine.target_host() as host,
):
with TemporaryDirectory(prefix="nixos-install-") as _base_directory:
base_directory = Path(_base_directory).resolve()
activation_secrets = base_directory / "activation_secrets"
upload_dir = activation_secrets / machine.secrets_upload_directory.lstrip("/")
@@ -113,7 +115,7 @@ def install_machine(opts: InstallOptions) -> None:
str(opts.update_hardware_config.value),
str(
opts.update_hardware_config.config_path(
machine.flake, machine.name
machine.flake.path, machine.name
)
),
]
@@ -132,14 +134,14 @@ def install_machine(opts: InstallOptions) -> None:
if opts.build_on:
cmd += ["--build-on", opts.build_on.value]
if host.port:
cmd += ["--ssh-port", str(host.port)]
if h.port:
cmd += ["--ssh-port", str(h.port)]
if opts.kexec:
cmd += ["--kexec", opts.kexec]
if opts.debug:
cmd.append("--debug")
cmd.append(host.target)
cmd.append(h.target)
if opts.use_tor:
# nix copy does not support tor socks proxy
# cmd.append("--ssh-option")
@@ -162,32 +164,7 @@ def install_machine(opts: InstallOptions) -> None:
def install_command(args: argparse.Namespace) -> None:
host_key_check = HostKeyCheck.from_str(args.host_key_check)
try:
# Only if the caller did not specify a target_host via args.target_host
# Find a suitable target_host that is reachable
target_host = args.target_host
deploy_info: DeployInfo | None = ssh_command_parse(args)
if deploy_info and not args.target_host:
host = find_reachable_host(deploy_info, host_key_check)
if host is None:
use_tor = True
target_host = f"root@{deploy_info.tor}"
else:
target_host = host.target
if args.password:
password = args.password
elif deploy_info and deploy_info.pwd:
password = deploy_info.pwd
else:
password = None
machine = Machine(
name=args.machine,
flake=args.flake,
nix_options=args.option,
override_target_host=target_host,
)
machine = Machine(name=args.machine, flake=args.flake, nix_options=args.option)
use_tor = False
if machine._class_ == "darwin":
@@ -198,16 +175,41 @@ def install_command(args: argparse.Namespace) -> None:
msg = "Could not find clan flake toplevel directory"
raise ClanError(msg)
deploy_info: DeployInfo | None = ssh_command_parse(args)
if args.target_host:
target_host = args.target_host
elif deploy_info:
host = find_reachable_host(deploy_info, host_key_check)
if host is None:
use_tor = True
target_host = f"root@{deploy_info.tor}"
else:
target_host = host.target
password = deploy_info.pwd
else:
target_host = machine.target_host.target
if args.password:
password = args.password
elif deploy_info and deploy_info.pwd:
password = deploy_info.pwd
else:
password = None
if not target_host:
msg = "No target host provided, please provide a target host."
raise ClanError(msg)
if not args.yes:
ask = input(
f"Install {args.machine} to {machine.target_host_address}? [y/N] "
)
ask = input(f"Install {args.machine} to {target_host}? [y/N] ")
if ask != "y":
return None
return install_machine(
InstallOptions(
machine=machine,
target_host=target_host,
kexec=args.kexec,
phases=args.phases,
debug=args.debug,

View File

@@ -2,7 +2,6 @@ import argparse
import json
import logging
import re
import time
from dataclasses import dataclass
from pathlib import Path
from typing import Literal
@@ -12,38 +11,38 @@ from clan_lib.api.disk import MachineDiskMatter
from clan_lib.api.modules import parse_frontmatter
from clan_lib.api.serde import dataclass_to_dict
from clan_cli.cmd import RunOpts, run
from clan_cli.cmd import RunOpts, run_no_stdout
from clan_cli.completions import add_dynamic_completer, complete_tags
from clan_cli.dirs import specific_machine_dir
from clan_cli.errors import ClanError
from clan_cli.flake import Flake
from clan_cli.errors import ClanCmdError, ClanError
from clan_cli.inventory import (
Machine,
load_inventory_eval,
patch_inventory_with,
)
from clan_cli.inventory.classes import Machine as InventoryMachine
from clan_cli.machines.hardware import HardwareConfig
from clan_cli.machines.machines import Machine
from clan_cli.nix import nix_eval
from clan_cli.nix import nix_eval, nix_shell
from clan_cli.tags import list_nixos_machines_by_tags
log = logging.getLogger(__name__)
@API.register
def set_machine(flake: Flake, machine_name: str, machine: InventoryMachine) -> None:
patch_inventory_with(flake, f"machines.{machine_name}", dataclass_to_dict(machine))
def set_machine(flake_url: Path, machine_name: str, machine: Machine) -> None:
patch_inventory_with(
flake_url, f"machines.{machine_name}", dataclass_to_dict(machine)
)
@API.register
def list_machines(flake: Flake) -> dict[str, InventoryMachine]:
inventory = load_inventory_eval(flake)
def list_inventory_machines(flake_url: str | Path) -> dict[str, Machine]:
inventory = load_inventory_eval(flake_url)
return inventory.get("machines", {})
@dataclass
class MachineDetails:
machine: InventoryMachine
machine: Machine
hw_config: HardwareConfig | None = None
disk_schema: MachineDiskMatter | None = None
@@ -60,16 +59,16 @@ def extract_header(c: str) -> str:
@API.register
def get_machine_details(machine: Machine) -> MachineDetails:
inventory = load_inventory_eval(machine.flake)
machine_inv = inventory.get("machines", {}).get(machine.name)
if machine_inv is None:
msg = f"Machine {machine.name} not found in inventory"
def get_inventory_machine_details(flake_url: Path, machine_name: str) -> MachineDetails:
inventory = load_inventory_eval(flake_url)
machine = inventory.get("machines", {}).get(machine_name)
if machine is None:
msg = f"Machine {machine_name} not found in inventory"
raise ClanError(msg)
hw_config = HardwareConfig.detect_type(machine.flake, machine.name)
hw_config = HardwareConfig.detect_type(flake_url, machine_name)
machine_dir = specific_machine_dir(machine.flake, machine.name)
machine_dir = specific_machine_dir(flake_url, machine_name)
disk_schema: MachineDiskMatter | None = None
disk_path = machine_dir / "disko.nix"
if disk_path.exists():
@@ -80,9 +79,7 @@ def get_machine_details(machine: Machine) -> MachineDetails:
if data:
disk_schema = data # type: ignore
return MachineDetails(
machine=machine_inv, hw_config=hw_config, disk_schema=disk_schema
)
return MachineDetails(machine=machine, hw_config=hw_config, disk_schema=disk_schema)
def list_nixos_machines(flake_url: str | Path) -> list[str]:
@@ -95,7 +92,7 @@ def list_nixos_machines(flake_url: str | Path) -> list[str]:
]
)
proc = run(cmd)
proc = run_no_stdout(cmd)
try:
res = proc.stdout.strip()
@@ -109,36 +106,53 @@ def list_nixos_machines(flake_url: str | Path) -> list[str]:
@dataclass
class ConnectionOptions:
keyfile: str | None = None
timeout: int = 2
retries: int = 10
from clan_cli.machines.machines import Machine
@API.register
def check_machine_online(
machine: Machine, opts: ConnectionOptions | None = None
flake_url: str | Path, machine_name: str, opts: ConnectionOptions | None
) -> Literal["Online", "Offline"]:
hostname = machine.target_host_address
if not hostname:
msg = f"Machine {machine.name} does not specify a targetHost"
machine = load_inventory_eval(flake_url).get("machines", {}).get(machine_name)
if not machine:
msg = f"Machine {machine_name} not found in inventory"
raise ClanError(msg)
timeout = opts.timeout if opts and opts.timeout else 2
hostname = machine.get("deploy", {}).get("targetHost")
for _ in range(opts.retries if opts and opts.retries else 10):
with machine.target_host() as target:
res = target.run(
["true"],
RunOpts(timeout=timeout, check=False, needs_user_terminal=True),
)
if not hostname:
msg = f"Machine {machine_name} does not specify a targetHost"
raise ClanError(msg)
if res.returncode == 0:
return "Online"
time.sleep(timeout)
timeout = opts.timeout if opts and opts.timeout else 20
return "Offline"
cmd = nix_shell(
["util-linux", *(["openssh"] if hostname else [])],
[
"ssh",
*(["-i", f"{opts.keyfile}"] if opts and opts.keyfile else []),
# Disable strict host key checking
"-o",
"StrictHostKeyChecking=accept-new",
# Disable known hosts file
"-o",
"UserKnownHostsFile=/dev/null",
"-o",
f"ConnectTimeout={timeout}",
f"{hostname}",
"true",
"&> /dev/null",
],
)
try:
proc = run_no_stdout(cmd, RunOpts(needs_user_terminal=True))
if proc.returncode != 0:
return "Offline"
except ClanCmdError:
return "Offline"
else:
return "Online"
def list_command(args: argparse.Namespace) -> None:

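A minimal sketch of the reachability check shown above, assuming the (flake_url, machine_name, opts) signature from this hunk; the path and machine name are illustrative:

from clan_cli.machines.list import ConnectionOptions, check_machine_online

status = check_machine_online("/path/to/clan", "server1", ConnectionOptions(timeout=5))
print(status)  # "Online" or "Offline"
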
View File

@@ -2,8 +2,6 @@ import importlib
import json
import logging
import re
from collections.abc import Iterator
from contextlib import contextmanager
from dataclasses import dataclass, field
from functools import cached_property
from pathlib import Path
@@ -26,7 +24,7 @@ if TYPE_CHECKING:
from clan_cli.vars.generate import Generator
@dataclass(frozen=True)
@dataclass
class Machine:
name: str
flake: Flake
@@ -147,37 +145,34 @@ class Machine:
def flake_dir(self) -> Path:
return self.flake.path
@contextmanager
def target_host(self) -> Iterator[Host]:
with parse_deployment_address(
@property
def target_host(self) -> Host:
return parse_deployment_address(
self.name,
self.target_host_address,
self.host_key_check,
private_key=self.private_key,
meta={"machine": self},
) as target_host:
yield target_host
)
@contextmanager
def build_host(self) -> Iterator[Host | None]:
@property
def build_host(self) -> Host:
"""
The host where the machine is built and deployed from.
Can be the same as the target host.
"""
build_host = self.override_build_host or self.deployment.get("buildHost")
if build_host is None:
yield None
return
return self.target_host
# enable ssh agent forwarding to allow the build host to access the target host
with parse_deployment_address(
return parse_deployment_address(
self.name,
build_host,
self.host_key_check,
forward_agent=True,
private_key=self.private_key,
meta={"machine": self},
) as build_host:
yield build_host
meta={"machine": self, "target_host": self.target_host},
)
@cached_property
def deploy_as_root(self) -> bool:

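For context on the target_host/build_host accessors above, a hypothetical caller assuming the property-based variant shown in this hunk (the flake path and machine name are made up):

from clan_cli.flake import Flake
from clan_cli.machines.machines import Machine

machine = Machine(name="server1", flake=Flake("/path/to/clan"))
build_host = machine.build_host   # falls back to the target host when no buildHost is configured
build_host.run(["true"])          # run a trivial command on the build host
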
View File

@@ -5,7 +5,6 @@ import os
import re
import shlex
import sys
from contextlib import ExitStack
from clan_lib.api import API
@@ -21,13 +20,14 @@ from clan_cli.facts.generate import generate_facts
from clan_cli.facts.upload import upload_secrets
from clan_cli.flake import Flake
from clan_cli.inventory import Machine as InventoryMachine
from clan_cli.machines.list import list_machines
from clan_cli.machines.machines import Machine
from clan_cli.nix import nix_command, nix_config, nix_metadata
from clan_cli.ssh.host import Host, HostKeyCheck
from clan_cli.vars.generate import generate_vars
from clan_cli.vars.upload import upload_secret_vars
from .inventory import get_all_machines, get_selected_machines
log = logging.getLogger(__name__)
@@ -43,7 +43,8 @@ def is_local_input(node: dict[str, dict[str, str]]) -> bool:
)
def upload_sources(machine: Machine, host: Host) -> str:
def upload_sources(machine: Machine) -> str:
host = machine.build_host
env = host.nix_ssh_env(os.environ.copy())
flake_url = (
@@ -68,12 +69,7 @@ def upload_sources(machine: Machine, host: Host) -> str:
)
run(
cmd,
RunOpts(
env=env,
needs_user_terminal=True,
error_msg="failed to upload sources",
prefix=machine.name,
),
RunOpts(env=env, error_msg="failed to upload sources", prefix=machine.name),
)
return path
@@ -88,12 +84,7 @@ def upload_sources(machine: Machine, host: Host) -> str:
flake_url,
]
)
proc = run(
cmd,
RunOpts(
env=env, needs_user_terminal=True, error_msg="failed to upload sources"
),
)
proc = run(cmd, RunOpts(env=env, error_msg="failed to upload sources"))
try:
return json.loads(proc.stdout)["path"]
@@ -110,41 +101,41 @@ def update_machines(base_path: str, machines: list[InventoryMachine]) -> None:
flake = Flake(base_path)
for machine in machines:
name = machine.get("name")
# prefer the target host set via the inventory, but fall back to the one set on the machine
target_host = machine.get("deploy", {}).get("targetHost")
if not name:
msg = "Machine name is not set"
raise ClanError(msg)
m = Machine(
name,
flake=flake,
override_target_host=target_host,
)
if not machine.get("deploy", {}).get("targetHost"):
msg = f"'TargetHost' is not set for machine '{name}'"
raise ClanError(msg)
# Copy targetHost to machine
m.override_target_host = machine.get("deploy", {}).get("targetHost")
# Would be nice to have?
# m.override_build_host = machine.deploy.buildHost
group_machines.append(m)
deploy_machines(group_machines)
def deploy_machine(machine: Machine) -> None:
with ExitStack() as stack:
target_host = stack.enter_context(machine.target_host())
build_host = stack.enter_context(machine.build_host())
if machine._class_ == "darwin":
if not machine.deploy_as_root and target_host.user == "root":
msg = f"'targetHost' should be set to a non-root user for deploying to nix-darwin on machine '{machine.name}'"
raise ClanError(msg)
host = build_host or target_host
def deploy_machines(machines: list[Machine]) -> None:
"""
Deploy to all hosts in parallel
"""
def deploy(machine: Machine) -> None:
host = machine.build_host
generate_facts([machine], service=None, regenerate=False)
generate_vars([machine], generator_name=None, regenerate=False)
upload_secrets(machine, target_host)
upload_secret_vars(machine, target_host)
upload_secrets(machine)
upload_secret_vars(machine)
path = upload_sources(machine, host)
path = upload_sources(
machine=machine,
)
nix_options = [
"--show-trace",
@@ -169,7 +160,8 @@ def deploy_machine(machine: Machine) -> None:
"",
]
if build_host:
target_host: Host | None = host.meta.get("target_host")
if target_host:
become_root = False
nix_options += ["--target-host", target_host.target]
@@ -179,11 +171,11 @@ def deploy_machine(machine: Machine) -> None:
switch_cmd = [f"{machine._class_}-rebuild", "switch", *nix_options]
test_cmd = [f"{machine._class_}-rebuild", "test", *nix_options]
remote_env = host.nix_ssh_env(None, local_ssh=False)
env = host.nix_ssh_env(None)
ret = host.run(
switch_cmd,
RunOpts(check=False, msg_color=MsgColor(stderr=AnsiColor.DEFAULT)),
extra_env=remote_env,
extra_env=env,
become_root=become_root,
)
@@ -205,27 +197,24 @@ def deploy_machine(machine: Machine) -> None:
)
ret = host.run(
test_cmd if is_mobile else switch_cmd,
RunOpts(
msg_color=MsgColor(stderr=AnsiColor.DEFAULT),
needs_user_terminal=True,
),
extra_env=remote_env,
RunOpts(msg_color=MsgColor(stderr=AnsiColor.DEFAULT)),
extra_env=env,
become_root=become_root,
)
def deploy_machines(machines: list[Machine]) -> None:
"""
Deploy to all hosts in parallel
"""
with AsyncRuntime() as runtime:
for machine in machines:
if machine._class_ == "darwin":
if not machine.deploy_as_root and machine.target_host.user == "root":
msg = f"'TargetHost' should be set to a non-root user for deploying to nix-darwin on machine '{machine.name}'"
raise ClanError(msg)
machine.info(f"Updating {machine.name}")
runtime.async_run(
AsyncOpts(
tid=machine.name, async_ctx=AsyncContext(prefix=machine.name)
),
deploy_machine,
deploy,
machine,
)
runtime.join_all()
@@ -237,73 +226,61 @@ def update_command(args: argparse.Namespace) -> None:
if args.flake is None:
msg = "Could not find clan flake toplevel directory"
raise ClanError(msg)
machines: list[Machine] = []
# if no machines are passed, we will update all machines
selected_machines = (
args.machines if args.machines else list_machines(args.flake).keys()
)
if args.target_host is not None and len(args.machines) > 1:
msg = "Target host can only be set for a single machine"
raise ClanError(msg)
for machine_name in selected_machines:
machines = []
if len(args.machines) == 1 and args.target_host is not None:
machine = Machine(
name=machine_name,
flake=args.flake,
nix_options=args.option,
override_target_host=args.target_host,
override_build_host=args.build_host,
host_key_check=HostKeyCheck.from_str(args.host_key_check),
name=args.machines[0], flake=args.flake, nix_options=args.option
)
machine.override_target_host = args.target_host
machine.override_build_host = args.build_host
machine.host_key_check = HostKeyCheck.from_str(args.host_key_check)
machines.append(machine)
def filter_machine(m: Machine) -> bool:
if m.deployment.get("requireExplicitUpdate", False):
return False
elif args.target_host is not None:
print("target host can only be specified for a single machine")
exit(1)
else:
if len(args.machines) == 0:
ignored_machines = []
for machine in get_all_machines(args.flake, args.option):
if machine.deployment.get("requireExplicitUpdate", False):
continue
try:
machine.build_host # noqa: B018
except ClanError: # check if we have a build host set
ignored_machines.append(machine)
continue
machine.host_key_check = HostKeyCheck.from_str(args.host_key_check)
machine.override_build_host = args.build_host
machines.append(machine)
try:
# check if the machine has a target host set
m.target_host # noqa: B018
except ClanError:
return False
if not machines and ignored_machines != []:
print(
"WARNING: No machines to update. "
"The following defined machines were ignored because they "
"do not have the `clan.core.networking.targetHost` nixos option set:",
file=sys.stderr,
)
for machine in ignored_machines:
print(machine, file=sys.stderr)
return True
else:
machines = get_selected_machines(args.flake, args.option, args.machines)
for machine in machines:
machine.override_build_host = args.build_host
machine.host_key_check = HostKeyCheck.from_str(args.host_key_check)
machines_to_update = machines
implicit_all: bool = len(args.machines) == 0
if implicit_all:
machines_to_update = list(filter(filter_machine, machines))
# machines that are in the list but not included in the update list
ignored_machines = {m.name for m in machines if m not in machines_to_update}
if not machines_to_update and ignored_machines:
print(
"WARNING: No machines to update.\n"
"The following defined machines were ignored because they\n"
"- Require explicit update (see 'requireExplicitUpdate')\n",
"- Might not have the `clan.core.networking.targetHost` nixos option set:\n",
file=sys.stderr,
)
for m in ignored_machines:
print(m, file=sys.stderr)
if machines_to_update:
# Prepopulate the cache
config = nix_config()
system = config["system"]
machine_names = [machine.name for machine in machines_to_update]
args.flake.precache(
[
f"clanInternals.machines.{system}.{{{','.join(machine_names)}}}.config.clan.core.vars.generators.*.validationHash",
f"clanInternals.machines.{system}.{{{','.join(machine_names)}}}.config.system.clan.deployment.file",
]
)
# Run the deployment
deploy_machines(machines_to_update)
config = nix_config()
system = config["system"]
machine_names = [machine.name for machine in machines]
args.flake.precache(
[
f"clanInternals.machines.{system}.{{{','.join(machine_names)}}}.config.clan.core.vars.generators.*.validationHash",
f"clanInternals.machines.{system}.{{{','.join(machine_names)}}}.config.system.clan.deployment.file",
]
)
deploy_machines(machines)
except KeyboardInterrupt:
log.warning("Interrupted by user")
sys.exit(1)

View File

@@ -18,8 +18,7 @@ from clan_lib.api import API
from clan_cli.cmd import Log, RunOpts, run
from clan_cli.dirs import user_config_dir
from clan_cli.errors import ClanError
from clan_cli.flake import Flake
from clan_cli.nix import nix_shell
from clan_cli.nix import nix_eval, nix_shell
from .folders import sops_users_folder
@@ -197,11 +196,26 @@ def load_age_plugins(flake_dir: str | Path) -> list[str]:
msg = "Missing flake directory"
raise ClanError(msg)
flake = Flake(str(flake_dir))
result = flake.select("clanInternals.?secrets.?age.?plugins")
plugins = result["secrets"]["age"]["plugins"]
if plugins == {}:
plugins = []
cmd = nix_eval(
[
f"{flake_dir}#clanInternals.secrets.age.plugins",
"--json",
]
)
try:
result = run(cmd)
except Exception as e:
msg = f"Failed to load age plugins {flake_dir}"
raise ClanError(msg) from e
json_str = result.stdout.strip()
try:
plugins = json.loads(json_str)
except json.JSONDecodeError as e:
msg = f"Failed to decode '{json_str}': {e}"
raise ClanError(msg) from e
if isinstance(plugins, list):
return plugins

View File

@@ -87,7 +87,7 @@ def ssh_shell_from_deploy(
deploy_info: DeployInfo, runtime: AsyncRuntime, host_key_check: HostKeyCheck
) -> None:
if host := find_reachable_host(deploy_info, host_key_check):
host.interactive_ssh()
host.connect_ssh_shell(password=deploy_info.pwd)
else:
log.info("Could not reach host via clearnet 'addrs'")
log.info(f"Trying to reach host via tor '{deploy_info.tor}'")
@@ -96,7 +96,8 @@ def ssh_shell_from_deploy(
msg = "No tor address provided, please provide a tor address."
raise ClanError(msg)
if ssh_tor_reachable(TorTarget(onion=deploy_info.tor, port=22)):
host = Host(host=deploy_info.tor, password=deploy_info.pwd, tor_socks=True)
host = Host(host=deploy_info.tor)
host.connect_ssh_shell(password=deploy_info.pwd, tor_socks=True)
else:
msg = "Could not reach host via tor either."
raise ClanError(msg)

View File

@@ -5,11 +5,9 @@ import os
import shlex
import socket
import subprocess
import types
from dataclasses import dataclass, field
from pathlib import Path
from shlex import quote
from tempfile import TemporaryDirectory
from typing import Any
from clan_cli.cmd import CmdOut, RunOpts, run
@@ -31,32 +29,12 @@ class Host:
user: str | None = None
port: int | None = None
private_key: Path | None = None
password: str | None = None
forward_agent: bool = False
command_prefix: str | None = None
host_key_check: HostKeyCheck = HostKeyCheck.ASK
meta: dict[str, Any] = field(default_factory=dict)
verbose_ssh: bool = False
ssh_options: dict[str, str] = field(default_factory=dict)
tor_socks: bool = False
_temp_dir: TemporaryDirectory | None = None
def __enter__(self) -> "Host":
self._temp_dir = TemporaryDirectory(prefix="clan-ssh-")
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: types.TracebackType | None,
) -> None:
try:
if self._temp_dir:
self._temp_dir.cleanup()
except OSError:
pass
def __post_init__(self) -> None:
if not self.command_prefix:
@@ -128,9 +106,6 @@ class Host:
if extra_env is None:
extra_env = {}
if opts is None:
opts = RunOpts()
# If we are not root and we need to become root, prepend sudo
sudo = ""
if become_root and self.user != "root":
@@ -141,10 +116,11 @@ class Host:
for k, v in extra_env.items():
env_vars.append(f"{shlex.quote(k)}={shlex.quote(v)}")
if opts.prefix is None:
if opts is None:
opts = RunOpts()
else:
opts.needs_user_terminal = True
opts.prefix = self.command_prefix
# always set needs_user_terminal to True because ssh asks for passwords
opts.needs_user_terminal = True
if opts.cwd is not None:
msg = "cwd is not supported for remote commands"
@@ -182,17 +158,15 @@ class Host:
# Run the ssh command
return run(ssh_cmd, opts)
def nix_ssh_env(
self, env: dict[str, str] | None, local_ssh: bool = True
) -> dict[str, str]:
def nix_ssh_env(self, env: dict[str, str] | None) -> dict[str, str]:
if env is None:
env = {}
env["NIX_SSHOPTS"] = " ".join(self.ssh_cmd_opts(local_ssh=local_ssh))
env["NIX_SSHOPTS"] = " ".join(self.ssh_cmd_opts)
return env
@property
def ssh_cmd_opts(
self,
local_ssh: bool = True,
) -> list[str]:
ssh_opts = ["-A"] if self.forward_agent else []
if self.port:
@@ -206,40 +180,32 @@ class Host:
if self.private_key:
ssh_opts.extend(["-i", str(self.private_key)])
if local_ssh and self._temp_dir:
ssh_opts.extend(["-o", "ControlPersist=30m"])
ssh_opts.extend(
[
"-o",
f"ControlPath={Path(self._temp_dir.name) / 'clan-%h-%p-%r'}",
]
)
ssh_opts.extend(["-o", "ControlMaster=auto"])
return ssh_opts
def ssh_cmd(
self,
verbose_ssh: bool = False,
tor_socks: bool = False,
tty: bool = False,
password: str | None = None,
) -> list[str]:
packages = []
password_args = []
if self.password:
if password:
packages.append("sshpass")
password_args = [
"sshpass",
"-p",
self.password,
password,
]
ssh_opts = self.ssh_cmd_opts()
ssh_opts = self.ssh_cmd_opts
if verbose_ssh or self.verbose_ssh:
ssh_opts.extend(["-v"])
if tty:
ssh_opts.extend(["-t"])
if self.tor_socks:
if tor_socks:
packages.append("netcat")
ssh_opts.append("-o")
ssh_opts.append("ProxyCommand=nc -x 127.0.0.1:9050 -X 5 %h %p")
@@ -253,8 +219,12 @@ class Host:
return nix_shell(packages, cmd)
def interactive_ssh(self) -> None:
subprocess.run(self.ssh_cmd())
def connect_ssh_shell(
self, *, password: str | None = None, tor_socks: bool = False
) -> None:
cmd = self.ssh_cmd(tor_socks=tor_socks, password=password)
subprocess.run(cmd)
def is_ssh_reachable(host: Host) -> bool:

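A small, hedged example of the Host API as it appears in this hunk, assuming the variant where ssh_cmd takes password and tor_socks as arguments (host name and password are placeholders):

import subprocess

from clan_cli.ssh.host import Host

host = Host(host="server1.example.com", user="root")
# ssh_cmd wraps the invocation in nix_shell and prepends sshpass when a password is given.
cmd = host.ssh_cmd(password="placeholder", tor_socks=False)
subprocess.run(cmd)
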
View File

@@ -27,14 +27,6 @@ def test_root() -> Path:
return TEST_ROOT
@pytest.fixture(scope="session")
def test_lib_root() -> Path:
"""
Root directory of the clan-lib tests
"""
return PROJECT_ROOT.parent / "clan_lib" / "tests"
@pytest.fixture(scope="session")
def clan_core() -> Path:
"""

View File

@@ -1,5 +1,4 @@
import pytest
from clan_cli.flake import Flake
from clan_cli.inventory import load_inventory_json
from clan_cli.secrets.folders import sops_machines_folder
from clan_cli.tests import fixtures_flakes
@@ -25,7 +24,7 @@ def test_machine_subcommands(
]
)
inventory: dict = dict(load_inventory_json(Flake(str(test_flake_with_core.path))))
inventory: dict = dict(load_inventory_json(str(test_flake_with_core.path)))
assert "machine1" in inventory["machines"]
assert "service" not in inventory
@@ -41,7 +40,7 @@ def test_machine_subcommands(
["machines", "delete", "--flake", str(test_flake_with_core.path), "machine1"]
)
inventory_2: dict = dict(load_inventory_json(Flake(str(test_flake_with_core.path))))
inventory_2: dict = dict(load_inventory_json(str(test_flake_with_core.path)))
assert "machine1" not in inventory_2["machines"]
assert "service" not in inventory_2

View File

@@ -88,7 +88,7 @@ def test_add_module_to_inventory(
}
}
set_inventory(inventory, Flake(str(base_path)), "Add borgbackup service")
set_inventory(inventory, base_path, "Add borgbackup service")
# cmd = ["facts", "generate", "--flake", str(test_flake_with_core.path), "machine1"]
cmd = [

View File

@@ -0,0 +1,98 @@
from pathlib import Path
from typing import TYPE_CHECKING
import pytest
from clan_cli.ssh.host import Host, HostKeyCheck
from clan_cli.ssh.upload import upload
from clan_cli.tests.fixtures_flakes import ClanFlake
from clan_cli.tests.helpers import cli
if TYPE_CHECKING:
from .age_keys import KeyPair
@pytest.mark.with_core
def test_upload_single_file(
monkeypatch: pytest.MonkeyPatch,
temporary_home: Path,
hosts: list[Host],
) -> None:
host = hosts[0]
host.host_key_check = HostKeyCheck.NONE
src_file = temporary_home / "test.txt"
src_file.write_text("test")
dest_file = temporary_home / "test_dest.txt"
upload(host, src_file, dest_file)
assert dest_file.exists()
assert dest_file.read_text() == "test"
@pytest.mark.with_core
def test_secrets_upload(
monkeypatch: pytest.MonkeyPatch,
flake: ClanFlake,
hosts: list[Host],
age_keys: list["KeyPair"],
) -> None:
config = flake.machines["vm1"]
config["nixpkgs"]["hostPlatform"] = "x86_64-linux"
host = hosts[0]
addr = f"{host.user}@{host.host}:{host.port}?StrictHostKeyChecking=no&UserKnownHostsFile=/dev/null&IdentityFile={host.private_key}"
config["clan"]["networking"]["targetHost"] = addr
config["clan"]["core"]["facts"]["secretUploadDirectory"] = str(flake.path / "facts")
flake.refresh()
with monkeypatch.context():
monkeypatch.chdir(str(flake.path))
monkeypatch.setenv("SOPS_AGE_KEY", age_keys[0].privkey)
sops_dir = flake.path / "facts"
# the flake defines this path as the location where the sops key should be installed
sops_key = sops_dir / "key.txt"
sops_key2 = sops_dir / "key2.txt"
# Create old state, which should be cleaned up
sops_dir.mkdir()
sops_key.write_text("OLD STATE")
sops_key2.write_text("OLD STATE2")
cli.run(
[
"secrets",
"users",
"add",
"--flake",
str(flake.path),
"user1",
age_keys[0].pubkey,
]
)
cli.run(
[
"secrets",
"machines",
"add",
"--flake",
str(flake.path),
"vm1",
age_keys[1].pubkey,
]
)
with monkeypatch.context() as m:
m.setenv("SOPS_NIX_SECRET", age_keys[0].privkey)
cli.run(["secrets", "set", "--flake", str(flake.path), "vm1-age.key"])
flake_path = flake.path.joinpath("flake.nix")
cli.run(["facts", "upload", "--flake", str(flake_path), "vm1"])
assert sops_key.exists()
assert sops_key.read_text() == age_keys[0].privkey
assert not sops_key2.exists()

View File

@@ -117,9 +117,7 @@ def test_parse_deployment_address(
assert result.user == expected_user or (
expected_user == "" and result.user == "root"
)
for key, value in expected_options.items():
assert result.ssh_options[key] == value
assert result.ssh_options == expected_options
def test_parse_ssh_options() -> None:

View File

@@ -1,24 +0,0 @@
from pathlib import Path
import pytest
from clan_cli.ssh.host import Host, HostKeyCheck
from clan_cli.ssh.upload import upload
@pytest.mark.with_core
def test_upload_single_file(
monkeypatch: pytest.MonkeyPatch,
temporary_home: Path,
hosts: list[Host],
) -> None:
host = hosts[0]
host.host_key_check = HostKeyCheck.NONE
src_file = temporary_home / "test.txt"
src_file.write_text("test")
dest_file = temporary_home / "test_dest.txt"
upload(host, src_file, dest_file)
assert dest_file.exists()
assert dest_file.read_text() == "test"

View File

@@ -12,7 +12,7 @@ from clan_cli.tests.age_keys import SopsSetup
from clan_cli.tests.fixtures_flakes import ClanFlake
from clan_cli.tests.helpers import cli
from clan_cli.vars.check import check_vars
from clan_cli.vars.generate import Generator, generate_vars_for_machine_interactive
from clan_cli.vars.generate import Generator, generate_vars_for_machine
from clan_cli.vars.get import get_var
from clan_cli.vars.graph import all_missing_closure, requested_closure
from clan_cli.vars.list import stringify_all_vars
@@ -706,11 +706,11 @@ def test_stdout_of_generate(
flake_.refresh()
monkeypatch.chdir(flake_.path)
flake = Flake(str(flake_.path))
from clan_cli.vars.generate import generate_vars_for_machine_interactive
from clan_cli.vars.generate import generate_vars_for_machine
# with capture_output as output:
with caplog.at_level(logging.INFO):
generate_vars_for_machine_interactive(
generate_vars_for_machine(
Machine(name="my_machine", flake=flake),
"my_generator",
regenerate=False,
@@ -723,7 +723,7 @@ def test_stdout_of_generate(
set_var("my_machine", "my_generator/my_value", b"world", flake)
with caplog.at_level(logging.INFO):
generate_vars_for_machine_interactive(
generate_vars_for_machine(
Machine(name="my_machine", flake=flake),
"my_generator",
regenerate=True,
@@ -734,7 +734,7 @@ def test_stdout_of_generate(
caplog.clear()
# check the output when nothing gets regenerated
with caplog.at_level(logging.INFO):
generate_vars_for_machine_interactive(
generate_vars_for_machine(
Machine(name="my_machine", flake=flake),
"my_generator",
regenerate=True,
@@ -743,7 +743,7 @@ def test_stdout_of_generate(
assert "hello" in caplog.text
caplog.clear()
with caplog.at_level(logging.INFO):
generate_vars_for_machine_interactive(
generate_vars_for_machine(
Machine(name="my_machine", flake=flake),
"my_secret_generator",
regenerate=False,
@@ -758,7 +758,7 @@ def test_stdout_of_generate(
Flake(str(flake.path)),
)
with caplog.at_level(logging.INFO):
generate_vars_for_machine_interactive(
generate_vars_for_machine(
Machine(name="my_machine", flake=flake),
"my_secret_generator",
regenerate=True,
@@ -848,7 +848,7 @@ def test_fails_when_files_are_left_from_other_backend(
flake.refresh()
monkeypatch.chdir(flake.path)
for generator in ["my_secret_generator", "my_value_generator"]:
generate_vars_for_machine_interactive(
generate_vars_for_machine(
Machine(name="my_machine", flake=Flake(str(flake.path))),
generator,
regenerate=False,
@@ -865,13 +865,13 @@ def test_fails_when_files_are_left_from_other_backend(
# This should raise an error
if generator == "my_secret_generator":
with pytest.raises(ClanError):
generate_vars_for_machine_interactive(
generate_vars_for_machine(
Machine(name="my_machine", flake=Flake(str(flake.path))),
generator,
regenerate=False,
)
else:
generate_vars_for_machine_interactive(
generate_vars_for_machine(
Machine(name="my_machine", flake=Flake(str(flake.path))),
generator,
regenerate=False,
@@ -970,7 +970,7 @@ def test_dynamic_invalidation(
custom_nix.write_text(
"""
{ config, ... }: let
p = config.clan.core.vars.generators.my_generator.files.my_value.flakePath;
p = config.clan.core.vars.generators.my_generator.files.my_value.path;
in {
clan.core.vars.generators.dependent_generator.validation = if builtins.pathExists p then builtins.readFile p else null;
}

View File

@@ -7,7 +7,6 @@ from typing import TYPE_CHECKING
from clan_cli.errors import ClanError
from clan_cli.machines import machines
from clan_cli.ssh.host import Host
if TYPE_CHECKING:
from .generate import Generator, Var
@@ -184,5 +183,5 @@ class StoreBase(ABC):
pass
@abstractmethod
def upload(self, host: Host, phases: list[str]) -> None:
def upload(self, phases: list[str]) -> None:
pass

View File

@@ -1,7 +1,6 @@
import argparse
import logging
import os
import shutil
import sys
from dataclasses import dataclass, field
from functools import cached_property
@@ -16,13 +15,10 @@ from clan_cli.completions import (
complete_services_for_machine,
)
from clan_cli.errors import ClanError
from clan_cli.flake import Flake
from clan_cli.git import commit_files
from clan_cli.machines.inventory import get_all_machines, get_selected_machines
from clan_cli.nix import nix_config, nix_shell, nix_test_store
from clan_cli.vars._types import StoreBase
from clan_cli.vars.migration import check_can_migrate, migrate_files
from clan_lib.api import API
from .check import check_vars
from .graph import (
@@ -87,11 +83,6 @@ class Generator:
def bubblewrap_cmd(generator: str, tmpdir: Path) -> list[str]:
test_store = nix_test_store()
real_bash_path = Path("bash")
if os.environ.get("IN_NIX_SANDBOX"):
bash_executable_path = Path(str(shutil.which("bash")))
real_bash_path = bash_executable_path.resolve()
# fmt: off
return nix_shell(
[
@@ -115,8 +106,8 @@ def bubblewrap_cmd(generator: str, tmpdir: Path) -> list[str]:
"--uid", "1000",
"--gid", "1000",
"--",
str(real_bash_path), "-c", generator
]
"bash", "-c", generator
],
)
# fmt: on
@@ -157,15 +148,12 @@ def dependencies_as_dir(
) -> None:
for dep_generator, files in decrypted_dependencies.items():
dep_generator_dir = tmpdir / dep_generator
# Explicitly specify parents and exist_ok default values for clarity
dep_generator_dir.mkdir(mode=0o700, parents=False, exist_ok=False)
dep_generator_dir.mkdir()
dep_generator_dir.chmod(0o700)
for file_name, file in files.items():
file_path = dep_generator_dir / file_name
# Avoid the file creation and chmod race
# If the file already existed,
# we'd have to create a temp one and rename instead;
# however, this is a clean dir so there shouldn't be any collisions
file_path.touch(mode=0o600, exist_ok=False)
file_path.touch()
file_path.chmod(0o600)
file_path.write_bytes(file)
@@ -320,65 +308,131 @@ def get_closure(
return minimal_closure([generator_name], generators)
@API.register
def get_generators_closure(
machine_name: str,
base_dir: Path,
regenerate: bool = False,
) -> list[Generator]:
from clan_cli.machines.machines import Machine
return get_closure(
machine=Machine(name=machine_name, flake=Flake(str(base_dir))),
generator_name=None,
regenerate=regenerate,
)
def _generate_vars_for_machine(
def _migration_file_exists(
machine: "Machine",
generators: list[Generator],
all_prompt_values: dict[str, dict],
no_sandbox: bool = False,
generator: Generator,
fact_name: str,
) -> bool:
for generator in generators:
if check_can_migrate(machine, generator):
migrate_files(machine, generator)
else:
execute_generator(
machine=machine,
generator=generator,
secret_vars_store=machine.secret_vars_store,
public_vars_store=machine.public_vars_store,
prompt_values=all_prompt_values[generator.name],
no_sandbox=no_sandbox,
for file in generator.files:
if file.name == fact_name:
break
else:
msg = f"Could not find file {fact_name} in generator {generator.name}"
raise ClanError(msg)
is_secret = file.secret
if is_secret:
if machine.secret_facts_store.exists(generator.name, fact_name):
return True
machine.debug(
f"Cannot migrate fact {fact_name} for service {generator.name}, as it does not exist in the secret fact store"
)
if not is_secret:
if machine.public_facts_store.exists(generator.name, fact_name):
return True
machine.debug(
f"Cannot migrate fact {fact_name} for service {generator.name}, as it does not exist in the public fact store"
)
return False
def _migrate_file(
machine: "Machine",
generator: Generator,
var_name: str,
service_name: str,
fact_name: str,
) -> list[Path]:
for file in generator.files:
if file.name == var_name:
break
else:
msg = f"Could not find file {fact_name} in generator {generator.name}"
raise ClanError(msg)
paths = []
if file.secret:
old_value = machine.secret_facts_store.get(service_name, fact_name)
maybe_path = machine.secret_vars_store.set(
generator, file, old_value, is_migration=True
)
if maybe_path:
paths.append(maybe_path)
else:
old_value = machine.public_facts_store.get(service_name, fact_name)
maybe_path = machine.public_vars_store.set(
generator, file, old_value, is_migration=True
)
if maybe_path:
paths.append(maybe_path)
return paths
def _migrate_files(
machine: "Machine",
generator: Generator,
) -> None:
not_found = []
files_to_commit = []
for file in generator.files:
if _migration_file_exists(machine, generator, file.name):
assert generator.migrate_fact is not None
files_to_commit += _migrate_file(
machine, generator, file.name, generator.migrate_fact, file.name
)
return True
@API.register
def generate_vars_for_machine(
machine_name: str,
generators: list[str],
all_prompt_values: dict[str, dict[str, str]],
base_dir: Path,
no_sandbox: bool = False,
) -> bool:
from clan_cli.machines.machines import Machine
machine = Machine(name=machine_name, flake=Flake(str(base_dir)))
generators_set = set(generators)
generators_ = [g for g in machine.vars_generators if g.name in generators_set]
return _generate_vars_for_machine(
machine=machine,
generators=generators_,
all_prompt_values=all_prompt_values,
no_sandbox=no_sandbox,
else:
not_found.append(file.name)
if len(not_found) > 0:
msg = f"Could not migrate the following files for generator {generator.name}, as no fact or secret exists with the same name: {not_found}"
raise ClanError(msg)
commit_files(
files_to_commit,
machine.flake_dir,
f"migrated facts to vars for generator {generator.name} for machine {machine.name}",
)
def generate_vars_for_machine_interactive(
def _check_can_migrate(
machine: "Machine",
generator: Generator,
) -> bool:
service_name = generator.migrate_fact
if not service_name:
return False
# determine whether the generated files already exist in the store (all present, all missing, or a mix)
all_files_missing = True
all_files_present = True
for file in generator.files:
if file.secret:
if machine.secret_vars_store.exists(generator, file.name):
all_files_missing = False
else:
all_files_present = False
else:
if machine.public_vars_store.exists(generator, file.name):
all_files_missing = False
else:
all_files_present = False
if not all_files_present and not all_files_missing:
msg = f"Cannot migrate facts for generator {generator.name} as some files already exist in the store"
raise ClanError(msg)
if all_files_present:
# all files already migrated, no need to run migration again
return False
# ensure that all files can be migrated (exists in the corresponding fact store)
return bool(
all(
_migration_file_exists(machine, generator, file.name)
for file in generator.files
)
)
def generate_vars_for_machine(
machine: "Machine",
generator_name: str | None,
regenerate: bool,
@@ -402,18 +456,22 @@ def generate_vars_for_machine_interactive(
msg += f"Secret vars store: {sec_healtcheck_msg}"
raise ClanError(msg)
generators = get_closure(machine, generator_name, regenerate)
if len(generators) == 0:
closure = get_closure(machine, generator_name, regenerate)
if len(closure) == 0:
return False
all_prompt_values = {}
for generator in generators:
all_prompt_values[generator.name] = _ask_prompts(generator)
return _generate_vars_for_machine(
machine,
generators,
all_prompt_values,
no_sandbox=no_sandbox,
)
for generator in closure:
if _check_can_migrate(machine, generator):
_migrate_files(machine, generator)
else:
execute_generator(
machine=machine,
generator=generator,
secret_vars_store=machine.secret_vars_store,
public_vars_store=machine.public_vars_store,
prompt_values=_ask_prompts(generator),
no_sandbox=no_sandbox,
)
return True
def generate_vars(
@@ -426,7 +484,7 @@ def generate_vars(
for machine in machines:
errors = []
try:
was_regenerated |= generate_vars_for_machine_interactive(
was_regenerated |= generate_vars_for_machine(
machine, generator_name, regenerate, no_sandbox=no_sandbox
)
except Exception as exc:

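For orientation, a minimal sketch of calling the renamed entry point after this change. The machine name and flake path are placeholders, and passing no_sandbox as a keyword is an assumption based on the signature shown above, not something this diff spells out explicitly.

from clan_cli.flake import Flake
from clan_cli.machines.machines import Machine
from clan_cli.vars.generate import generate_vars_for_machine

# Illustrative call site only; "peer1" and the flake path are placeholders.
machine = Machine(name="peer1", flake=Flake("/path/to/clan"))

# Runs (or migrates from legacy facts) every pending generator for this machine,
# prompting interactively where a generator declares prompts.
generate_vars_for_machine(machine, generator_name=None, regenerate=False, no_sandbox=False)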
View File

@@ -1,19 +1,17 @@
import argparse
import logging
import os
from pathlib import Path
from clan_cli.errors import ClanError
from clan_cli.flake import Flake
from clan_cli.secrets.key import generate_key
from clan_cli.secrets.sops import maybe_get_admin_public_key
from clan_cli.secrets.users import add_user
from clan_lib.api import API
log = logging.getLogger(__name__)
@API.register
def keygen(flake_dir: Path, user: str | None = None, force: bool = False) -> None:
def keygen(user: str | None, flake: Flake, force: bool) -> None:
if user is None:
user = os.getenv("USER", None)
if not user:
@@ -24,7 +22,7 @@ def keygen(flake_dir: Path, user: str | None = None, force: bool = False) -> Non
pub_key = generate_key()
# TODO set flake_dir=flake.path / "vars"
add_user(
flake_dir=flake_dir,
flake_dir=flake.path,
name=user,
keys=[pub_key],
force=force,
@@ -35,8 +33,8 @@ def _command(
args: argparse.Namespace,
) -> None:
keygen(
flake_dir=args.flake.path,
user=args.user,
flake=args.flake,
force=args.force,
)
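A brief sketch of invoking the reworked keygen with its new Flake-based signature; the module path and the flake location below are assumptions made for illustration.

import os
from clan_cli.flake import Flake
from clan_cli.vars.keygen import keygen  # module path assumed from this diff

# Register the current $USER with a freshly generated sops key in the given flake;
# keygen itself falls back to resolving the user if None is passed.
keygen(user=os.getenv("USER"), flake=Flake("/path/to/clan"), force=False)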

View File

@@ -1,136 +0,0 @@
import logging
from pathlib import Path
from typing import TYPE_CHECKING
from clan_cli.errors import ClanError
from clan_cli.git import commit_files
log = logging.getLogger(__name__)
if TYPE_CHECKING:
from clan_cli.machines.machines import Machine
from clan_cli.vars.generate import Generator
def _migration_file_exists(
machine: "Machine",
generator: "Generator",
fact_name: str,
) -> bool:
for file in generator.files:
if file.name == fact_name:
break
else:
msg = f"Could not find file {fact_name} in generator {generator.name}"
raise ClanError(msg)
is_secret = file.secret
if is_secret:
if machine.secret_facts_store.exists(generator.name, fact_name):
return True
machine.debug(
f"Cannot migrate fact {fact_name} for service {generator.name}, as it does not exist in the secret fact store"
)
if not is_secret:
if machine.public_facts_store.exists(generator.name, fact_name):
return True
machine.debug(
f"Cannot migrate fact {fact_name} for service {generator.name}, as it does not exist in the public fact store"
)
return False
def _migrate_file(
machine: "Machine",
generator: "Generator",
var_name: str,
service_name: str,
fact_name: str,
) -> list[Path]:
for file in generator.files:
if file.name == var_name:
break
else:
msg = f"Could not find file {fact_name} in generator {generator.name}"
raise ClanError(msg)
paths = []
if file.secret:
old_value = machine.secret_facts_store.get(service_name, fact_name)
maybe_path = machine.secret_vars_store.set(
generator, file, old_value, is_migration=True
)
if maybe_path:
paths.append(maybe_path)
else:
old_value = machine.public_facts_store.get(service_name, fact_name)
maybe_path = machine.public_vars_store.set(
generator, file, old_value, is_migration=True
)
if maybe_path:
paths.append(maybe_path)
return paths
def migrate_files(
machine: "Machine",
generator: "Generator",
) -> None:
not_found = []
files_to_commit = []
for file in generator.files:
if _migration_file_exists(machine, generator, file.name):
assert generator.migrate_fact is not None
files_to_commit += _migrate_file(
machine, generator, file.name, generator.migrate_fact, file.name
)
else:
not_found.append(file.name)
if len(not_found) > 0:
msg = f"Could not migrate the following files for generator {generator.name}, as no fact or secret exists with the same name: {not_found}"
raise ClanError(msg)
commit_files(
files_to_commit,
machine.flake_dir,
f"migrated facts to vars for generator {generator.name} for machine {machine.name}",
)
def check_can_migrate(
machine: "Machine",
generator: "Generator",
) -> bool:
service_name = generator.migrate_fact
if not service_name:
return False
# ensure that none of the generated vars already exist in the store
all_files_missing = True
all_files_present = True
for file in generator.files:
if file.secret:
if machine.secret_vars_store.exists(generator, file.name):
all_files_missing = False
else:
all_files_present = False
else:
if machine.public_vars_store.exists(generator, file.name):
all_files_missing = False
else:
all_files_present = False
if not all_files_present and not all_files_missing:
msg = f"Cannot migrate facts for generator {generator.name} as some files already exist in the store"
raise ClanError(msg)
if all_files_present:
# all files already migrated, no need to run migration again
return False
# ensure that all files can be migrated (exists in the corresponding fact store)
return bool(
all(
_migration_file_exists(machine, generator, file.name)
for file in generator.files
)
)

View File

@@ -33,6 +33,7 @@ class Prompt:
description=data["description"],
prompt_type=PromptType(data["type"]),
persist=data.get("persist", data["persist"]),
previous_value=data.get("previousValue"),
)

View File

@@ -4,7 +4,6 @@ from pathlib import Path
from clan_cli.errors import ClanError
from clan_cli.machines.machines import Machine
from clan_cli.ssh.host import Host
from clan_cli.vars._types import StoreBase
from clan_cli.vars.generate import Generator, Var
@@ -73,6 +72,6 @@ class FactStore(StoreBase):
msg = "populate_dir is not implemented for public vars stores"
raise NotImplementedError(msg)
def upload(self, host: Host, phases: list[str]) -> None:
def upload(self, phases: list[str]) -> None:
msg = "upload is not implemented for public vars stores"
raise NotImplementedError(msg)

View File

@@ -6,7 +6,6 @@ from pathlib import Path
from clan_cli.dirs import vm_state_dir
from clan_cli.errors import ClanError
from clan_cli.machines.machines import Machine
from clan_cli.ssh.host import Host
from clan_cli.vars._types import StoreBase
from clan_cli.vars.generate import Generator, Var
@@ -70,6 +69,6 @@ class FactStore(StoreBase):
msg = "populate_dir is not implemented for public vars stores"
raise NotImplementedError(msg)
def upload(self, host: Host, phases: list[str]) -> None:
def upload(self, phases: list[str]) -> None:
msg = "upload is not implemented for public vars stores"
raise NotImplementedError(msg)

View File

@@ -3,7 +3,6 @@ import tempfile
from pathlib import Path
from clan_cli.machines.machines import Machine
from clan_cli.ssh.host import Host
from clan_cli.vars._types import StoreBase
from clan_cli.vars.generate import Generator, Var
@@ -46,6 +45,6 @@ class SecretStore(StoreBase):
shutil.copytree(self.dir, output_dir)
shutil.rmtree(self.dir)
def upload(self, host: Host, phases: list[str]) -> None:
def upload(self, phases: list[str]) -> None:
msg = "Cannot upload secrets with FS backend"
raise NotImplementedError(msg)

View File

@@ -10,7 +10,6 @@ from tempfile import TemporaryDirectory
from clan_cli.cmd import CmdOut, Log, RunOpts, run
from clan_cli.machines.machines import Machine
from clan_cli.nix import nix_shell
from clan_cli.ssh.host import Host
from clan_cli.ssh.upload import upload
from clan_cli.vars._types import StoreBase
from clan_cli.vars.generate import Generator, Var
@@ -147,9 +146,9 @@ class SecretStore(StoreBase):
manifest += hashes
return b"\n".join(manifest)
def needs_upload(self, host: Host) -> bool:
def needs_upload(self) -> bool:
local_hash = self.generate_hash()
remote_hash = host.run(
remote_hash = self.machine.target_host.run(
# TODO get the path to the secrets from the machine
[
"cat",
@@ -225,11 +224,11 @@ class SecretStore(StoreBase):
(output_dir / f".{self._store_backend}_info").write_bytes(self.generate_hash())
def upload(self, host: Host, phases: list[str]) -> None:
def upload(self, phases: list[str]) -> None:
if "partitioning" in phases:
msg = "Cannot upload partitioning secrets"
raise NotImplementedError(msg)
if not self.needs_upload(host):
if not self.needs_upload():
log.info("Secrets already uploaded")
return
with TemporaryDirectory(prefix="vars-upload-") as _tempdir:
@@ -238,4 +237,4 @@ class SecretStore(StoreBase):
upload_dir = Path(
self.machine.deployment["password-store"]["secretLocation"]
)
upload(host, pass_dir, upload_dir)
upload(self.machine.target_host, pass_dir, upload_dir)
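The upload-skip logic above hinges on comparing a locally computed manifest hash against one previously written to the target host. A standalone sketch of that idea, with illustrative names and paths rather than the store's actual layout:

import hashlib
from pathlib import Path

def generate_hash(manifest: bytes) -> bytes:
    # Stand-in for SecretStore.generate_hash(): hash over the secrets manifest.
    return hashlib.sha256(manifest).hexdigest().encode()

def needs_upload(local_manifest: bytes, remote_info_file: Path) -> bool:
    # Upload only if the hash recorded on the target is missing or stale.
    local_hash = generate_hash(local_manifest)
    try:
        remote_hash = remote_info_file.read_bytes().strip()
    except FileNotFoundError:
        return True
    return local_hash != remote_hash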

View File

@@ -23,7 +23,6 @@ from clan_cli.secrets.secrets import (
groups_folder,
has_secret,
)
from clan_cli.ssh.host import Host
from clan_cli.ssh.upload import upload
from clan_cli.vars._types import StoreBase
from clan_cli.vars.generate import Generator
@@ -221,15 +220,14 @@ class SecretStore(StoreBase):
target_path.write_bytes(self.get(generator, file.name))
target_path.chmod(file.mode)
@override
def upload(self, host: Host, phases: list[str]) -> None:
def upload(self, phases: list[str]) -> None:
if "partitioning" in phases:
msg = "Cannot upload partitioning secrets"
raise NotImplementedError(msg)
with TemporaryDirectory(prefix="sops-upload-") as _tempdir:
sops_upload_dir = Path(_tempdir).resolve()
self.populate_dir(sops_upload_dir, phases)
upload(host, sops_upload_dir, Path("/var/lib/sops-nix"))
upload(self.machine.target_host, sops_upload_dir, Path("/var/lib/sops-nix"))
def exists(self, generator: Generator, name: str) -> bool:
secret_folder = self.secret_path(generator, name)
@@ -262,6 +260,7 @@ class SecretStore(StoreBase):
return keys
# }
def needs_fix(self, generator: Generator, name: str) -> tuple[bool, str | None]:
secret_path = self.secret_path(generator, name)
current_recipients = sops.get_recipients(secret_path)

View File

@@ -4,7 +4,6 @@ from pathlib import Path
from clan_cli.dirs import vm_state_dir
from clan_cli.machines.machines import Machine
from clan_cli.ssh.host import Host
from clan_cli.vars._types import StoreBase
from clan_cli.vars.generate import Generator, Var
@@ -61,6 +60,6 @@ class SecretStore(StoreBase):
shutil.rmtree(output_dir)
shutil.copytree(self.dir, output_dir)
def upload(self, host: Host, phases: list[str]) -> None:
def upload(self, phases: list[str]) -> None:
msg = "Cannot upload secrets to VMs"
raise NotImplementedError(msg)

View File

@@ -4,19 +4,17 @@ from pathlib import Path
from clan_cli.completions import add_dynamic_completer, complete_machines
from clan_cli.machines.machines import Machine
from clan_cli.ssh.host import Host
log = logging.getLogger(__name__)
def upload_secret_vars(machine: Machine, host: Host) -> None:
machine.secret_vars_store.upload(host, phases=["activation", "users", "services"])
def populate_secret_vars(machine: Machine, directory: Path) -> None:
machine.secret_vars_store.populate_dir(
directory, phases=["activation", "users", "services"]
)
def upload_secret_vars(machine: Machine, directory: Path | None = None) -> None:
if directory:
machine.secret_vars_store.populate_dir(
directory, phases=["activation", "users", "services"]
)
else:
machine.secret_vars_store.upload(phases=["activation", "users", "services"])
def upload_command(args: argparse.Namespace) -> None:
@@ -24,11 +22,7 @@ def upload_command(args: argparse.Namespace) -> None:
directory = None
if args.directory:
directory = Path(args.directory)
populate_secret_vars(machine, directory)
return
with machine.target_host() as host:
upload_secret_vars(machine, host)
upload_secret_vars(machine, directory)
def register_upload_parser(parser: argparse.ArgumentParser) -> None:

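Usage sketch for the consolidated upload_secret_vars: passing a directory only populates it locally (replacing the old populate_secret_vars), while omitting it uploads to the machine's configured target host. The machine construction below is illustrative.

from pathlib import Path
from clan_cli.flake import Flake
from clan_cli.machines.machines import Machine
from clan_cli.vars.upload import upload_secret_vars

machine = Machine(name="peer1", flake=Flake("/path/to/clan"))

# Push activation/users/services secrets to the machine's target host.
upload_secret_vars(machine)

# Or stage the same secrets into a local directory (as the VM runner does).
upload_secret_vars(machine, Path("/tmp/peer1-secrets"))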
View File

@@ -22,7 +22,7 @@ from clan_cli.nix import nix_shell
from clan_cli.qemu.qga import QgaSession
from clan_cli.qemu.qmp import QEMUMonitorProtocol
from clan_cli.vars.generate import generate_vars
from clan_cli.vars.upload import populate_secret_vars
from clan_cli.vars.upload import upload_secret_vars
from .inspect import VmConfig, inspect_vm
from .qemu import qemu_command
@@ -84,7 +84,7 @@ def get_secrets(
generate_vars([machine])
machine.secret_facts_store.upload(secrets_dir)
populate_secret_vars(machine, secrets_dir)
upload_secret_vars(machine, secrets_dir)
return secrets_dir

View File

@@ -0,0 +1,37 @@
# @API.register
# def set_admin_service(
# base_url: str,
# allowed_keys: dict[str, str],
# instance_name: str = "admin",
# extra_machines: list[str] | None = None,
# ) -> None:
# """
# Set the admin service of a clan
# Every machine is by default part of the admin service via the 'all' tag
# """
# if extra_machines is None:
# extra_machines = []
# inventory = load_inventory_eval(base_url)
# if not allowed_keys:
# msg = "At least one key must be provided to ensure access"
# raise ClanError(msg)
# instance = ServiceAdmin(
# meta=ServiceMeta(name=instance_name),
# roles=ServiceAdminRole(
# default=ServiceAdminRoleDefault(
# machines=extra_machines,
# tags=["all"],
# )
# ),
# config=AdminConfig(allowedKeys=allowed_keys),
# )
# inventory.services.admin[instance_name] = instance
# save_inventory(
# inventory,
# base_url,
# f"Set admin service: '{instance_name}'",
# )

Some files were not shown because too many files have changed in this diff.