Compare commits
217 Commits
check-the-
...
pinned-cla
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2bd9141d2d | ||
|
|
f788313e97 | ||
|
|
89b70ffa6f | ||
|
|
ed1692574f | ||
|
|
1106c50924 | ||
|
|
e99e47da10 | ||
|
|
67def050fd | ||
|
|
c0d2787dee | ||
|
|
ecc327277c | ||
|
|
0064a8bfbc | ||
|
|
1e8b9def2a | ||
|
|
f0983ede5e | ||
|
|
10bc9e3e44 | ||
|
|
556fd8845e | ||
|
|
fab079af71 | ||
|
|
0370c1cf02 | ||
|
|
aa557f3a96 | ||
|
|
e8699e68b5 | ||
|
|
f8f31d430d | ||
|
|
3d345e0bca | ||
|
|
80711fcf72 | ||
|
|
35684090e3 | ||
|
|
8069b137f3 | ||
|
|
2fba6b15e8 | ||
|
|
cddee0ca86 | ||
|
|
0f3ab641d9 | ||
|
|
d5f90b2730 | ||
|
|
54335221d8 | ||
|
|
76b13476a5 | ||
|
|
b933dcf2e2 | ||
|
|
8a755fff8c | ||
|
|
5726dd1010 | ||
|
|
b306c748b8 | ||
|
|
2682581c09 | ||
|
|
a0a5827157 | ||
|
|
8638861a87 | ||
|
|
c5a28e2655 | ||
|
|
0af36d0a4d | ||
|
|
34b63ca1d5 | ||
|
|
e24a6e23ad | ||
|
|
fd7ccaca1a | ||
|
|
4251d5ee0b | ||
|
|
0a8839bcc0 | ||
|
|
cb41aaafa1 | ||
|
|
9867b6a894 | ||
|
|
7459566c2b | ||
|
|
1c08d6dd25 | ||
|
|
14f4d65c47 | ||
|
|
43159cc2f0 | ||
|
|
9d8ebfd267 | ||
|
|
1e379f6fa7 | ||
|
|
b32a7749cf | ||
|
|
153da50d6f | ||
|
|
dd3bb314fd | ||
|
|
687f26eef1 | ||
|
|
afdb08643d | ||
|
|
0946d4316e | ||
|
|
462c0764b9 | ||
|
|
a748a27ddc | ||
|
|
baf686e83f | ||
|
|
03ddce83b7 | ||
|
|
45eb73680d | ||
|
|
7d39d49b30 | ||
|
|
698a39fafb | ||
|
|
b633db4f8e | ||
|
|
7b9d18f9eb | ||
|
|
51950329a3 | ||
|
|
16256440e6 | ||
|
|
dfbb860898 | ||
|
|
444fc3f820 | ||
|
|
572ce8885f | ||
|
|
0bee027251 | ||
|
|
334367c3f7 | ||
|
|
2371a5fa78 | ||
|
|
4792d8b1e3 | ||
|
|
ace0328a14 | ||
|
|
66c2d54961 | ||
|
|
e18efdd48f | ||
|
|
8b652866c7 | ||
|
|
7129c38675 | ||
|
|
caacf65dc0 | ||
|
|
f8723ab897 | ||
|
|
b877df4c6e | ||
|
|
bf04eabc21 | ||
|
|
5149ed9318 | ||
|
|
4d84180dd9 | ||
|
|
7571fdef74 | ||
|
|
7d55511d6f | ||
|
|
cbd7157cfc | ||
|
|
25faba4795 | ||
|
|
0f0bab7976 | ||
|
|
f0e18bbdfb | ||
|
|
6d4db71ea3 | ||
|
|
b21c98db7f | ||
|
|
df3fe00b8a | ||
|
|
7371085c05 | ||
|
|
6804327bca | ||
|
|
ec76d5f8e5 | ||
|
|
864cdf33a7 | ||
|
|
e6dbccd8e5 | ||
|
|
30ac51b313 | ||
|
|
00b12c2c51 | ||
|
|
085d726217 | ||
|
|
3e65a76dfe | ||
|
|
929632049e | ||
|
|
0d1e642dfd | ||
|
|
8af68cbd9d | ||
|
|
a44fee9eab | ||
|
|
b27f34aed3 | ||
|
|
cd23c9ff41 | ||
|
|
852a673839 | ||
|
|
763a403e9f | ||
|
|
78f8a080a8 | ||
|
|
af2a00bde3 | ||
|
|
47c44ec7ec | ||
|
|
1f66e90db1 | ||
|
|
89fbf723ca | ||
|
|
548fdfb877 | ||
|
|
e44d34ef99 | ||
|
|
acc6797c22 | ||
|
|
9fbd943f44 | ||
|
|
d42d9ad943 | ||
|
|
3c16ccdb38 | ||
|
|
7b9e431da7 | ||
|
|
05388c9c7e | ||
|
|
fe36aa4161 | ||
|
|
6829c7f2dd | ||
|
|
aa2e8eef88 | ||
|
|
9e07064ec5 | ||
|
|
305a361f56 | ||
|
|
db32e99082 | ||
|
|
50ac0266f5 | ||
|
|
b01691cb64 | ||
|
|
44b237d9be | ||
|
|
f8bbd91c4a | ||
|
|
401de330f8 | ||
|
|
51da7ed5e8 | ||
|
|
e170cc2641 | ||
|
|
8434f0fc35 | ||
|
|
d6bbb42dda | ||
|
|
6539a6a24f | ||
|
|
8a849eb90f | ||
|
|
15f691d5aa | ||
|
|
82949237b7 | ||
|
|
7abb8bb662 | ||
|
|
f4d34b1326 | ||
|
|
3b5c22ebcf | ||
|
|
a2ce48f8cc | ||
|
|
f6899166c7 | ||
|
|
f5277c989a | ||
|
|
03731a2a67 | ||
|
|
091a56f57d | ||
|
|
7351f7994c | ||
|
|
5770ea036c | ||
|
|
0d537a146e | ||
|
|
c430ff6253 | ||
|
|
f3f4ebfc71 | ||
|
|
b79446f97e | ||
|
|
6d75a5596e | ||
|
|
2d97119a3b | ||
|
|
d0ff114f6b | ||
|
|
20ab5a67c1 | ||
|
|
d445a353d5 | ||
|
|
b08a2bdb75 | ||
|
|
10fd3f6e43 | ||
|
|
e8c85e3237 | ||
|
|
6aa3ec66d8 | ||
|
|
b767a4a09c | ||
|
|
b0bd209638 | ||
|
|
b187d9b3d2 | ||
|
|
83d8c3d2f3 | ||
|
|
1ce482f8f7 | ||
|
|
8803b3e0b5 | ||
|
|
9b66af37eb | ||
|
|
9186961ccb | ||
|
|
ca594bbe95 | ||
|
|
5454076df7 | ||
|
|
f8e7292bc4 | ||
|
|
2ddb38a434 | ||
|
|
a99c832ed9 | ||
|
|
12882ed68d | ||
|
|
134c545782 | ||
|
|
7889192b7c | ||
|
|
05a18baecb | ||
|
|
e6ebca8588 | ||
|
|
fcf1c683c5 | ||
|
|
db215a48b5 | ||
|
|
1df62bd2f2 | ||
|
|
ea1c8b9503 | ||
|
|
511b107511 | ||
|
|
47bcec69ab | ||
|
|
47203d849e | ||
|
|
7b4b700c33 | ||
|
|
69d394088b | ||
|
|
4c1e346cf2 | ||
|
|
be9a43c50b | ||
|
|
049d41f35c | ||
|
|
055bd1edd5 | ||
|
|
9ae44db29c | ||
|
|
17a6eda4b1 | ||
|
|
6beba157fe | ||
|
|
a14dcf4adb | ||
|
|
9bc23690a3 | ||
|
|
5b0334adda | ||
|
|
45639c0d4f | ||
|
|
dfa861428f | ||
|
|
f15cd773c5 | ||
|
|
1a24a05034 | ||
|
|
e07551cecf | ||
|
|
1f4b526e42 | ||
|
|
8a4fe1405a | ||
|
|
f7e0345ab3 | ||
|
|
11afc1faef | ||
|
|
c0964e1b22 | ||
|
|
f8c5b178a4 | ||
|
|
93090b74e5 | ||
|
|
839f8fb347 |
29
.gitea/workflows/update-clan-core-for-checks.yml
Normal file
29
.gitea/workflows/update-clan-core-for-checks.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
name: "Update pinned clan-core for checks"
|
||||
on:
|
||||
repository_dispatch:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: "51 2 * * *"
|
||||
jobs:
|
||||
update-pinned-clan-core:
|
||||
runs-on: nix
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
- name: Update clan-core for checks
|
||||
run: nix run .#update-clan-core-for-checks
|
||||
- name: Create pull request
|
||||
run: |
|
||||
git commit -am ""
|
||||
git push origin HEAD:update-clan-core-for-checks
|
||||
curl -X POST \
|
||||
-H "Authorization: token $GITEA_TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"head": "update-clan-core-branch",
|
||||
"base": "main",
|
||||
"title": "Automated Update: Clan Core",
|
||||
"body": "This PR updates the pinned clan-core for checks."
|
||||
}' \
|
||||
"${GITEA_SERVER_URL}/api/v1/repos/${GITEA_OWNER}/${GITEA_REPO}/pulls"
|
||||
7
.gitignore
vendored
7
.gitignore
vendored
@@ -14,8 +14,12 @@ example_clan
|
||||
nixos.qcow2
|
||||
**/*.glade~
|
||||
/docs/out
|
||||
/pkgs/clan-cli/clan_cli/select
|
||||
**/.local.env
|
||||
|
||||
# MacOS stuff
|
||||
**/.DS_store
|
||||
|
||||
# dream2nix
|
||||
.dream2nix
|
||||
|
||||
@@ -39,3 +43,6 @@ repo
|
||||
node_modules
|
||||
dist
|
||||
.webui
|
||||
|
||||
# TODO: remove after bug in select is fixed
|
||||
select
|
||||
|
||||
2
CODEOWNERS
Normal file
2
CODEOWNERS
Normal file
@@ -0,0 +1,2 @@
|
||||
nixosModules/clanCore/vars/.* @lopter
|
||||
pkgs/clan-cli/clan_cli/(secrets|vars)/.* @lopter
|
||||
@@ -147,25 +147,7 @@
|
||||
perSystem =
|
||||
{ pkgs, ... }:
|
||||
let
|
||||
clanCore = self.filter {
|
||||
include = [
|
||||
"checks/backups"
|
||||
"checks/flake-module.nix"
|
||||
"clanModules/borgbackup"
|
||||
"clanModules/flake-module.nix"
|
||||
"clanModules/localbackup"
|
||||
"clanModules/packages"
|
||||
"clanModules/single-disk"
|
||||
"clanModules/zerotier"
|
||||
"flake.lock"
|
||||
"flakeModules"
|
||||
"inventory.json"
|
||||
"nixosModules"
|
||||
# Just include everything in 'lib'
|
||||
# If anything changes in /lib that may affect everything
|
||||
"lib"
|
||||
];
|
||||
};
|
||||
clanCore = self.checks.x86_64-linux.clan-core-for-checks;
|
||||
in
|
||||
{
|
||||
checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
|
||||
@@ -182,11 +164,6 @@
|
||||
# import the inventory generated nixosModules
|
||||
self.clanInternals.inventoryClass.machines.test-backup.machineImports;
|
||||
clan.core.settings.directory = ./.;
|
||||
environment.systemPackages = [
|
||||
(pkgs.writeShellScriptBin "foo" ''
|
||||
echo ${clanCore}
|
||||
'')
|
||||
];
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
|
||||
6
checks/clan-core-for-checks.nix
Normal file
6
checks/clan-core-for-checks.nix
Normal file
@@ -0,0 +1,6 @@
|
||||
{ fetchgit }:
|
||||
fetchgit {
|
||||
url = "https://git.clan.lol/clan/clan-core.git";
|
||||
rev = "1e8b9def2a021877342491ca1f4c45533a580759";
|
||||
sha256 = "0f12vwr1abwa1iwjbb5z5xx8jlh80d9njwdm6iaw1z1h2m76xgzc";
|
||||
}
|
||||
122
checks/dont-depend-on-repo-root.nix
Normal file
122
checks/dont-depend-on-repo-root.nix
Normal file
@@ -0,0 +1,122 @@
|
||||
{
|
||||
...
|
||||
}:
|
||||
{
|
||||
perSystem =
|
||||
{
|
||||
system,
|
||||
pkgs,
|
||||
self',
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
clanCore = self'.packages.clan-core-flake;
|
||||
clanCoreHash = lib.substring 0 12 (builtins.hashString "sha256" "${clanCore}");
|
||||
/*
|
||||
construct a flake for the test which contains a single check which depends
|
||||
on all checks of clan-core.
|
||||
*/
|
||||
testFlakeFile = pkgs.writeText "flake.nix" ''
|
||||
{
|
||||
inputs.clan-core.url = path:///to/nowhere;
|
||||
outputs = {clan-core, ...}:
|
||||
let
|
||||
checks =
|
||||
builtins.removeAttrs
|
||||
clan-core.checks.${system}
|
||||
[
|
||||
"dont-depend-on-repo-root"
|
||||
"package-dont-depend-on-repo-root"
|
||||
"package-clan-core-flake"
|
||||
];
|
||||
checksOutPaths = map (x: "''${x}") (builtins.attrValues checks);
|
||||
in
|
||||
{
|
||||
checks.${system}.check = builtins.derivation {
|
||||
name = "all-clan-core-checks";
|
||||
system = "${system}";
|
||||
builder = "/bin/sh";
|
||||
args = ["-c" '''
|
||||
of outPath in ''${toString checksOutPaths}; do
|
||||
echo "$outPath" >> $out
|
||||
done
|
||||
'''];
|
||||
};
|
||||
};
|
||||
}
|
||||
'';
|
||||
in
|
||||
lib.optionalAttrs (system == "x86_64-linux") {
|
||||
packages.dont-depend-on-repo-root =
|
||||
pkgs.runCommand
|
||||
# append repo hash to this tests name to ensure it gets invalidated on each chain
|
||||
# This is needed because this test is an FOD (due to networking) and would get cached indefinitely.
|
||||
"check-dont-depend-on-repo-root-${clanCoreHash}"
|
||||
{
|
||||
buildInputs = [
|
||||
pkgs.nix
|
||||
pkgs.cacert
|
||||
pkgs.nix-diff
|
||||
];
|
||||
outputHashAlgo = "sha256";
|
||||
outputHash = "sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=";
|
||||
}
|
||||
''
|
||||
mkdir clanCore testFlake store
|
||||
clanCore=$(realpath clanCore)
|
||||
testFlake=$(realpath testFlake)
|
||||
|
||||
# copy clan core flake and make writable
|
||||
cp -r ${clanCore}/* clanCore/
|
||||
chmod +w -R clanCore\
|
||||
|
||||
# copy test flake and make writable
|
||||
cp ${testFlakeFile} testFlake/flake.nix
|
||||
chmod +w -R testFlake
|
||||
|
||||
# enable flakes
|
||||
export NIX_CONFIG="experimental-features = nix-command flakes"
|
||||
|
||||
# give nix a $HOME
|
||||
export HOME=$(realpath ./store)
|
||||
|
||||
# override clan-core flake input to point to $clanCore\
|
||||
echo "locking clan-core to $clanCore"
|
||||
nix flake lock --override-input clan-core "path://$clanCore" "$testFlake" --store "$HOME"
|
||||
|
||||
# evaluate all tests
|
||||
echo "evaluating all tests for clan core"
|
||||
nix eval "$testFlake"#checks.${system}.check.drvPath --store "$HOME" --raw > drvPath1 &
|
||||
|
||||
# slightly modify clan core
|
||||
cp -r $clanCore clanCore2
|
||||
cp -r $testFlake testFlake2
|
||||
export clanCore2=$(realpath clanCore2)
|
||||
export testFlake2=$(realpath testFlake2)
|
||||
touch clanCore2/fly-fpv
|
||||
|
||||
# re-evaluate all tests
|
||||
echo "locking clan-core to $clanCore2"
|
||||
nix flake lock --override-input clan-core "path://$clanCore2" "$testFlake2" --store "$HOME"
|
||||
echo "evaluating all tests for clan core with added file"
|
||||
nix eval "$testFlake2"#checks.${system}.check.drvPath --store "$HOME" --raw > drvPath2
|
||||
|
||||
# wait for first nix eval to return as well
|
||||
while ! grep -q drv drvPath1; do sleep 1; done
|
||||
|
||||
# raise error if outputs are different
|
||||
if [ "$(cat drvPath1)" != "$(cat drvPath2)" ]; then
|
||||
echo -e "\n\nERROR: Something in clan-core depends on the whole repo" > /dev/stderr
|
||||
echo -e "See details in the nix-diff below which shows the difference between two evaluations:"
|
||||
echo -e " 1. Evaluation of clan-core checks without any changes"
|
||||
echo -e " 1. Evaluation of clan-core checks after adding a file to the top-level of the repo"
|
||||
echo "nix-diff:"
|
||||
export NIX_REMOTE="$HOME"
|
||||
nix-diff $(cat drvPath1) $(cat drvPath2)
|
||||
exit 1
|
||||
fi
|
||||
touch $out
|
||||
'';
|
||||
};
|
||||
}
|
||||
@@ -26,6 +26,7 @@ clanLib.test.makeTestClan {
|
||||
roles.admin.machines = [ "admin1" ];
|
||||
};
|
||||
};
|
||||
|
||||
instances."test" = {
|
||||
module.name = "new-service";
|
||||
roles.peer.machines.peer1 = { };
|
||||
@@ -33,25 +34,33 @@ clanLib.test.makeTestClan {
|
||||
|
||||
modules = {
|
||||
legacy-module = ./legacy-module;
|
||||
new-service = {
|
||||
_class = "clan.service";
|
||||
manifest.name = "new-service";
|
||||
roles.peer = { };
|
||||
perMachine = {
|
||||
nixosModule = {
|
||||
# This should be generated by:
|
||||
# ./pkgs/scripts/update-vars.py
|
||||
clan.core.vars.generators.new-service = {
|
||||
files.hello = {
|
||||
secret = false;
|
||||
deploy = true;
|
||||
};
|
||||
script = ''
|
||||
# This is a dummy script that does nothing
|
||||
echo "This is a dummy script" > $out/hello
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
modules.new-service = {
|
||||
_class = "clan.service";
|
||||
manifest.name = "new-service";
|
||||
roles.peer = { };
|
||||
perMachine = {
|
||||
nixosModule = {
|
||||
# This should be generated by:
|
||||
# nix run .#generate-test-vars -- checks/dummy-inventory-test dummy-inventory-test
|
||||
clan.core.vars.generators.new-service = {
|
||||
files.not-a-secret = {
|
||||
secret = false;
|
||||
deploy = true;
|
||||
};
|
||||
files.a-secret = {
|
||||
secret = true;
|
||||
deploy = true;
|
||||
owner = "nobody";
|
||||
group = "users";
|
||||
mode = "0644";
|
||||
};
|
||||
script = ''
|
||||
# This is a dummy script that does nothing
|
||||
echo -n "not-a-secret" > $out/not-a-secret
|
||||
echo -n "a-secret" > $out/a-secret
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
@@ -69,7 +78,15 @@ clanLib.test.makeTestClan {
|
||||
print(peer1.succeed("systemctl status dummy-service"))
|
||||
|
||||
# peer1 should have the 'hello' file
|
||||
peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.hello.path}")
|
||||
peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}")
|
||||
|
||||
ls_out = peer1.succeed("ls -la ${nodes.peer1.clan.core.vars.generators.new-service.files.a-secret.path}")
|
||||
# Check that the file is owned by 'nobody'
|
||||
assert "nobody" in ls_out, f"File is not owned by 'nobody': {ls_out}"
|
||||
# Check that the file is in the 'users' group
|
||||
assert "users" in ls_out, f"File is not in the 'users' group: {ls_out}"
|
||||
# Check that the file is in the '0644' mode
|
||||
assert "-rw-r--r--" in ls_out, f"File is not in the '0644' mode: {ls_out}"
|
||||
'';
|
||||
}
|
||||
);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[
|
||||
{
|
||||
"publickey": "age1hd2exjq88h7538y6mvjvexx3u5gp6a03yfn5nj32h2667yyksyaqcuk5qs",
|
||||
"publickey": "age12yt078p9ewxy2sh0a36nxdpgglv8wqqftmj4dkj9rgy5fuyn4p0q5nje9m",
|
||||
"type": "age"
|
||||
}
|
||||
]
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[
|
||||
{
|
||||
"publickey": "age19urkt89q45a2wk6a4yaramzufjtnw6nq2snls0v7hmf7tqf73axsfx50tk",
|
||||
"publickey": "age12w2ld4vxfyf3hdq2d8la4cu0tye4pq97egvv3me4wary7xkdnq2snh0zx2",
|
||||
"type": "age"
|
||||
}
|
||||
]
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:hhuFgZcPqht0h3tKxGtheS4GlrVDo4TxH0a9lxgPYj2i12QUmE04rB07A+hu4Z8WNWLYvdM5069mEOZYm3lSeTzBHQPxYZRuVj0=,iv:sA1srRFQqsMlJTAjFcb09tI/Jg2WjOVJL5NZkPwiLoU=,tag:6xXo9FZpmAJw6hCBsWzf8Q==,type:str]",
|
||||
"data": "ENC[AES256_GCM,data:GPpsUhSzWPtTP8EUNKsobFXjYqDldhkkIH6hBk11RsDLAGWdhVrwcISGbhsWpYhvAdPKA84DB6Zqyh9lL2bLM9//ybC1kzY20BQ=,iv:NrxMLdedT2FCkUAD00SwsAHchIsxWvqe7BQekWuJcxw=,tag:pMDXcMyHnLF2t3Qhb1KolA==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBGaGVHeTgrN3dJQ2VITFBM\neWVzbDhjb0pwNUhBUjdUc0p5OTVta1dvSno4ClJxeUc4Z0hiaFRkVlJ1YTA4Lyta\neWdwV005WGYvMUNRVG1qOVdicTk0NUkKLS0tIFQvaDNFS1JMSFlHRXlhc3lsZm03\nYVhDaHNsam5wN1VqdzA3WTZwM1JwV2sKZk/SiZJgjllADdfHLSWuQcU4+LttDpt/\nqqDUATEuqYaALljC/y3COT+grTM2bwGjj6fsfsfiO/EL9iwzD3+7oA==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBzb2tWb1ExKzdmUTRzaGVj\nK3cyYTBHZTJwVjM1SzUvbHFiMnVhY05iKzFZCnJTSE1VSVdpcUFLSEJuaE1CZzJD\nWjZxYzN2cUltdThNMVRKU3FIb20vUXMKLS0tIFlHQXRIdnMybDZFUVEzWlQrc1dw\nbUxhZURXblhHd0pka0JIK1FTZEVqdUEKI/rfxQRBc+xGRelhswkJQ9GcZs6lzfgy\nuCxS5JI9npdPLQ/131F3b21+sP5YWqks41uZG+vslM1zQ+BlENNhDw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-04-09T15:10:16Z",
|
||||
"mac": "ENC[AES256_GCM,data:xuXj4833G6nhvcRo2ekDxz8G5phltmU8h1GgGofH9WndzrqLKeRSqm/n03IHRW0f4F68XxnyAkfvokVh6vW3LRQAFkqIlXz5U4+zFNcaVaPobS5gHTgxsCoTUoalWPvHWtXd50hUVXeAt8rPfTfeveVGja8bOERk8mvwUPxb6h4=,iv:yP1usA9m8tKl6Z/UK9PaVMJlZlF5qpY4EiM4+ByVlik=,tag:8DgoIhLstp3MRki90VfEvw==,type:str]",
|
||||
"lastmodified": "2025-05-04T12:44:13Z",
|
||||
"mac": "ENC[AES256_GCM,data:fWxLHXBWolHVxv6Q7utcy6OVLV13ziswrIYyNKiwy1vsU8i7xvvuGO1HlnE+q43D2WuHR53liKq1UHuf1JMrWzTwZ0PYe+CVugtoEtbR2qu3rK/jAkOyMyhmmHzmf6Rp4ZMCzKgZeC/X2bDKY/z0firHAvjWydEyogutHpvtznM=,iv:OQI3FfkLneqbdztAXVQB3UkHwDPK+0hWu5hZ9m8Oczg=,tag:em6GfS2QHsXs391QKPxfmA==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:rwPhbayGf6mE1E9NCN+LuL7VfWWOfhoJW6H2tNSoyebtyTpM3GO2jWca1+N7hI0juhNkUk+rIsYQYbCa/5DZQiV0/2Jgu4US1XY=,iv:B5mcaQsDjb6BacxGB4Kk88/qLCpVOjQNRvGN+fgUiEo=,tag:Uz0A8kAF5NzFetbv9yHIjQ==,type:str]",
|
||||
"data": "ENC[AES256_GCM,data:W3cOkUYL5/YulW2pEISyTlMaA/t7/WBE7BoCdFlqrqgaCL7tG4IV2HgjiPWzIVMs0zvDSaghdEvAIoB4wOf470d1nSWs0/E8SDk=,iv:wXXaZIw3sPY8L/wxsu7+C5v+d3RQRuwxZRP4YLkS8K4=,tag:HeK4okj7O7XDA9JDz2KULw==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBWY0hKQ1dnV0tMYytDMCtj\nTDV4Zk5NeVN0bCtqaWRQV3d4M0VlcGVZMkhZCm02dHZyOGVlYzJ5Z3FlUWNXMVQ0\nb2ZrTXZQRzRNdzFDeWZCVGhlTS9rMm8KLS0tIEJkY1QwOENRYWw3cjIwd3I0bzdz\nOEtQNm1saE5wNWt2UUVnYlN4NWtGdFkKmWHU5ttZoQ3NZu/zkX5VxfC2sMpSOyod\neb7LRhFqPfo5N1XphJcCqr5QUoZOfnH0xFhZ2lxWUS3ItiRpU4VDwg==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAxRC83b3dtSVpXcGovNnVs\nTzFka2J2MEFhYkF1ajVrdjMrNUtPWGRObjM4Cm5zSUR5OGw0T0FaL3BaWmR6L29W\nU2syMFIyMUhFRUZpWFpCT28vWko2ZU0KLS0tIFpHK3BjU1V1L0FrMGtwTGFuU3Mz\nRkV5VjI2Vndod202bUR3RWQwNXpmVzQKNk8/y7M62wTIIKqY4r3ZRk5aUCRUfine\n1LUSHMKa2bRe+hR7nS7AF4BGXp03h2UPY0FP5+U5q8XuIj1jfMX8kg==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-04-09T15:10:41Z",
|
||||
"mac": "ENC[AES256_GCM,data:pab0G2GPjgs59sbiZ8XIV5SdRtq5NPU0yq18FcqiMV8noAL94fyVAY7fb+9HILQWQsEjcykgk9mA2MQ0KpK/XG8+tDQKcBH+F+2aQnw5GJevXmfi7KLTU0P224SNo7EnKlfFruB/+NZ0WBtkbbg1OzekrbplchpSI6BxWz/jASE=,iv:TCj9FCxgfMF2+PJejr67zgGnF+CFS+YeJiejnHbf7j0=,tag:s7r9SqxeqpAkncohYvIQ2Q==,type:str]",
|
||||
"lastmodified": "2025-05-04T12:44:16Z",
|
||||
"mac": "ENC[AES256_GCM,data:yTkQeFvKrN1+5FP+yInsaRWSAG+ZGG0uWF3+gVRvzJTFxab8kT2XkAMc+4D7SKgcjsmwBBb77GNoAKaKByhZ92UaCfZ2X66i7ZmYUwLM1NVVmm+xiwwjsh7PJXlZO/70anTzd1evtlZse0jEmRnV5Y0F0M6YqXmuwU+qGUJU2F8=,iv:sy6ozhXonWVruaQfa7pdEoV5GkNZR/UbbINKAPbgWeg=,tag:VMruQ1KExmlMR7TsGNgMlg==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,19 +1,19 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:bxM9aYMK,iv:SMNYtk9FSyZ1PIfEzayTKKdCnZWdhcyUEiTwFUNb988=,tag:qJYW4+VQyhF1tGPQPTKlOQ==,type:str]",
|
||||
"data": "ENC[AES256_GCM,data:T8edCvw=,iv:7/G5xt5fv38I9uFzk7WMIr9xQdz/6lFxqOC+18HBg8Q=,tag:F39Cxbgmzml+lZLsZ59Kmg==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age1hd2exjq88h7538y6mvjvexx3u5gp6a03yfn5nj32h2667yyksyaqcuk5qs",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAvZDZYYXdpcXVqRFRnQ2Jx\nTFhFWEJTR290cHZhTXZadFFvcHM4MHVIN3lFCmJhOEZrL3g4TFBZVllxdDFZakJn\nR3NxdXo0eE8vTDh3QlhWOFpVZ0lNUHcKLS0tIEE4dkpCalNzaXJ0Qks3VHJSUzZF\nb2N3NGdjNHJnSUN6bW8welZ1VDdJakEKGKZ7nn1p11IyJB6DMxu2HJMvZ+0+5WpE\nPLWh2NlGJO3XrrL4Fw7xetwbqE+QUZPNl/JbEbu4KLIUGLjqk9JDhQ==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
"recipient": "age12yt078p9ewxy2sh0a36nxdpgglv8wqqftmj4dkj9rgy5fuyn4p0q5nje9m",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBPNUhiYkZWK3dPMHNiRTVM\nRHNvaHFsOFp1c0UxQitwVG0zY01MNDZRV1E4CjEybENoTVIzN29vQ3FtUTRSYmFU\nNXIzQllVSllXRGN2M1B6WXJLdHZSajgKLS0tIDllZ0ZmZUcxMHhDQUpUOEdWbmkv\neUQweHArYTdFSmNteVpuQ3BKdnh0Y0UKs8Hm3D+rXRRfpUVSZM3zYjs6b9z8g10D\nGTkvreUMim4CS22pjdQ3eNA9TGeDXfWXE7XzwXLCb+wVcf7KwbDmvg==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
},
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBHckJCQVFyb21aT1R0d2Rr\nMWxNMHVqcGxabHBmS0RibW9sN0gyZDI1b1dFCnRWUk5LSWdxV3c4RWVZdUtEN1Fv\nRk4xVmwwT2xrdWVERkJXUVVlVXJjTVUKLS0tIC9ERG9KMGxTNEsrbzFHUGRiVUlm\nRi9qakxoc1FOVVV1TkUrckwxRUVnajQKE8ms/np2NMswden3xkjdC8cXccASLOoN\nu+EaEk69UvBvnOg9VBjyPAraIKgNrTc4WWwz+DOBj1pCwVbu9XxUlA==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBKSDhpT3cvck9PenZYVEZH\ndFQreVRBdG93L1dBUGlvYjFWcDlHWUJsZUVBCm9DMTJ4UytiYzlEVHNWdUcwS1ds\nT0dhbzAzNDdmbDBCU0dvL2xNeHpXcGsKLS0tIFArbmpsbzU3WnpJdUt1MGN0L1d0\nV1JkTDJYWUxsbmhTQVNOeVRaSUhTODQKk9Vph2eldS5nwuvVX0SCsxEm4B+sO76Z\ndIjJ3OQxzoZmXMaOOuKHC5U0Y75Qn7eXC43w5KHsl2CMIUYsBGJOZw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-04-09T15:10:30Z",
|
||||
"mac": "ENC[AES256_GCM,data:cIwWctUbAFI8TRMxYWy5xqlKDVLMqBIxVv4LInnLqi3AauL0rJ3Z7AxK/wb2dCQM07E1N7YaORNqgUpFC1xo0hObAA8mrPaToPotKDkjua0zuyTUNS1COoraYjZpI/LKwmik/qtk399LMhiC7aHs+IliT9Dd41B8LSMBXwdMldY=,iv:sZ+//BrYH5Ay2JJAGs7K+WfO2ASK82syDlilQjGmgFs=,tag:nY+Af9eQRLwkiHZe85dQ9A==,type:str]",
|
||||
"lastmodified": "2025-05-04T12:44:14Z",
|
||||
"mac": "ENC[AES256_GCM,data:6fKrS1eLLUWlHkQpxLFXBRk6f2wa5ADLMViVvYXXGU24ayl9UuNSKrCRHp9cbzhqhti3HdwyNt6TM+2X6qhiiAQanKEB2PF7JRYX74NfNKil9BEDjt5AqqtpSgVv5l7Ku/uSHaPkd2sDmzHsy5Q4bSGxJQokStk1kidrwle+mbc=,iv:I/Aad82L/TCxStM8d8IZICUrwdjRbGx2fuGWqexr21o=,tag:BfgRbGUxhPZzK2fLik1kxA==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1 +1 @@
|
||||
13898
|
||||
18650
|
||||
|
||||
@@ -1,19 +1,19 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:ImlGIKxE,iv:UUWxjLNRKJCD2WHNpw8lfvCc8rnXPCqc2pni1ODckjE=,tag:HFCqiv31E9bShIIaAEjF0A==,type:str]",
|
||||
"data": "ENC[AES256_GCM,data:vp0yW0Gt,iv:FO2cy+UpEl5aRay/LUGu//c82QiVxuKuGSaVh0rGJvc=,tag:vf2RAOPpcRW0HwxHoGy17A==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age19urkt89q45a2wk6a4yaramzufjtnw6nq2snls0v7hmf7tqf73axsfx50tk",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBpTEROZjh6NjBhSlJSc1Av\nSHhjdkhwVUd3VzBZemhQb3dhMlJXalBmZlFjCkZPYkhZZGVOVTNjUWdFU0s4cWFn\nL2NXbkRCdUlMdElnK2lGbG5iV0w1cHMKLS0tIFREcmxDdHlUNVBFVGRVZSt0c0E5\nbnpHaW1Vb3R3ZFFnZVMxY3djSjJmOU0KIwqCSQf5S9oA59BXu7yC/V6yqvCh88pa\nYgmNyBjulytPh1aAfOuNWIGdIxBpcEf+gFjz3EiJY9Kft3fTmhp2bw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
"recipient": "age12w2ld4vxfyf3hdq2d8la4cu0tye4pq97egvv3me4wary7xkdnq2snh0zx2",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBjaFVNMEd2YUxpSm5XVVRi\nY2ZUc3NTOStJUFNMWWVPQTgxZ2tCK1QrMW1ZCjYwMlA4dkIzSlc0TGtvZjcyK3Bi\nM3pob2JOOFUyeVJ6M2JpaTRCZlc1R0kKLS0tIDJMb1dFcVRWckhwYWNCQng0RlFO\nTkw3OGt4dkFIZVY5aVEzZE5mMzJSM0EKUv8bUqg48L2FfYVUVlpXvyZvPye699of\nG6PcjLh1ZMbNCfnsCzr+P8Vdk/F4J/ifxL66lRGfu2xOLxwciwQ+5Q==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
},
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSArN3R4TThibjdYbE9TMDE1\naUhuNDlscExjaktIR2VmTk1OMWtVM0NpTUJZClJUNEcwVDlibExWQk84TTNEWFhp\nMjYyZStHc1N0ZTh1S3VTVk45WGxlWWMKLS0tIHFab25LY1R1d1l6NE5XbHJvQ3lj\nNGsxUldFVHQ5RVJERDlGbi9NY29hNWsKENBTcAS/R/dTGRYdaWv5Mc/YG4bkah5w\nb421ZMQF+r4CYnzUqnwivTG8TMRMqJLavfkutE6ZUfJbbLufrTk5Lw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBnZ2dDbVhoQngxM3lTSmZF\nUTAwS1lCTGhEMU1GVXpFUzlIUFdqZy9LajF3Ck9mdVpBRjlyVUNhZXZIUFZjUzF1\nNlhFN28vNmwzcUVkNmlzUnpkWjJuZE0KLS0tIHpXVHVlNk9vU1ZPTGRrYStWbmRO\nbDM4U2o1SlEwYWtqOXBqd3BFUTAvMHcKkI8UVd0v+x+ELZ5CoGq9DzlA6DnVNU2r\nrV9wLfbFd7RHxS0/TYZh5tmU42nO3iMYA9FqERQXCtZgXS9KvfqHwQ==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-04-09T15:11:04Z",
|
||||
"mac": "ENC[AES256_GCM,data:JdJzocQZWVprOmZ4Ni04k1tpD1TpFcK5neKy3+0/c3+uPBwjwaMayISKRaa/ILUXlalg60oTqxB4fUFoYVm8KGQVhDwPhO/T1hyYVQqidonrcYfJfCYg00mVSREV/AWqXb7RTnaEBfrdnRJvaAQF9g2qDXGVgzp3eACdlItclv4=,iv:nOw1jQjIWHWwU3SiKpuQgMKXyu8MZYI+zI9UYYd9fCI=,tag:ewUkemIPm/5PkmuUD0EcAQ==,type:str]",
|
||||
"lastmodified": "2025-05-04T12:44:18Z",
|
||||
"mac": "ENC[AES256_GCM,data:1ZZ+ZI1JsHmxTov1bRijfol3kTkyheg2o3ivLsMHRhCmScsUry97hQJchF78+y2Izt7avaQEHYn6pVbYt/0rLrSYD7Ru7ITVxXoYHOiN5Qb98masUzpibZjrdyg5nO+LW5/Hmmwsc3yn/+o3IH1AUYpsxlJRdnHHCmoSOFaiFFM=,iv:OQlgmpOTw4ljujNzqwQ5/0Mz8pQpCSUtqRvj3FJAxDs=,tag:foZvdeW7gK9ZVKkWqnlxGA==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1 +1 @@
|
||||
30661
|
||||
6745
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
../../../../../../sops/machines/peer1
|
||||
@@ -0,0 +1,19 @@
|
||||
{
|
||||
"data": "ENC[AES256_GCM,data:prFl0EJy8bM=,iv:zITWxf+6Ebk0iB5vhhd7SBQa1HFrIJXm8xpSM+D9I0M=,tag:NZCRMCs1SzNKLBu/KUDKMQ==,type:str]",
|
||||
"sops": {
|
||||
"age": [
|
||||
{
|
||||
"recipient": "age12w2ld4vxfyf3hdq2d8la4cu0tye4pq97egvv3me4wary7xkdnq2snh0zx2",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB0S0RZRWxaZVZvTUhjdWVL\naU9WZmtEcm1qa2JsRmdvdmZmNENMaWFEVUFRCmdoVnRXSGlpRlFjNmVVbDJ5VnFT\nMnVJUlVnM3lxNmZCRTdoRVJ4NW1oYWcKLS0tIFFNbXBFUk1RWnlUTW1SeG1vYzlM\nVVpEclFVOE9PWWQxVkZ0eEgwWndoRWcKDAOHe+FIxqGsc6LhxMy164qjwG6t2Ei2\nP0FSs+bcKMDpudxeuxCjnDm/VoLxOWeuqkB+9K2vSm2W/c/fHTSbrA==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
},
|
||||
{
|
||||
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB2VU5jOEpwYUtDVEVFcVpU\nQkExTVZ3ejZHcGo5TG8zdUQwNktoV09WdUZvCmQ0dE1TOWRFbTlxdVd4WWRxd3VF\nQUNTTkNNT3NKYjQ5dEJDY0xVZ3pZVUUKLS0tIDFjajRZNFJZUTdNeS8yN05FMFZU\ncEtjRjhRbGE0MnRLdk10NkFLMkxqencKGzJ66dHluIghH04RV/FccfEQP07yqnfb\n25Hi0XIVJfXBwje4UEyszrWTxPPwVXdQDQmoNKf76Qy2jYqJ56uksw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||
}
|
||||
],
|
||||
"lastmodified": "2025-05-04T12:44:20Z",
|
||||
"mac": "ENC[AES256_GCM,data:FIkilsni5kOdNlVwDuLsQ/zExypHRWdqIBQDNWMLTwe8OrsNPkX+KYutUvt9GaSoGv4iDULaMRoizO/OZUNfc2d8XYSdj0cxOG1Joov4GPUcC/UGyNuQneAejZBKolvlnidKZArofnuK9g+lOTANEUtEXUTnx8L+VahqPZayQas=,iv:NAo6sT3L8OOB3wv1pjr3RY2FwXgVmZ4N0F4BEX4YPUY=,tag:zHwmXygyvkdpASZCodQT9Q==,type:str]",
|
||||
"unencrypted_suffix": "_unencrypted",
|
||||
"version": "3.10.2"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1 @@
|
||||
../../../../../../sops/users/admin
|
||||
@@ -1 +0,0 @@
|
||||
This is a dummy script
|
||||
@@ -0,0 +1 @@
|
||||
not-a-secret
|
||||
@@ -14,12 +14,14 @@ in
|
||||
./installation/flake-module.nix
|
||||
./morph/flake-module.nix
|
||||
./nixos-documentation/flake-module.nix
|
||||
./dont-depend-on-repo-root.nix
|
||||
];
|
||||
perSystem =
|
||||
{
|
||||
pkgs,
|
||||
lib,
|
||||
self',
|
||||
system,
|
||||
...
|
||||
}:
|
||||
{
|
||||
@@ -54,11 +56,17 @@ in
|
||||
syncthing = import ./syncthing nixosTestArgs;
|
||||
};
|
||||
|
||||
packagesToBuild = lib.removeAttrs self'.packages [
|
||||
# exclude the check that checks that nothing depends on the repo root
|
||||
# We might want to include this later once everything is fixed
|
||||
"dont-depend-on-repo-root"
|
||||
];
|
||||
|
||||
flakeOutputs =
|
||||
lib.mapAttrs' (
|
||||
name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel
|
||||
) (lib.filterAttrs (n: _: !lib.hasPrefix "test-" n) self.nixosConfigurations)
|
||||
// lib.mapAttrs' (n: lib.nameValuePair "package-${n}") self'.packages
|
||||
// lib.mapAttrs' (n: lib.nameValuePair "package-${n}") packagesToBuild
|
||||
// lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells
|
||||
// lib.mapAttrs' (name: config: lib.nameValuePair "home-manager-${name}" config.activation-script) (
|
||||
self'.legacyPackages.homeConfigurations or { }
|
||||
@@ -76,7 +84,10 @@ in
|
||||
schema =
|
||||
(self.clanLib.inventory.evalClanService {
|
||||
modules = [ m ];
|
||||
key = "checks";
|
||||
prefix = [
|
||||
"checks"
|
||||
system
|
||||
];
|
||||
}).config.result.api.schema;
|
||||
in
|
||||
schema
|
||||
@@ -90,6 +101,12 @@ in
|
||||
mkdir -p $out
|
||||
cat $schemaFile > $out/allSchemas.json
|
||||
'';
|
||||
|
||||
clan-core-for-checks = pkgs.runCommand "clan-core-for-checks" { } ''
|
||||
cp -r ${pkgs.callPackage ./clan-core-for-checks.nix { }} $out
|
||||
chmod +w $out/flake.lock
|
||||
cp ${../flake.lock} $out/flake.lock
|
||||
'';
|
||||
};
|
||||
legacyPackages = {
|
||||
nixosTests =
|
||||
|
||||
@@ -43,6 +43,7 @@
|
||||
let
|
||||
dependencies = [
|
||||
pkgs.disko
|
||||
pkgs.buildPackages.xorg.lndir
|
||||
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.ConfigIniFiles
|
||||
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".pkgs.perlPackages.FileSlurp
|
||||
|
||||
@@ -80,7 +81,7 @@
|
||||
|
||||
# Some distros like to automount disks with spaces
|
||||
machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdb && mount /dev/vdb "/mnt/with spaces"')
|
||||
machine.succeed("clan flash write --debug --flake ${../..} --yes --disk main /dev/vdb test-flash-machine-${pkgs.hostPlatform.system}")
|
||||
machine.succeed("clan flash write --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdb test-flash-machine-${pkgs.hostPlatform.system}")
|
||||
'';
|
||||
} { inherit pkgs self; };
|
||||
};
|
||||
|
||||
@@ -8,7 +8,6 @@ let
|
||||
{ modulesPath, pkgs, ... }:
|
||||
let
|
||||
dependencies = [
|
||||
self
|
||||
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
|
||||
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
|
||||
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.clan.deployment.file
|
||||
@@ -16,6 +15,7 @@ let
|
||||
pkgs.bash.drvPath
|
||||
pkgs.nixos-anywhere
|
||||
pkgs.bubblewrap
|
||||
pkgs.buildPackages.xorg.lndir
|
||||
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
|
||||
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
|
||||
in
|
||||
@@ -198,7 +198,7 @@ in
|
||||
installer.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../assets/ssh/privkey} /root/.ssh/id_ed25519")
|
||||
|
||||
installer.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new -v nonrootuser@localhost hostname")
|
||||
installer.succeed("cp -r ${../..} test-flake && chmod -R +w test-flake")
|
||||
installer.succeed("cp -r ${self.checks.x86_64-linux.clan-core-for-checks} test-flake && chmod -R +w test-flake")
|
||||
|
||||
installer.succeed("clan machines install --no-reboot --debug --flake test-flake --yes test-install-machine-without-system --target-host nonrootuser@localhost --update-hardware-config nixos-facter >&2")
|
||||
installer.shutdown()
|
||||
@@ -218,7 +218,7 @@ in
|
||||
installer.start()
|
||||
installer.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../assets/ssh/privkey} /root/.ssh/id_ed25519")
|
||||
installer.wait_until_succeeds("timeout 2 ssh -o StrictHostKeyChecking=accept-new -v nonrootuser@localhost hostname")
|
||||
installer.succeed("cp -r ${../..} test-flake && chmod -R +w test-flake")
|
||||
installer.succeed("cp -r ${self.checks.x86_64-linux.clan-core-for-checks} test-flake && chmod -R +w test-flake")
|
||||
installer.fail("test -f test-flake/machines/test-install-machine/hardware-configuration.nix")
|
||||
installer.fail("test -f test-flake/machines/test-install-machine/facter.json")
|
||||
|
||||
|
||||
@@ -32,7 +32,6 @@
|
||||
{ pkgs, ... }:
|
||||
let
|
||||
dependencies = [
|
||||
self
|
||||
pkgs.stdenv.drvPath
|
||||
pkgs.stdenvNoCC
|
||||
self.nixosConfigurations.test-morph-machine.config.system.build.toplevel
|
||||
@@ -44,14 +43,18 @@
|
||||
{
|
||||
environment.etc."install-closure".source = "${closureInfo}/store-paths";
|
||||
system.extraDependencies = dependencies;
|
||||
|
||||
virtualisation.memorySize = 2048;
|
||||
virtualisation.useNixStoreImage = true;
|
||||
virtualisation.writableStore = true;
|
||||
|
||||
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli-full ];
|
||||
};
|
||||
};
|
||||
testScript = ''
|
||||
start_all()
|
||||
actual.fail("cat /etc/testfile")
|
||||
actual.succeed("env CLAN_DIR=${self} clan machines morph test-morph-template --i-will-be-fired-for-using-this --debug --name test-morph-machine")
|
||||
actual.succeed("env CLAN_DIR=${self.checks.x86_64-linux.clan-core-for-checks} clan machines morph test-morph-template --i-will-be-fired-for-using-this --debug --name test-morph-machine")
|
||||
assert actual.succeed("cat /etc/testfile") == "morphed"
|
||||
'';
|
||||
} { inherit pkgs self; };
|
||||
|
||||
@@ -8,5 +8,8 @@
|
||||
(modulesPath + "/profiles/minimal.nix")
|
||||
];
|
||||
|
||||
virtualisation.useNixStoreImage = true;
|
||||
virtualisation.writableStore = true;
|
||||
|
||||
clan.core.enableRecommendedDefaults = false;
|
||||
}
|
||||
|
||||
@@ -105,10 +105,7 @@ in
|
||||
private_key = {
|
||||
inherit owner;
|
||||
};
|
||||
public_key = {
|
||||
inherit owner;
|
||||
secret = false;
|
||||
};
|
||||
public_key.secret = false;
|
||||
};
|
||||
|
||||
runtimeInputs = [
|
||||
@@ -134,10 +131,7 @@ in
|
||||
private_key = {
|
||||
inherit owner;
|
||||
};
|
||||
public_key = {
|
||||
inherit owner;
|
||||
secret = false;
|
||||
};
|
||||
public_key.secret = false;
|
||||
};
|
||||
|
||||
runtimeInputs = [
|
||||
|
||||
@@ -210,14 +210,18 @@ in
|
||||
data_dir = Path('data')
|
||||
data_dir.mkdir(mode=0o770, exist_ok=True)
|
||||
|
||||
# Create a temporary config file
|
||||
# with appropriate permissions
|
||||
tmp_config_path = data_dir / '.config.json'
|
||||
tmp_config_path.touch(mode=0o660, exist_ok=False)
|
||||
|
||||
# Write the config with secrets back
|
||||
config_path = data_dir / 'config.json'
|
||||
with open(config_path, 'w') as f:
|
||||
with open(tmp_config_path, 'w') as f:
|
||||
f.write(json.dumps(config, indent=4))
|
||||
|
||||
# Set file permissions to read and write
|
||||
# only by the user and group
|
||||
config_path.chmod(0o660)
|
||||
# Move config into place
|
||||
config_path = data_dir / 'config.json'
|
||||
tmp_config_path.rename(config_path)
|
||||
|
||||
# Set file permissions to read
|
||||
# and write only by the user and group
|
||||
|
||||
@@ -7,8 +7,12 @@ features = [ "inventory" ]
|
||||
After the system was installed/deployed the following command can be used to display the root-password:
|
||||
|
||||
```bash
|
||||
clan secrets get {machine_name}-password
|
||||
clan vars get [machine_name] root-password/root-password
|
||||
```
|
||||
|
||||
See also: [Vars](../../manual/vars-backend.md)
|
||||
|
||||
See also: [Facts / Secrets](../../getting-started/secrets.md)
|
||||
To regenerate the password run:
|
||||
```
|
||||
clan vars generate --regenerate [machine_name] --generator root-password
|
||||
```
|
||||
|
||||
@@ -13,9 +13,12 @@ If setting the option prompt to true, the user will be prompted to type in their
|
||||
After the system was installed/deployed the following command can be used to display the user-password:
|
||||
|
||||
```bash
|
||||
clan secrets get {machine_name}-user-password
|
||||
clan vars get [machine_name] root-password/root-password
|
||||
```
|
||||
|
||||
See also: [Facts / Secrets](../../getting-started/secrets.md)
|
||||
See also: [Vars](../../manual/vars-backend.md)
|
||||
|
||||
To regenerate the password, delete the password files in the clan directory and redeploy the machine.
|
||||
To regenerate the password run:
|
||||
```
|
||||
clan vars generate --regenerate [machine_name] --generator user-password
|
||||
```
|
||||
|
||||
@@ -4,7 +4,15 @@
|
||||
_class = "clan.service";
|
||||
manifest.name = "clan-core/hello-word";
|
||||
|
||||
roles.peer = { };
|
||||
roles.peer = {
|
||||
interface =
|
||||
{ lib, ... }:
|
||||
{
|
||||
options.foo = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
perMachine =
|
||||
{ machine, ... }:
|
||||
|
||||
@@ -10,9 +10,6 @@ let
|
||||
};
|
||||
in
|
||||
{
|
||||
clan.inventory.modules = {
|
||||
hello-world = module;
|
||||
};
|
||||
clan.modules = {
|
||||
hello-world = module;
|
||||
};
|
||||
@@ -50,6 +47,7 @@ in
|
||||
hello-service = import ./tests/vm/default.nix {
|
||||
inherit module;
|
||||
inherit self inputs pkgs;
|
||||
# clanLib is exposed from inputs.clan-core
|
||||
clanLib = self.clanLib;
|
||||
};
|
||||
};
|
||||
|
||||
@@ -18,7 +18,7 @@ let
|
||||
};
|
||||
|
||||
# Register the module for the test
|
||||
inventory.modules.hello-world = module;
|
||||
modules.hello-world = module;
|
||||
|
||||
# Use the module in the test
|
||||
inventory.instances = {
|
||||
|
||||
@@ -14,6 +14,9 @@ clanLib.test.makeTestClan {
|
||||
|
||||
clan = {
|
||||
directory = ./.;
|
||||
modules = {
|
||||
hello-service = module;
|
||||
};
|
||||
inventory = {
|
||||
machines.peer1 = { };
|
||||
|
||||
@@ -21,10 +24,6 @@ clanLib.test.makeTestClan {
|
||||
module.name = "hello-service";
|
||||
roles.peer.machines.peer1 = { };
|
||||
};
|
||||
|
||||
modules = {
|
||||
hello-service = module;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ We might not be sure whether all of those will exist but the architecture should
|
||||
## Decision
|
||||
|
||||
This leads to the conclusion that we should do `library` centric development.
|
||||
With the current `clan` python code beeing a library that can be imported to create various tools ontop of it.
|
||||
With the current `clan` python code being a library that can be imported to create various tools on top of it.
|
||||
All **CLI** or **UI** related parts should be moved out of the main library.
|
||||
|
||||
*Note: The next person who wants to implement any new frontend should do this first. Currently it looks like the TUI is the next one.*
|
||||
|
||||
47
decisions/03-adr-numbering-process.md
Normal file
47
decisions/03-adr-numbering-process.md
Normal file
@@ -0,0 +1,47 @@
|
||||
# ADR Numbering process
|
||||
|
||||
## Status
|
||||
|
||||
Proposed after some conversation between @lassulus, @Mic92, & @lopter.
|
||||
|
||||
## Context
|
||||
|
||||
It can be useful to refer to ADRs by their numbers, rather than their full title. To that end, short and sequential numbers are useful.
|
||||
|
||||
The issue is that an ADR number is effectively assigned when the ADR is merged, before being merged its number is provisional. Because multiple ADRs can be written at the same time, you end up with multiple provisional ADRs with the same number, for example this is the third ADR-3:
|
||||
|
||||
1. ADR-3-clan-compat: see [#3212];
|
||||
2. ADR-3-fetching-nix-from-python: see [#3452];
|
||||
3. ADR-3-numbering-process: this ADR.
|
||||
|
||||
This situation makes it impossible to refer to an ADR by its number, and why I (@lopter) went with the arbitrary number 7 in [#3196].
|
||||
|
||||
We could solve this problem by using the PR number as the ADR number (@lassulus). The issue is that PR numbers are getting big in clan-core which does not make them easy to remember, or use in conversation and code (@lopter).
|
||||
|
||||
Another approach would be to move the ADRs in a different repository, this would reset the counter back to 1, and make it straightforward to keep ADR and PR numbers in sync (@lopter). The issue then is that ADR are not in context with their changes which makes them more difficult to review (@Mic92).
|
||||
|
||||
## Decision
|
||||
|
||||
A third approach would be to:
|
||||
|
||||
1. Commit ADRs before they are approved, so that the next ADR number gets assigned;
|
||||
1. Open a PR for the proposed ADR;
|
||||
1. Update the ADR file committed in step 1, so that its markdown contents point to the PR that tracks it.
|
||||
|
||||
## Consequences
|
||||
|
||||
### ADRs have unique and memorable numbers through their entire life cycle
|
||||
|
||||
This makes it easier to refer to them in conversation or in code.
|
||||
|
||||
### You need to have commit access to get an ADR number assigned
|
||||
|
||||
This makes it more difficult for someone external to the project to contribute an ADR.
|
||||
|
||||
### Creating a new ADR requires multiple commits
|
||||
|
||||
Maybe a script or CI flow could help with that if it becomes painful.
|
||||
|
||||
[#3212]: https://git.clan.lol/clan/clan-core/pulls/3212/
|
||||
[#3452]: https://git.clan.lol/clan/clan-core/pulls/3452/
|
||||
[#3196]: https://git.clan.lol/clan/clan-core/pulls/3196/
|
||||
@@ -58,7 +58,7 @@ nav:
|
||||
- Autoincludes: manual/adding-machines.md
|
||||
- Inventory:
|
||||
- Inventory: manual/inventory.md
|
||||
- Instances: manual/distributed-services.md
|
||||
- Services: manual/distributed-services.md
|
||||
- Secure Boot: manual/secure-boot.md
|
||||
- Flake-parts: manual/flake-parts.md
|
||||
- Authoring:
|
||||
|
||||
@@ -26,8 +26,7 @@ writeShellScriptBin "deploy-docs" ''
|
||||
trap "rm -rf $tmpdir" EXIT
|
||||
|
||||
if [ -n "''${SSH_HOMEPAGE_KEY-}" ]; then
|
||||
echo "$SSH_HOMEPAGE_KEY" > "$tmpdir/ssh_key"
|
||||
chmod 600 "$tmpdir/ssh_key"
|
||||
( umask 0177 && echo "$SSH_HOMEPAGE_KEY" > "$tmpdir/ssh_key" )
|
||||
sshExtraArgs="-i $tmpdir/ssh_key"
|
||||
else
|
||||
sshExtraArgs=
|
||||
|
||||
@@ -12,7 +12,7 @@ We discussed the initial architecture in [01-clan-service-modules](https://git.c
|
||||
|
||||
### A Minimal module
|
||||
|
||||
First of all we need to register our module into the `inventory.modules` attribute. Make sure to choose a unique name so the module doesn't have a name collision with any of the core modules.
|
||||
First of all we need to register our module into the `clan.modules` attribute. Make sure to choose a unique name so the module doesn't have a name collision with any of the core modules.
|
||||
|
||||
While not required, we recommend prefixing your module attribute name.
|
||||
|
||||
@@ -22,20 +22,15 @@ i.e. `@hsjobeki/customNetworking`
|
||||
|
||||
```nix title="flake.nix"
|
||||
# ...
|
||||
|
||||
outputs = inputs: flake-parts.lib.mkFlake { inherit inputs; } ({
|
||||
outputs = inputs: inputs.flake-parts.lib.mkFlake { inherit inputs; } ({
|
||||
imports = [ inputs.clan-core.flakeModules.default ];
|
||||
# ...
|
||||
clan = {
|
||||
inventory = {
|
||||
# We could also inline the complete module spec here
|
||||
# For example
|
||||
# {...}: { _class = "clan.service"; ... };
|
||||
modules."@hsjobeki/customNetworking" = import ./service-modules/networking.nix;
|
||||
};
|
||||
|
||||
# If needed: Exporting the module for other people
|
||||
modules."@hsjobeki/customNetworking" = import ./service-modules/networking.nix;
|
||||
# We could also inline the complete module spec here
|
||||
# For example
|
||||
# {...}: { _class = "clan.service"; ... };
|
||||
};
|
||||
})
|
||||
```
|
||||
@@ -221,9 +216,6 @@ outputs = inputs: flake-parts.lib.mkFlake { inherit inputs; } ({self, lib, ...}:
|
||||
# ...
|
||||
clan = {
|
||||
# Register the module
|
||||
inventory.modules."@hsjobeki/messaging" = lib.importApply ./service-modules/messaging.nix { inherit self; };
|
||||
|
||||
# Expose the module for downstream users, 'self' would always point to this flake.
|
||||
modules."@hsjobeki/messaging" = lib.importApply ./service-modules/messaging.nix { inherit self; };
|
||||
};
|
||||
})
|
||||
@@ -250,7 +242,7 @@ outputs = inputs: flake-parts.lib.mkFlake { inherit inputs; } ({self, lib, ...}:
|
||||
# ...
|
||||
clan = {
|
||||
# Register the module
|
||||
inventory.modules."@hsjobeki/messaging" = {
|
||||
modules."@hsjobeki/messaging" = {
|
||||
# Create an option 'myClan' and assign it to 'self'
|
||||
options.myClan = lib.mkOption {
|
||||
default = self;
|
||||
|
||||
@@ -32,7 +32,7 @@ VM tests should be avoided wherever it is possible to implement a cheaper unit t
|
||||
|
||||
Existing nixos vm tests in clan-core can be found by using ripgrep:
|
||||
```shellSession
|
||||
rg "import.*/lib/test-base.nix"
|
||||
rg self.clanLib.test.baseTest
|
||||
```
|
||||
|
||||
### Locating definitions of failing VM tests
|
||||
@@ -50,7 +50,7 @@ example: locating the vm test named `borgbackup`:
|
||||
```shellSession
|
||||
$ rg "borgbackup =" ./checks
|
||||
./checks/flake-module.nix
|
||||
41: borgbackup = import ./borgbackup nixosTestArgs;
|
||||
44- wayland-proxy-virtwl = self.clanLib.test.baseTest ./wayland-proxy-virtwl nixosTestArgs;
|
||||
```
|
||||
|
||||
-> the location of that test is `/checks/flake-module.nix` line `41`.
|
||||
@@ -99,15 +99,15 @@ Basically everything stated under the NixOS VM tests sections applies here, exce
|
||||
|
||||
Limitations:
|
||||
|
||||
- does not yet support networking
|
||||
- supports only one machine as of now
|
||||
|
||||
- Cannot run in interactive mode, however while the container test runs, it logs an `nsenter` command that can be used to log into each of the containers.
|
||||
- setuid binaries don't work
|
||||
|
||||
### Where to find examples for NixOS container tests
|
||||
|
||||
Existing nixos container tests in clan-core can be found by using ripgrep:
|
||||
|
||||
```shellSession
|
||||
rg "import.*/lib/container-test.nix"
|
||||
rg self.clanLib.test.containerTest
|
||||
```
|
||||
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# :material-api: Overview
|
||||
|
||||
This section of the site provides an **automatically extracted** overview of the available options and commands within the Clan Framework.
|
||||
This section of the site provides an overview of available options and commands within the Clan Framework.
|
||||
|
||||
---
|
||||
|
||||
|
||||
56
flake.lock
generated
56
flake.lock
generated
@@ -16,17 +16,15 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1745889637,
|
||||
"narHash": "sha256-+BW9rppchFYIiJldD+fZA3MS2OtPNrb8l27SC3GyoSk=",
|
||||
"ref": "refs/heads/main",
|
||||
"rev": "11b5673d9c7290a6b96c2b6c6c5be600304f310f",
|
||||
"revCount": 415,
|
||||
"type": "git",
|
||||
"url": "https://git.clan.lol/clan/data-mesher"
|
||||
"lastModified": 1747008053,
|
||||
"narHash": "sha256-rob/qftmEuk+/JVGCIrOpv+LWjdmayFtebEKqRZXVAI=",
|
||||
"rev": "2666bb11f4287cfbdf3b7c5f55231c6b5772a436",
|
||||
"type": "tarball",
|
||||
"url": "https://git.clan.lol/api/v1/repos/clan/data-mesher/archive/2666bb11f4287cfbdf3b7c5f55231c6b5772a436.tar.gz"
|
||||
},
|
||||
"original": {
|
||||
"type": "git",
|
||||
"url": "https://git.clan.lol/clan/data-mesher"
|
||||
"type": "tarball",
|
||||
"url": "https://git.clan.lol/clan/data-mesher/archive/main.tar.gz"
|
||||
}
|
||||
},
|
||||
"disko": {
|
||||
@@ -36,11 +34,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1745812220,
|
||||
"narHash": "sha256-hotBG0EJ9VmAHJYF0yhWuTVZpENHvwcJ2SxvIPrXm+g=",
|
||||
"lastModified": 1746729224,
|
||||
"narHash": "sha256-9R4sOLAK1w3Bq54H3XOJogdc7a6C2bLLmatOQ+5pf5w=",
|
||||
"owner": "nix-community",
|
||||
"repo": "disko",
|
||||
"rev": "d0c543d740fad42fe2c035b43c9d41127e073c78",
|
||||
"rev": "85555d27ded84604ad6657ecca255a03fd878607",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -76,11 +74,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1745816321,
|
||||
"narHash": "sha256-Gyh/fkCDqVNGM0BWvk+4UAS17w2UI6iwnbQQCmc1TDI=",
|
||||
"lastModified": 1746708654,
|
||||
"narHash": "sha256-GeC99gu5H6+AjBXsn5dOhP4/ApuioGCBkufdmEIWPRs=",
|
||||
"owner": "nix-darwin",
|
||||
"repo": "nix-darwin",
|
||||
"rev": "4515dacafb0ccd42e5395aacc49fd58a43027e01",
|
||||
"rev": "6cb36e8327421c61e5a3bbd08ed63491b616364a",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -93,15 +91,13 @@
|
||||
"locked": {
|
||||
"lastModified": 1745005516,
|
||||
"narHash": "sha256-IVaoOGDIvAa/8I0sdiiZuKptDldrkDWUNf/+ezIRhyc=",
|
||||
"ref": "refs/heads/main",
|
||||
"rev": "69d8bf596194c5c35a4e90dd02c52aa530caddf8",
|
||||
"revCount": 40,
|
||||
"type": "git",
|
||||
"url": "https://git.clan.lol/clan/nix-select"
|
||||
"type": "tarball",
|
||||
"url": "https://git.clan.lol/api/v1/repos/clan/nix-select/archive/69d8bf596194c5c35a4e90dd02c52aa530caddf8.tar.gz"
|
||||
},
|
||||
"original": {
|
||||
"type": "git",
|
||||
"url": "https://git.clan.lol/clan/nix-select"
|
||||
"type": "tarball",
|
||||
"url": "https://git.clan.lol/clan/nix-select/archive/main.tar.gz"
|
||||
}
|
||||
},
|
||||
"nixos-facter-modules": {
|
||||
@@ -122,10 +118,10 @@
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 315532800,
|
||||
"narHash": "sha256-+Elxpf3FLkgKfh81xrEjVolpJEn8+fKWqEJ3ZXbAbS4=",
|
||||
"rev": "29335f23bea5e34228349ea739f31ee79e267b88",
|
||||
"narHash": "sha256-EbVl0wIdDYZWrxpQoxPlXfliaR4KHA9xP5dVjG1CZxI=",
|
||||
"rev": "ed30f8aba41605e3ab46421e3dcb4510ec560ff8",
|
||||
"type": "tarball",
|
||||
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre791229.29335f23bea5/nixexprs.tar.xz"
|
||||
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre794180.ed30f8aba416/nixexprs.tar.xz"
|
||||
},
|
||||
"original": {
|
||||
"type": "tarball",
|
||||
@@ -153,11 +149,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1745310711,
|
||||
"narHash": "sha256-ePyTpKEJTgX0gvgNQWd7tQYQ3glIkbqcW778RpHlqgA=",
|
||||
"lastModified": 1746485181,
|
||||
"narHash": "sha256-PxrrSFLaC7YuItShxmYbMgSuFFuwxBB+qsl9BZUnRvg=",
|
||||
"owner": "Mic92",
|
||||
"repo": "sops-nix",
|
||||
"rev": "5e3e92b16d6fdf9923425a8d4df7496b2434f39c",
|
||||
"rev": "e93ee1d900ad264d65e9701a5c6f895683433386",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -188,11 +184,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1745929750,
|
||||
"narHash": "sha256-k5ELLpTwRP/OElcLpNaFWLNf8GRDq4/eHBmFy06gGko=",
|
||||
"lastModified": 1746989248,
|
||||
"narHash": "sha256-uoQ21EWsAhyskNo8QxrTVZGjG/dV4x5NM1oSgrmNDJY=",
|
||||
"owner": "numtide",
|
||||
"repo": "treefmt-nix",
|
||||
"rev": "82bf32e541b30080d94e46af13d46da0708609ea",
|
||||
"rev": "708ec80ca82e2bbafa93402ccb66a35ff87900c5",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
@@ -23,10 +23,10 @@
|
||||
treefmt-nix.url = "github:numtide/treefmt-nix";
|
||||
treefmt-nix.inputs.nixpkgs.follows = "nixpkgs";
|
||||
|
||||
nix-select.url = "git+https://git.clan.lol/clan/nix-select";
|
||||
nix-select.url = "https://git.clan.lol/clan/nix-select/archive/main.tar.gz";
|
||||
|
||||
data-mesher = {
|
||||
url = "git+https://git.clan.lol/clan/data-mesher";
|
||||
url = "https://git.clan.lol/clan/data-mesher/archive/main.tar.gz";
|
||||
inputs = {
|
||||
flake-parts.follows = "flake-parts";
|
||||
nixpkgs.follows = "nixpkgs";
|
||||
@@ -40,7 +40,6 @@
|
||||
inputs@{
|
||||
flake-parts,
|
||||
nixpkgs,
|
||||
self,
|
||||
systems,
|
||||
...
|
||||
}:
|
||||
|
||||
@@ -37,7 +37,7 @@ let
|
||||
done
|
||||
if ! test -e ~/clan-core; then
|
||||
# git clone https://git.clan.lol/clan/clan-core.git ~/clan-core
|
||||
cp -rv ${self} clan-core
|
||||
cp -rv ${self.checks.x86_64-linux.clan-core-for-checks} clan-core
|
||||
fi
|
||||
cd clan-core
|
||||
clan machines morph demo-template --i-will-be-fired-for-using-this
|
||||
|
||||
@@ -10,6 +10,11 @@ let
|
||||
in
|
||||
{
|
||||
options = {
|
||||
_prefix = lib.mkOption {
|
||||
type = types.listOf types.str;
|
||||
internal = true;
|
||||
default = [ ];
|
||||
};
|
||||
self = lib.mkOption {
|
||||
type = types.raw;
|
||||
default = self;
|
||||
@@ -160,7 +165,6 @@ in
|
||||
# Those options are interfaced by the CLI
|
||||
# We don't specify the type here, for better performance.
|
||||
inventory = lib.mkOption { type = lib.types.raw; };
|
||||
inventoryValuesPrios = lib.mkOption { type = lib.types.raw; };
|
||||
# all exported clan templates from this clan
|
||||
templates = lib.mkOption { type = lib.types.raw; };
|
||||
# all exported clan modules from this clan
|
||||
@@ -169,6 +173,7 @@ in
|
||||
inventoryFile = lib.mkOption { type = lib.types.raw; };
|
||||
# The machine 'imports' generated by the inventory per machine
|
||||
inventoryClass = lib.mkOption { type = lib.types.raw; };
|
||||
evalServiceSchema = lib.mkOption { };
|
||||
# clan-core's modules
|
||||
clanModules = lib.mkOption { type = lib.types.raw; };
|
||||
source = lib.mkOption { type = lib.types.raw; };
|
||||
|
||||
@@ -44,6 +44,10 @@ let
|
||||
buildInventory {
|
||||
inherit inventory directory;
|
||||
flakeInputs = config.self.inputs;
|
||||
prefix = config._prefix ++ [ "inventoryClass" ];
|
||||
# TODO: remove inventory.modules, this is here for backwards compatibility
|
||||
localModuleSet =
|
||||
lib.filterAttrs (n: _: !inventory._legacyModules ? ${n}) inventory.modules // config.modules;
|
||||
}
|
||||
);
|
||||
|
||||
@@ -177,6 +181,7 @@ in
|
||||
# Merge the meta attributes from the buildClan function
|
||||
{
|
||||
inventory.modules = clan-core.clanModules;
|
||||
inventory._legacyModules = clan-core.clanModules;
|
||||
}
|
||||
# config.inventory.meta <- config.meta
|
||||
{ inventory.meta = config.meta; }
|
||||
@@ -204,20 +209,21 @@ in
|
||||
|
||||
inherit inventoryClass;
|
||||
|
||||
# Endpoint that can be called to get a service schema
|
||||
evalServiceSchema = clan-core.clanLib.evalServiceSchema config.self;
|
||||
|
||||
# TODO: unify this interface
|
||||
# We should have only clan.modules. (consistent with clan.templates)
|
||||
inherit (clan-core) clanModules clanLib;
|
||||
modules = config.modules;
|
||||
|
||||
inherit inventoryFile;
|
||||
inventoryValuesPrios =
|
||||
# Temporary workaround
|
||||
builtins.removeAttrs (clan-core.clanLib.introspection.getPrios { options = inventory.options; })
|
||||
# tags are freeformType which is not supported yet.
|
||||
[ "tags" ];
|
||||
|
||||
templates = config.templates;
|
||||
inventory = config.inventory;
|
||||
# TODO: Remove this in about a month
|
||||
# It is only here for backwards compatibility for people with older CLI versions
|
||||
inventoryValuesPrios = inventoryClass.introspection;
|
||||
meta = config.inventory.meta;
|
||||
secrets = config.secrets;
|
||||
|
||||
|
||||
@@ -15,10 +15,27 @@ lib.fix (clanLib: {
|
||||
*/
|
||||
callLib = file: args: import file ({ inherit lib clanLib; } // args);
|
||||
|
||||
# ------------------------------------
|
||||
buildClan = clanLib.buildClanModule.buildClanWith {
|
||||
clan-core = self;
|
||||
inherit nixpkgs nix-darwin;
|
||||
};
|
||||
evalServiceSchema =
|
||||
self:
|
||||
{
|
||||
moduleSpec,
|
||||
flakeInputs ? self.inputs,
|
||||
localModuleSet ? self.clan.modules,
|
||||
}:
|
||||
let
|
||||
resolvedModule = clanLib.inventory.resolveModule {
|
||||
inherit moduleSpec flakeInputs localModuleSet;
|
||||
};
|
||||
in
|
||||
(clanLib.inventory.evalClanService {
|
||||
modules = [ resolvedModule ];
|
||||
prefix = [ ];
|
||||
}).config.result.api.schema;
|
||||
# ------------------------------------
|
||||
# ClanLib functions
|
||||
evalClan = clanLib.callLib ./inventory/eval-clan-modules { };
|
||||
|
||||
@@ -12,27 +12,38 @@ let
|
||||
inventory,
|
||||
directory,
|
||||
flakeInputs,
|
||||
prefix ? [ ],
|
||||
localModuleSet ? { },
|
||||
}:
|
||||
(lib.evalModules {
|
||||
# TODO: remove clanLib from specialArgs
|
||||
specialArgs = {
|
||||
inherit clanLib;
|
||||
};
|
||||
modules = [
|
||||
./builder
|
||||
(lib.modules.importApply ./service-list-from-inputs.nix {
|
||||
inherit flakeInputs clanLib localModuleSet;
|
||||
})
|
||||
{ inherit directory inventory; }
|
||||
(
|
||||
# config.distributedServices.allMachines.${name} or [ ];
|
||||
{ config, ... }:
|
||||
{
|
||||
|
||||
distributedServices = clanLib.inventory.mapInstances {
|
||||
inherit (config) inventory;
|
||||
inherit localModuleSet;
|
||||
inherit flakeInputs;
|
||||
prefix = prefix ++ [ "distributedServices" ];
|
||||
};
|
||||
machines = lib.mapAttrs (_machineName: v: {
|
||||
machineImports = v;
|
||||
}) config.distributedServices.allMachines;
|
||||
|
||||
}
|
||||
)
|
||||
(lib.modules.importApply ./inventory-introspection.nix { inherit clanLib; })
|
||||
];
|
||||
}).config;
|
||||
in
|
||||
|
||||
@@ -96,6 +96,12 @@ in
|
||||
./assertions.nix
|
||||
];
|
||||
options = {
|
||||
_legacyModules = lib.mkOption {
|
||||
internal = true;
|
||||
visible = false;
|
||||
default = { };
|
||||
};
|
||||
|
||||
options = lib.mkOption {
|
||||
internal = true;
|
||||
visible = false;
|
||||
@@ -138,6 +144,28 @@ in
|
||||
};
|
||||
```
|
||||
'';
|
||||
|
||||
apply =
|
||||
moduleSet:
|
||||
let
|
||||
allowedNames = lib.attrNames config._legacyModules;
|
||||
in
|
||||
if builtins.all (moduleName: builtins.elem moduleName allowedNames) (lib.attrNames moduleSet) then
|
||||
moduleSet
|
||||
else
|
||||
lib.warn ''
|
||||
`inventory.modules` will be deprecated soon.
|
||||
|
||||
Please migrate the following modules into `clan.service` modules
|
||||
and register them in `clan.modules`
|
||||
|
||||
${lib.concatStringsSep "\n" (
|
||||
map (m: "'${m}'") (lib.attrNames (lib.filterAttrs (n: _v: !builtins.elem n allowedNames) moduleSet))
|
||||
)}
|
||||
|
||||
See: https://docs.clan.lol/manual/distributed-services/
|
||||
And: https://docs.clan.lol/authoring/clanServices/
|
||||
'' moduleSet;
|
||||
};
|
||||
|
||||
assertions = lib.mkOption {
|
||||
|
||||
17
lib/inventory/build-inventory/inventory-introspection.nix
Normal file
17
lib/inventory/build-inventory/inventory-introspection.nix
Normal file
@@ -0,0 +1,17 @@
|
||||
{ clanLib }:
|
||||
{
|
||||
config,
|
||||
options,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
options.introspection = lib.mkOption {
|
||||
readOnly = true;
|
||||
# TODO: use options.inventory instead of the evaluate config attribute
|
||||
default =
|
||||
builtins.removeAttrs (clanLib.introspection.getPrios { options = config.inventory.options; })
|
||||
# tags are freeformType which is not supported yet.
|
||||
[ "tags" ];
|
||||
};
|
||||
}
|
||||
43
lib/inventory/build-inventory/service-list-from-inputs.nix
Normal file
43
lib/inventory/build-inventory/service-list-from-inputs.nix
Normal file
@@ -0,0 +1,43 @@
|
||||
{
|
||||
flakeInputs,
|
||||
clanLib,
|
||||
localModuleSet,
|
||||
}:
|
||||
{ lib, config, ... }:
|
||||
|
||||
let
|
||||
|
||||
inspectModule =
|
||||
inputName: moduleName: module:
|
||||
let
|
||||
eval = clanLib.inventory.evalClanService {
|
||||
modules = [ module ];
|
||||
prefix = [
|
||||
inputName
|
||||
"clan"
|
||||
"modules"
|
||||
moduleName
|
||||
];
|
||||
};
|
||||
in
|
||||
{
|
||||
manifest = eval.config.manifest;
|
||||
roles = lib.mapAttrs (_n: _v: { }) eval.config.roles;
|
||||
};
|
||||
in
|
||||
{
|
||||
options.modulesPerSource = lib.mkOption {
|
||||
# { sourceName :: { moduleName :: {} }}
|
||||
default =
|
||||
let
|
||||
inputsWithModules = lib.filterAttrs (_inputName: v: v ? clan.modules) flakeInputs;
|
||||
|
||||
in
|
||||
lib.mapAttrs (
|
||||
inputName: v: lib.mapAttrs (inspectModule inputName) v.clan.modules
|
||||
) inputsWithModules;
|
||||
};
|
||||
options.localModules = lib.mkOption {
|
||||
default = lib.mapAttrs (inspectModule "self") localModuleSet;
|
||||
};
|
||||
}
|
||||
@@ -3,7 +3,7 @@ let
|
||||
services = clanLib.callLib ./distributed-service/inventory-adapter.nix { };
|
||||
in
|
||||
{
|
||||
inherit (services) evalClanService mapInstances;
|
||||
inherit (services) evalClanService mapInstances resolveModule;
|
||||
inherit (import ./build-inventory { inherit lib clanLib; }) buildInventory;
|
||||
interface = ./build-inventory/interface.nix;
|
||||
# Returns the list of machine names
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# This module enables itself if
|
||||
# manifest.features.API = true
|
||||
# It converts the roles.interface to a json-schema
|
||||
{ clanLib, attrName }:
|
||||
{ clanLib, prefix }:
|
||||
let
|
||||
converter = clanLib.jsonschema {
|
||||
includeDefaults = true;
|
||||
@@ -45,7 +45,7 @@ in
|
||||
|
||||
To see the evaluation problem run
|
||||
|
||||
nix eval .#clanInternals.inventoryClass.distributedServices.importedModulesEvaluated.${attrName}.config.result.api.schema.${roleName}
|
||||
nix eval .#${lib.concatStringsSep "." prefix}.config.result.api.schema.${roleName}
|
||||
'';
|
||||
assertion = (builtins.tryEval (lib.deepSeq config.result.api.schema.${roleName} true)).success;
|
||||
};
|
||||
|
||||
@@ -16,27 +16,73 @@
|
||||
}:
|
||||
let
|
||||
evalClanService =
|
||||
{ modules, key }:
|
||||
{ modules, prefix }:
|
||||
(lib.evalModules {
|
||||
class = "clan.service";
|
||||
modules = [
|
||||
./service-module.nix
|
||||
# feature modules
|
||||
(lib.modules.importApply ./api-feature.nix {
|
||||
inherit clanLib;
|
||||
attrName = key;
|
||||
inherit clanLib prefix;
|
||||
})
|
||||
] ++ modules;
|
||||
});
|
||||
|
||||
resolveModule =
|
||||
{
|
||||
moduleSpec,
|
||||
flakeInputs,
|
||||
localModuleSet,
|
||||
}:
|
||||
let
|
||||
# TODO:
|
||||
resolvedModuleSet =
|
||||
# If the module.name is self then take the modules defined in the flake
|
||||
# Otherwise its an external input which provides the modules via 'clan.modules' attribute
|
||||
if moduleSpec.input == null then
|
||||
localModuleSet
|
||||
else
|
||||
let
|
||||
input =
|
||||
flakeInputs.${moduleSpec.input} or (throw ''
|
||||
Flake doesn't provide input with name '${moduleSpec.input}'
|
||||
|
||||
Choose one of the following inputs:
|
||||
- ${
|
||||
builtins.concatStringsSep "\n- " (
|
||||
lib.attrNames (lib.filterAttrs (_name: input: input ? clan) flakeInputs)
|
||||
)
|
||||
}
|
||||
|
||||
To import a local module from 'clan.modules' remove the 'input' attribute from the module definition
|
||||
Remove the following line from the module definition:
|
||||
|
||||
...
|
||||
- module.input = "${moduleSpec.input}"
|
||||
|
||||
'');
|
||||
clanAttrs =
|
||||
input.clan
|
||||
or (throw "It seems the flake input ${moduleSpec.input} doesn't export any clan resources");
|
||||
in
|
||||
clanAttrs.modules;
|
||||
|
||||
resolvedModule =
|
||||
resolvedModuleSet.${moduleSpec.name}
|
||||
or (throw "flake doesn't provide clan-module with name ${moduleSpec.name}");
|
||||
in
|
||||
resolvedModule;
|
||||
in
|
||||
{
|
||||
inherit evalClanService;
|
||||
inherit evalClanService resolveModule;
|
||||
mapInstances =
|
||||
{
|
||||
# This is used to resolve the module imports from 'flake.inputs'
|
||||
flakeInputs,
|
||||
# The clan inventory
|
||||
inventory,
|
||||
localModuleSet,
|
||||
prefix ? [ ],
|
||||
}:
|
||||
let
|
||||
# machineHasTag = machineName: tagName: lib.elem tagName inventory.machines.${machineName}.tags;
|
||||
@@ -45,42 +91,11 @@ in
|
||||
importedModuleWithInstances = lib.mapAttrs (
|
||||
instanceName: instance:
|
||||
let
|
||||
# TODO:
|
||||
resolvedModuleSet =
|
||||
# If the module.name is self then take the modules defined in the flake
|
||||
# Otherwise its an external input which provides the modules via 'clan.modules' attribute
|
||||
if instance.module.input == null then
|
||||
inventory.modules
|
||||
else
|
||||
let
|
||||
input =
|
||||
flakeInputs.${instance.module.input} or (throw ''
|
||||
Flake doesn't provide input with name '${instance.module.input}'
|
||||
|
||||
Choose one of the following inputs:
|
||||
- ${
|
||||
builtins.concatStringsSep "\n- " (
|
||||
lib.attrNames (lib.filterAttrs (_name: input: input ? clan) flakeInputs)
|
||||
)
|
||||
}
|
||||
|
||||
To import a local module from 'inventory.modules' remove the 'input' attribute from the module definition
|
||||
Remove the following line from the module definition:
|
||||
|
||||
...
|
||||
- module.input = "${instance.module.input}"
|
||||
|
||||
|
||||
'');
|
||||
clanAttrs =
|
||||
input.clan
|
||||
or (throw "It seems the flake input ${instance.module.input} doesn't export any clan resources");
|
||||
in
|
||||
clanAttrs.modules;
|
||||
|
||||
resolvedModule =
|
||||
resolvedModuleSet.${instance.module.name}
|
||||
or (throw "flake doesn't provide clan-module with name ${instance.module.name}");
|
||||
resolvedModule = resolveModule {
|
||||
moduleSpec = instance.module;
|
||||
inherit localModuleSet;
|
||||
inherit flakeInputs;
|
||||
};
|
||||
|
||||
# Every instance includes machines via roles
|
||||
# :: { client :: ... }
|
||||
@@ -138,7 +153,7 @@ in
|
||||
importedModulesEvaluated = lib.mapAttrs (
|
||||
module_ident: instances:
|
||||
evalClanService {
|
||||
key = module_ident;
|
||||
prefix = prefix ++ [ module_ident ];
|
||||
modules =
|
||||
[
|
||||
# Import the resolved module.
|
||||
|
||||
@@ -255,7 +255,9 @@ in
|
||||
{
|
||||
options.API = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
# This is read only, because we don't support turning it off yet
|
||||
readOnly = true;
|
||||
default = true;
|
||||
description = ''
|
||||
Enables automatic API schema conversion for the interface of this module.
|
||||
'';
|
||||
|
||||
@@ -41,9 +41,13 @@ let
|
||||
|
||||
callInventoryAdapter =
|
||||
inventoryModule:
|
||||
let
|
||||
inventory = evalInventory inventoryModule;
|
||||
in
|
||||
clanLib.inventory.mapInstances {
|
||||
flakeInputs = flakeInputsFixture;
|
||||
inventory = evalInventory inventoryModule;
|
||||
inherit inventory;
|
||||
localModuleSet = inventory.modules;
|
||||
};
|
||||
in
|
||||
{
|
||||
|
||||
@@ -92,7 +92,7 @@ in
|
||||
lib.lazyDerivation {
|
||||
# lazyDerivation improves performance when only passthru items and/or meta are used.
|
||||
derivation = hostPkgs.stdenv.mkDerivation {
|
||||
name = "vm-test-run-${config.name}";
|
||||
name = "container-test-run-${config.name}";
|
||||
|
||||
requiredSystemFeatures = [ "uid-range" ];
|
||||
|
||||
|
||||
@@ -2,9 +2,11 @@ import argparse
|
||||
import ctypes
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import time
|
||||
import types
|
||||
import uuid
|
||||
from collections.abc import Callable
|
||||
from contextlib import _GeneratorContextManager
|
||||
from dataclasses import dataclass
|
||||
@@ -13,6 +15,8 @@ from pathlib import Path
|
||||
from tempfile import TemporaryDirectory
|
||||
from typing import Any
|
||||
|
||||
from colorama import Fore, Style
|
||||
|
||||
from .logger import AbstractLogger, CompositeLogger, TerminalLogger
|
||||
|
||||
# Load the C library
|
||||
@@ -187,6 +191,22 @@ class Machine:
|
||||
if line_pattern.match(line)
|
||||
)
|
||||
|
||||
def nsenter_command(self, command: str) -> list[str]:
|
||||
return [
|
||||
"nsenter",
|
||||
"--target",
|
||||
str(self.container_pid),
|
||||
"--mount",
|
||||
"--uts",
|
||||
"--ipc",
|
||||
"--net",
|
||||
"--pid",
|
||||
"--cgroup",
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
command,
|
||||
]
|
||||
|
||||
def execute(
|
||||
self,
|
||||
command: str,
|
||||
@@ -228,23 +248,10 @@ class Machine:
|
||||
"""
|
||||
|
||||
# Always run command with shell opts
|
||||
command = f"set -eo pipefail; source /etc/profile; set -u; {command}"
|
||||
command = f"set -eo pipefail; source /etc/profile; set -xu; {command}"
|
||||
|
||||
proc = subprocess.run(
|
||||
[
|
||||
"nsenter",
|
||||
"--target",
|
||||
str(self.container_pid),
|
||||
"--mount",
|
||||
"--uts",
|
||||
"--ipc",
|
||||
"--net",
|
||||
"--pid",
|
||||
"--cgroup",
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
command,
|
||||
],
|
||||
self.nsenter_command(command),
|
||||
timeout=timeout,
|
||||
check=False,
|
||||
stdout=subprocess.PIPE,
|
||||
@@ -465,6 +472,43 @@ class Driver:
|
||||
print(f"Starting {machine.name}")
|
||||
machine.start()
|
||||
|
||||
# Print copy-pastable nsenter command to debug container tests
|
||||
for machine in self.machines:
|
||||
nspawn_uuid = uuid.uuid4()
|
||||
|
||||
# We lauch a sleep here, so we can pgrep the process cmdline for
|
||||
# the uuid
|
||||
sleep = shutil.which("sleep")
|
||||
assert sleep is not None, "sleep command not found"
|
||||
machine.execute(
|
||||
f"systemd-run /bin/sh -c '{sleep} 999999999 && echo {nspawn_uuid}'",
|
||||
)
|
||||
|
||||
print(f"nsenter for {machine.name}:")
|
||||
print(
|
||||
" ".join(
|
||||
[
|
||||
Style.BRIGHT,
|
||||
Fore.CYAN,
|
||||
"sudo",
|
||||
"nsenter",
|
||||
"--user",
|
||||
"--target",
|
||||
f"$(\\pgrep -f '^/bin/sh.*{nspawn_uuid}')",
|
||||
"--mount",
|
||||
"--uts",
|
||||
"--ipc",
|
||||
"--net",
|
||||
"--pid",
|
||||
"--cgroup",
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
"bash",
|
||||
Style.RESET_ALL,
|
||||
]
|
||||
)
|
||||
)
|
||||
|
||||
def test_symbols(self) -> dict[str, Any]:
|
||||
general_symbols = {
|
||||
"start_all": self.start_all,
|
||||
|
||||
@@ -22,6 +22,9 @@ in
|
||||
pkgs,
|
||||
self,
|
||||
useContainers ? true,
|
||||
# Displayed for better error messages, otherwise the placeholder
|
||||
system ? "<system>",
|
||||
attrName ? "<check_name>",
|
||||
...
|
||||
}:
|
||||
let
|
||||
@@ -35,7 +38,7 @@ in
|
||||
{
|
||||
imports = [
|
||||
nixosTest
|
||||
] ++ lib.optionals (useContainers) [ ./container-test-driver/driver-module.nix ];
|
||||
] ++ lib.optionals useContainers [ ./container-test-driver/driver-module.nix ];
|
||||
options = {
|
||||
clanSettings = mkOption {
|
||||
default = { };
|
||||
@@ -60,6 +63,15 @@ in
|
||||
};
|
||||
modules = [
|
||||
clanLib.buildClanModule.flakePartsModule
|
||||
{
|
||||
_prefix = [
|
||||
"checks"
|
||||
system
|
||||
attrName
|
||||
"config"
|
||||
"clan"
|
||||
];
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
21
module.nix
Normal file
21
module.nix
Normal file
@@ -0,0 +1,21 @@
|
||||
{ lib, ... }:
|
||||
{
|
||||
_class = "clan.service";
|
||||
manifest.name = "test";
|
||||
|
||||
roles.peer.interface =
|
||||
{ ... }:
|
||||
{
|
||||
options.debug = lib.mkOption { default = 1; };
|
||||
};
|
||||
|
||||
roles.peer.perInstance =
|
||||
{ settings, ... }:
|
||||
{
|
||||
nixosModule = {
|
||||
options.debug = lib.mkOption {
|
||||
default = settings;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -39,9 +39,44 @@ in
|
||||
type = submodule { imports = [ ./interface.nix ]; };
|
||||
};
|
||||
|
||||
config.system.clan.deployment.data = {
|
||||
vars = config.clan.core.vars._serialized;
|
||||
inherit (config.clan.core.networking) targetHost buildHost;
|
||||
inherit (config.clan.core.deployment) requireExplicitUpdate;
|
||||
config = {
|
||||
# check all that all non-secret files have no owner/group/mode set
|
||||
warnings = lib.foldl' (
|
||||
warnings: generator:
|
||||
warnings
|
||||
++ lib.foldl' (
|
||||
warnings: file:
|
||||
warnings
|
||||
++
|
||||
lib.optional
|
||||
(
|
||||
!file.secret
|
||||
&& (
|
||||
file.owner != "root"
|
||||
|| file.group != (if _class == "darwin" then "wheel" else "root")
|
||||
|| file.mode != "0400"
|
||||
)
|
||||
)
|
||||
''
|
||||
The config.clan.core.vars.generators.${generator.name}.files.${file.name} is not secret:
|
||||
${lib.optionalString (file.owner != "root") ''
|
||||
The owner is set to ${file.owner}, but should be root.
|
||||
''}
|
||||
${lib.optionalString (file.group != (if _class == "darwin" then "wheel" else "root")) ''
|
||||
The group is set to ${file.group}, but should be ${if _class == "darwin" then "wheel" else "root"}.
|
||||
''}
|
||||
${lib.optionalString (file.mode != "0400") ''
|
||||
The mode is set to ${file.mode}, but should be 0400.
|
||||
''}
|
||||
This doesn't work because the file will be added to the nix store
|
||||
''
|
||||
) [ ] (lib.attrValues generator.files)
|
||||
) [ ] (lib.attrValues config.clan.core.vars.generators);
|
||||
|
||||
system.clan.deployment.data = {
|
||||
vars = config.clan.core.vars._serialized;
|
||||
inherit (config.clan.core.networking) targetHost buildHost;
|
||||
inherit (config.clan.core.deployment) requireExplicitUpdate;
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
@@ -39,7 +39,7 @@ in
|
||||
internal = true;
|
||||
description = ''
|
||||
JSON serialization of the generators.
|
||||
This is read from the python client to generate the specified ressources.
|
||||
This is read from the python client to generate the specified resources.
|
||||
'';
|
||||
default = {
|
||||
# TODO: We don't support per-machine choice of backends
|
||||
@@ -241,12 +241,30 @@ in
|
||||
type = bool;
|
||||
default = true;
|
||||
};
|
||||
flakePath = lib.mkOption {
|
||||
description = ''
|
||||
The path to the file containing the content of the generated value.
|
||||
This will be set automatically
|
||||
'';
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
};
|
||||
path = lib.mkOption {
|
||||
description = ''
|
||||
The path to the file containing the content of the generated value.
|
||||
This will be set automatically
|
||||
'';
|
||||
type = str;
|
||||
defaultText = ''
|
||||
builtins.path {
|
||||
name = "$${generator.config._module.args.name}_$${file.config._module.args.name}";
|
||||
path = file.config.flakePath;
|
||||
}
|
||||
'';
|
||||
default = builtins.path {
|
||||
name = "${generator.config._module.args.name}_${file.config._module.args.name}";
|
||||
path = file.config.flakePath;
|
||||
};
|
||||
};
|
||||
neededFor = lib.mkOption {
|
||||
description = ''
|
||||
|
||||
@@ -11,7 +11,7 @@ in
|
||||
config.clan.core.vars.settings = mkIf (config.clan.core.vars.settings.publicStore == "in_repo") {
|
||||
publicModule = "clan_cli.vars.public_modules.in_repo";
|
||||
fileModule = file: {
|
||||
path = mkIf (file.config.secret == false) (
|
||||
flakePath = mkIf (file.config.secret == false) (
|
||||
if file.config.share then
|
||||
(
|
||||
config.clan.core.settings.directory
|
||||
@@ -25,9 +25,9 @@ in
|
||||
);
|
||||
value = mkIf (file.config.secret == false) (
|
||||
# dynamically adjust priority to allow overriding with mkDefault in case the file is not found
|
||||
if (pathExists file.config.path) then
|
||||
if (pathExists file.config.flakePath) then
|
||||
# if the file is found it should have normal priority
|
||||
readFile file.config.path
|
||||
readFile file.config.flakePath
|
||||
else
|
||||
# if the file is not found, we want to downgrade the priority, to allow overriding via mkDefault
|
||||
mkOptionDefault (
|
||||
|
||||
@@ -18,7 +18,7 @@ let
|
||||
config.clan.core.settings.directory
|
||||
+ "/vars/per-machine/${machineName}/${secret.generator}/${secret.name}/secret";
|
||||
|
||||
vars = collectFiles config.clan.core.vars;
|
||||
vars = collectFiles config.clan.core.vars.generators;
|
||||
in
|
||||
{
|
||||
config.clan.core.vars.settings = lib.mkIf (config.clan.core.vars.settings.secretStore == "sops") {
|
||||
@@ -49,7 +49,10 @@ in
|
||||
mode
|
||||
neededForUsers
|
||||
;
|
||||
sopsFile = secretPath secret;
|
||||
sopsFile = builtins.path {
|
||||
name = "${secret.generator}_${secret.name}";
|
||||
path = secretPath secret;
|
||||
};
|
||||
format = "binary";
|
||||
};
|
||||
}) (builtins.filter (x: builtins.pathExists (secretPath x)) vars)
|
||||
|
||||
@@ -13,7 +13,7 @@ in
|
||||
{
|
||||
|
||||
collectFiles =
|
||||
vars:
|
||||
generators:
|
||||
let
|
||||
relevantFiles =
|
||||
generator:
|
||||
@@ -30,7 +30,7 @@ in
|
||||
inherit (generator) share;
|
||||
inherit (file) owner group mode;
|
||||
}) (relevantFiles generator)
|
||||
) vars.generators
|
||||
) generators
|
||||
);
|
||||
in
|
||||
allFiles;
|
||||
|
||||
@@ -29,6 +29,8 @@ mkShell {
|
||||
export GIT_ROOT=$(git rev-parse --show-toplevel)
|
||||
export PKG_ROOT=$GIT_ROOT/pkgs/clan-app
|
||||
|
||||
export CLAN_CORE_PATH="$GIT_ROOT"
|
||||
|
||||
# Add current package to PYTHONPATH
|
||||
export PYTHONPATH="$PKG_ROOT''${PYTHONPATH:+:$PYTHONPATH:}"
|
||||
|
||||
|
||||
@@ -6,13 +6,13 @@ from pathlib import Path
|
||||
from types import ModuleType
|
||||
|
||||
# These imports are unused, but necessary for @API.register to run once.
|
||||
from clan_lib.api import admin, directory, disk, iwd, mdns_discovery, modules
|
||||
from clan_lib.api import directory, disk, iwd, mdns_discovery, modules
|
||||
|
||||
from .arg_actions import AppendOptionAction
|
||||
from .clan import show, update
|
||||
|
||||
# API endpoints that are not used in the cli.
|
||||
__all__ = ["admin", "directory", "disk", "iwd", "mdns_discovery", "modules", "update"]
|
||||
__all__ = ["directory", "disk", "iwd", "mdns_discovery", "modules", "update"]
|
||||
|
||||
from . import (
|
||||
backups,
|
||||
|
||||
@@ -19,21 +19,23 @@ def create_backup(machine: Machine, provider: str | None = None) -> None:
|
||||
if not backup_scripts["providers"]:
|
||||
msg = "No providers specified"
|
||||
raise ClanError(msg)
|
||||
for provider in backup_scripts["providers"]:
|
||||
proc = machine.target_host.run(
|
||||
[backup_scripts["providers"][provider]["create"]],
|
||||
)
|
||||
if proc.returncode != 0:
|
||||
msg = "failed to start backup"
|
||||
raise ClanError(msg)
|
||||
print("successfully started backup")
|
||||
with machine.target_host() as host:
|
||||
for provider in backup_scripts["providers"]:
|
||||
proc = host.run(
|
||||
[backup_scripts["providers"][provider]["create"]],
|
||||
)
|
||||
if proc.returncode != 0:
|
||||
msg = "failed to start backup"
|
||||
raise ClanError(msg)
|
||||
print("successfully started backup")
|
||||
else:
|
||||
if provider not in backup_scripts["providers"]:
|
||||
msg = f"provider {provider} not found"
|
||||
raise ClanError(msg)
|
||||
proc = machine.target_host.run(
|
||||
[backup_scripts["providers"][provider]["create"]],
|
||||
)
|
||||
with machine.target_host() as host:
|
||||
proc = host.run(
|
||||
[backup_scripts["providers"][provider]["create"]],
|
||||
)
|
||||
if proc.returncode != 0:
|
||||
msg = "failed to start backup"
|
||||
raise ClanError(msg)
|
||||
|
||||
@@ -10,6 +10,7 @@ from clan_cli.completions import (
|
||||
)
|
||||
from clan_cli.errors import ClanError
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.ssh.host import Host
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -18,11 +19,11 @@ class Backup:
|
||||
job_name: str | None = None
|
||||
|
||||
|
||||
def list_provider(machine: Machine, provider: str) -> list[Backup]:
|
||||
def list_provider(machine: Machine, host: Host, provider: str) -> list[Backup]:
|
||||
results = []
|
||||
backup_metadata = machine.eval_nix("config.clan.core.backups")
|
||||
list_command = backup_metadata["providers"][provider]["list"]
|
||||
proc = machine.target_host.run(
|
||||
proc = host.run(
|
||||
[list_command],
|
||||
RunOpts(log=Log.NONE, check=False),
|
||||
)
|
||||
@@ -48,12 +49,13 @@ def list_provider(machine: Machine, provider: str) -> list[Backup]:
|
||||
def list_backups(machine: Machine, provider: str | None = None) -> list[Backup]:
|
||||
backup_metadata = machine.eval_nix("config.clan.core.backups")
|
||||
results = []
|
||||
if provider is None:
|
||||
for _provider in backup_metadata["providers"]:
|
||||
results += list_provider(machine, _provider)
|
||||
with machine.target_host() as host:
|
||||
if provider is None:
|
||||
for _provider in backup_metadata["providers"]:
|
||||
results += list_provider(machine, host, _provider)
|
||||
|
||||
else:
|
||||
results += list_provider(machine, provider)
|
||||
else:
|
||||
results += list_provider(machine, host, provider)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
@@ -8,9 +8,12 @@ from clan_cli.completions import (
|
||||
)
|
||||
from clan_cli.errors import ClanError
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.ssh.host import Host
|
||||
|
||||
|
||||
def restore_service(machine: Machine, name: str, provider: str, service: str) -> None:
|
||||
def restore_service(
|
||||
machine: Machine, host: Host, name: str, provider: str, service: str
|
||||
) -> None:
|
||||
backup_metadata = machine.eval_nix("config.clan.core.backups")
|
||||
backup_folders = machine.eval_nix("config.clan.core.state")
|
||||
|
||||
@@ -25,7 +28,7 @@ def restore_service(machine: Machine, name: str, provider: str, service: str) ->
|
||||
env["FOLDERS"] = ":".join(set(folders))
|
||||
|
||||
if pre_restore := backup_folders[service]["preRestoreCommand"]:
|
||||
proc = machine.target_host.run(
|
||||
proc = host.run(
|
||||
[pre_restore],
|
||||
RunOpts(log=Log.STDERR),
|
||||
extra_env=env,
|
||||
@@ -34,7 +37,7 @@ def restore_service(machine: Machine, name: str, provider: str, service: str) ->
|
||||
msg = f"failed to run preRestoreCommand: {pre_restore}, error was: {proc.stdout}"
|
||||
raise ClanError(msg)
|
||||
|
||||
proc = machine.target_host.run(
|
||||
proc = host.run(
|
||||
[backup_metadata["providers"][provider]["restore"]],
|
||||
RunOpts(log=Log.STDERR),
|
||||
extra_env=env,
|
||||
@@ -44,7 +47,7 @@ def restore_service(machine: Machine, name: str, provider: str, service: str) ->
|
||||
raise ClanError(msg)
|
||||
|
||||
if post_restore := backup_folders[service]["postRestoreCommand"]:
|
||||
proc = machine.target_host.run(
|
||||
proc = host.run(
|
||||
[post_restore],
|
||||
RunOpts(log=Log.STDERR),
|
||||
extra_env=env,
|
||||
@@ -61,18 +64,19 @@ def restore_backup(
|
||||
service: str | None = None,
|
||||
) -> None:
|
||||
errors = []
|
||||
if service is None:
|
||||
backup_folders = machine.eval_nix("config.clan.core.state")
|
||||
for _service in backup_folders:
|
||||
with machine.target_host() as host:
|
||||
if service is None:
|
||||
backup_folders = machine.eval_nix("config.clan.core.state")
|
||||
for _service in backup_folders:
|
||||
try:
|
||||
restore_service(machine, host, name, provider, _service)
|
||||
except ClanError as e:
|
||||
errors.append(f"{_service}: {e}")
|
||||
else:
|
||||
try:
|
||||
restore_service(machine, name, provider, _service)
|
||||
restore_service(machine, host, name, provider, service)
|
||||
except ClanError as e:
|
||||
errors.append(f"{_service}: {e}")
|
||||
else:
|
||||
try:
|
||||
restore_service(machine, name, provider, service)
|
||||
except ClanError as e:
|
||||
errors.append(f"{service}: {e}")
|
||||
errors.append(f"{service}: {e}")
|
||||
if errors:
|
||||
raise ClanError(
|
||||
"Restore failed for the following services:\n" + "\n".join(errors)
|
||||
|
||||
@@ -1,4 +1,8 @@
|
||||
from clan_cli.cmd import run
|
||||
import os
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
|
||||
from clan_cli.cmd import Log, RunOpts, run
|
||||
from clan_cli.nix import nix_shell
|
||||
|
||||
_works: bool | None = None
|
||||
@@ -12,6 +16,11 @@ def bubblewrap_works() -> bool:
|
||||
|
||||
|
||||
def _bubblewrap_works() -> bool:
|
||||
real_bash_path = Path("bash")
|
||||
if os.environ.get("IN_NIX_SANDBOX"):
|
||||
bash_executable_path = Path(str(shutil.which("bash")))
|
||||
real_bash_path = bash_executable_path.resolve()
|
||||
|
||||
# fmt: off
|
||||
cmd = nix_shell(
|
||||
[
|
||||
@@ -30,13 +39,10 @@ def _bubblewrap_works() -> bool:
|
||||
"--gid", "1000",
|
||||
"--",
|
||||
# do nothing, just test if bash executes
|
||||
"bash", "-c", ":"
|
||||
str(real_bash_path), "-c", ":"
|
||||
],
|
||||
)
|
||||
|
||||
# fmt: on
|
||||
try:
|
||||
run(cmd)
|
||||
except Exception:
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
res = run(cmd, RunOpts(log=Log.BOTH, check=False))
|
||||
return res.returncode == 0
|
||||
|
||||
@@ -107,7 +107,7 @@ def create_clan(opts: CreateOptions) -> CreateClanResponse:
|
||||
response.flake_update = flake_update
|
||||
|
||||
if opts.initial:
|
||||
init_inventory(str(opts.dest), init=opts.initial)
|
||||
init_inventory(Flake(str(opts.dest)), init=opts.initial)
|
||||
|
||||
return response
|
||||
|
||||
|
||||
@@ -6,8 +6,9 @@ from urllib.parse import urlparse
|
||||
|
||||
from clan_lib.api import API
|
||||
|
||||
from clan_cli.cmd import run_no_stdout
|
||||
from clan_cli.cmd import run
|
||||
from clan_cli.errors import ClanCmdError, ClanError
|
||||
from clan_cli.flake import Flake
|
||||
from clan_cli.inventory import Meta
|
||||
from clan_cli.nix import nix_eval
|
||||
|
||||
@@ -15,23 +16,26 @@ log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@API.register
|
||||
def show_clan_meta(uri: str | Path) -> Meta:
|
||||
def show_clan_meta(flake: Flake) -> Meta:
|
||||
if flake.is_local and not flake.path.exists():
|
||||
msg = f"Path {flake} does not exist"
|
||||
raise ClanError(msg, description="clan directory does not exist")
|
||||
cmd = nix_eval(
|
||||
[
|
||||
f"{uri}#clanInternals.inventory.meta",
|
||||
f"{flake}#clanInternals.inventory.meta",
|
||||
"--json",
|
||||
]
|
||||
)
|
||||
res = "{}"
|
||||
|
||||
try:
|
||||
proc = run_no_stdout(cmd)
|
||||
proc = run(cmd)
|
||||
res = proc.stdout.strip()
|
||||
except ClanCmdError as e:
|
||||
msg = "Evaluation failed on meta attribute"
|
||||
raise ClanError(
|
||||
msg,
|
||||
location=f"show_clan {uri}",
|
||||
location=f"show_clan {flake}",
|
||||
description=str(e.cmd),
|
||||
) from e
|
||||
|
||||
@@ -50,16 +54,16 @@ def show_clan_meta(uri: str | Path) -> Meta:
|
||||
msg = "Invalid absolute path"
|
||||
raise ClanError(
|
||||
msg,
|
||||
location=f"show_clan {uri}",
|
||||
location=f"show_clan {flake}",
|
||||
description="Icon path must be a URL or a relative path",
|
||||
)
|
||||
|
||||
icon_path = str((Path(uri) / meta_icon).resolve())
|
||||
icon_path = str((flake.path / meta_icon).resolve())
|
||||
else:
|
||||
msg = "Invalid schema"
|
||||
raise ClanError(
|
||||
msg,
|
||||
location=f"show_clan {uri}",
|
||||
location=f"show_clan {flake}",
|
||||
description="Icon path must be a URL or a relative path",
|
||||
)
|
||||
|
||||
|
||||
@@ -2,20 +2,21 @@ from dataclasses import dataclass
|
||||
|
||||
from clan_lib.api import API
|
||||
|
||||
from clan_cli.flake import Flake
|
||||
from clan_cli.inventory import Inventory, Meta, load_inventory_json, set_inventory
|
||||
|
||||
|
||||
@dataclass
|
||||
class UpdateOptions:
|
||||
directory: str
|
||||
flake: Flake
|
||||
meta: Meta
|
||||
|
||||
|
||||
@API.register
|
||||
def update_clan_meta(options: UpdateOptions) -> Inventory:
|
||||
inventory = load_inventory_json(options.directory)
|
||||
inventory = load_inventory_json(options.flake)
|
||||
inventory["meta"] = options.meta
|
||||
|
||||
set_inventory(inventory, options.directory, "Update clan metadata")
|
||||
set_inventory(inventory, options.flake, "Update clan metadata")
|
||||
|
||||
return inventory
|
||||
|
||||
@@ -244,12 +244,12 @@ class TimeTable:
|
||||
# Print in default color
|
||||
print(f"Took {v} for command: '{k}'")
|
||||
|
||||
def add(self, cmd: str, time: float) -> None:
|
||||
def add(self, cmd: str, duration: float) -> None:
|
||||
with self.lock:
|
||||
if cmd in self.table:
|
||||
self.table[cmd] += time
|
||||
self.table[cmd] += duration
|
||||
else:
|
||||
self.table[cmd] = time
|
||||
self.table[cmd] = duration
|
||||
|
||||
|
||||
TIME_TABLE = None
|
||||
@@ -259,7 +259,7 @@ if os.environ.get("CLAN_CLI_PERF"):
|
||||
|
||||
@dataclass
|
||||
class RunOpts:
|
||||
input: bytes | None = None
|
||||
input: IO[bytes] | bytes | None = None
|
||||
stdout: IO[bytes] | None = None
|
||||
stderr: IO[bytes] | None = None
|
||||
env: dict[str, str] | None = None
|
||||
@@ -329,7 +329,7 @@ def run(
|
||||
if options.requires_root_perm:
|
||||
cmd = cmd_with_root(cmd, options.graphical_perm)
|
||||
|
||||
if options.input:
|
||||
if options.input and isinstance(options.input, bytes):
|
||||
if any(not ch.isprintable() for ch in options.input.decode("ascii", "replace")):
|
||||
filtered_input = "<<binary_blob>>"
|
||||
else:
|
||||
@@ -344,7 +344,7 @@ def run(
|
||||
|
||||
start = timeit.default_timer()
|
||||
with ExitStack() as stack:
|
||||
stdin = subprocess.PIPE if options.input is not None else None
|
||||
stdin = subprocess.PIPE if isinstance(options.input, bytes) else options.input
|
||||
process = stack.enter_context(
|
||||
subprocess.Popen(
|
||||
cmd,
|
||||
@@ -364,13 +364,18 @@ def run(
|
||||
else:
|
||||
stack.enter_context(terminate_process_group(process))
|
||||
|
||||
if isinstance(options.input, bytes):
|
||||
input_bytes = options.input
|
||||
else:
|
||||
input_bytes = None
|
||||
|
||||
stdout_buf, stderr_buf = handle_io(
|
||||
process,
|
||||
options.log,
|
||||
prefix=options.prefix,
|
||||
msg_color=options.msg_color,
|
||||
timeout=options.timeout,
|
||||
input_bytes=options.input,
|
||||
input_bytes=input_bytes,
|
||||
stdout=options.stdout,
|
||||
stderr=options.stderr,
|
||||
)
|
||||
@@ -398,26 +403,3 @@ def run(
|
||||
raise ClanCmdError(cmd_out)
|
||||
|
||||
return cmd_out
|
||||
|
||||
|
||||
def run_no_stdout(
|
||||
cmd: list[str],
|
||||
opts: RunOpts | None = None,
|
||||
) -> CmdOut:
|
||||
"""
|
||||
Like run, but automatically suppresses all output, if not in DEBUG log level.
|
||||
If in DEBUG log level the stdout of commands will be shown.
|
||||
"""
|
||||
if opts is None:
|
||||
opts = RunOpts()
|
||||
|
||||
if cmdlog.isEnabledFor(logging.DEBUG):
|
||||
opts.log = opts.log if opts.log.value > Log.STDERR.value else Log.STDERR
|
||||
|
||||
return run(
|
||||
cmd,
|
||||
opts,
|
||||
)
|
||||
|
||||
|
||||
# type: ignore
|
||||
|
||||
@@ -1,9 +1,3 @@
|
||||
import pytest
|
||||
|
||||
from clan_cli.custom_logger import setup_logging
|
||||
|
||||
# collect_ignore = ["./nixpkgs"]
|
||||
|
||||
pytest_plugins = [
|
||||
"clan_cli.tests.temporary_dir",
|
||||
"clan_cli.tests.root",
|
||||
@@ -19,13 +13,3 @@ pytest_plugins = [
|
||||
"clan_cli.tests.stdout",
|
||||
"clan_cli.tests.nix_config",
|
||||
]
|
||||
|
||||
|
||||
# Executed on pytest session start
|
||||
def pytest_sessionstart(session: pytest.Session) -> None:
|
||||
# This function will be called once at the beginning of the test session
|
||||
print("Starting pytest session")
|
||||
# You can access the session config, items, testsfailed, etc.
|
||||
print(f"Session config: {session.config}")
|
||||
|
||||
setup_logging(level="INFO")
|
||||
|
||||
@@ -4,9 +4,14 @@ import sys
|
||||
import urllib
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from .errors import ClanError
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from clan_cli.flake import Flake
|
||||
from clan_cli.machines.machines import Machine
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -131,12 +136,17 @@ def vm_state_dir(flake_url: str, vm_name: str) -> Path:
|
||||
return user_data_dir() / "clan" / "vmstate" / clan_key / vm_name
|
||||
|
||||
|
||||
def machines_dir(flake_dir: Path) -> Path:
|
||||
return flake_dir / "machines"
|
||||
def machines_dir(flake: "Flake") -> Path:
|
||||
if flake.is_local:
|
||||
return flake.path / "machines"
|
||||
|
||||
store_path = flake.store_path
|
||||
assert store_path is not None, "Invalid flake object. Doesn't have a store path"
|
||||
return Path(store_path) / "machines"
|
||||
|
||||
|
||||
def specific_machine_dir(flake_dir: Path, machine: str) -> Path:
|
||||
return machines_dir(flake_dir) / machine
|
||||
def specific_machine_dir(machine: "Machine") -> Path:
|
||||
return machines_dir(machine.flake) / machine.name
|
||||
|
||||
|
||||
def module_root() -> Path:
|
||||
|
||||
@@ -48,6 +48,7 @@ def bubblewrap_cmd(generator: str, facts_dir: Path, secrets_dir: Path) -> list[s
|
||||
"--unshare-all",
|
||||
"--tmpfs", "/",
|
||||
"--ro-bind", "/nix/store", "/nix/store",
|
||||
"--ro-bind", "/bin/sh", "/bin/sh",
|
||||
"--dev", "/dev",
|
||||
# not allowed to bind procfs in some sandboxes
|
||||
"--bind", str(facts_dir), str(facts_dir),
|
||||
|
||||
@@ -4,6 +4,7 @@ from abc import ABC, abstractmethod
|
||||
from pathlib import Path
|
||||
|
||||
import clan_cli.machines.machines as machines
|
||||
from clan_cli.ssh.host import Host
|
||||
|
||||
|
||||
class SecretStoreBase(ABC):
|
||||
@@ -25,7 +26,7 @@ class SecretStoreBase(ABC):
|
||||
def exists(self, service: str, name: str) -> bool:
|
||||
pass
|
||||
|
||||
def needs_upload(self) -> bool:
|
||||
def needs_upload(self, host: Host) -> bool:
|
||||
return True
|
||||
|
||||
@abstractmethod
|
||||
|
||||
@@ -6,6 +6,7 @@ from typing import override
|
||||
from clan_cli.cmd import Log, RunOpts
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.nix import nix_shell
|
||||
from clan_cli.ssh.host import Host
|
||||
|
||||
from . import SecretStoreBase
|
||||
|
||||
@@ -93,9 +94,9 @@ class SecretStore(SecretStoreBase):
|
||||
return b"\n".join(hashes)
|
||||
|
||||
@override
|
||||
def needs_upload(self) -> bool:
|
||||
def needs_upload(self, host: Host) -> bool:
|
||||
local_hash = self.generate_hash()
|
||||
remote_hash = self.machine.target_host.run(
|
||||
remote_hash = host.run(
|
||||
# TODO get the path to the secrets from the machine
|
||||
["cat", f"{self.machine.secrets_upload_directory}/.pass_info"],
|
||||
RunOpts(log=Log.STDERR, check=False),
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
from pathlib import Path
|
||||
from typing import override
|
||||
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.secrets.folders import sops_secrets_folder
|
||||
from clan_cli.secrets.machines import add_machine, has_machine
|
||||
from clan_cli.secrets.secrets import decrypt_secret, encrypt_secret, has_secret
|
||||
from clan_cli.secrets.sops import generate_private_key
|
||||
from clan_cli.ssh.host import Host
|
||||
|
||||
from . import SecretStoreBase
|
||||
|
||||
@@ -58,13 +60,10 @@ class SecretStore(SecretStoreBase):
|
||||
sops_secrets_folder(self.machine.flake_dir) / f"{self.machine.name}-{name}",
|
||||
)
|
||||
|
||||
@override
|
||||
def needs_upload(self, host: Host) -> bool:
|
||||
return False
|
||||
|
||||
# We rely now on the vars backend to upload the age key
|
||||
def upload(self, output_dir: Path) -> None:
|
||||
key_name = f"{self.machine.name}-age.key"
|
||||
if not has_secret(sops_secrets_folder(self.machine.flake_dir) / key_name):
|
||||
# skip uploading the secret, not managed by us
|
||||
return
|
||||
key = decrypt_secret(
|
||||
self.machine.flake_dir,
|
||||
sops_secrets_folder(self.machine.flake_dir) / key_name,
|
||||
)
|
||||
(output_dir / "key.txt").write_text(key)
|
||||
pass
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from typing import override
|
||||
|
||||
from clan_cli.dirs import vm_state_dir
|
||||
from clan_cli.machines.machines import Machine
|
||||
@@ -28,6 +29,7 @@ class SecretStore(SecretStoreBase):
|
||||
def exists(self, service: str, name: str) -> bool:
|
||||
return (self.dir / service / name).exists()
|
||||
|
||||
@override
|
||||
def upload(self, output_dir: Path) -> None:
|
||||
if output_dir.exists():
|
||||
shutil.rmtree(output_dir)
|
||||
|
||||
@@ -11,16 +11,16 @@ log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def upload_secrets(machine: Machine) -> None:
|
||||
if not machine.secret_facts_store.needs_upload():
|
||||
machine.info("Secrets already uploaded")
|
||||
return
|
||||
with machine.target_host() as host:
|
||||
if not machine.secret_facts_store.needs_upload(host):
|
||||
machine.info("Secrets already uploaded")
|
||||
return
|
||||
|
||||
with TemporaryDirectory(prefix="facts-upload-") as _tempdir:
|
||||
local_secret_dir = Path(_tempdir).resolve()
|
||||
machine.secret_facts_store.upload(local_secret_dir)
|
||||
remote_secret_dir = Path(machine.secrets_upload_directory)
|
||||
|
||||
upload(machine.target_host, local_secret_dir, remote_secret_dir)
|
||||
with TemporaryDirectory(prefix="facts-upload-") as _tempdir:
|
||||
local_secret_dir = Path(_tempdir).resolve()
|
||||
machine.secret_facts_store.upload(local_secret_dir)
|
||||
remote_secret_dir = Path(machine.secrets_upload_directory)
|
||||
upload(host, local_secret_dir, remote_secret_dir)
|
||||
|
||||
|
||||
def upload_command(args: argparse.Namespace) -> None:
|
||||
|
||||
@@ -14,6 +14,7 @@ from clan_cli.nix import (
|
||||
nix_build,
|
||||
nix_command,
|
||||
nix_config,
|
||||
nix_eval,
|
||||
nix_metadata,
|
||||
nix_test_store,
|
||||
)
|
||||
@@ -344,9 +345,6 @@ class FlakeCacheEntry:
|
||||
|
||||
def is_cached(self, selectors: list[Selector]) -> bool:
|
||||
selector: Selector
|
||||
if selectors == []:
|
||||
return self.fetched_all
|
||||
selector = selectors[0]
|
||||
|
||||
# for store paths we have to check if they still exist, otherwise they have to be rebuild and are thus not cached
|
||||
if isinstance(self.value, str) and self.value.startswith("/nix/store/"):
|
||||
@@ -356,6 +354,10 @@ class FlakeCacheEntry:
|
||||
if isinstance(self.value, str | float | int | None):
|
||||
return True
|
||||
|
||||
if selectors == []:
|
||||
return self.fetched_all
|
||||
selector = selectors[0]
|
||||
|
||||
# we just fetch all subkeys, so we need to check of we inserted all keys at this level before
|
||||
if selector.type == SelectorType.ALL:
|
||||
assert isinstance(self.value, dict)
|
||||
@@ -458,7 +460,7 @@ class FlakeCacheEntry:
|
||||
result = []
|
||||
for index in keys_to_select:
|
||||
result.append(self.value[index].select(selectors[1:]))
|
||||
return result
|
||||
return result
|
||||
|
||||
# otherwise return a dict
|
||||
return {k: self.value[k].select(selectors[1:]) for k in keys_to_select}
|
||||
@@ -573,12 +575,12 @@ class Flake:
|
||||
identifier: str
|
||||
inputs_from: str | None = None
|
||||
hash: str | None = None
|
||||
flake_cache_path: Path | None = None
|
||||
store_path: str | None = None
|
||||
cache: FlakeCache | None = None
|
||||
_cache: FlakeCache | None = None
|
||||
_path: Path | None = None
|
||||
_is_local: bool | None = None
|
||||
|
||||
_flake_cache_path: Path | None = field(init=False, default=None)
|
||||
_cache: FlakeCache | None = field(init=False, default=None)
|
||||
_path: Path | None = field(init=False, default=None)
|
||||
_is_local: bool | None = field(init=False, default=None)
|
||||
|
||||
@classmethod
|
||||
def from_json(cls: type["Flake"], data: dict[str, Any]) -> "Flake":
|
||||
@@ -618,11 +620,9 @@ class Flake:
|
||||
except Exception as e:
|
||||
log.warning(f"Failed load eval cache: {e}. Continue without cache")
|
||||
|
||||
def invalidate_cache(self) -> None:
|
||||
def prefetch(self) -> None:
|
||||
"""
|
||||
Invalidate the cache and reload it.
|
||||
|
||||
This method is used to refresh the cache by reloading it from the flake.
|
||||
Loads the flake into the store and populates self.store_path and self.hash such that the flake can evaluate locally and offline
|
||||
"""
|
||||
cmd = [
|
||||
"flake",
|
||||
@@ -641,6 +641,15 @@ class Flake:
|
||||
flake_metadata = json.loads(flake_prefetch.stdout)
|
||||
self.store_path = flake_metadata["storePath"]
|
||||
self.hash = flake_metadata["hash"]
|
||||
self.flake_metadata = flake_metadata
|
||||
|
||||
def invalidate_cache(self) -> None:
|
||||
"""
|
||||
Invalidate the cache and reload it.
|
||||
|
||||
This method is used to refresh the cache by reloading it from the flake.
|
||||
"""
|
||||
self.prefetch()
|
||||
|
||||
self._cache = FlakeCache()
|
||||
assert self.hash is not None
|
||||
@@ -650,17 +659,17 @@ class Flake:
|
||||
)
|
||||
self.load_cache()
|
||||
|
||||
if "original" not in flake_metadata:
|
||||
flake_metadata = nix_metadata(self.identifier)
|
||||
if "original" not in self.flake_metadata:
|
||||
self.flake_metadata = nix_metadata(self.identifier)
|
||||
|
||||
if flake_metadata["original"].get("url", "").startswith("file:"):
|
||||
if self.flake_metadata["original"].get("url", "").startswith("file:"):
|
||||
self._is_local = True
|
||||
path = flake_metadata["original"]["url"].removeprefix("file://")
|
||||
path = self.flake_metadata["original"]["url"].removeprefix("file://")
|
||||
path = path.removeprefix("file:")
|
||||
self._path = Path(path)
|
||||
elif flake_metadata["original"].get("path"):
|
||||
elif self.flake_metadata["original"].get("path"):
|
||||
self._is_local = True
|
||||
self._path = Path(flake_metadata["original"]["path"])
|
||||
self._path = Path(self.flake_metadata["original"]["path"])
|
||||
else:
|
||||
self._is_local = False
|
||||
assert self.store_path is not None
|
||||
@@ -754,6 +763,56 @@ class Flake:
|
||||
if self.flake_cache_path:
|
||||
self._cache.save_to_file(self.flake_cache_path)
|
||||
|
||||
def uncached_nix_eval_with_args(
|
||||
self,
|
||||
attr_path: str,
|
||||
f_args: dict[str, str],
|
||||
nix_options: list[str] | None = None,
|
||||
) -> str:
|
||||
"""
|
||||
Calls a nix function with the provided arguments 'f_args'
|
||||
The argument must be an attribute set.
|
||||
|
||||
Args:
|
||||
attr_path (str): The attribute path to the nix function
|
||||
f_args (dict[str, nix_expr]): A python dictionary mapping from the name of the argument to a raw nix expression.
|
||||
|
||||
Example
|
||||
|
||||
flake.uncached_nix_eval_with_args(
|
||||
"clanInternals.evalServiceSchema",
|
||||
{ "moduleSpec": "{ name = \"hello-world\"; input = null; }" }
|
||||
)
|
||||
> '{ ...JSONSchema... }'
|
||||
|
||||
"""
|
||||
# Always prefetch, so we don't get any stale information
|
||||
self.prefetch()
|
||||
|
||||
if nix_options is None:
|
||||
nix_options = []
|
||||
|
||||
arg_expr = "{"
|
||||
for arg_name, arg_value in f_args.items():
|
||||
arg_expr += f" {arg_name} = {arg_value}; "
|
||||
arg_expr += "}"
|
||||
|
||||
nix_code = f"""
|
||||
let
|
||||
flake = builtins.getFlake "path:{self.store_path}?narHash={self.hash}";
|
||||
in
|
||||
flake.{attr_path} {arg_expr}
|
||||
"""
|
||||
if tmp_store := nix_test_store():
|
||||
nix_options += ["--store", str(tmp_store)]
|
||||
nix_options.append("--impure")
|
||||
|
||||
output = run(
|
||||
nix_eval(["--expr", nix_code, *nix_options]), RunOpts(log=Log.NONE)
|
||||
).stdout.strip()
|
||||
|
||||
return output
|
||||
|
||||
def precache(
|
||||
self,
|
||||
selectors: list[str],
|
||||
|
||||
@@ -14,7 +14,7 @@ from clan_cli.facts.generate import generate_facts
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.nix import nix_shell
|
||||
from clan_cli.vars.generate import generate_vars
|
||||
from clan_cli.vars.upload import upload_secret_vars
|
||||
from clan_cli.vars.upload import populate_secret_vars
|
||||
|
||||
from .automount import pause_automounting
|
||||
from .list import list_possible_keymaps, list_possible_languages
|
||||
@@ -35,6 +35,7 @@ class Disk:
|
||||
device: str
|
||||
|
||||
|
||||
# TODO: unify this with machine install
|
||||
@API.register
|
||||
def flash_machine(
|
||||
machine: Machine,
|
||||
@@ -107,7 +108,7 @@ def flash_machine(
|
||||
|
||||
local_dir.mkdir(parents=True)
|
||||
machine.secret_facts_store.upload(local_dir)
|
||||
upload_secret_vars(machine, local_dir)
|
||||
populate_secret_vars(machine, local_dir)
|
||||
disko_install = []
|
||||
|
||||
if os.geteuid() != 0:
|
||||
|
||||
@@ -21,8 +21,9 @@ from typing import Any
|
||||
|
||||
from clan_lib.api import API, dataclass_to_dict, from_dict
|
||||
|
||||
from clan_cli.cmd import run_no_stdout
|
||||
from clan_cli.cmd import run
|
||||
from clan_cli.errors import ClanCmdError, ClanError
|
||||
from clan_cli.flake import Flake
|
||||
from clan_cli.git import commit_file
|
||||
from clan_cli.nix import nix_eval
|
||||
|
||||
@@ -49,11 +50,11 @@ __all__ = [
|
||||
]
|
||||
|
||||
|
||||
def get_inventory_path(flake_dir: str | Path) -> Path:
|
||||
def get_inventory_path(flake: Flake) -> Path:
|
||||
"""
|
||||
Get the path to the inventory file in the flake directory
|
||||
"""
|
||||
inventory_file = (Path(flake_dir) / "inventory.json").resolve()
|
||||
inventory_file = (flake.path / "inventory.json").resolve()
|
||||
return inventory_file
|
||||
|
||||
|
||||
@@ -61,8 +62,7 @@ def get_inventory_path(flake_dir: str | Path) -> Path:
|
||||
default_inventory: Inventory = {"meta": {"name": "New Clan"}}
|
||||
|
||||
|
||||
@API.register
|
||||
def load_inventory_eval(flake_dir: str | Path) -> Inventory:
|
||||
def load_inventory_eval(flake_dir: Flake) -> Inventory:
|
||||
"""
|
||||
Loads the evaluated inventory.
|
||||
After all merge operations with eventual nix code in buildClan.
|
||||
@@ -80,7 +80,7 @@ def load_inventory_eval(flake_dir: str | Path) -> Inventory:
|
||||
]
|
||||
)
|
||||
|
||||
proc = run_no_stdout(cmd)
|
||||
proc = run(cmd)
|
||||
|
||||
try:
|
||||
res = proc.stdout.strip()
|
||||
@@ -355,7 +355,7 @@ def determine_writeability(
|
||||
return results
|
||||
|
||||
|
||||
def get_inventory_current_priority(flake_dir: str | Path) -> dict:
|
||||
def get_inventory_current_priority(flake: Flake) -> dict:
|
||||
"""
|
||||
Returns the current priority of the inventory values
|
||||
|
||||
@@ -375,12 +375,12 @@ def get_inventory_current_priority(flake_dir: str | Path) -> dict:
|
||||
"""
|
||||
cmd = nix_eval(
|
||||
[
|
||||
f"{flake_dir}#clanInternals.inventoryValuesPrios",
|
||||
f"{flake}#clanInternals.inventoryClass.introspection",
|
||||
"--json",
|
||||
]
|
||||
)
|
||||
|
||||
proc = run_no_stdout(cmd)
|
||||
proc = run(cmd)
|
||||
|
||||
try:
|
||||
res = proc.stdout.strip()
|
||||
@@ -393,7 +393,7 @@ def get_inventory_current_priority(flake_dir: str | Path) -> dict:
|
||||
|
||||
|
||||
@API.register
|
||||
def load_inventory_json(flake_dir: str | Path) -> Inventory:
|
||||
def load_inventory_json(flake: Flake) -> Inventory:
|
||||
"""
|
||||
Load the inventory FILE from the flake directory
|
||||
If no file is found, returns an empty dictionary
|
||||
@@ -403,7 +403,7 @@ def load_inventory_json(flake_dir: str | Path) -> Inventory:
|
||||
Use load_inventory_eval instead
|
||||
"""
|
||||
|
||||
inventory_file = get_inventory_path(flake_dir)
|
||||
inventory_file = get_inventory_path(flake)
|
||||
|
||||
if not inventory_file.exists():
|
||||
return {}
|
||||
@@ -473,14 +473,14 @@ def patch(d: dict[str, Any], path: str, content: Any) -> None:
|
||||
|
||||
|
||||
@API.register
|
||||
def patch_inventory_with(base_dir: Path, section: str, content: dict[str, Any]) -> None:
|
||||
def patch_inventory_with(flake: Flake, section: str, content: dict[str, Any]) -> None:
|
||||
"""
|
||||
Pass only the section to update and the content to update with.
|
||||
Make sure you pass only attributes that you would like to persist.
|
||||
ATTENTION: Don't pass nix eval values unintentionally.
|
||||
"""
|
||||
|
||||
inventory_file = get_inventory_path(base_dir)
|
||||
inventory_file = get_inventory_path(flake)
|
||||
|
||||
curr_inventory = {}
|
||||
if inventory_file.exists():
|
||||
@@ -492,7 +492,9 @@ def patch_inventory_with(base_dir: Path, section: str, content: dict[str, Any])
|
||||
with inventory_file.open("w") as f:
|
||||
json.dump(curr_inventory, f, indent=2)
|
||||
|
||||
commit_file(inventory_file, base_dir, commit_message=f"inventory.{section}: Update")
|
||||
commit_file(
|
||||
inventory_file, flake.path, commit_message=f"inventory.{section}: Update"
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -504,16 +506,16 @@ class WriteInfo:
|
||||
|
||||
@API.register
|
||||
def get_inventory_with_writeable_keys(
|
||||
flake_dir: str | Path,
|
||||
flake: Flake,
|
||||
) -> WriteInfo:
|
||||
"""
|
||||
Load the inventory and determine the writeable keys
|
||||
Performs 2 nix evaluations to get the current priority and the inventory
|
||||
"""
|
||||
current_priority = get_inventory_current_priority(flake_dir)
|
||||
current_priority = get_inventory_current_priority(flake)
|
||||
|
||||
data_eval: Inventory = load_inventory_eval(flake_dir)
|
||||
data_disk: Inventory = load_inventory_json(flake_dir)
|
||||
data_eval: Inventory = load_inventory_eval(flake)
|
||||
data_disk: Inventory = load_inventory_json(flake)
|
||||
|
||||
writeables = determine_writeability(
|
||||
current_priority, dict(data_eval), dict(data_disk)
|
||||
@@ -522,16 +524,17 @@ def get_inventory_with_writeable_keys(
|
||||
return WriteInfo(writeables, data_eval, data_disk)
|
||||
|
||||
|
||||
# TODO: remove this function in favor of a proper read/write API
|
||||
@API.register
|
||||
def set_inventory(
|
||||
inventory: Inventory, flake_dir: str | Path, message: str, commit: bool = True
|
||||
inventory: Inventory, flake: Flake, message: str, commit: bool = True
|
||||
) -> None:
|
||||
"""
|
||||
Write the inventory to the flake directory
|
||||
and commit it to git with the given message
|
||||
"""
|
||||
|
||||
write_info = get_inventory_with_writeable_keys(flake_dir)
|
||||
write_info = get_inventory_with_writeable_keys(flake)
|
||||
|
||||
# Remove internals from the inventory
|
||||
inventory.pop("tags", None) # type: ignore
|
||||
@@ -552,43 +555,43 @@ def set_inventory(
|
||||
for delete_path in delete_set:
|
||||
delete_by_path(persisted, delete_path)
|
||||
|
||||
inventory_file = get_inventory_path(flake_dir)
|
||||
inventory_file = get_inventory_path(flake)
|
||||
with inventory_file.open("w") as f:
|
||||
json.dump(persisted, f, indent=2)
|
||||
|
||||
if commit:
|
||||
commit_file(inventory_file, Path(flake_dir), commit_message=message)
|
||||
commit_file(inventory_file, flake.path, commit_message=message)
|
||||
|
||||
|
||||
@API.register
|
||||
def delete(directory: str | Path, delete_set: set[str]) -> None:
|
||||
# TODO: wrap this in a proper persistence API
|
||||
def delete(flake: Flake, delete_set: set[str]) -> None:
|
||||
"""
|
||||
Delete keys from the inventory
|
||||
"""
|
||||
write_info = get_inventory_with_writeable_keys(directory)
|
||||
write_info = get_inventory_with_writeable_keys(flake)
|
||||
|
||||
data_disk = dict(write_info.data_disk)
|
||||
|
||||
for delete_path in delete_set:
|
||||
delete_by_path(data_disk, delete_path)
|
||||
|
||||
inventory_file = get_inventory_path(directory)
|
||||
inventory_file = get_inventory_path(flake)
|
||||
with inventory_file.open("w") as f:
|
||||
json.dump(data_disk, f, indent=2)
|
||||
|
||||
commit_file(
|
||||
inventory_file,
|
||||
Path(directory),
|
||||
flake.path,
|
||||
commit_message=f"Delete inventory keys {delete_set}",
|
||||
)
|
||||
|
||||
|
||||
def init_inventory(directory: str, init: Inventory | None = None) -> None:
|
||||
def init_inventory(flake: Flake, init: Inventory | None = None) -> None:
|
||||
inventory = None
|
||||
# Try reading the current flake
|
||||
if init is None:
|
||||
with contextlib.suppress(ClanCmdError):
|
||||
inventory = load_inventory_eval(directory)
|
||||
inventory = load_inventory_eval(flake)
|
||||
|
||||
if init is not None:
|
||||
inventory = init
|
||||
@@ -596,9 +599,9 @@ def init_inventory(directory: str, init: Inventory | None = None) -> None:
|
||||
# Write inventory.json file
|
||||
if inventory is not None:
|
||||
# Persist creates a commit message for each change
|
||||
set_inventory(inventory, directory, "Init inventory")
|
||||
set_inventory(inventory, flake, "Init inventory")
|
||||
|
||||
|
||||
@API.register
|
||||
def get_inventory(base_path: str | Path) -> Inventory:
|
||||
return load_inventory_eval(base_path)
|
||||
def get_inventory(flake: Flake) -> Inventory:
|
||||
return load_inventory_eval(flake)
|
||||
|
||||
@@ -110,7 +110,7 @@ def create_machine(opts: CreateOptions, commit: bool = True) -> None:
|
||||
new_machine["deploy"] = {"targetHost": target_host}
|
||||
|
||||
patch_inventory_with(
|
||||
clan_dir, f"machines.{machine_name}", dataclass_to_dict(new_machine)
|
||||
opts.clan_dir, f"machines.{machine_name}", dataclass_to_dict(new_machine)
|
||||
)
|
||||
|
||||
# Commit at the end in that order to avoid committing halve-baked machines
|
||||
|
||||
@@ -5,9 +5,10 @@ from pathlib import Path
|
||||
|
||||
from clan_lib.api import API
|
||||
|
||||
from clan_cli import Flake, inventory
|
||||
from clan_cli import inventory
|
||||
from clan_cli.completions import add_dynamic_completer, complete_machines
|
||||
from clan_cli.dirs import specific_machine_dir
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.secrets.folders import sops_secrets_folder
|
||||
from clan_cli.secrets.machines import has_machine as secrets_has_machine
|
||||
from clan_cli.secrets.machines import remove_machine as secrets_machine_remove
|
||||
@@ -15,49 +16,46 @@ from clan_cli.secrets.secrets import (
|
||||
list_secrets,
|
||||
)
|
||||
|
||||
from .machines import Machine
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@API.register
|
||||
def delete_machine(flake: Flake, name: str) -> None:
|
||||
def delete_machine(machine: Machine) -> None:
|
||||
try:
|
||||
inventory.delete(str(flake.path), {f"machines.{name}"})
|
||||
inventory.delete(machine.flake, {f"machines.{machine.name}"})
|
||||
except KeyError as exc:
|
||||
# louis@(2025-03-09): test infrastructure does not seem to set the
|
||||
# inventory properly, but more importantly only one machine in my
|
||||
# personal clan ended up in the inventory for some reason, so I think
|
||||
# it makes sense to eat the exception here.
|
||||
log.warning(
|
||||
f"{name} was missing or already deleted from the machines inventory: {exc}"
|
||||
f"{machine.name} was missing or already deleted from the machines inventory: {exc}"
|
||||
)
|
||||
|
||||
changed_paths: list[Path] = []
|
||||
|
||||
folder = specific_machine_dir(flake.path, name)
|
||||
folder = specific_machine_dir(machine)
|
||||
if folder.exists():
|
||||
changed_paths.append(folder)
|
||||
shutil.rmtree(folder)
|
||||
|
||||
# louis@(2025-02-04): clean-up legacy (pre-vars) secrets:
|
||||
sops_folder = sops_secrets_folder(flake.path)
|
||||
filter_fn = lambda secret_name: secret_name.startswith(f"{name}-")
|
||||
for secret_name in list_secrets(flake.path, filter_fn):
|
||||
sops_folder = sops_secrets_folder(machine.flake.path)
|
||||
filter_fn = lambda secret_name: secret_name.startswith(f"{machine.name}-")
|
||||
for secret_name in list_secrets(machine.flake.path, filter_fn):
|
||||
secret_path = sops_folder / secret_name
|
||||
changed_paths.append(secret_path)
|
||||
shutil.rmtree(secret_path)
|
||||
|
||||
machine = Machine(name, flake)
|
||||
changed_paths.extend(machine.public_vars_store.delete_store())
|
||||
changed_paths.extend(machine.secret_vars_store.delete_store())
|
||||
# Remove the machine's key, and update secrets & vars that referenced it:
|
||||
if secrets_has_machine(flake.path, name):
|
||||
secrets_machine_remove(flake.path, name)
|
||||
if secrets_has_machine(machine.flake.path, machine.name):
|
||||
secrets_machine_remove(machine.flake.path, machine.name)
|
||||
|
||||
|
||||
def delete_command(args: argparse.Namespace) -> None:
|
||||
delete_machine(args.flake, args.name)
|
||||
delete_machine(Machine(flake=args.flake, name=args.name))
|
||||
|
||||
|
||||
def register_delete_parser(parser: argparse.ArgumentParser) -> None:
|
||||
|
||||
@@ -7,14 +7,13 @@ from pathlib import Path
|
||||
|
||||
from clan_lib.api import API
|
||||
|
||||
from clan_cli.cmd import RunOpts, run, run_no_stdout
|
||||
from clan_cli.cmd import RunOpts, run
|
||||
from clan_cli.completions import add_dynamic_completer, complete_machines
|
||||
from clan_cli.dirs import specific_machine_dir
|
||||
from clan_cli.errors import ClanCmdError, ClanError
|
||||
from clan_cli.flake import Flake
|
||||
from clan_cli.git import commit_file
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.nix import nix_config, nix_eval, nix_shell
|
||||
from clan_cli.nix import nix_config, nix_eval
|
||||
|
||||
from .types import machine_name_type
|
||||
|
||||
@@ -26,61 +25,35 @@ class HardwareConfig(Enum):
|
||||
NIXOS_GENERATE_CONFIG = "nixos-generate-config"
|
||||
NONE = "none"
|
||||
|
||||
def config_path(self, clan_dir: Path, machine_name: str) -> Path:
|
||||
machine_dir = specific_machine_dir(clan_dir, machine_name)
|
||||
def config_path(self, machine: Machine) -> Path:
|
||||
machine_dir = specific_machine_dir(machine)
|
||||
if self == HardwareConfig.NIXOS_FACTER:
|
||||
return machine_dir / "facter.json"
|
||||
return machine_dir / "hardware-configuration.nix"
|
||||
|
||||
@classmethod
|
||||
def detect_type(
|
||||
cls: type["HardwareConfig"], clan_dir: Path, machine_name: str
|
||||
) -> "HardwareConfig":
|
||||
hardware_config = HardwareConfig.NIXOS_GENERATE_CONFIG.config_path(
|
||||
clan_dir, machine_name
|
||||
)
|
||||
def detect_type(cls: type["HardwareConfig"], machine: Machine) -> "HardwareConfig":
|
||||
hardware_config = HardwareConfig.NIXOS_GENERATE_CONFIG.config_path(machine)
|
||||
|
||||
if hardware_config.exists() and "throw" not in hardware_config.read_text():
|
||||
return HardwareConfig.NIXOS_GENERATE_CONFIG
|
||||
|
||||
if HardwareConfig.NIXOS_FACTER.config_path(clan_dir, machine_name).exists():
|
||||
if HardwareConfig.NIXOS_FACTER.config_path(machine).exists():
|
||||
return HardwareConfig.NIXOS_FACTER
|
||||
|
||||
return HardwareConfig.NONE
|
||||
|
||||
|
||||
@API.register
|
||||
def show_machine_hardware_config(clan_dir: Path, machine_name: str) -> HardwareConfig:
|
||||
def show_machine_hardware_config(machine: Machine) -> HardwareConfig:
|
||||
"""
|
||||
Show hardware information for a machine returns None if none exist.
|
||||
"""
|
||||
return HardwareConfig.detect_type(clan_dir, machine_name)
|
||||
return HardwareConfig.detect_type(machine)
|
||||
|
||||
|
||||
@API.register
|
||||
def show_machine_deployment_target(clan_dir: Path, machine_name: str) -> str | None:
|
||||
"""
|
||||
Show deployment target for a machine returns None if none exist.
|
||||
"""
|
||||
config = nix_config()
|
||||
system = config["system"]
|
||||
cmd = nix_eval(
|
||||
[
|
||||
f"{clan_dir}#clanInternals.machines.{system}.{machine_name}",
|
||||
"--apply",
|
||||
"machine: { inherit (machine.config.clan.core.networking) targetHost; }",
|
||||
"--json",
|
||||
]
|
||||
)
|
||||
proc = run_no_stdout(cmd, RunOpts(prefix=machine_name))
|
||||
res = proc.stdout.strip()
|
||||
|
||||
target_host = json.loads(res)
|
||||
return target_host.get("targetHost", None)
|
||||
|
||||
|
||||
@API.register
|
||||
def show_machine_hardware_platform(clan_dir: Path, machine_name: str) -> str | None:
|
||||
def show_machine_hardware_platform(machine: Machine) -> str | None:
|
||||
"""
|
||||
Show hardware information for a machine returns None if none exist.
|
||||
"""
|
||||
@@ -88,13 +61,13 @@ def show_machine_hardware_platform(clan_dir: Path, machine_name: str) -> str | N
|
||||
system = config["system"]
|
||||
cmd = nix_eval(
|
||||
[
|
||||
f"{clan_dir}#clanInternals.machines.{system}.{machine_name}",
|
||||
f"{machine.flake}#clanInternals.machines.{system}.{machine.name}",
|
||||
"--apply",
|
||||
"machine: { inherit (machine.pkgs) system; }",
|
||||
"--json",
|
||||
]
|
||||
)
|
||||
proc = run_no_stdout(cmd, RunOpts(prefix=machine_name))
|
||||
proc = run(cmd, RunOpts(prefix=machine.name))
|
||||
res = proc.stdout.strip()
|
||||
|
||||
host_platform = json.loads(res)
|
||||
@@ -103,11 +76,8 @@ def show_machine_hardware_platform(clan_dir: Path, machine_name: str) -> str | N
|
||||
|
||||
@dataclass
|
||||
class HardwareGenerateOptions:
|
||||
flake: Flake
|
||||
machine: str
|
||||
machine: Machine
|
||||
backend: HardwareConfig
|
||||
target_host: str | None = None
|
||||
keyfile: str | None = None
|
||||
password: str | None = None
|
||||
|
||||
|
||||
@@ -118,11 +88,9 @@ def generate_machine_hardware_info(opts: HardwareGenerateOptions) -> HardwareCon
|
||||
and place the resulting *.nix file in the machine's directory.
|
||||
"""
|
||||
|
||||
machine = Machine(opts.machine, flake=opts.flake)
|
||||
if opts.target_host is not None:
|
||||
machine.override_target_host = opts.target_host
|
||||
machine = opts.machine
|
||||
|
||||
hw_file = opts.backend.config_path(opts.flake.path, opts.machine)
|
||||
hw_file = opts.backend.config_path(opts.machine)
|
||||
hw_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if opts.backend == HardwareConfig.NIXOS_FACTER:
|
||||
@@ -135,48 +103,26 @@ def generate_machine_hardware_info(opts: HardwareGenerateOptions) -> HardwareCon
|
||||
"--show-hardware-config",
|
||||
]
|
||||
|
||||
host = machine.target_host
|
||||
with machine.target_host() as host:
|
||||
host.ssh_options["StrictHostKeyChecking"] = "accept-new"
|
||||
host.ssh_options["UserKnownHostsFile"] = "/dev/null"
|
||||
if opts.password:
|
||||
host.password = opts.password
|
||||
|
||||
# HACK: to make non-root user work
|
||||
if host.user != "root":
|
||||
config_command.insert(0, "sudo")
|
||||
out = host.run(config_command, become_root=True, opts=RunOpts(check=False))
|
||||
if out.returncode != 0:
|
||||
if "nixos-facter" in out.stderr and "not found" in out.stderr:
|
||||
machine.error(str(out.stderr))
|
||||
msg = (
|
||||
"Please use our custom nixos install images from https://github.com/nix-community/nixos-images/releases/tag/nixos-unstable. "
|
||||
"nixos-factor only works on nixos / clan systems currently."
|
||||
)
|
||||
raise ClanError(msg)
|
||||
|
||||
deps = ["openssh"]
|
||||
if opts.password:
|
||||
deps += ["sshpass"]
|
||||
|
||||
cmd = nix_shell(
|
||||
deps,
|
||||
[
|
||||
*(["sshpass", "-p", opts.password] if opts.password else []),
|
||||
"ssh",
|
||||
*(["-i", f"{opts.keyfile}"] if opts.keyfile else []),
|
||||
# Disable known hosts file
|
||||
"-o",
|
||||
"UserKnownHostsFile=/dev/null",
|
||||
# Disable strict host key checking. The GUI user cannot type "yes" into the ssh terminal.
|
||||
"-o",
|
||||
"StrictHostKeyChecking=accept-new",
|
||||
*(
|
||||
["-p", str(machine.target_host.port)]
|
||||
if machine.target_host.port
|
||||
else []
|
||||
),
|
||||
host.target,
|
||||
*config_command,
|
||||
],
|
||||
)
|
||||
out = run(cmd, RunOpts(needs_user_terminal=True, prefix=machine.name, check=False))
|
||||
if out.returncode != 0:
|
||||
if "nixos-facter" in out.stderr and "not found" in out.stderr:
|
||||
machine.error(str(out.stderr))
|
||||
msg = "Please use our custom nixos install images. nixos-factor only works on nixos / clan systems currently."
|
||||
machine.error(str(out))
|
||||
msg = f"Failed to inspect {opts.machine}. Address: {host.target}"
|
||||
raise ClanError(msg)
|
||||
|
||||
machine.error(str(out))
|
||||
msg = f"Failed to inspect {opts.machine}. Address: {host.target}"
|
||||
raise ClanError(msg)
|
||||
|
||||
backup_file = None
|
||||
if hw_file.exists():
|
||||
backup_file = hw_file.with_suffix(".bak")
|
||||
@@ -189,11 +135,11 @@ def generate_machine_hardware_info(opts: HardwareGenerateOptions) -> HardwareCon
|
||||
|
||||
commit_file(
|
||||
hw_file,
|
||||
opts.flake.path,
|
||||
opts.machine.flake.path,
|
||||
f"machines/{opts.machine}/{hw_file.name}: update hardware configuration",
|
||||
)
|
||||
try:
|
||||
show_machine_hardware_platform(opts.flake.path, opts.machine)
|
||||
show_machine_hardware_platform(opts.machine)
|
||||
if backup_file:
|
||||
backup_file.unlink(missing_ok=True)
|
||||
except ClanCmdError as e:
|
||||
@@ -214,10 +160,13 @@ def generate_machine_hardware_info(opts: HardwareGenerateOptions) -> HardwareCon
|
||||
|
||||
|
||||
def update_hardware_config_command(args: argparse.Namespace) -> None:
|
||||
opts = HardwareGenerateOptions(
|
||||
machine = Machine(
|
||||
flake=args.flake,
|
||||
machine=args.machine,
|
||||
target_host=args.target_host,
|
||||
name=args.machine,
|
||||
override_target_host=args.target_host,
|
||||
)
|
||||
opts = HardwareGenerateOptions(
|
||||
machine=machine,
|
||||
password=args.password,
|
||||
backend=HardwareConfig(args.backend),
|
||||
)
|
||||
|
||||
@@ -36,7 +36,6 @@ class BuildOn(Enum):
|
||||
@dataclass
|
||||
class InstallOptions:
|
||||
machine: Machine
|
||||
target_host: str
|
||||
kexec: str | None = None
|
||||
debug: bool = False
|
||||
no_reboot: bool = False
|
||||
@@ -52,17 +51,16 @@ class InstallOptions:
|
||||
@API.register
|
||||
def install_machine(opts: InstallOptions) -> None:
|
||||
machine = opts.machine
|
||||
machine.override_target_host = opts.target_host
|
||||
|
||||
machine.info(f"installing {machine.name}")
|
||||
|
||||
h = machine.target_host
|
||||
machine.info(f"target host: {h.target}")
|
||||
machine.debug(f"installing {machine.name}")
|
||||
|
||||
generate_facts([machine])
|
||||
generate_vars([machine])
|
||||
|
||||
with TemporaryDirectory(prefix="nixos-install-") as _base_directory:
|
||||
with (
|
||||
TemporaryDirectory(prefix="nixos-install-") as _base_directory,
|
||||
machine.target_host() as host,
|
||||
):
|
||||
base_directory = Path(_base_directory).resolve()
|
||||
activation_secrets = base_directory / "activation_secrets"
|
||||
upload_dir = activation_secrets / machine.secrets_upload_directory.lstrip("/")
|
||||
@@ -113,11 +111,7 @@ def install_machine(opts: InstallOptions) -> None:
|
||||
[
|
||||
"--generate-hardware-config",
|
||||
str(opts.update_hardware_config.value),
|
||||
str(
|
||||
opts.update_hardware_config.config_path(
|
||||
machine.flake.path, machine.name
|
||||
)
|
||||
),
|
||||
str(opts.update_hardware_config.config_path(machine)),
|
||||
]
|
||||
)
|
||||
|
||||
@@ -134,14 +128,14 @@ def install_machine(opts: InstallOptions) -> None:
|
||||
if opts.build_on:
|
||||
cmd += ["--build-on", opts.build_on.value]
|
||||
|
||||
if h.port:
|
||||
cmd += ["--ssh-port", str(h.port)]
|
||||
if host.port:
|
||||
cmd += ["--ssh-port", str(host.port)]
|
||||
if opts.kexec:
|
||||
cmd += ["--kexec", opts.kexec]
|
||||
|
||||
if opts.debug:
|
||||
cmd.append("--debug")
|
||||
cmd.append(h.target)
|
||||
cmd.append(host.target)
|
||||
if opts.use_tor:
|
||||
# nix copy does not support tor socks proxy
|
||||
# cmd.append("--ssh-option")
|
||||
@@ -164,7 +158,32 @@ def install_machine(opts: InstallOptions) -> None:
|
||||
def install_command(args: argparse.Namespace) -> None:
|
||||
host_key_check = HostKeyCheck.from_str(args.host_key_check)
|
||||
try:
|
||||
machine = Machine(name=args.machine, flake=args.flake, nix_options=args.option)
|
||||
# Only if the caller did not specify a target_host via args.target_host
|
||||
# Find a suitable target_host that is reachable
|
||||
target_host = args.target_host
|
||||
deploy_info: DeployInfo | None = ssh_command_parse(args)
|
||||
|
||||
if deploy_info and not args.target_host:
|
||||
host = find_reachable_host(deploy_info, host_key_check)
|
||||
if host is None:
|
||||
use_tor = True
|
||||
target_host = f"root@{deploy_info.tor}"
|
||||
else:
|
||||
target_host = host.target
|
||||
|
||||
if args.password:
|
||||
password = args.password
|
||||
elif deploy_info and deploy_info.pwd:
|
||||
password = deploy_info.pwd
|
||||
else:
|
||||
password = None
|
||||
|
||||
machine = Machine(
|
||||
name=args.machine,
|
||||
flake=args.flake,
|
||||
nix_options=args.option,
|
||||
override_target_host=target_host,
|
||||
)
|
||||
use_tor = False
|
||||
|
||||
if machine._class_ == "darwin":
|
||||
@@ -175,41 +194,16 @@ def install_command(args: argparse.Namespace) -> None:
|
||||
msg = "Could not find clan flake toplevel directory"
|
||||
raise ClanError(msg)
|
||||
|
||||
deploy_info: DeployInfo | None = ssh_command_parse(args)
|
||||
|
||||
if args.target_host:
|
||||
target_host = args.target_host
|
||||
elif deploy_info:
|
||||
host = find_reachable_host(deploy_info, host_key_check)
|
||||
if host is None:
|
||||
use_tor = True
|
||||
target_host = f"root@{deploy_info.tor}"
|
||||
else:
|
||||
target_host = host.target
|
||||
password = deploy_info.pwd
|
||||
else:
|
||||
target_host = machine.target_host.target
|
||||
|
||||
if args.password:
|
||||
password = args.password
|
||||
elif deploy_info and deploy_info.pwd:
|
||||
password = deploy_info.pwd
|
||||
else:
|
||||
password = None
|
||||
|
||||
if not target_host:
|
||||
msg = "No target host provided, please provide a target host."
|
||||
raise ClanError(msg)
|
||||
|
||||
if not args.yes:
|
||||
ask = input(f"Install {args.machine} to {target_host}? [y/N] ")
|
||||
ask = input(
|
||||
f"Install {args.machine} to {machine.target_host_address}? [y/N] "
|
||||
)
|
||||
if ask != "y":
|
||||
return None
|
||||
|
||||
return install_machine(
|
||||
InstallOptions(
|
||||
machine=machine,
|
||||
target_host=target_host,
|
||||
kexec=args.kexec,
|
||||
phases=args.phases,
|
||||
debug=args.debug,
|
||||
|
||||
@@ -2,6 +2,7 @@ import argparse
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Literal
|
||||
@@ -11,38 +12,38 @@ from clan_lib.api.disk import MachineDiskMatter
|
||||
from clan_lib.api.modules import parse_frontmatter
|
||||
from clan_lib.api.serde import dataclass_to_dict
|
||||
|
||||
from clan_cli.cmd import RunOpts, run_no_stdout
|
||||
from clan_cli.cmd import RunOpts, run
|
||||
from clan_cli.completions import add_dynamic_completer, complete_tags
|
||||
from clan_cli.dirs import specific_machine_dir
|
||||
from clan_cli.errors import ClanCmdError, ClanError
|
||||
from clan_cli.errors import ClanError
|
||||
from clan_cli.flake import Flake
|
||||
from clan_cli.inventory import (
|
||||
Machine,
|
||||
load_inventory_eval,
|
||||
patch_inventory_with,
|
||||
)
|
||||
from clan_cli.inventory.classes import Machine as InventoryMachine
|
||||
from clan_cli.machines.hardware import HardwareConfig
|
||||
from clan_cli.nix import nix_eval, nix_shell
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.nix import nix_eval
|
||||
from clan_cli.tags import list_nixos_machines_by_tags
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@API.register
|
||||
def set_machine(flake_url: Path, machine_name: str, machine: Machine) -> None:
|
||||
patch_inventory_with(
|
||||
flake_url, f"machines.{machine_name}", dataclass_to_dict(machine)
|
||||
)
|
||||
def set_machine(flake: Flake, machine_name: str, machine: InventoryMachine) -> None:
|
||||
patch_inventory_with(flake, f"machines.{machine_name}", dataclass_to_dict(machine))
|
||||
|
||||
|
||||
@API.register
|
||||
def list_inventory_machines(flake_url: str | Path) -> dict[str, Machine]:
|
||||
inventory = load_inventory_eval(flake_url)
|
||||
def list_machines(flake: Flake) -> dict[str, InventoryMachine]:
|
||||
inventory = load_inventory_eval(flake)
|
||||
return inventory.get("machines", {})
|
||||
|
||||
|
||||
@dataclass
|
||||
class MachineDetails:
|
||||
machine: Machine
|
||||
machine: InventoryMachine
|
||||
hw_config: HardwareConfig | None = None
|
||||
disk_schema: MachineDiskMatter | None = None
|
||||
|
||||
@@ -59,16 +60,16 @@ def extract_header(c: str) -> str:
|
||||
|
||||
|
||||
@API.register
|
||||
def get_inventory_machine_details(flake_url: Path, machine_name: str) -> MachineDetails:
|
||||
inventory = load_inventory_eval(flake_url)
|
||||
machine = inventory.get("machines", {}).get(machine_name)
|
||||
if machine is None:
|
||||
msg = f"Machine {machine_name} not found in inventory"
|
||||
def get_machine_details(machine: Machine) -> MachineDetails:
|
||||
inventory = load_inventory_eval(machine.flake)
|
||||
machine_inv = inventory.get("machines", {}).get(machine.name)
|
||||
if machine_inv is None:
|
||||
msg = f"Machine {machine.name} not found in inventory"
|
||||
raise ClanError(msg)
|
||||
|
||||
hw_config = HardwareConfig.detect_type(flake_url, machine_name)
|
||||
hw_config = HardwareConfig.detect_type(machine)
|
||||
|
||||
machine_dir = specific_machine_dir(flake_url, machine_name)
|
||||
machine_dir = specific_machine_dir(machine)
|
||||
disk_schema: MachineDiskMatter | None = None
|
||||
disk_path = machine_dir / "disko.nix"
|
||||
if disk_path.exists():
|
||||
@@ -79,7 +80,9 @@ def get_inventory_machine_details(flake_url: Path, machine_name: str) -> Machine
|
||||
if data:
|
||||
disk_schema = data # type: ignore
|
||||
|
||||
return MachineDetails(machine=machine, hw_config=hw_config, disk_schema=disk_schema)
|
||||
return MachineDetails(
|
||||
machine=machine_inv, hw_config=hw_config, disk_schema=disk_schema
|
||||
)
|
||||
|
||||
|
||||
def list_nixos_machines(flake_url: str | Path) -> list[str]:
|
||||
@@ -92,7 +95,7 @@ def list_nixos_machines(flake_url: str | Path) -> list[str]:
|
||||
]
|
||||
)
|
||||
|
||||
proc = run_no_stdout(cmd)
|
||||
proc = run(cmd)
|
||||
|
||||
try:
|
||||
res = proc.stdout.strip()
|
||||
@@ -106,53 +109,36 @@ def list_nixos_machines(flake_url: str | Path) -> list[str]:
|
||||
|
||||
@dataclass
|
||||
class ConnectionOptions:
|
||||
keyfile: str | None = None
|
||||
timeout: int = 2
|
||||
retries: int = 10
|
||||
|
||||
|
||||
from clan_cli.machines.machines import Machine
|
||||
|
||||
|
||||
@API.register
|
||||
def check_machine_online(
|
||||
flake_url: str | Path, machine_name: str, opts: ConnectionOptions | None
|
||||
machine: Machine, opts: ConnectionOptions | None = None
|
||||
) -> Literal["Online", "Offline"]:
|
||||
machine = load_inventory_eval(flake_url).get("machines", {}).get(machine_name)
|
||||
if not machine:
|
||||
msg = f"Machine {machine_name} not found in inventory"
|
||||
raise ClanError(msg)
|
||||
|
||||
hostname = machine.get("deploy", {}).get("targetHost")
|
||||
|
||||
hostname = machine.target_host_address
|
||||
if not hostname:
|
||||
msg = f"Machine {machine_name} does not specify a targetHost"
|
||||
msg = f"Machine {machine.name} does not specify a targetHost"
|
||||
raise ClanError(msg)
|
||||
|
||||
timeout = opts.timeout if opts and opts.timeout else 20
|
||||
timeout = opts.timeout if opts and opts.timeout else 2
|
||||
|
||||
cmd = nix_shell(
|
||||
["util-linux", *(["openssh"] if hostname else [])],
|
||||
[
|
||||
"ssh",
|
||||
*(["-i", f"{opts.keyfile}"] if opts and opts.keyfile else []),
|
||||
# Disable strict host key checking
|
||||
"-o",
|
||||
"StrictHostKeyChecking=accept-new",
|
||||
# Disable known hosts file
|
||||
"-o",
|
||||
"UserKnownHostsFile=/dev/null",
|
||||
"-o",
|
||||
f"ConnectTimeout={timeout}",
|
||||
f"{hostname}",
|
||||
"true",
|
||||
"&> /dev/null",
|
||||
],
|
||||
)
|
||||
try:
|
||||
proc = run_no_stdout(cmd, RunOpts(needs_user_terminal=True))
|
||||
if proc.returncode != 0:
|
||||
return "Offline"
|
||||
except ClanCmdError:
|
||||
return "Offline"
|
||||
else:
|
||||
return "Online"
|
||||
for _ in range(opts.retries if opts and opts.retries else 10):
|
||||
with machine.target_host() as target:
|
||||
res = target.run(
|
||||
["true"],
|
||||
RunOpts(timeout=timeout, check=False, needs_user_terminal=True),
|
||||
)
|
||||
|
||||
if res.returncode == 0:
|
||||
return "Online"
|
||||
time.sleep(timeout)
|
||||
|
||||
return "Offline"
|
||||
|
||||
|
||||
def list_command(args: argparse.Namespace) -> None:
|
||||
|
||||
@@ -2,12 +2,14 @@ import importlib
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
from collections.abc import Iterator
|
||||
from contextlib import contextmanager
|
||||
from dataclasses import dataclass, field
|
||||
from functools import cached_property
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from clan_cli.cmd import Log, RunOpts, run_no_stdout
|
||||
from clan_cli.cmd import Log, RunOpts, run
|
||||
from clan_cli.errors import ClanCmdError, ClanError
|
||||
from clan_cli.facts import public_modules as facts_public_modules
|
||||
from clan_cli.facts import secret_modules as facts_secret_modules
|
||||
@@ -24,7 +26,7 @@ if TYPE_CHECKING:
|
||||
from clan_cli.vars.generate import Generator
|
||||
|
||||
|
||||
@dataclass
|
||||
@dataclass(frozen=True)
|
||||
class Machine:
|
||||
name: str
|
||||
flake: Flake
|
||||
@@ -145,34 +147,37 @@ class Machine:
|
||||
def flake_dir(self) -> Path:
|
||||
return self.flake.path
|
||||
|
||||
@property
|
||||
def target_host(self) -> Host:
|
||||
return parse_deployment_address(
|
||||
@contextmanager
|
||||
def target_host(self) -> Iterator[Host]:
|
||||
with parse_deployment_address(
|
||||
self.name,
|
||||
self.target_host_address,
|
||||
self.host_key_check,
|
||||
private_key=self.private_key,
|
||||
meta={"machine": self},
|
||||
)
|
||||
) as target_host:
|
||||
yield target_host
|
||||
|
||||
@property
|
||||
def build_host(self) -> Host:
|
||||
@contextmanager
|
||||
def build_host(self) -> Iterator[Host | None]:
|
||||
"""
|
||||
The host where the machine is built and deployed from.
|
||||
Can be the same as the target host.
|
||||
"""
|
||||
build_host = self.override_build_host or self.deployment.get("buildHost")
|
||||
if build_host is None:
|
||||
return self.target_host
|
||||
yield None
|
||||
return
|
||||
# enable ssh agent forwarding to allow the build host to access the target host
|
||||
return parse_deployment_address(
|
||||
with parse_deployment_address(
|
||||
self.name,
|
||||
build_host,
|
||||
self.host_key_check,
|
||||
forward_agent=True,
|
||||
private_key=self.private_key,
|
||||
meta={"machine": self, "target_host": self.target_host},
|
||||
)
|
||||
meta={"machine": self},
|
||||
) as build_host:
|
||||
yield build_host
|
||||
|
||||
@cached_property
|
||||
def deploy_as_root(self) -> bool:
|
||||
@@ -183,7 +188,7 @@ class Machine:
|
||||
# however there is a soon to be merged PR that requires deployment
|
||||
# as root to match NixOS: https://github.com/nix-darwin/nix-darwin/pull/1341
|
||||
return json.loads(
|
||||
run_no_stdout(
|
||||
run(
|
||||
nix_eval(
|
||||
[
|
||||
f"{self.flake}#darwinConfigurations.{self.name}.options.system",
|
||||
|
||||
@@ -5,11 +5,12 @@ import os
|
||||
import re
|
||||
import shlex
|
||||
import sys
|
||||
from contextlib import ExitStack
|
||||
|
||||
from clan_lib.api import API
|
||||
|
||||
from clan_cli.async_run import AsyncContext, AsyncOpts, AsyncRuntime, is_async_cancelled
|
||||
from clan_cli.cmd import MsgColor, RunOpts, run
|
||||
from clan_cli.cmd import Log, MsgColor, RunOpts, run
|
||||
from clan_cli.colors import AnsiColor
|
||||
from clan_cli.completions import (
|
||||
add_dynamic_completer,
|
||||
@@ -20,14 +21,13 @@ from clan_cli.facts.generate import generate_facts
|
||||
from clan_cli.facts.upload import upload_secrets
|
||||
from clan_cli.flake import Flake
|
||||
from clan_cli.inventory import Machine as InventoryMachine
|
||||
from clan_cli.machines.list import list_machines
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.nix import nix_command, nix_config, nix_metadata
|
||||
from clan_cli.ssh.host import Host, HostKeyCheck
|
||||
from clan_cli.vars.generate import generate_vars
|
||||
from clan_cli.vars.upload import upload_secret_vars
|
||||
|
||||
from .inventory import get_all_machines, get_selected_machines
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -43,8 +43,7 @@ def is_local_input(node: dict[str, dict[str, str]]) -> bool:
|
||||
)
|
||||
|
||||
|
||||
def upload_sources(machine: Machine) -> str:
|
||||
host = machine.build_host
|
||||
def upload_sources(machine: Machine, host: Host) -> str:
|
||||
env = host.nix_ssh_env(os.environ.copy())
|
||||
|
||||
flake_url = (
|
||||
@@ -69,7 +68,12 @@ def upload_sources(machine: Machine) -> str:
|
||||
)
|
||||
run(
|
||||
cmd,
|
||||
RunOpts(env=env, error_msg="failed to upload sources", prefix=machine.name),
|
||||
RunOpts(
|
||||
env=env,
|
||||
needs_user_terminal=True,
|
||||
error_msg="failed to upload sources",
|
||||
prefix=machine.name,
|
||||
),
|
||||
)
|
||||
return path
|
||||
|
||||
@@ -84,7 +88,12 @@ def upload_sources(machine: Machine) -> str:
|
||||
flake_url,
|
||||
]
|
||||
)
|
||||
proc = run(cmd, RunOpts(env=env, error_msg="failed to upload sources"))
|
||||
proc = run(
|
||||
cmd,
|
||||
RunOpts(
|
||||
env=env, needs_user_terminal=True, error_msg="failed to upload sources"
|
||||
),
|
||||
)
|
||||
|
||||
try:
|
||||
return json.loads(proc.stdout)["path"]
|
||||
@@ -101,41 +110,41 @@ def update_machines(base_path: str, machines: list[InventoryMachine]) -> None:
|
||||
flake = Flake(base_path)
|
||||
for machine in machines:
|
||||
name = machine.get("name")
|
||||
# prefer target host set via inventory, but fallback to the one set in the machine
|
||||
target_host = machine.get("deploy", {}).get("targetHost")
|
||||
|
||||
if not name:
|
||||
msg = "Machine name is not set"
|
||||
raise ClanError(msg)
|
||||
m = Machine(
|
||||
name,
|
||||
flake=flake,
|
||||
override_target_host=target_host,
|
||||
)
|
||||
if not machine.get("deploy", {}).get("targetHost"):
|
||||
msg = f"'TargetHost' is not set for machine '{name}'"
|
||||
raise ClanError(msg)
|
||||
# Copy targetHost to machine
|
||||
m.override_target_host = machine.get("deploy", {}).get("targetHost")
|
||||
# Would be nice to have?
|
||||
# m.override_build_host = machine.deploy.buildHost
|
||||
group_machines.append(m)
|
||||
|
||||
deploy_machines(group_machines)
|
||||
|
||||
|
||||
def deploy_machines(machines: list[Machine]) -> None:
|
||||
"""
|
||||
Deploy to all hosts in parallel
|
||||
"""
|
||||
def deploy_machine(machine: Machine) -> None:
|
||||
with ExitStack() as stack:
|
||||
target_host = stack.enter_context(machine.target_host())
|
||||
build_host = stack.enter_context(machine.build_host())
|
||||
|
||||
if machine._class_ == "darwin":
|
||||
if not machine.deploy_as_root and target_host.user == "root":
|
||||
msg = f"'targetHost' should be set to a non-root user for deploying to nix-darwin on machine '{machine.name}'"
|
||||
raise ClanError(msg)
|
||||
|
||||
host = build_host or target_host
|
||||
|
||||
def deploy(machine: Machine) -> None:
|
||||
host = machine.build_host
|
||||
generate_facts([machine], service=None, regenerate=False)
|
||||
generate_vars([machine], generator_name=None, regenerate=False)
|
||||
|
||||
upload_secrets(machine)
|
||||
upload_secret_vars(machine)
|
||||
upload_secret_vars(machine, target_host)
|
||||
|
||||
path = upload_sources(
|
||||
machine=machine,
|
||||
)
|
||||
path = upload_sources(machine, host)
|
||||
|
||||
nix_options = [
|
||||
"--show-trace",
|
||||
@@ -160,8 +169,7 @@ def deploy_machines(machines: list[Machine]) -> None:
|
||||
"",
|
||||
]
|
||||
|
||||
target_host: Host | None = host.meta.get("target_host")
|
||||
if target_host:
|
||||
if build_host:
|
||||
become_root = False
|
||||
nix_options += ["--target-host", target_host.target]
|
||||
|
||||
@@ -171,19 +179,19 @@ def deploy_machines(machines: list[Machine]) -> None:
|
||||
switch_cmd = [f"{machine._class_}-rebuild", "switch", *nix_options]
|
||||
test_cmd = [f"{machine._class_}-rebuild", "test", *nix_options]
|
||||
|
||||
env = host.nix_ssh_env(None)
|
||||
remote_env = host.nix_ssh_env(None, local_ssh=False)
|
||||
ret = host.run(
|
||||
switch_cmd,
|
||||
RunOpts(check=False, msg_color=MsgColor(stderr=AnsiColor.DEFAULT)),
|
||||
extra_env=env,
|
||||
RunOpts(
|
||||
check=False,
|
||||
log=Log.BOTH,
|
||||
msg_color=MsgColor(stderr=AnsiColor.DEFAULT),
|
||||
needs_user_terminal=True,
|
||||
),
|
||||
extra_env=remote_env,
|
||||
become_root=become_root,
|
||||
)
|
||||
|
||||
# Last output line (config store path) is printed to stdout instead of stderr
|
||||
lines = ret.stdout.splitlines()
|
||||
if lines:
|
||||
print(lines[-1])
|
||||
|
||||
if is_async_cancelled():
|
||||
return
|
||||
|
||||
@@ -197,24 +205,28 @@ def deploy_machines(machines: list[Machine]) -> None:
|
||||
)
|
||||
ret = host.run(
|
||||
test_cmd if is_mobile else switch_cmd,
|
||||
RunOpts(msg_color=MsgColor(stderr=AnsiColor.DEFAULT)),
|
||||
extra_env=env,
|
||||
RunOpts(
|
||||
log=Log.BOTH,
|
||||
msg_color=MsgColor(stderr=AnsiColor.DEFAULT),
|
||||
needs_user_terminal=True,
|
||||
),
|
||||
extra_env=remote_env,
|
||||
become_root=become_root,
|
||||
)
|
||||
|
||||
|
||||
def deploy_machines(machines: list[Machine]) -> None:
|
||||
"""
|
||||
Deploy to all hosts in parallel
|
||||
"""
|
||||
|
||||
with AsyncRuntime() as runtime:
|
||||
for machine in machines:
|
||||
if machine._class_ == "darwin":
|
||||
if not machine.deploy_as_root and machine.target_host.user == "root":
|
||||
msg = f"'TargetHost' should be set to a non-root user for deploying to nix-darwin on machine '{machine.name}'"
|
||||
raise ClanError(msg)
|
||||
|
||||
machine.info(f"Updating {machine.name}")
|
||||
runtime.async_run(
|
||||
AsyncOpts(
|
||||
tid=machine.name, async_ctx=AsyncContext(prefix=machine.name)
|
||||
),
|
||||
deploy,
|
||||
deploy_machine,
|
||||
machine,
|
||||
)
|
||||
runtime.join_all()
|
||||
@@ -226,61 +238,73 @@ def update_command(args: argparse.Namespace) -> None:
|
||||
if args.flake is None:
|
||||
msg = "Could not find clan flake toplevel directory"
|
||||
raise ClanError(msg)
|
||||
machines = []
|
||||
if len(args.machines) == 1 and args.target_host is not None:
|
||||
machine = Machine(
|
||||
name=args.machines[0], flake=args.flake, nix_options=args.option
|
||||
)
|
||||
machine.override_target_host = args.target_host
|
||||
machine.override_build_host = args.build_host
|
||||
machine.host_key_check = HostKeyCheck.from_str(args.host_key_check)
|
||||
machines.append(machine)
|
||||
|
||||
elif args.target_host is not None:
|
||||
print("target host can only be specified for a single machine")
|
||||
exit(1)
|
||||
else:
|
||||
if len(args.machines) == 0:
|
||||
ignored_machines = []
|
||||
for machine in get_all_machines(args.flake, args.option):
|
||||
if machine.deployment.get("requireExplicitUpdate", False):
|
||||
continue
|
||||
try:
|
||||
machine.build_host # noqa: B018
|
||||
except ClanError: # check if we have a build host set
|
||||
ignored_machines.append(machine)
|
||||
continue
|
||||
machine.host_key_check = HostKeyCheck.from_str(args.host_key_check)
|
||||
machine.override_build_host = args.build_host
|
||||
machines.append(machine)
|
||||
|
||||
if not machines and ignored_machines != []:
|
||||
print(
|
||||
"WARNING: No machines to update."
|
||||
"The following defined machines were ignored because they"
|
||||
"do not have the `clan.core.networking.targetHost` nixos option set:",
|
||||
file=sys.stderr,
|
||||
)
|
||||
for machine in ignored_machines:
|
||||
print(machine, file=sys.stderr)
|
||||
|
||||
else:
|
||||
machines = get_selected_machines(args.flake, args.option, args.machines)
|
||||
for machine in machines:
|
||||
machine.override_build_host = args.build_host
|
||||
machine.host_key_check = HostKeyCheck.from_str(args.host_key_check)
|
||||
|
||||
config = nix_config()
|
||||
system = config["system"]
|
||||
machine_names = [machine.name for machine in machines]
|
||||
args.flake.precache(
|
||||
[
|
||||
f"clanInternals.machines.{system}.{{{','.join(machine_names)}}}.config.clan.core.vars.generators.*.validationHash",
|
||||
f"clanInternals.machines.{system}.{{{','.join(machine_names)}}}.config.system.clan.deployment.file",
|
||||
]
|
||||
machines: list[Machine] = []
|
||||
# if no machines are passed, we will update all machines
|
||||
selected_machines = (
|
||||
args.machines if args.machines else list_machines(args.flake).keys()
|
||||
)
|
||||
|
||||
deploy_machines(machines)
|
||||
if args.target_host is not None and len(args.machines) > 1:
|
||||
msg = "Target Host can only be set for one machines"
|
||||
raise ClanError(msg)
|
||||
|
||||
for machine_name in selected_machines:
|
||||
machine = Machine(
|
||||
name=machine_name,
|
||||
flake=args.flake,
|
||||
nix_options=args.option,
|
||||
override_target_host=args.target_host,
|
||||
override_build_host=args.build_host,
|
||||
host_key_check=HostKeyCheck.from_str(args.host_key_check),
|
||||
)
|
||||
machines.append(machine)
|
||||
|
||||
def filter_machine(m: Machine) -> bool:
|
||||
if m.deployment.get("requireExplicitUpdate", False):
|
||||
return False
|
||||
|
||||
try:
|
||||
# check if the machine has a target host set
|
||||
m.target_host # noqa: B018
|
||||
except ClanError:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
machines_to_update = machines
|
||||
implicit_all: bool = len(args.machines) == 0
|
||||
if implicit_all:
|
||||
machines_to_update = list(filter(filter_machine, machines))
|
||||
|
||||
# machines that are in the list but not included in the update list
|
||||
ignored_machines = {m.name for m in machines if m not in machines_to_update}
|
||||
|
||||
if not machines_to_update and ignored_machines:
|
||||
print(
|
||||
"WARNING: No machines to update.\n"
|
||||
"The following defined machines were ignored because they\n"
|
||||
"- Require explicit update (see 'requireExplicitUpdate')\n",
|
||||
"- Might not have the `clan.core.networking.targetHost` nixos option set:\n",
|
||||
file=sys.stderr,
|
||||
)
|
||||
for m in ignored_machines:
|
||||
print(m, file=sys.stderr)
|
||||
|
||||
if machines_to_update:
|
||||
# Prepopulate the cache
|
||||
config = nix_config()
|
||||
system = config["system"]
|
||||
machine_names = [machine.name for machine in machines_to_update]
|
||||
args.flake.precache(
|
||||
[
|
||||
f"clanInternals.machines.{system}.{{{','.join(machine_names)}}}.config.clan.core.vars.generators.*.validationHash",
|
||||
f"clanInternals.machines.{system}.{{{','.join(machine_names)}}}.config.system.clan.deployment.file",
|
||||
]
|
||||
)
|
||||
# Run the deplyoyment
|
||||
deploy_machines(machines_to_update)
|
||||
|
||||
except KeyboardInterrupt:
|
||||
log.warning("Interrupted by user")
|
||||
sys.exit(1)
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
from functools import cache
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from clan_cli.cmd import run, run_no_stdout
|
||||
from clan_cli.cmd import run
|
||||
from clan_cli.dirs import nixpkgs_flake, nixpkgs_source
|
||||
from clan_cli.errors import ClanError
|
||||
from clan_cli.locked_open import locked_open
|
||||
@@ -55,7 +56,7 @@ def nix_add_to_gcroots(nix_path: Path, dest: Path) -> None:
|
||||
@cache
|
||||
def nix_config() -> dict[str, Any]:
|
||||
cmd = nix_command(["config", "show", "--json"])
|
||||
proc = run_no_stdout(cmd)
|
||||
proc = run(cmd)
|
||||
data = json.loads(proc.stdout)
|
||||
config = {}
|
||||
for key, value in data.items():
|
||||
@@ -131,7 +132,16 @@ class Packages:
|
||||
cls.static_packages = set(
|
||||
os.environ.get("CLAN_PROVIDED_PACKAGES", "").split(":")
|
||||
)
|
||||
return program in cls.static_packages
|
||||
|
||||
if program in cls.static_packages:
|
||||
if shutil.which(program) is None:
|
||||
log.warning(
|
||||
"Program %s is not in the path even though it should be shipped with clan",
|
||||
program,
|
||||
)
|
||||
return False
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
# Features:
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
"age-plugin-sss",
|
||||
"age-plugin-tpm",
|
||||
"age-plugin-yubikey",
|
||||
"age-plugin-1p",
|
||||
"avahi",
|
||||
"bash",
|
||||
"bubblewrap",
|
||||
|
||||
@@ -18,7 +18,8 @@ from clan_lib.api import API
|
||||
from clan_cli.cmd import Log, RunOpts, run
|
||||
from clan_cli.dirs import user_config_dir
|
||||
from clan_cli.errors import ClanError
|
||||
from clan_cli.nix import nix_eval, nix_shell
|
||||
from clan_cli.flake import Flake
|
||||
from clan_cli.nix import nix_shell
|
||||
|
||||
from .folders import sops_users_folder
|
||||
|
||||
@@ -196,26 +197,11 @@ def load_age_plugins(flake_dir: str | Path) -> list[str]:
|
||||
msg = "Missing flake directory"
|
||||
raise ClanError(msg)
|
||||
|
||||
cmd = nix_eval(
|
||||
[
|
||||
f"{flake_dir}#clanInternals.secrets.age.plugins",
|
||||
"--json",
|
||||
]
|
||||
)
|
||||
|
||||
try:
|
||||
result = run(cmd)
|
||||
except Exception as e:
|
||||
msg = f"Failed to load age plugins {flake_dir}"
|
||||
raise ClanError(msg) from e
|
||||
|
||||
json_str = result.stdout.strip()
|
||||
|
||||
try:
|
||||
plugins = json.loads(json_str)
|
||||
except json.JSONDecodeError as e:
|
||||
msg = f"Failed to decode '{json_str}': {e}"
|
||||
raise ClanError(msg) from e
|
||||
flake = Flake(str(flake_dir))
|
||||
result = flake.select("clanInternals.?secrets.?age.?plugins")
|
||||
plugins = result["secrets"]["age"]["plugins"]
|
||||
if plugins == {}:
|
||||
plugins = []
|
||||
|
||||
if isinstance(plugins, list):
|
||||
return plugins
|
||||
|
||||
@@ -87,7 +87,7 @@ def ssh_shell_from_deploy(
|
||||
deploy_info: DeployInfo, runtime: AsyncRuntime, host_key_check: HostKeyCheck
|
||||
) -> None:
|
||||
if host := find_reachable_host(deploy_info, host_key_check):
|
||||
host.connect_ssh_shell(password=deploy_info.pwd)
|
||||
host.interactive_ssh()
|
||||
else:
|
||||
log.info("Could not reach host via clearnet 'addrs'")
|
||||
log.info(f"Trying to reach host via tor '{deploy_info.tor}'")
|
||||
@@ -96,8 +96,7 @@ def ssh_shell_from_deploy(
|
||||
msg = "No tor address provided, please provide a tor address."
|
||||
raise ClanError(msg)
|
||||
if ssh_tor_reachable(TorTarget(onion=deploy_info.tor, port=22)):
|
||||
host = Host(host=deploy_info.tor)
|
||||
host.connect_ssh_shell(password=deploy_info.pwd, tor_socks=True)
|
||||
host = Host(host=deploy_info.tor, password=deploy_info.pwd, tor_socks=True)
|
||||
else:
|
||||
msg = "Could not reach host via tor either."
|
||||
raise ClanError(msg)
|
||||
|
||||
@@ -5,9 +5,11 @@ import os
|
||||
import shlex
|
||||
import socket
|
||||
import subprocess
|
||||
import types
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from shlex import quote
|
||||
from tempfile import TemporaryDirectory
|
||||
from typing import Any
|
||||
|
||||
from clan_cli.cmd import CmdOut, RunOpts, run
|
||||
@@ -29,12 +31,32 @@ class Host:
|
||||
user: str | None = None
|
||||
port: int | None = None
|
||||
private_key: Path | None = None
|
||||
password: str | None = None
|
||||
forward_agent: bool = False
|
||||
command_prefix: str | None = None
|
||||
host_key_check: HostKeyCheck = HostKeyCheck.ASK
|
||||
meta: dict[str, Any] = field(default_factory=dict)
|
||||
verbose_ssh: bool = False
|
||||
ssh_options: dict[str, str] = field(default_factory=dict)
|
||||
tor_socks: bool = False
|
||||
|
||||
_temp_dir: TemporaryDirectory | None = None
|
||||
|
||||
def __enter__(self) -> "Host":
|
||||
self._temp_dir = TemporaryDirectory(prefix="clan-ssh-")
|
||||
return self
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
exc_type: type[BaseException] | None,
|
||||
exc_value: BaseException | None,
|
||||
traceback: types.TracebackType | None,
|
||||
) -> None:
|
||||
try:
|
||||
if self._temp_dir:
|
||||
self._temp_dir.cleanup()
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
def __post_init__(self) -> None:
|
||||
if not self.command_prefix:
|
||||
@@ -106,6 +128,9 @@ class Host:
|
||||
if extra_env is None:
|
||||
extra_env = {}
|
||||
|
||||
if opts is None:
|
||||
opts = RunOpts()
|
||||
|
||||
# If we are not root and we need to become root, prepend sudo
|
||||
sudo = ""
|
||||
if become_root and self.user != "root":
|
||||
@@ -116,11 +141,10 @@ class Host:
|
||||
for k, v in extra_env.items():
|
||||
env_vars.append(f"{shlex.quote(k)}={shlex.quote(v)}")
|
||||
|
||||
if opts is None:
|
||||
opts = RunOpts()
|
||||
else:
|
||||
opts.needs_user_terminal = True
|
||||
if opts.prefix is None:
|
||||
opts.prefix = self.command_prefix
|
||||
# always set needs_user_terminal to True because ssh asks for passwords
|
||||
opts.needs_user_terminal = True
|
||||
|
||||
if opts.cwd is not None:
|
||||
msg = "cwd is not supported for remote commands"
|
||||
@@ -158,15 +182,17 @@ class Host:
|
||||
# Run the ssh command
|
||||
return run(ssh_cmd, opts)
|
||||
|
||||
def nix_ssh_env(self, env: dict[str, str] | None) -> dict[str, str]:
|
||||
def nix_ssh_env(
|
||||
self, env: dict[str, str] | None, local_ssh: bool = True
|
||||
) -> dict[str, str]:
|
||||
if env is None:
|
||||
env = {}
|
||||
env["NIX_SSHOPTS"] = " ".join(self.ssh_cmd_opts)
|
||||
env["NIX_SSHOPTS"] = " ".join(self.ssh_cmd_opts(local_ssh=local_ssh))
|
||||
return env
|
||||
|
||||
@property
|
||||
def ssh_cmd_opts(
|
||||
self,
|
||||
local_ssh: bool = True,
|
||||
) -> list[str]:
|
||||
ssh_opts = ["-A"] if self.forward_agent else []
|
||||
if self.port:
|
||||
@@ -180,32 +206,40 @@ class Host:
|
||||
if self.private_key:
|
||||
ssh_opts.extend(["-i", str(self.private_key)])
|
||||
|
||||
if local_ssh and self._temp_dir:
|
||||
ssh_opts.extend(["-o", "ControlPersist=30m"])
|
||||
ssh_opts.extend(
|
||||
[
|
||||
"-o",
|
||||
f"ControlPath={Path(self._temp_dir.name) / 'clan-%h-%p-%r'}",
|
||||
]
|
||||
)
|
||||
ssh_opts.extend(["-o", "ControlMaster=auto"])
|
||||
|
||||
return ssh_opts
|
||||
|
||||
def ssh_cmd(
|
||||
self,
|
||||
verbose_ssh: bool = False,
|
||||
tor_socks: bool = False,
|
||||
tty: bool = False,
|
||||
password: str | None = None,
|
||||
) -> list[str]:
|
||||
packages = []
|
||||
password_args = []
|
||||
if password:
|
||||
if self.password:
|
||||
packages.append("sshpass")
|
||||
password_args = [
|
||||
"sshpass",
|
||||
"-p",
|
||||
password,
|
||||
self.password,
|
||||
]
|
||||
|
||||
ssh_opts = self.ssh_cmd_opts
|
||||
ssh_opts = self.ssh_cmd_opts()
|
||||
if verbose_ssh or self.verbose_ssh:
|
||||
ssh_opts.extend(["-v"])
|
||||
if tty:
|
||||
ssh_opts.extend(["-t"])
|
||||
|
||||
if tor_socks:
|
||||
if self.tor_socks:
|
||||
packages.append("netcat")
|
||||
ssh_opts.append("-o")
|
||||
ssh_opts.append("ProxyCommand=nc -x 127.0.0.1:9050 -X 5 %h %p")
|
||||
@@ -219,12 +253,8 @@ class Host:
|
||||
|
||||
return nix_shell(packages, cmd)
|
||||
|
||||
def connect_ssh_shell(
|
||||
self, *, password: str | None = None, tor_socks: bool = False
|
||||
) -> None:
|
||||
cmd = self.ssh_cmd(tor_socks=tor_socks, password=password)
|
||||
|
||||
subprocess.run(cmd)
|
||||
def interactive_ssh(self) -> None:
|
||||
subprocess.run(self.ssh_cmd())
|
||||
|
||||
|
||||
def is_ssh_reachable(host: Host) -> bool:
|
||||
|
||||
@@ -63,7 +63,8 @@ def upload(
|
||||
for mdir in dirs:
|
||||
dir_path = Path(root) / mdir
|
||||
tarinfo = tar.gettarinfo(
|
||||
dir_path, arcname=str(dir_path.relative_to(str(local_src)))
|
||||
dir_path,
|
||||
arcname=str(dir_path.relative_to(str(local_src))),
|
||||
)
|
||||
tarinfo.mode = dir_mode
|
||||
tarinfo.uname = file_user
|
||||
|
||||
@@ -3,7 +3,7 @@ import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
from clan_cli.cmd import RunOpts, run_no_stdout
|
||||
from clan_cli.cmd import RunOpts, run
|
||||
from clan_cli.completions import (
|
||||
add_dynamic_completer,
|
||||
complete_machines,
|
||||
@@ -32,7 +32,7 @@ def list_state_folders(machine: Machine, service: None | str = None) -> None:
|
||||
res = "{}"
|
||||
|
||||
try:
|
||||
proc = run_no_stdout(cmd, opts=RunOpts(prefix=machine.name))
|
||||
proc = run(cmd, RunOpts(prefix=machine.name))
|
||||
res = proc.stdout.strip()
|
||||
except ClanCmdError as e:
|
||||
msg = "Clan might not have meta attributes"
|
||||
|
||||
@@ -2,7 +2,7 @@ import json
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from clan_cli.cmd import run_no_stdout
|
||||
from clan_cli.cmd import run
|
||||
from clan_cli.errors import ClanError
|
||||
from clan_cli.nix import nix_eval
|
||||
|
||||
@@ -18,7 +18,7 @@ def list_tagged_machines(flake_url: str | Path) -> dict[str, Any]:
|
||||
"--json",
|
||||
]
|
||||
)
|
||||
proc = run_no_stdout(cmd)
|
||||
proc = run(cmd)
|
||||
|
||||
try:
|
||||
res = proc.stdout.strip()
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user