Compare commits

78 Commits · terst...vm-mamange

| SHA1 |
|---|
| 5859eeac5a |
| 62ef90e959 |
| 7fdbd2e3eb |
| 7daebd5ee0 |
| cc8dd0564b |
| 23e52954c9 |
| 4717d1f149 |
| e28f280036 |
| 6fa2a977df |
| 65dba2508f |
| 9884643070 |
| 5083992f7b |
| 6bd8839128 |
| 765bdb262a |
| 05c00fbe82 |
| 7e97734797 |
| 6384c4654e |
| 72d3ad09a4 |
| a535450ec0 |
| aaeb616f82 |
| 434edeaae1 |
| a4efd3cb16 |
| 13131ccd6e |
| 3a8309b01f |
| 10065a7c8f |
| 176b54e29d |
| be048d8307 |
| 52fcab30e7 |
| d3b423328f |
| 1177e84dcc |
| 414952dfa3 |
| 24194011ac |
| 4f78a8ff94 |
| 068b5d4c1e |
| adccef4757 |
| 980d94d47d |
| a50b25eea2 |
| 017989841d |
| c14a5fcc69 |
| 4f60345ba7 |
| ece48d3b5f |
| 4eea8d24f0 |
| 49099df3fb |
| 62ccba9fb5 |
| 0b44770f1f |
| 61c3d7284a |
| 44b1be5ed4 |
| 88871bea69 |
| 5141ea047c |
| ff6a03a646 |
| bc379c985d |
| 69d8b029d6 |
| f3617b0407 |
| a5205681cc |
| 9880847d43 |
| 8aa88b22ab |
| ff979eba61 |
| 5d1abbd303 |
| 92e9bb2ed8 |
| ea75c9bfa9 |
| 2adf65482d |
| 5684ddf104 |
| f74e444120 |
| 0ef57bfc8e |
| 8f43af3c48 |
| eeaec583cb |
| a9d1ff83f2 |
| 89cb22147c |
| 1006fc755e |
| f100177df3 |
| cbd3b08296 |
| 2608bee30a |
| a29459a384 |
| 1abdd45821 |
| b058fcc8eb |
| 24ae95a007 |
| 39510b613f |
| dcdab61d13 |
@@ -19,8 +19,7 @@ jobs:
       uses: Mic92/update-flake-inputs-gitea@main
       with:
         # Exclude private flakes and update-clan-core checks flake
-        exclude-patterns: "devFlake/private/flake.nix,checks/impure/flake.nix"
+        exclude-patterns: "checks/impure/flake.nix"
         auto-merge: true
         gitea-token: ${{ secrets.CI_BOT_TOKEN }}
         github-token: ${{ secrets.CI_BOT_GITHUB_TOKEN }}
@@ -1,40 +0,0 @@
-name: "Update private flake inputs"
-on:
-  repository_dispatch:
-  workflow_dispatch:
-  schedule:
-    - cron: "0 3 * * *" # Run daily at 3 AM
-jobs:
-  update-private-flake:
-    runs-on: nix
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          submodules: true
-      - name: Update private flake inputs
-        run: |
-          # Update the private flake lock file
-          cd devFlake/private
-          nix flake update
-          cd ../..
-
-          # Update the narHash
-          bash ./devFlake/update-private-narhash
-      - name: Create pull request
-        env:
-          CI_BOT_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
-        run: |
-          export GIT_AUTHOR_NAME=clan-bot GIT_AUTHOR_EMAIL=clan-bot@clan.lol GIT_COMMITTER_NAME=clan-bot GIT_COMMITTER_EMAIL=clan-bot@clan.lol
-
-          # Check if there are any changes
-          if ! git diff --quiet; then
-            git add devFlake/private/flake.lock devFlake/private.narHash
-            git commit -m "Update dev flake"
-
-            # Use shared PR creation script
-            export PR_BRANCH="update-dev-flake"
-            export PR_TITLE="Update dev flake"
-            export PR_BODY="This PR updates the dev flake inputs and corresponding narHash."
-          else
-            echo "No changes detected in dev flake inputs"
-          fi
@@ -1,2 +0,0 @@
-nixosModules/clanCore/vars/.* @lopter
-pkgs/clan-cli/clan_cli/(secrets|vars)/.* @lopter
@@ -104,6 +104,7 @@ in
       nixos-test-user-firewall-nftables = self.clanLib.test.containerTest ./user-firewall/nftables.nix nixosTestArgs;

       service-dummy-test = import ./service-dummy-test nixosTestArgs;
+      wireguard = import ./wireguard nixosTestArgs;
       service-dummy-test-from-flake = import ./service-dummy-test-from-flake nixosTestArgs;
     };

@@ -2,7 +2,6 @@
   config,
   self,
   lib,
-  privateInputs,
   ...
 }:
 {
@@ -85,7 +84,7 @@

       # Some distros like to automount disks with spaces
       machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdc && mount /dev/vdc "/mnt/with spaces"')
-      machine.succeed("clan flash write --debug --flake ${privateInputs.clan-core-for-checks} --yes --disk main /dev/vdc test-flash-machine-${pkgs.hostPlatform.system}")
+      machine.succeed("clan flash write --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdc test-flash-machine-${pkgs.hostPlatform.system}")
     '';
   } { inherit pkgs self; };
 };
@@ -208,7 +208,7 @@
       # Prepare test flake and Nix store
       flake_dir = prepare_test_flake(
           temp_dir,
-          "${privateInputs.clan-core-for-checks}",
+          "${self.checks.x86_64-linux.clan-core-for-checks}",
           "${closureInfo}"
       )

@@ -272,7 +272,7 @@
       # Prepare test flake and Nix store
       flake_dir = prepare_test_flake(
           temp_dir,
-          "${privateInputs.clan-core-for-checks}",
+          "${self.checks.x86_64-linux.clan-core-for-checks}",
           "${closureInfo}"
       )

@@ -1,6 +1,5 @@
 {
   self,
-  privateInputs,
   ...
 }:
 {
@@ -55,7 +54,7 @@
     testScript = ''
       start_all()
       actual.fail("cat /etc/testfile")
-      actual.succeed("env CLAN_DIR=${privateInputs.clan-core-for-checks} clan machines morph test-morph-template --i-will-be-fired-for-using-this --debug --name test-morph-machine")
+      actual.succeed("env CLAN_DIR=${self.checks.x86_64-linux.clan-core-for-checks} clan machines morph test-morph-template --i-will-be-fired-for-using-this --debug --name test-morph-machine")
       assert actual.succeed("cat /etc/testfile") == "morphed"
     '';
   } { inherit pkgs self; };
@@ -174,7 +174,7 @@


       ##############
-      print("TEST: update with --build-host localhost --target-host localhost")
+      print("TEST: update with --build-host local")
       with open(machine_config_path, "w") as f:
           f.write("""
           {
@@ -197,6 +197,15 @@
           check=True
       )

+      # allow machine to ssh into itself
+      subprocess.run([
+          "ssh",
+          "-o", "UserKnownHostsFile=/dev/null",
+          "-o", "StrictHostKeyChecking=no",
+          f"root@192.168.1.1",
+          "mkdir -p /root/.ssh && chmod 700 /root/.ssh && echo \"$(cat \"${../assets/ssh/privkey}\")\" > /root/.ssh/id_ed25519 && chmod 600 /root/.ssh/id_ed25519",
+      ], check=True)
+
       # install the clan-cli package into the container's Nix store
       subprocess.run(
           [
@@ -216,7 +225,7 @@
           },
       )

-      # Run ssh on the host to run the clan update command via --build-host localhost
+      # Run ssh on the host to run the clan update command via --build-host local
       subprocess.run([
           "ssh",
           "-o", "UserKnownHostsFile=/dev/null",
@@ -230,8 +239,8 @@
           "--host-key-check", "none",
           "--upload-inputs", # Use local store instead of fetching from network
           "--build-host", "localhost",
-          "--target-host", "localhost",
           "test-update-machine",
+          "--target-host", f"root@localhost",
      ], check=True)

       # Verify the update was successful
checks/wireguard/default.nix (new file, 115 lines)
@@ -0,0 +1,115 @@
{
  pkgs,
  nixosLib,
  clan-core,
  lib,
  ...
}:
nixosLib.runTest (
  { ... }:

  let
    machines = [
      "controller1"
      "controller2"
      "peer1"
      "peer2"
      "peer3"
    ];
  in
  {
    imports = [
      clan-core.modules.nixosTest.clanTest
    ];

    hostPkgs = pkgs;

    name = "wireguard";

    clan = {
      directory = ./.;
      modules."@clan/wireguard" = import ../../clanServices/wireguard/default.nix;
      inventory = {

        machines = lib.genAttrs machines (_: { });

        instances = {

          /*
            wg-test-one
            ┌───────────────────────────────┐
            │ ◄───────────── │
            │ controller2 controller1
            │ ▲ ─────────────► ▲ ▲
            │ │ │ │ │ │ │ │ │
            │ │ │ │ │ │ │ │ │
            │ │ │ │ │ │ │ │ │
            │ │ │ │ └───────────────┐ │ │ │ │
            │ │ │ └──────────────┐ │ │ │ │ │
            │ ▼ │ ▼ ▼ ▼
            └─► peer2 │ peer1 peer3
            │ ▲
            └──────────┘
          */

          wg-test-one = {

            module.name = "@clan/wireguard";
            module.input = "self";

            roles.controller.machines."controller1".settings = {
              endpoint = "192.168.1.1";
            };

            roles.controller.machines."controller2".settings = {
              endpoint = "192.168.1.2";
            };

            roles.peer.machines = {
              peer1.settings.controller = "controller1";
              peer2.settings.controller = "controller2";
              peer3.settings.controller = "controller1";
            };
          };

          # TODO: Will this actually work with conflicting ports? Can we re-use interfaces?
          #wg-test-two = {
          #  module.name = "@clan/wireguard";

          #  roles.controller.machines."controller1".settings = {
          #    endpoint = "192.168.1.1";
          #    port = 51922;
          #  };

          #  roles.peer.machines = {
          #    peer1 = { };
          #  };
          #};
        };
      };
    };

    testScript = ''
      start_all()

      # Show all addresses
      machines = [peer1, peer2, peer3, controller1, controller2]
      for m in machines:
          m.systemctl("start network-online.target")

      for m in machines:
          m.wait_for_unit("network-online.target")
          m.wait_for_unit("systemd-networkd.service")

      print("\n\n" + "="*60)
      print("STARTING PING TESTS")
      print("="*60)

      for m1 in machines:
          for m2 in machines:
              if m1 != m2:
                  print(f"\n--- Pinging from {m1.name} to {m2.name}.wg-test-one ---")
                  m1.wait_until_succeeds(f"ping -c1 {m2.name}.wg-test-one >&2")
    '';
  }
)
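For orientation only (this sketch is not part of the change set): the testScript above pings every machine from every other machine over its `<name>.wg-test-one` hostname. A minimal Python sketch of that connectivity matrix, reusing the test's five machine names, is:

```python
# Illustrative sketch, not part of the PR: enumerate the directed ping pairs
# exercised by the testScript above (5 machines -> 5 * 4 = 20 probes).
from itertools import permutations

machines = ["controller1", "controller2", "peer1", "peer2", "peer3"]

pairs = list(permutations(machines, 2))
for src, dst in pairs:
    print(f"{src}: ping -c1 {dst}.wg-test-one")

print(f"total probes: {len(pairs)}")  # 20
```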
checks/wireguard/sops/machines/controller1/key.json (new executable file, 6 lines)
@@ -0,0 +1,6 @@
[
  {
    "publickey": "age1rnkc2vmrupy9234clyu7fpur5kephuqs3v7qauaw5zeg00jqjdasefn3cc",
    "type": "age"
  }
]

checks/wireguard/sops/machines/controller2/key.json (new executable file, 6 lines)
@@ -0,0 +1,6 @@
[
  {
    "publickey": "age1t2hhg99d4p2yymuhngcy5ccutp8mvu7qwvg5cdhck303h9e7ha9qnlt635",
    "type": "age"
  }
]

checks/wireguard/sops/machines/peer1/key.json (new executable file, 6 lines)
@@ -0,0 +1,6 @@
[
  {
    "publickey": "age1jts52rzlqcwjc36jkp56a7fmjn3czr7kl9ta2spkfzhvfama33sqacrzzd",
    "type": "age"
  }
]

checks/wireguard/sops/machines/peer2/key.json (new executable file, 6 lines)
@@ -0,0 +1,6 @@
[
  {
    "publickey": "age12nqnp0zd435ckp5p0v2fv4p2x4cvur2mnxe8use2sx3fgy883vaq4ae75e",
    "type": "age"
  }
]

checks/wireguard/sops/machines/peer3/key.json (new executable file, 6 lines)
@@ -0,0 +1,6 @@
[
  {
    "publickey": "age1sglr4zp34drjfydzeweq43fz3uwpul3hkh53lsfa9drhuzwmkqyqn5jegp",
    "type": "age"
  }
]

checks/wireguard/sops/secrets/controller1-age.key/secret (new file, 15 lines)
@@ -0,0 +1,15 @@
{
  "data": "ENC[AES256_GCM,data:zDF0RiBqaawpg+GaFkuLPomJ01Xu+lgY5JfUzaIk2j03XkCzIf8EMrmn6pRtBP3iUjPBm+gQSTQk6GHTONrixA5hRNyETV+UgQw=,iv:zUUCAGZ0cz4Tc2t/HOjVYNsdnrAOtid/Ns5ak7rnyCk=,tag:z43WtNSue4Ddf7AVu21IKA==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBlY1NEdjAzQm5RMFZWY3BJ\nclp6c01FdlZFK3dOSDB4cHc1NTdwMXErMFJFCnIrRVFNZEFYOG1rVUhFd2xsbTJ2\nVkJHNmdOWXlOcHJoQ0QzM1VyZmxmcGcKLS0tIFk1cEx4dFdvNGRwK1FWdDZsb1lR\nV2d1RFZtNzZqVFdtQ1FzNStEcEgyUUkKx8tkxqJz/Ko3xgvhvd6IYiV/lRGmrY13\nUZpYWR9tsQwZAR9dLjCyVU3JRuXeGB1unXC1CO0Ff3R0A/PuuRHh+g==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:19:37Z",
    "mac": "ENC[AES256_GCM,data:8RGOUhZ2LGmC9ugULwHDgdMrtdo9vzBm3BJmL4XTuNJKm0NlKfgNLi1E4n9DMQ+kD4hKvcwbiUcwSGE8jZD6sm7Sh3bJi/HZCoiWm/O/OIzstli2NNDBGvQBgyWZA5H+kDjZ6aEi6icNWIlm5gsty7KduABnf5B3p0Bn5Uf5Bio=,iv:sGZp0XF+mgocVzAfHF8ATdlSE/5zyz5WUSRMJqNeDQs=,tag:ymYVBRwF5BOSAu5ONU2qKw==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}

checks/wireguard/sops/secrets/controller1-age.key/users/admin (new symbolic link, 1 line)
@@ -0,0 +1 @@
../../../users/admin

checks/wireguard/sops/secrets/controller2-age.key/secret (new file, 15 lines)
@@ -0,0 +1,15 @@
{
  "data": "ENC[AES256_GCM,data:dHM7zWzqnC1QLRKYpbI2t63kOFnSaQy6ur9zlkLQf17Q03CNrqUsZtdEbwMnLR3llu7eVMhtvVRkXjEkvn3leb9HsNFmtk/DP70=,iv:roEZsBFqRypM106O5sehTzo7SySOJUJgAR738rTtOo8=,tag:VDd9/6uU0SAM7pWRLIUhUQ==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBKTEVYUmVGbUtOcHZ4cnc3\nKzNETnlxaVRKYTI3eWVHdEoyc3l2SnhsZ1J3CnB2RnZrOXM5Uml6TThDUlZjY25J\nbkJ6eUZ2ckN1NWpNUU9IaE93UDJQdlEKLS0tIC95ZDhkU0R1VHhCdldxdW4zSmps\nN3NqL1cvd05hRTRPdDA3R2pzNUFFajgKS+DJH14fH9AvEAa3PoUC1jEqKAzTmExN\nl32FeHTHbGMo1PKeaFm+Eg0WSpAmFE7beBunc5B73SW30ok6x4FcQw==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:19:47Z",
    "mac": "ENC[AES256_GCM,data:77EnuBQyguvkCtobUg8/6zoLHjmeGDrSBZuIXOZBMxdbJjzhRg++qxQjuu6t0FoWATtz7u4Y3/jzUMGffr/N5HegqSq0D2bhv7AqJwBiVaOwd80fRTtM+YiP/zXsCk52Pj/Gadapg208bDPQ1BBDOyz/DrqZ7w//j+ARJjAnugI=,iv:IuTDmJKZEuHXJXjxrBw0gP2t6vpxAYEqbtpnVbavVCY=,tag:4EnpX6rOamtg1O+AaEQahQ==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}

checks/wireguard/sops/secrets/controller2-age.key/users/admin (new symbolic link, 1 line)
@@ -0,0 +1 @@
../../../users/admin

checks/wireguard/sops/secrets/peer1-age.key/secret (new file, 15 lines)
@@ -0,0 +1,15 @@
{
  "data": "ENC[AES256_GCM,data:wcSsqxTKiMAnzPwxs5DNjcSdLyjVQ9UOrZxfSbOkVfniwx6F7xz6dLNhaDq7MHQ0vRWpg28yNs7NHrp52bYFnb/+eZsis46WiCw=,iv:B4t1lvS2gC601MtsmZfEiEulLWvSGei3/LSajwFS9Vs=,tag:hnRXlZyYEFfLJUrw1SqbSQ==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAybUgya2VEdzMvRG1hdkpu\nM2pGNmcyVmcvYVZ1ZjJlY3A1bXFUUUtkMTI0CmJoRFZmejZjN2UxUXNuc1k5WnE2\nNmxIcnpNQ1lJZ3ZKSmhtSlVURXJTSUUKLS0tIGU4Wi9yZ3VYekJkVW9pNWFHblFk\na0gzbTVKUWdSam1sVjRUaUlTdVd5YWMKntRc9yb9VPOTMibp8QM5m57DilP01N/X\nPTQaw8oI40znnHdctTZz7S+W/3Te6sRnkOhFyalWmsKY0CWg/FELlA==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:19:58Z",
    "mac": "ENC[AES256_GCM,data:8nq+ugkUJxE24lUIySySs/cAF8vnfqr936L/5F0O1QFwNrbpPmKRXkuwa6u0V+187L2952Id20Fym4ke59f3fJJsF840NCKDwDDZhBZ20q9GfOqIKImEom/Nzw6D0WXQLUT3w8EMyJ/F+UaJxnBNPR6f6+Kx4YgStYzCcA6Ahzg=,iv:VBPktEz7qwWBBnXE+xOP/EUVy7/AmNCHPoK56Yt/ZNc=,tag:qXONwOLFAlopymBEf5p4Sw==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}

checks/wireguard/sops/secrets/peer1-age.key/users/admin (new symbolic link, 1 line)
@@ -0,0 +1 @@
../../../users/admin

checks/wireguard/sops/secrets/peer2-age.key/secret (new file, 15 lines)
@@ -0,0 +1,15 @@
{
  "data": "ENC[AES256_GCM,data:4d3ri0EsDmWRtA8vzvpPRLMsSp4MIMKwvtn0n0pRY05uBPXs3KcjnweMPIeTE1nIhqnMR2o2MfLah5TCPpaFax9+wxIt74uacbg=,iv:0LBAldTC/hN4QLCxgXTl6d9UB8WmUTnj4sD2zHQuG2w=,tag:zr/RhG/AU4g9xj9l2BprKw==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvV0JnZDhlU1piU1g2cng0\ncytKOEZ6WlZlNGRGUjV3MmVMd2Nzc0ZwelgwCjBGdThCUGlXbVFYdnNoZWpJZ3Vm\nc2xkRXhxS09vdzltSVoxLzhFSVduak0KLS0tIE5DRjJ6cGxiVlB1eElHWXhxN1pJ\nYWtIMDMvb0Z6akJjUzlqeEFsNHkxL2cKpghv/QegnXimeqd9OPFouGM//jYvoVmw\n2d4mLT2JSMkEhpfGcqb6vswhdJfCiKuqr2B4bqwAnPMaykhsm8DFRQ==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:20:08Z",
    "mac": "ENC[AES256_GCM,data:BzlQVAJ7HzcxNPKB3JhabqRX/uU0EElj172YecjmOflHnzz/s9xgfdAfJK/c53hXlX4LtGPnubH7a8jOolRq98zmZeBYE27+WLs2aN7Ufld6mYk90/i7u4CqR+Fh2Kfht04SlUJCjnS5A9bTPwU9XGRHJ0BiOhzTuSMUJTRaPRM=,iv:L50K5zc1o99Ix9nP0pb9PRH+VIN2yvq7JqKeVHxVXmc=,tag:XFLkSCsdbTPxbasDYYxcFQ==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}

checks/wireguard/sops/secrets/peer2-age.key/users/admin (new symbolic link, 1 line)
@@ -0,0 +1 @@
../../../users/admin

checks/wireguard/sops/secrets/peer3-age.key/secret (new file, 15 lines)
@@ -0,0 +1,15 @@
{
  "data": "ENC[AES256_GCM,data:qfLm6+g1vYnESCik9uyBeKsY6Ju2Gq3arnn2I8HHNO67Ri5BWbOQTvtz7WT8/q94RwVjv8SGeJ/fsJSpwLSrJSbqTZCPAnYwzzQ=,iv:PnA9Ao8RRELNhNQYbaorstc0KaIXRU7h3+lgDCXZFHk=,tag:VeLgYQYwqthYihIoQTwYiA==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBNWVVQaDJFd0N3WHptRC9Z\nZTgxTWh5bnU1SkpqRWRXZnhPaFhpSVJmVEhrCjFvdHFYenNWaFNrdXlha09iS2xj\nOTZDcUNkcHkvTDUwNjM4Z3gxUkxreUEKLS0tIE5oY3Q2bWhsb2FSQTVGTWVSclJw\nWllrelRwT3duYjJJbTV0d3FwU1VuNlkK2eN3fHFX/sVUWom8TeZC9fddqnSCsC1+\nJRCZsG46uHDxqLcKIfdFWh++2t16XupQYk3kn+NUR/aMc3fR32Uwjw==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:20:18Z",
    "mac": "ENC[AES256_GCM,data:nUwsPcP1bsDjAHFjQ1NlVkTwyZY4B+BpzNkMx9gl0rE14j425HVLtlhlLndhRp+XMpnDldQppLAAtSdzMsrw8r5efNgTRl7cu4Fy/b9cHt84k7m0aou5lrGus9SV1bM7/fzC9Xm7CSXBcRzyDGVsKC6UBl1rx+ybh7HyAN05XSo=,iv:It57H+zUUNPkoN1D8sYwyZx5zIFIga7mydhGUHYBCGE=,tag:mBQdYqUpjPknbYa13qESyw==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}

checks/wireguard/sops/secrets/peer3-age.key/users/admin (new symbolic link, 1 line)
@@ -0,0 +1 @@
../../../users/admin

checks/wireguard/sops/users/admin/key.json (new file, 4 lines)
@@ -0,0 +1,4 @@
{
  "publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
  "type": "age"
}

@@ -0,0 +1 @@
../../../../../../sops/machines/controller1

@@ -0,0 +1,19 @@
{
  "data": "ENC[AES256_GCM,data:noe913+28JWkoDkGGMu++cc1+j5NPDoyIhWixdsowoiVO3cTWGkZ88SUGO5D,iv:ynYMljwqMcBdk8RpVcw/2Jflg2RCF28r4fKUgIAF8B4=,tag:+TsXDJgfUhKgg4iQVXKKlQ==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBhYVRReTZBQ05GYmVBVjhS\nNXM5aFlhVzZRaVl6UHl6S3JnMC9Sb1dwZ1ZjCmVuS2dEVExYZWROVklUZWFCSnM2\nZnlxbVNseTM2c0Q0TjhsT3NzYmtqREUKLS0tIHBRTFpvVGt6d1cxZ2lFclRsUVhZ\nZDlWaG9PcXVrNUZKaEgxWndjUDVpYjgKt0eOhAgcYdkg9JSEakx4FjChLTn3pis+\njOkuGd4JfXMKcwC7vJV5ygQBxzVJSBw+RucP7sYCBPK0m8Voj94ntw==\n-----END AGE ENCRYPTED FILE-----\n"
      },
      {
        "recipient": "age1rnkc2vmrupy9234clyu7fpur5kephuqs3v7qauaw5zeg00jqjdasefn3cc",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6MFJqNHNraG9DSnJZMFdz\ndU8zVXNTamxROFd1dWtuK2RiekhPdHhleVhFCi8zNWJDNXJMRUlDdjc4Q0UycTIz\nSGFGSmdnNU0wZWlDaTEwTzBqWjh6SFkKLS0tIEJOdjhOMDY2TUFLb3RPczNvMERx\nYkpSeW5VOXZvMlEvdm53MDE3aUFTNjgKyelSTjrTIR9I3rJd3krvzpsrKF1uGs4J\n4MtmQj0/3G+zPYZVBx7b3HF6B3f1Z7LYh05+z7nCnN/duXyPnDjNcg==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:19:37Z",
    "mac": "ENC[AES256_GCM,data:+DmIkPG/H6tCtf8CvB98E1QFXv08QfTcCB3CRsi+XWnIRBkryRd/Au9JahViHMdK7MED8WNf84NWTjY2yH4y824/DjI8XXNMF1iVMo0CqY42xbVHtUuhXrYeT+c8CyEw+M6zfy1jC0+Bm3WQWgagz1G6A9SZk3D2ycu0N08+axA=,iv:kwBjTYebIy5i2hagAajSwwuKnSkrM9GyrnbeQXB2e/w=,tag:EgKJ5gVGYj1NGFUduxLGfg==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}

@@ -0,0 +1 @@
../../../../../../sops/users/admin

@@ -0,0 +1 @@
lQfR7GhivN87XoXruTGOPjVPhNu1Brt//wyc3pdwE20=

@@ -0,0 +1 @@
7470bb5c79df224a9b7f5a2259acd2e46db763c27e24cb3416c8b591cb328077

@@ -0,0 +1 @@
fd51:19c1:3b:f700

@@ -0,0 +1 @@
../../../../../../sops/machines/controller2

@@ -0,0 +1,19 @@
{
  "data": "ENC[AES256_GCM,data:2kehACgvNgoYGPwnW7p86BR0yUu689Chth6qZf9zoJtuTY9ATS68dxDyBc5S,iv:qb2iDUtExegTeN3jt6SA8RnU61W5GDDhn56QXiQT4gw=,tag:pSGPICX5p6qlZ1WMVoIEYQ==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBSTTR5TDY4RE9VYmlCK1dL\nWkVRcVZqVDlsbmQvUlJmdzF2b1Z1S0k3NngwCkFWNzRVaERtSmFsd0o2aFJOb0ZX\nSU9yUnVaNi9IUjJWeGRFcEpDUXo5WkEKLS0tIEczNkxiYnJsTWRoLzFhQVF1M21n\nWnZEdGV1N2N5d1FZQkJUQ1IrdGFLblkKPTpha2bxS8CCAMXWTDKX/WOcdvggaP3Y\nqewyahDNzb4ggP+LNKp55BtwFjdvoPoq4BpYOOgMRbQMMk+H1o9WFw==\n-----END AGE ENCRYPTED FILE-----\n"
      },
      {
        "recipient": "age1t2hhg99d4p2yymuhngcy5ccutp8mvu7qwvg5cdhck303h9e7ha9qnlt635",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBYcEZ6Tzk3M0pkV0tOdTBj\nenF2a0tHNnhBa0NrazMwV1VBbXBZR3pzSHpvCnBZOEU0VlFHS1FHcVpTTDdPczVV\nV0RFSlZ0VmIzWGoydEdKVXlIUE9OOEkKLS0tIFZ0cWVBR1loeVlWa2c4U3oweXE2\ncm1ja0JCS3U5Nk41dlAzV2NabDc2bDQKdgCDNnpRZlFPnEGlX6fo0SQX4yOB+E6r\ntnSwofR3xxZvkyme/6JJU5qBZXyCXEAhKMRkFyvJANXzMJAUo/Osow==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:19:48Z",
    "mac": "ENC[AES256_GCM,data:e3EkL8vwRhLsec83Zi9DE3PKT+4RwgiffpN4QHcJKTgmDW6hzizWc5kAxbNWGJ9Qqe6sso2KY7tc+hg1lHEsmzjCbg153p8h+7lVI2XT6adi/CS8WZ2VpeL+0X9zDQCjqHmrESZAYFBdkLqO4jucdf0Pc3CKKD+N3BDDTwSUvHM=,iv:xvR7dJL8sdYen00ovrYT8PNxhB9XxSWDSRz1IK23I/o=,tag:OyhAvllBgfAp3eGeNpR/Nw==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}

@@ -0,0 +1 @@
../../../../../../sops/users/admin

@@ -0,0 +1 @@
5Z7gbLFbXpEFfomW2pKyZBpZN5xvUtiqrIL0GVfNtQ8=

@@ -0,0 +1 @@
c3672fdb9fb31ddaf6572fc813cf7a8fe50488ef4e9d534c62d4f29da60a1a99

@@ -0,0 +1 @@
fd51:19c1:c1:aa00

@@ -0,0 +1 @@
../../../../../../sops/machines/peer1

@@ -0,0 +1,19 @@
{
  "data": "ENC[AES256_GCM,data:b+akw85T3D9xc75CPLHucR//k7inpxKDvgpR8tCNKwNDRVjVHjcABhfZNLXW,iv:g11fZE8UI0MVh9GKdjR6leBlxa4wN7ZubozXG/VlBbw=,tag:0YkzWCW3zJ3Mt3br/jmTYw==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age1jts52rzlqcwjc36jkp56a7fmjn3czr7kl9ta2spkfzhvfama33sqacrzzd",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBXWkJUR0pIa2xOSEw2dThm\nYlNuOHZCVW93Wkc5LzE4YmpUTHRkZlk3ckc4CnN4M3ZRMWNFVitCT3FyWkxaR0di\nb0NmSXFhRHJmTWg0d05OcWx1LytscEEKLS0tIEtleTFqU3JrRjVsdHpJeTNuVUhF\nWEtnOVlXVXRFamFSak5ia2F2b0JiTzAKlhOBZvZ4AN+QqAYQXvd6YNmgVS4gtkWT\nbV3bLNTgwtrDtet9NDHM8vdF+cn5RZxwFfgmTbDEow6Zm8EXfpxj/g==\n-----END AGE ENCRYPTED FILE-----\n"
      },
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6YVYyQkZqMTJYQTlyRG5Y\nbnJ2UkE1TS9FZkpSa2tQbk1hQjViMi9OcGk0CjFaZUdjU3JtNzh0bDFXdTdUVW4x\nanFqZHZjZjdzKzA2MC8vTWh3Uy82UGcKLS0tIDhyOFl3UGs3czdoMlpza3UvMlB1\nSE90MnpGc05sSCtmVWg0UVNVdmRvN2MKHlCr4U+7bsoYb+2fgT4mEseZCEjxrtLu\n55sR/4YH0vqMnIBnLTSA0e+WMrs3tQfseeJM5jY/ZNnpec1LbxkGTg==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:19:58Z",
    "mac": "ENC[AES256_GCM,data:gEoEC9D2Z7k5F8egaY1qPXT5/96FFVsyofSBivQ28Ir/9xHX2j40PAQrYRJUWsk/GAUMOyi52Wm7kPuacw+bBcdtQ0+MCDEmjkEnh1V83eZ/baey7iMmg05uO92MYY5o4e7ZkwzXoAeMCMcfO0GqjNvsYJHF1pSNa+UNDj+eflw=,iv:dnIYpvhAdvUDe9md53ll42krb0sxcHy/toqGc7JFxNA=,tag:0WkZU7GeKMD1DQTYaI+1dg==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}

@@ -0,0 +1 @@
../../../../../../sops/users/admin

@@ -0,0 +1 @@
juK7P/92N2t2t680aLIRobHc3ts49CsZBvfZOyIKpUc=

@@ -0,0 +1 @@
b36142569a74a0de0f9b229f2a040ae33a22d53bef5e62aa6939912d0cda05ba

@@ -0,0 +1 @@
6987:50a0:9b93:4337

@@ -0,0 +1 @@
../../../../../../sops/machines/peer2

@@ -0,0 +1,19 @@
{
  "data": "ENC[AES256_GCM,data:apX2sLwtq6iQgLJslFwiRMNBUe0XLzLQbhKfmb2pKiJG7jGNHUgHJz3Ls4Ca,iv:HTDatm3iD5wACTkkd3LdRNvJfnfg75RMtn9G6Q7Fqd4=,tag:Mfehlljnes5CFD1NJdk27A==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age12nqnp0zd435ckp5p0v2fv4p2x4cvur2mnxe8use2sx3fgy883vaq4ae75e",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBVZzFyMUZsd2V2VWxOUmhP\nZE8yZTc4Q0RkZisxR25NemR1TzVDWmJZVjBVClA1MWhsU0xzSG16aUx3cWFWKzlG\nSkxrT09OTkVqLzlWejVESE1QWHVJaFkKLS0tIGxlaGVuWU43RXErNTB3c3FaUnM3\nT0N5M253anZkbnFkZWw2VHA0eWhxQW8Kd1PMtEX1h0Hd3fDLMi++gKJkzPi9FXUm\n+uYhx+pb+pJM+iLkPwP/q6AWC7T0T4bHfekkdzxrbsKMi73x/GrOiw==\n-----END AGE ENCRYPTED FILE-----\n"
      },
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBqVzRIMWdlNjVwTURyMFkv\nSUhiajZkZVNuWklRYit6cno4UzNDa2szOFN3CkQ2TWhHb25pbmR1MlBsRXNLL2lx\ncVZ3c3BsWXN2aS9UUVYvN3I4S0xUSmMKLS0tIE5FV0U5aXVUZk9XL0U0Z2ZSNGd5\nbU9zY3IvMlpSNVFLYkRNQUpUYVZOWFUK7j4Otzb8CJTcT7aAj9/irxHEDXh1HkTg\nzz7Ho8/ZncNtaCVHlHxjTgVW9d5aIx8fSsV9LRCFwHMtNzvwj1Nshg==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:20:08Z",
    "mac": "ENC[AES256_GCM,data:e7WNVEz78noHBiz6S3A6qNfop+yBXB3rYN0k4GvaQKz3b99naEHuqIF8Smzzt4XrbbiPKu2iLa5ddLBlqqsi32UQUB8JS9TY7hvW8ol+jpn0VxusGCXW9ThdDEsM/hXiPyr331C73zTvbOYI1hmcGMlJL9cunVRO9rkMtEqhEfo=,iv:6zt7wjIs1y5xDHNK+yLOwoOuUpY7/dOGJGT6UWAFeOg=,tag:gzFTgoxhoLzUV0lvzOhhfg==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}

@@ -0,0 +1 @@
../../../../../../sops/users/admin

@@ -0,0 +1 @@
XI9uSaQRDBCb82cMnGzGJcbqRfDG/IXZobyeL+kV03k=

@@ -0,0 +1 @@
360f9fce4a984eb87ce2a673eb5341ecb89c0f62126548d45ef25ff5243dd646

@@ -0,0 +1 @@
3b21:3ced:003e:89b3

@@ -0,0 +1 @@
../../../../../../sops/machines/peer3

@@ -0,0 +1,19 @@
{
  "data": "ENC[AES256_GCM,data:Gluvjes/3oH5YsDq00JDJyJgoEFcj56smioMArPSt309MDGExYX2QsCzeO1q,iv:oBBJRDdTj/1dWEvzhdFKQ2WfeCKyavKMLmnMbqnU5PM=,tag:2WNFxKz2dWyVcybpm5N4iw==,type:str]",
  "sops": {
    "age": [
      {
        "recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBtQWpjRmhZTFdPa2VSZkFN\nbUczMlY5bDBmMTdoMy8xcWxMaXpWVitMZGdjCnRWb2Y3eGpHU1hmNHRJVFBqbU5w\nVEZGdUIrQXk0U0dUUEZ6bE5EMFpTRHMKLS0tIGpYSmZmQThJUTlvTHpjc05ZVlM4\nQWhTOWxnUHZnYlJ3czE3ZUJ0L3ozWTQK3a7N0Zpzo4sUezYveqvKR49RUdJL23eD\n+cK5lk2xbtj+YHkeG+dg7UlHfDaicj0wnFH1KLuWmNd1ONa6eQp3BQ==\n-----END AGE ENCRYPTED FILE-----\n"
      },
      {
        "recipient": "age1sglr4zp34drjfydzeweq43fz3uwpul3hkh53lsfa9drhuzwmkqyqn5jegp",
        "enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA3a2FOWlVsSkdnendrYmUz\ndEpuL1hZSWNFTUtDYm14S3V1aW9KS3hsazJRCkp2SkFFbi9hbGJpNks1MlNTL0s5\nTk5pcUMxaEJobkcvWmRGeU9jMkdNdzAKLS0tIDR6M0Y5eE1ETHJJejAzVW1EYy9v\nZCtPWHJPUkhuWnRzSGhMUUtTa280UmMKXvtnxyop7PmRvTOFkV80LziDjhGh93Pf\nYwhD/ByD/vMmr21Fd6PVHOX70FFT30BdnMc1/wt7c/0iAw4w4GoQsA==\n-----END AGE ENCRYPTED FILE-----\n"
      }
    ],
    "lastmodified": "2025-08-13T09:20:18Z",
    "mac": "ENC[AES256_GCM,data:3nXMTma0UYXCco+EM8UW45cth7DVMboFBKyesL86GmaG6OlTkA2/25AeDrtSVO13a5c2jC6yNFK5dE6pSe5R9f0BoDF7d41mgc85zyn+LGECNWKC6hy6gADNSDD6RRuV1S3FisFQl1F1LD8LiSWmg/XNMZzChNlHYsCS8M+I84g=,iv:pu5VVXAVPmVoXy0BJ+hq5Ar8R0pZttKSYa4YS+dhDNc=,tag:xp1S/4qExnxMTGwhfLJrkA==,type:str]",
    "unencrypted_suffix": "_unencrypted",
    "version": "3.10.2"
  }
}

@@ -0,0 +1 @@
../../../../../../sops/users/admin

@@ -0,0 +1 @@
t6qN4VGLR+VMhrBDNKQEXZVyRsEXs1/nGFRs5DI82F8=

@@ -0,0 +1 @@
e3facc99b73fe029d4c295f71829a83f421f38d82361cf412326398175da162a

@@ -0,0 +1 @@
e42b:bf85:33f4:f0b1
clanServices/wireguard/README.md (new file, 217 lines)
@@ -0,0 +1,217 @@
# Wireguard VPN Service

This service provides a Wireguard-based VPN mesh network with automatic IPv6 address allocation and routing between clan machines.

## Overview

The wireguard service creates a secure mesh network between clan machines using two roles:
- **Controllers**: Machines with public endpoints that act as connection points and routers
- **Peers**: Machines that connect through controllers to access the network

## Requirements

- Controllers must have a publicly accessible endpoint (domain name or static IP)
- Peers must be in networks where UDP traffic is not blocked (uses port 51820 by default, configurable)

## Features

- Automatic IPv6 address allocation using ULA (Unique Local Address) prefixes
- Full mesh connectivity between all machines
- Automatic key generation and distribution
- IPv6 forwarding on controllers for inter-peer communication
- Support for multiple controllers for redundancy

## Network Architecture

### IPv6 Address Allocation
- Base network: `/40` ULA prefix (deterministically generated from instance name)
- Controllers: Each gets a `/56` subnet from the base `/40`
- Peers: Each gets a unique 64-bit host suffix that is used in ALL controller subnets

### Addressing Design
- Each peer generates a unique host suffix (e.g., `:8750:a09b:0:1`)
- This suffix is appended to each controller's `/56` prefix to create unique addresses
- Example: peer1 with suffix `:8750:a09b:0:1` gets:
  - `fd51:19c1:3b:f700:8750:a09b:0:1` in controller1's subnet
  - `fd51:19c1:c1:aa00:8750:a09b:0:1` in controller2's subnet
- Controllers allow each peer's `/96` subnet for routing flexibility

### Connectivity
- Peers use a single WireGuard interface with multiple IPs (one per controller subnet)
- Controllers connect to ALL other controllers and ALL peers on a single interface
- Controllers have IPv6 forwarding enabled to route traffic between peers
- All traffic between peers flows through controllers
- Symmetric routing is maintained as each peer has consistent IPs across all controllers

### Example Network Topology

```mermaid
graph TB
    subgraph Controllers
        C1[controller1<br/>endpoint: vpn1.example.com<br/>fd51:19c1:3b:f700::/56]
        C2[controller2<br/>endpoint: vpn2.example.com<br/>fd51:19c1:c1:aa00::/56]
    end

    subgraph Peers
        P1[peer1<br/>designated: controller1]
        P2[peer2<br/>designated: controller2]
        P3[peer3<br/>designated: controller1]
    end

    %% Controllers connect to each other
    C1 <--> C2

    %% All peers connect to all controllers
    P1 <--> C1
    P1 <--> C2
    P2 <--> C1
    P2 <--> C2
    P3 <--> C1
    P3 <--> C2

    %% Peer-to-peer traffic flows through controllers
    P1 -.->|via controllers| P3
    P1 -.->|via controllers| P2
    P2 -.->|via controllers| P3

    classDef controller fill:#f9f,stroke:#333,stroke-width:4px
    classDef peer fill:#bbf,stroke:#333,stroke-width:2px
    class C1,C2 controller
    class P1,P2,P3 peer
```

## Configuration

### Basic Setup with Single Controller

```nix
# In your flake.nix or inventory
{
  services.wireguard.server1 = {
    roles.controller = {
      # Public endpoint where this controller can be reached
      endpoint = "vpn.example.com";
      # Optional: Change the UDP port (default: 51820)
      port = 51820;
    };
  };

  services.wireguard.laptop1 = {
    roles.peer = {
      # No configuration needed if only one controller exists
    };
  };
}
```

### Multiple Controllers Setup

```nix
{
  services.wireguard.server1 = {
    roles.controller = {
      endpoint = "vpn1.example.com";
    };
  };

  services.wireguard.server2 = {
    roles.controller = {
      endpoint = "vpn2.example.com";
    };
  };

  services.wireguard.laptop1 = {
    roles.peer = {
      # Must specify which controller subnet is exposed as the default in /etc/hosts, when multiple controllers exist
      controller = "server1";
    };
  };
}
```

### Advanced Options


### Automatic Hostname Resolution

The wireguard service automatically adds entries to `/etc/hosts` for all machines in the network. Each machine is accessible via its hostname in the format `<machine-name>.<instance-name>`.

For example, with an instance named `vpn`:
- `server1.vpn` - resolves to server1's IPv6 address
- `laptop1.vpn` - resolves to laptop1's IPv6 address

This allows machines to communicate using hostnames instead of IPv6 addresses:

```bash
# Ping another machine by hostname
ping6 server1.vpn

# SSH to another machine
ssh user@laptop1.vpn
```

## Troubleshooting

### Check Wireguard Status
```bash
sudo wg show
```

### Verify IP Addresses
```bash
ip addr show dev <instance-name>
```

### Check Routing
```bash
ip -6 route show dev <instance-name>
```

### Interface Fails to Start: "Address already in use"

If you see this error in your logs:
```
wireguard: Could not bring up interface, ignoring: Address already in use
```

This means the configured port (default: 51820) is already in use by another service or wireguard instance. Solutions:

1. **Check for conflicting wireguard instances:**
   ```bash
   sudo wg show
   sudo ss -ulnp | grep 51820
   ```

2. **Use a different port:**
   ```nix
   services.wireguard.myinstance = {
     roles.controller = {
       endpoint = "vpn.example.com";
       port = 51821; # Use a different port
     };
   };
   ```

3. **Ensure unique ports across multiple instances:**
   If you have multiple wireguard instances on the same machine, each must use a different port.

### Key Management

Keys are automatically generated and stored in the clan vars system. To regenerate keys:

```bash
# Regenerate keys for a specific machine and instance
clan vars generate --service wireguard-keys-<instance-name> --regenerate --machine <machine-name>

# Apply the new keys
clan machines update <machine-name>
```

## Security Considerations

- All traffic is encrypted using Wireguard's modern cryptography
- Private keys never leave the machines they're generated on
- Public keys are distributed through the clan vars system
- Controllers must have publicly accessible endpoints
- Firewall rules are automatically configured for the Wireguard ports
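To make the README's addressing scheme concrete, here is a small sketch (not part of the change set) that composes peer addresses the way the README describes: the peer's 64-bit host suffix is appended to each controller's `/56` prefix. The prefix and suffix values are the README's own examples.

```python
# Illustrative sketch only: compose per-controller peer addresses from a /56
# controller prefix plus a 64-bit peer host suffix (values from the README).
import ipaddress

controller_prefixes = {
    "controller1": "fd51:19c1:3b:f700",
    "controller2": "fd51:19c1:c1:aa00",
}
peer_suffix = "8750:a09b:0:1"  # example suffix for peer1

for name, prefix in controller_prefixes.items():
    addr = ipaddress.IPv6Address(f"{prefix}:{peer_suffix}")
    print(f"{name}: {addr}")
    # controller1: fd51:19c1:3b:f700:8750:a09b:0:1
    # controller2: fd51:19c1:c1:aa00:8750:a09b:0:1
```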
456
clanServices/wireguard/default.nix
Normal file
456
clanServices/wireguard/default.nix
Normal file
@@ -0,0 +1,456 @@
|
|||||||
|
/*
|
||||||
|
There are two roles: peers and controllers:
|
||||||
|
- Every controller has an endpoint set
|
||||||
|
- There can be multiple peers
|
||||||
|
- There has to be one or more controllers
|
||||||
|
- Peers connect to ALL controllers (full mesh)
|
||||||
|
- If only one controller exists, peers automatically use it for IP allocation
|
||||||
|
- If multiple controllers exist, peers must specify which controller's subnet to use
|
||||||
|
- Controllers have IPv6 forwarding enabled, every peer and controller can reach
|
||||||
|
everyone else, via extra controller hops if necessary
|
||||||
|
|
||||||
|
Example:
|
||||||
|
┌───────────────────────────────┐
|
||||||
|
│ ◄───────────── │
|
||||||
|
│ controller2 controller1
|
||||||
|
│ ▲ ─────────────► ▲ ▲
|
||||||
|
│ │ │ │ │ │ │ │ │
|
||||||
|
│ │ │ │ │ │ │ │ │
|
||||||
|
│ │ │ │ │ │ │ │ │
|
||||||
|
│ │ │ │ └───────────────┐ │ │ │ │
|
||||||
|
│ │ │ └──────────────┐ │ │ │ │ │
|
||||||
|
│ ▼ │ ▼ ▼ ▼
|
||||||
|
└─► peer2 │ peer1 peer3
|
||||||
|
│ ▲
|
||||||
|
└──────────┘
|
||||||
|
|
||||||
|
Network Architecture:
|
||||||
|
|
||||||
|
IPv6 Address Allocation:
|
||||||
|
- Base network: /40 ULA prefix (generated from instance name)
|
||||||
|
- Controllers: Each gets a /56 subnet from the base /40
|
||||||
|
- Peers: Each gets a unique host suffix that is used in ALL controller subnets
|
||||||
|
|
||||||
|
Address Assignment:
|
||||||
|
- Each peer generates a unique 64-bit host suffix (e.g., :8750:a09b:0:1)
|
||||||
|
- This suffix is appended to each controller's /56 prefix
|
||||||
|
- Example: peer1 with suffix :8750:a09b:0:1 gets:
|
||||||
|
- fd51:19c1:3b:f700:8750:a09b:0:1 in controller1's subnet
|
||||||
|
- fd51:19c1:c1:aa00:8750:a09b:0:1 in controller2's subnet
|
||||||
|
|
||||||
|
Peers: Use a SINGLE interface that:
|
||||||
|
- Connects to ALL controllers
|
||||||
|
- Has multiple IPs, one in each controller's subnet (with /56 prefix)
|
||||||
|
- Routes to each controller's /56 subnet via that controller
|
||||||
|
- allowedIPs: Each controller's /56 subnet
|
||||||
|
- No routing conflicts due to unique IPs per subnet
|
||||||
|
|
||||||
|
Controllers: Use a SINGLE interface that:
|
||||||
|
- Connects to ALL peers and ALL other controllers
|
||||||
|
- Gets a /56 subnet from the base /40 network
|
||||||
|
- Has IPv6 forwarding enabled for routing between peers
|
||||||
|
- allowedIPs:
|
||||||
|
- For peers: A /96 range containing the peer's address in this controller's subnet
|
||||||
|
- For other controllers: The controller's /56 subnet
|
||||||
|
*/
|
||||||
|
|
||||||
|
{ ... }:
|
||||||
|
let
|
||||||
|
# Shared module for extraHosts configuration
|
||||||
|
extraHostsModule =
|
||||||
|
{
|
||||||
|
instanceName,
|
||||||
|
settings,
|
||||||
|
roles,
|
||||||
|
config,
|
||||||
|
lib,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
networking.extraHosts =
|
||||||
|
let
|
||||||
|
domain = if settings.domain == null then instanceName else settings.domain;
|
||||||
|
# Controllers use their subnet's ::1 address
|
||||||
|
controllerHosts = lib.mapAttrsToList (
|
||||||
|
name: _value:
|
||||||
|
let
|
||||||
|
prefix = builtins.readFile (
|
||||||
|
config.clan.core.settings.directory
|
||||||
|
+ "/vars/per-machine/${name}/wireguard-network-${instanceName}/prefix/value"
|
||||||
|
);
|
||||||
|
# Controller IP is always ::1 in their subnet
|
||||||
|
ip = prefix + "::1";
|
||||||
|
in
|
||||||
|
"${ip} ${name}.${domain}"
|
||||||
|
) roles.controller.machines;
|
||||||
|
|
||||||
|
# Peers use their suffix in their designated controller's subnet only
|
||||||
|
peerHosts = lib.mapAttrsToList (
|
||||||
|
peerName: peerValue:
|
||||||
|
let
|
||||||
|
peerSuffix = builtins.readFile (
|
||||||
|
config.clan.core.settings.directory
|
||||||
|
+ "/vars/per-machine/${peerName}/wireguard-network-${instanceName}/suffix/value"
|
||||||
|
);
|
||||||
|
# Determine designated controller
|
||||||
|
designatedController =
|
||||||
|
if (builtins.length (builtins.attrNames roles.controller.machines) == 1) then
|
||||||
|
(builtins.head (builtins.attrNames roles.controller.machines))
|
||||||
|
else
|
||||||
|
peerValue.settings.controller;
|
||||||
|
controllerPrefix = builtins.readFile (
|
||||||
|
config.clan.core.settings.directory
|
||||||
|
+ "/vars/per-machine/${designatedController}/wireguard-network-${instanceName}/prefix/value"
|
||||||
|
);
|
||||||
|
peerIP = controllerPrefix + ":" + peerSuffix;
|
||||||
|
in
|
||||||
|
"${peerIP} ${peerName}.${domain}"
|
||||||
|
) roles.peer.machines;
|
||||||
|
in
|
||||||
|
builtins.concatStringsSep "\n" (controllerHosts ++ peerHosts);
|
||||||
|
};
|
||||||
|
|
||||||
|
# Shared interface options
|
||||||
|
sharedInterface =
|
||||||
|
{ lib, ... }:
|
||||||
|
{
|
||||||
|
options.port = lib.mkOption {
|
||||||
|
type = lib.types.int;
|
||||||
|
example = 51820;
|
||||||
|
default = 51820;
|
||||||
|
description = ''
|
||||||
|
Port for the wireguard interface
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
options.domain = lib.mkOption {
|
||||||
|
type = lib.types.nullOr lib.types.str;
|
||||||
|
defaultText = lib.literalExpression "instanceName";
|
||||||
|
default = null;
|
||||||
|
description = ''
|
||||||
|
Domain suffix to use for hostnames in /etc/hosts.
|
||||||
|
Defaults to the instance name.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
in
|
||||||
|
{
|
||||||
|
_class = "clan.service";
|
||||||
|
manifest.name = "clan-core/wireguard";
|
||||||
|
manifest.description = "Wireguard-based VPN mesh network with automatic IPv6 address allocation";
|
||||||
|
manifest.categories = [
|
||||||
|
"System"
|
||||||
|
"Network"
|
||||||
|
];
|
||||||
|
manifest.readme = builtins.readFile ./README.md;
|
||||||
|
|
||||||
|
# Peer options and configuration
|
||||||
|
roles.peer = {
|
||||||
|
interface =
|
||||||
|
{ lib, ... }:
|
||||||
|
{
|
||||||
|
imports = [ sharedInterface ];
|
||||||
|
|
||||||
|
options.controller = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
example = "controller1";
|
||||||
|
description = ''
|
||||||
|
Machinename of the controller to attach to
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
perInstance =
|
||||||
|
{
|
||||||
|
instanceName,
|
||||||
|
settings,
|
||||||
|
roles,
|
||||||
|
machine,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
# Set default domain to instanceName
|
||||||
|
|
||||||
|
# Peers connect to all controllers
|
||||||
|
nixosModule =
|
||||||
|
{
|
||||||
|
config,
|
||||||
|
pkgs,
|
||||||
|
lib,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
(extraHostsModule {
|
||||||
|
inherit
|
||||||
|
instanceName
|
||||||
|
settings
|
||||||
|
roles
|
||||||
|
config
|
||||||
|
lib
|
||||||
|
;
|
||||||
|
})
|
||||||
|
];
|
||||||
|
# Network allocation generator for this peer - generates host suffix
|
||||||
|
clan.core.vars.generators."wireguard-network-${instanceName}" = {
|
||||||
|
files.suffix.secret = false;
|
||||||
|
|
||||||
|
runtimeInputs = with pkgs; [
|
||||||
|
python3
|
||||||
|
];
|
||||||
|
|
||||||
|
# Invalidate on hostname changes
|
||||||
|
validation.hostname = machine.name;
|
||||||
|
|
||||||
|
script = ''
|
||||||
|
${pkgs.python3}/bin/python3 ${./ipv6_allocator.py} "$out" "${instanceName}" peer "${machine.name}"
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
# Single wireguard interface with multiple IPs
|
||||||
|
networking.wireguard.interfaces."${instanceName}" = {
|
||||||
|
ips =
|
||||||
|
# Get this peer's suffix
|
||||||
|
let
|
||||||
|
peerSuffix =
|
||||||
|
config.clan.core.vars.generators."wireguard-network-${instanceName}".files.suffix.value;
|
||||||
|
in
|
||||||
|
# Create an IP in each controller's subnet
|
||||||
|
lib.mapAttrsToList (
|
||||||
|
ctrlName: _:
|
||||||
|
let
|
||||||
|
controllerPrefix = builtins.readFile (
|
||||||
|
config.clan.core.settings.directory
|
||||||
|
+ "/vars/per-machine/${ctrlName}/wireguard-network-${instanceName}/prefix/value"
|
||||||
|
);
|
||||||
|
peerIP = controllerPrefix + ":" + peerSuffix;
|
||||||
|
in
|
||||||
|
"${peerIP}/56"
|
||||||
|
) roles.controller.machines;
|
||||||
|
|
||||||
|
privateKeyFile =
|
||||||
|
config.clan.core.vars.generators."wireguard-keys-${instanceName}".files."privatekey".path;
|
||||||
|
|
||||||
|
# Connect to all controllers
|
||||||
|
peers = lib.mapAttrsToList (name: value: {
|
||||||
|
publicKey = (
|
||||||
|
builtins.readFile (
|
||||||
|
config.clan.core.settings.directory
|
||||||
|
+ "/vars/per-machine/${name}/wireguard-keys-${instanceName}/publickey/value"
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
# Allow each controller's /56 subnet
|
||||||
|
allowedIPs = [
|
||||||
|
"${
|
||||||
|
builtins.readFile (
|
||||||
|
config.clan.core.settings.directory
|
||||||
|
+ "/vars/per-machine/${name}/wireguard-network-${instanceName}/prefix/value"
|
||||||
|
)
|
||||||
|
}::/56"
|
||||||
|
];
|
||||||
|
|
||||||
|
endpoint = "${value.settings.endpoint}:${toString value.settings.port}";
|
||||||
|
|
||||||
|
persistentKeepalive = 25;
|
||||||
|
}) roles.controller.machines;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
# Controller options and configuration
|
||||||
|
roles.controller = {
|
||||||
|
interface =
|
||||||
|
{ lib, ... }:
|
||||||
|
{
|
||||||
|
imports = [ sharedInterface ];
|
||||||
|
|
||||||
|
options.endpoint = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
example = "vpn.clan.lol";
|
||||||
|
description = ''
|
||||||
|
Endpoint where the controller can be reached
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
perInstance =
|
||||||
|
{
|
||||||
|
settings,
|
||||||
|
instanceName,
|
||||||
|
roles,
|
||||||
|
machine,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
|
||||||
|
# Controllers connect to all peers and other controllers
|
||||||
|
nixosModule =
|
||||||
|
{
|
||||||
|
config,
|
||||||
|
pkgs,
|
||||||
|
lib,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
let
|
||||||
|
allOtherControllers = lib.filterAttrs (name: _v: name != machine.name) roles.controller.machines;
|
||||||
|
allPeers = roles.peer.machines;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
(extraHostsModule {
|
||||||
|
inherit
|
||||||
|
instanceName
|
||||||
|
settings
|
||||||
|
roles
|
||||||
|
config
|
||||||
|
lib
|
||||||
|
;
|
||||||
|
})
|
||||||
|
];
|
||||||
|
# Network allocation generator for this controller
|
||||||
|
clan.core.vars.generators."wireguard-network-${instanceName}" = {
|
||||||
|
files.prefix.secret = false;
|
||||||
|
|
||||||
|
runtimeInputs = with pkgs; [
|
||||||
|
python3
|
||||||
|
];
|
||||||
|
|
||||||
|
# Invalidate on network or hostname changes
|
||||||
|
validation.hostname = machine.name;
|
||||||
|
|
||||||
|
script = ''
|
||||||
|
${pkgs.python3}/bin/python3 ${./ipv6_allocator.py} "$out" "${instanceName}" controller "${machine.name}"
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
            # Enable ip forwarding, so wireguard peers can reach each other
            boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = 1;

            networking.firewall.allowedUDPPorts = [ settings.port ];

            # Single wireguard interface
            networking.wireguard.interfaces."${instanceName}" = {
              listenPort = settings.port;

              ips = [
                # Controller uses ::1 in its /56 subnet but with /40 prefix for proper routing
                "${config.clan.core.vars.generators."wireguard-network-${instanceName}".files.prefix.value}::1/40"
              ];

              privateKeyFile =
                config.clan.core.vars.generators."wireguard-keys-${instanceName}".files."privatekey".path;

              # Connect to all peers and other controllers
              peers = lib.mapAttrsToList (
                name: value:
                if allPeers ? ${name} then
                  # For peers: they now have our entire /56 subnet
                  {
                    publicKey = (
                      builtins.readFile (
                        config.clan.core.settings.directory
                        + "/vars/per-machine/${name}/wireguard-keys-${instanceName}/publickey/value"
                      )
                    );

                    # Allow the peer's /96 range in ALL controller subnets
                    allowedIPs = lib.mapAttrsToList (
                      ctrlName: _:
                      let
                        controllerPrefix = builtins.readFile (
                          config.clan.core.settings.directory
                          + "/vars/per-machine/${ctrlName}/wireguard-network-${instanceName}/prefix/value"
                        );
                        peerSuffix = builtins.readFile (
                          config.clan.core.settings.directory
                          + "/vars/per-machine/${name}/wireguard-network-${instanceName}/suffix/value"
                        );
                      in
                      "${controllerPrefix}:${peerSuffix}/96"
                    ) roles.controller.machines;

                    persistentKeepalive = 25;
                  }
                else
                  # For other controllers: use their /56 subnet
                  {
                    publicKey = (
                      builtins.readFile (
                        config.clan.core.settings.directory
                        + "/vars/per-machine/${name}/wireguard-keys-${instanceName}/publickey/value"
                      )
                    );

                    allowedIPs = [
                      "${
                        builtins.readFile (
                          config.clan.core.settings.directory
                          + "/vars/per-machine/${name}/wireguard-network-${instanceName}/prefix/value"
                        )
                      }::/56"
                    ];

                    endpoint = "${value.settings.endpoint}:${toString value.settings.port}";
                    persistentKeepalive = 25;
                  }
              ) (allPeers // allOtherControllers);
            };
          };
      };
  };

  # Maps over all machines and produces one result per machine, regardless of role
  perMachine =
    { instances, machine, ... }:
    {
      nixosModule =
        { pkgs, lib, ... }:
        let
          # Check if this machine has conflicting roles across all instances
          machineRoleConflicts = lib.flatten (
            lib.mapAttrsToList (
              instanceName: instanceInfo:
              let
                isController =
                  instanceInfo.roles ? controller && instanceInfo.roles.controller.machines ? ${machine.name};
                isPeer = instanceInfo.roles ? peer && instanceInfo.roles.peer.machines ? ${machine.name};
              in
              lib.optional (isController && isPeer) {
                inherit instanceName;
                machineName = machine.name;
              }
            ) instances
          );
        in
        {
          # Add assertions for role conflicts
          assertions = lib.forEach machineRoleConflicts (conflict: {
            assertion = false;
            message = ''
              Machine '${conflict.machineName}' cannot have both 'controller' and 'peer' roles in the wireguard instance '${conflict.instanceName}'.
              A machine must be either a controller or a peer, not both.
            '';
          });

          # Generate keys for each instance where this machine participates
          clan.core.vars.generators = lib.mapAttrs' (
            name: _instanceInfo:
            lib.nameValuePair "wireguard-keys-${name}" {
              files.publickey.secret = false;
              files.privatekey = { };

              runtimeInputs = with pkgs; [
                wireguard-tools
              ];

              script = ''
                wg genkey > $out/privatekey
                wg pubkey < $out/privatekey > $out/publickey
              '';
            }
          ) instances;

        };
    };
}
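For orientation, a minimal sketch (Python, illustrative values only) of the addressing scheme the module above encodes: the controller's interface claims <prefix>::1 with a /40 route over the whole instance network, and a peer becomes reachable at <controller-prefix>:<peer-suffix>/96 inside every controller's /56. The concrete prefix and suffix strings are assumptions here; real ones come from the wireguard-network-* vars generators.

import ipaddress

# Assumed example values, not taken from a real deployment.
controller_prefix = "fd51:19c1:3b:f700"   # a controller's /56 prefix
peer_suffix = "8f3a:1c2b:9d4e:5f60"       # a peer's 64-bit host suffix

# Address the controller assigns to its own wireguard interface.
controller_ip = ipaddress.ip_interface(f"{controller_prefix}::1/40")

# allowedIPs entry the controller adds for this peer in its own subnet.
peer_allowed = ipaddress.ip_network(
    f"{controller_prefix}:{peer_suffix}/96", strict=False
)

print(controller_ip, peer_allowed)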
clanServices/wireguard/flake-module.nix (new file, 7 lines)
@@ -0,0 +1,7 @@
{ lib, ... }:
let
  module = lib.modules.importApply ./default.nix { };
in
{
  clan.modules.wireguard = module;
}
clanServices/wireguard/ipv6_allocator.py (new executable file, 135 lines)
@@ -0,0 +1,135 @@
#!/usr/bin/env python3
"""
IPv6 address allocator for WireGuard networks.

Network layout:
- Base network: /40 ULA prefix (fd00::/8 + 32 bits from hash)
- Controllers: Each gets a /56 subnet from the base /40 (65,536 controllers max)
- Peers: Each gets a /96 subnet from their controller's /56
"""

import hashlib
import ipaddress
import sys
from pathlib import Path


def hash_string(s: str) -> str:
    """Generate SHA256 hash of string."""
    return hashlib.sha256(s.encode()).hexdigest()


def generate_ula_prefix(instance_name: str) -> ipaddress.IPv6Network:
    """
    Generate a /40 ULA prefix from instance name.

    Format: fd{32-bit hash}/40
    This gives us fd00:0000:0000::/40 through fdff:ffff:ff00::/40
    """
    h = hash_string(instance_name)

    # For /40, we need 32 bits after 'fd' (8 hex chars)
    # But only the first 32 bits count for the network prefix
    # The last 8 bits of the 40-bit prefix must be 0
    prefix_bits = int(h[:8], 16)

    # Mask to ensure we only use the first 32 bits for /40
    # This gives us addresses like fd28:387a::/40
    prefix_bits = prefix_bits & 0xFFFFFF00  # Clear last 8 bits

    # Format as IPv6 address
    prefix = f"fd{prefix_bits:08x}"
    prefix_formatted = f"{prefix[:4]}:{prefix[4:8]}::/40"

    network = ipaddress.IPv6Network(prefix_formatted)
    return network


def generate_controller_subnet(
    base_network: ipaddress.IPv6Network, controller_name: str
) -> ipaddress.IPv6Network:
    """
    Generate a /56 subnet for a controller from the base /40 network.

    We have 16 bits (40 to 56) to allocate controller subnets.
    This allows for 65,536 possible controller subnets.
    """
    h = hash_string(controller_name)
    # Take 16 bits from hash for the controller subnet ID
    controller_id = int(h[:4], 16)

    # Create the controller subnet by adding the controller ID to the base network
    # The controller subnet is at base_prefix:controller_id::/56
    base_int = int(base_network.network_address)
    controller_subnet_int = base_int | (controller_id << (128 - 56))
    controller_subnet = ipaddress.IPv6Network((controller_subnet_int, 56))

    return controller_subnet


def generate_peer_suffix(peer_name: str) -> str:
    """
    Generate a unique 64-bit host suffix for a peer.

    This suffix will be used in all controller subnets to create unique addresses.
    Format: :xxxx:xxxx:xxxx:xxxx (64 bits)
    """
    h = hash_string(peer_name)
    # Take 64 bits (16 hex chars) from hash for the host suffix
    suffix_bits = h[:16]

    # Format as IPv6 suffix without leading colon
    suffix = f"{suffix_bits[0:4]}:{suffix_bits[4:8]}:{suffix_bits[8:12]}:{suffix_bits[12:16]}"
    return suffix


def main() -> None:
    if len(sys.argv) < 4:
        print(
            "Usage: ipv6_allocator.py <output_dir> <instance_name> <controller|peer> <machine_name>"
        )
        sys.exit(1)

    output_dir = Path(sys.argv[1])
    instance_name = sys.argv[2]
    node_type = sys.argv[3]

    # Generate base /40 network
    base_network = generate_ula_prefix(instance_name)

    if node_type == "controller":
        if len(sys.argv) < 5:
            print("Controller name required")
            sys.exit(1)

        controller_name = sys.argv[4]
        subnet = generate_controller_subnet(base_network, controller_name)

        # Extract clean prefix from subnet (e.g. "fd51:19c1:3b:f700::/56" -> "fd51:19c1:3b:f700")
        prefix_str = str(subnet).split("/")[0].rstrip(":")
        while prefix_str.endswith(":"):
            prefix_str = prefix_str.rstrip(":")

        # Write file
        (output_dir / "prefix").write_text(prefix_str)

    elif node_type == "peer":
        if len(sys.argv) < 5:
            print("Peer name required")
            sys.exit(1)

        peer_name = sys.argv[4]

        # Generate the peer's host suffix
        suffix = generate_peer_suffix(peer_name)

        # Write file
        (output_dir / "suffix").write_text(suffix)

    else:
        print(f"Unknown node type: {node_type}")
        sys.exit(1)


if __name__ == "__main__":
    main()
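A minimal usage sketch of the allocator above, assuming it is importable as a module named ipv6_allocator and using made-up instance and machine names; the composed /96 mirrors what the NixOS module builds from the generated prefix and suffix vars:

import ipaddress

from ipv6_allocator import (
    generate_controller_subnet,
    generate_peer_suffix,
    generate_ula_prefix,
)

# Hypothetical names, for illustration only.
base = generate_ula_prefix("my-wireguard-instance")      # e.g. fd28:387a::/40
controller = generate_controller_subnet(base, "ctrl-1")  # a /56 inside the /40
suffix = generate_peer_suffix("peer-1")                  # 64-bit host suffix

# Compose a peer's /96 inside this controller's subnet, as the module does.
prefix_str = str(controller).split("/")[0].rstrip(":")
peer_network = ipaddress.IPv6Network(f"{prefix_str}:{suffix}/96", strict=False)
print(base, controller, peer_network)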
devFlake/flake.lock (generated, 6 lines changed)
@@ -3,10 +3,10 @@
    "clan-core-for-checks": {
      "flake": false,
      "locked": {
-        "lastModified": 1754973208,
-        "narHash": "sha256-K/abuL/G6TtwV6Oo/C5EloDfRd2lAbPhCxQ/KnIDI9k=",
+        "lastModified": 1755093452,
+        "narHash": "sha256-NKBss7QtNnOqYVyJmYCgaCvYZK0mpQTQc9fLgE1mGyk=",
        "ref": "main",
-        "rev": "caae6c7a559d918de06636febc317e6c0a59e0cb",
+        "rev": "7e97734797f0c6bd3c2d3a51cf54a2a6b371c222",
        "shallow": true,
        "type": "git",
        "url": "https://git.clan.lol/clan/clan-core"
@@ -92,7 +92,6 @@ nav:
   - Services:
     - Overview:
       - reference/clanServices/index.md
-
      - reference/clanServices/admin.md
      - reference/clanServices/borgbackup.md
      - reference/clanServices/data-mesher.md
@@ -109,6 +108,7 @@ nav:
      - reference/clanServices/trusted-nix-caches.md
      - reference/clanServices/users.md
      - reference/clanServices/wifi.md
+      - reference/clanServices/wireguard.md
      - reference/clanServices/zerotier.md
      - API: reference/clanServices/clan-service-author-interface.md
flake.lock (generated, 6 lines changed)
@@ -146,11 +146,11 @@
        ]
      },
      "locked": {
-        "lastModified": 1754328224,
-        "narHash": "sha256-glPK8DF329/dXtosV7YSzRlF4n35WDjaVwdOMEoEXHA=",
+        "lastModified": 1754988908,
+        "narHash": "sha256-t+voe2961vCgrzPFtZxha0/kmFSHFobzF00sT8p9h0U=",
        "owner": "Mic92",
        "repo": "sops-nix",
-        "rev": "49021900e69812ba7ddb9e40f9170218a7eca9f4",
+        "rev": "3223c7a92724b5d804e9988c6b447a0d09017d48",
        "type": "github"
      },
      "original": {
@@ -59,8 +59,6 @@
      "pkgs/clan-cli/clan_cli/tests/data/password-store/.gpg-id"
      "pkgs/clan-cli/clan_cli/tests/data/ssh_host_ed25519_key"
      "pkgs/clan-cli/clan_cli/tests/data/sshd_config"
-      "pkgs/clan-vm-manager/.vscode/lhebendanz.weaudit"
-      "pkgs/clan-vm-manager/bin/clan-vm-manager"
      "clanServices/hello-world/default.nix"
      "sops/secrets/test-backup-age.key/secret"
    ];
@@ -115,21 +113,7 @@
        ];
        extraPythonPaths = [ "../clan-cli" ];
      };
-    }
-    // (
-      if pkgs.stdenv.isLinux then
-        {
-          "clan-vm-manager" = {
-            directory = "pkgs/clan-vm-manager";
-            extraPythonPackages = self'.packages.clan-vm-manager.externalTestDeps ++ [
-              (pkgs.python3.withPackages (ps: self'.packages.clan-cli.devshellPyDeps ps))
-            ];
-            extraPythonPaths = [ "../clan-cli" ];
-          };
-        }
-      else
-        { }
-    );
+    };
    treefmt.programs.ruff.check = true;
    treefmt.programs.ruff.format = true;
  };
@@ -124,7 +124,7 @@ rec {
        ]
      )
    }" \
-      ${pkgs.runtimeShell} ${genInfo.finalScript}
+      ${pkgs.runtimeShell} -x "${genInfo.finalScript}"

    # Verify expected outputs were created
    ${lib.concatStringsSep "\n" (
@@ -1,8 +1,8 @@
 div.alert {
-  @apply flex gap-2.5 px-6 py-4 size-full rounded-md items-start;
+  @apply flex flex-row gap-2.5 p-4 rounded-md items-start;

  &.has-icon {
-    @apply pl-4;
+    @apply pl-3;

    svg.icon {
      @apply relative top-0.5;
@@ -10,11 +10,15 @@ div.alert {
  }

  &.has-dismiss {
-    @apply pr-4;
+    @apply pr-3;
+  }
+
+  & > button.dismiss-trigger {
+    @apply relative top-0.5;
  }

  & > div.content {
-    @apply flex flex-col gap-2 size-full;
+    @apply flex flex-col size-full gap-1;
  }

  &.info {
@@ -33,7 +37,7 @@ div.alert {
    @apply bg-semantic-success-1 border border-semantic-success-3 fg-semantic-success-3;
  }

-  & > button.dismiss-trigger {
-    @apply relative top-0.5;
+  &.transparent {
+    @apply bg-transparent border-none p-0;
  }
 }
@@ -3,16 +3,26 @@ import { Alert, AlertProps } from "@/src/components/Alert/Alert";
 import { expect, fn } from "storybook/test";
 import { StoryContext } from "@kachurun/storybook-solid-vite";

+const AlertExamples = (props: AlertProps) => (
+  <div class="grid w-fit grid-cols-2 gap-8">
+    <div class="w-72">
+      <Alert {...props} />
+    </div>
+    <div class="w-72">
+      <Alert {...props} size="s" />
+    </div>
+    <div class="w-72">
+      <Alert {...props} transparent />
+    </div>
+    <div class="w-72">
+      <Alert {...props} size="s" transparent />
+    </div>
+  </div>
+);
+
 const meta: Meta<AlertProps> = {
   title: "Components/Alert",
-  component: Alert,
-  decorators: [
-    (Story: StoryObj) => (
-      <div class="w-72">
-        <Story />
-      </div>
-    ),
-  ],
+  component: AlertExamples,
 };

 export default meta;
@@ -23,6 +33,7 @@ export const Info: Story = {
   args: {
     type: "info",
     title: "Headline",
+    onDismiss: undefined,
     description:
       "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua.",
   },
@@ -7,40 +7,63 @@ import { Alert as KAlert } from "@kobalte/core/alert";
 import { Show } from "solid-js";

 export interface AlertProps {
-  type: "success" | "error" | "warning" | "info";
-  title: string;
-  description?: string;
   icon?: IconVariant;
+  type: "success" | "error" | "warning" | "info";
+  size?: "default" | "s";
+  title: string;
   onDismiss?: () => void;
+  transparent?: boolean;
+  description?: string;
 }

-export const Alert = (props: AlertProps) => (
-  <KAlert
-    class={cx("alert", props.type, {
-      "has-icon": props.icon,
-      "has-dismiss": props.onDismiss,
-    })}
-  >
-    {props.icon && <Icon icon={props.icon} color="inherit" size="1rem" />}
-    <div class="content">
-      <Typography hierarchy="body" size="default" weight="bold" color="inherit">
-        {props.title}
-      </Typography>
-      <Show when={props.description}>
-        <Typography hierarchy="body" size="xs" color="inherit">
-          {props.description}
-        </Typography>
-      </Show>
-    </div>
-    {props.onDismiss && (
-      <Button
-        name="dismiss-alert"
-        class="dismiss-trigger"
-        onClick={props.onDismiss}
-        aria-label={`Dismiss ${props.type} alert`}
-      >
-        <Icon icon="Close" color="primary" size="0.75rem" />
-      </Button>
-    )}
-  </KAlert>
-);
+export const Alert = (props: AlertProps) => {
+  const size = () => props.size || "default";
+  const titleSize = () => (size() == "default" ? "default" : "xs");
+  const bodySize = () => (size() == "default" ? "xs" : "xxs");
+  const iconSize = () => (size() == "default" ? "1rem" : "0.75rem");
+
+  return (
+    <KAlert
+      class={cx("alert", props.type, {
+        "has-icon": props.icon,
+        "has-dismiss": props.onDismiss,
+        transparent: props.transparent,
+      })}
+    >
+      {props.icon && (
+        <Icon icon={props.icon} color="inherit" size={iconSize()} />
+      )}
+      <div class="content">
+        <Typography
+          hierarchy="body"
+          family="condensed"
+          size={titleSize()}
+          weight="bold"
+          color="inherit"
+        >
+          {props.title}
+        </Typography>
+        <Show when={props.description}>
+          <Typography
+            hierarchy="body"
+            family="condensed"
+            size={bodySize()}
+            color="inherit"
+          >
+            {props.description}
+          </Typography>
+        </Show>
+      </div>
+      {props.onDismiss && (
+        <Button
+          name="dismiss-alert"
+          class="dismiss-trigger"
+          onClick={props.onDismiss}
+          aria-label={`Dismiss ${props.type} alert`}
+        >
+          <Icon icon="Close" color="primary" size="0.75rem" />
+        </Button>
+      )}
+    </KAlert>
+  );
+};
@@ -22,7 +22,7 @@ export interface ButtonProps
   startIcon?: IconVariant;
   endIcon?: IconVariant;
   class?: string;
-  onAction?: Action;
+  loading?: boolean;
 }

 const iconSizes: Record<Size, string> = {
@@ -40,31 +40,12 @@ export const Button = (props: ButtonProps) => {
     "startIcon",
     "endIcon",
     "class",
-    "onAction",
+    "loading",
   ]);

   const size = local.size || "default";
   const hierarchy = local.hierarchy || "primary";

-  const [loading, setLoading] = createSignal(false);
-
-  const onClick = async () => {
-    if (!local.onAction) {
-      console.error("this should not be possible");
-      return;
-    }
-
-    setLoading(true);
-
-    try {
-      await local.onAction();
-    } catch (error) {
-      console.error("Error while executing action", error);
-    }
-
-    setLoading(false);
-  };
-
   const iconSize = iconSizes[local.size || "default"];

   const loadingClass =
@@ -81,16 +62,19 @@ export const Button = (props: ButtonProps) => {
         hierarchy,
         {
           icon: local.icon,
-          loading: loading(),
+          loading: props.loading,
           ghost: local.ghost,
         },
       )}
-      onClick={local.onAction ? onClick : undefined}
+      onClick={props.onClick}
      {...other}
    >
      <Loader
        hierarchy={hierarchy}
-        class={cx({ [idleClass]: !loading(), [loadingClass]: loading() })}
+        class={cx({
+          [idleClass]: !props.loading,
+          [loadingClass]: props.loading,
+        })}
      />

      {local.startIcon && (
@@ -1,8 +1,8 @@
 hr {
-  @apply border-none outline-none bg-inv-2 self-stretch;
+  @apply border-none outline-none bg-def-3 self-stretch;

  &.inverted {
-    @apply bg-def-3;
+    @apply bg-inv-2;
  }

  &[data-orientation="horizontal"] {
@@ -11,7 +11,7 @@ import { Label } from "@/src/components/Form/Label";
 import { Orienter } from "@/src/components/Form/Orienter";
 import { CollectionNode } from "@kobalte/core";

-interface MachineTag {
+export interface MachineTag {
   value: string;
   disabled?: boolean;
   new?: boolean;
@@ -24,11 +24,10 @@ export type MachineTagsProps = FieldProps & {
   disabled?: boolean;
   required?: boolean;
   defaultValue?: string[];
+  defaultOptions?: string[];
+  readonlyOptions?: string[];
 };

-// tags which are applied to all machines and cannot be removed
-const staticOptions = [{ value: "all", disabled: true }];
-
 const uniqueOptions = (options: MachineTag[]) => {
   const record: Record<string, MachineTag> = {};
   options.forEach((option) => {
@@ -39,13 +38,8 @@ const uniqueOptions = (options: MachineTag[]) => {
   return Object.values(record);
 };

-const sortedOptions = (options: MachineTag[]) => {
-  return options.sort((a, b) => {
-    if (a.new && !b.new) return -1;
-    if (a.disabled && !b.disabled) return -1;
-    return a.value.localeCompare(b.value);
-  });
-};
+const sortedOptions = (options: MachineTag[]) =>
+  options.sort((a, b) => a.value.localeCompare(b.value));

 const sortedAndUniqueOptions = (options: MachineTag[]) =>
   sortedOptions(uniqueOptions(options));
@@ -72,9 +66,15 @@ export const MachineTags = (props: MachineTagsProps) => {
     (props.defaultValue || []).map((value) => ({ value })),
   );

-  // todo this should be the superset of tags used across the entire clan and be passed in via a prop
+  // convert default options string[] into MachineTag[]
   const [availableOptions, setAvailableOptions] = createSignal<MachineTag[]>(
-    sortedAndUniqueOptions([...staticOptions, ...defaultValue]),
+    sortedAndUniqueOptions([
+      ...(props.readonlyOptions || []).map((value) => ({
+        value,
+        disabled: true,
+      })),
+      ...(props.defaultOptions || []).map((value) => ({ value })),
+    ]),
   );

   const onKeyDown = (event: KeyboardEvent) => {
@@ -1,9 +1,9 @@
-import { FieldSchema } from "@/src/hooks/queries";
+import { SuccessData } from "@/src/hooks/api";
 import { Maybe } from "@modular-forms/solid";

-export const tooltipText = <T extends object, K extends keyof T>(
-  name: K,
-  schema: FieldSchema<T>,
+export const tooltipText = (
+  name: string,
+  schema: SuccessData<"get_machine_fields_schema">,
   staticValue: Maybe<string> = undefined,
 ): Maybe<string> => {
   const entry = schema[name];
@@ -4,7 +4,7 @@

  &[data-expanded] {
    @apply outline-def-2 outline-1 outline;
-    z-index: var(--z-index + 5);
+    z-index: calc(var(--z-index) + 5);
  }

  &[data-highlighted] {
@@ -146,6 +146,7 @@ export const Select = (props: SelectProps) => {
      <KSelect.HiddenSelect {...selectProps} />
      <KSelect.Trigger
        class={cx(styles.trigger)}
+        style={{ "--z-index": zIndex() }}
        data-loading={loading() || undefined}
      >
        <KSelect.Value<Option>>
@@ -24,15 +24,16 @@ const TypographyExamples: Component<TypographyExamplesProps> = (props) => (
    <For each={props.sizes}>
      {(size) => (
        <tr
-          class="border-b border-def-3 even:bg-def-2"
+          class="border-b fg-semantic-info-1 border-def-3 even:bg-def-2"
          classList={{
            "border-inv-3 even:bg-inv-2": props.inverted,
            "border-def-3 even:bg-def-2": !props.inverted,
          }}
        >
          <For each={props.weights}>
+            {/* we set a foreground color to test color=inherit */}
            {(weight) => (
-              <td class="px-6 py-2 ">
+              <td class="px-6 py-2">
                <Show when={!props.colors}>
                  <Typography
                    hierarchy={props.hierarchy}
@@ -6,20 +6,15 @@ import { useApiClient } from "./ApiClient";
 export type ClanDetails = SuccessData<"get_clan_details">;
 export type ClanDetailsWithURI = ClanDetails & { uri: string };

-export type FieldSchema<T> = {
-  [K in keyof T]: {
-    readonly: boolean;
-    reason?: string;
-  };
-};
+export type Tags = SuccessData<"list_tags">;

 export type Machine = SuccessData<"get_machine">;
 export type ListMachines = SuccessData<"list_machines">;
 export type MachineDetails = SuccessData<"get_machine_details">;

 export interface MachineDetail {
+  tags: Tags;
   machine: Machine;
-  fieldsSchema: FieldSchema<Machine>;
+  fieldsSchema: SuccessData<"get_machine_fields_schema">;
 }

 export type MachinesQueryResult = UseQueryResult<ListMachines>;
@@ -50,7 +45,12 @@ export const useMachineQuery = (clanURI: string, machineName: string) => {
   return useQuery<MachineDetail>(() => ({
     queryKey: ["clans", encodeBase64(clanURI), "machine", machineName],
     queryFn: async () => {
-      const [machineCall, schemaCall] = await Promise.all([
+      const [tagsCall, machineCall, schemaCall] = await Promise.all([
+        client.fetch("list_tags", {
+          flake: {
+            identifier: clanURI,
+          },
+        }),
         client.fetch("get_machine", {
           name: machineName,
           flake: {
@@ -67,6 +67,11 @@ export const useMachineQuery = (clanURI: string, machineName: string) => {
        }),
      ]);

+      const tags = await tagsCall.result;
+      if (tags.status === "error") {
+        throw new Error("Error fetching tags: " + tags.errors[0].message);
+      }
+
      const machine = await machineCall.result;
      if (machine.status === "error") {
        throw new Error("Error fetching machine: " + machine.errors[0].message);
@@ -81,6 +86,7 @@ export const useMachineQuery = (clanURI: string, machineName: string) => {
      }

      return {
+        tags: tags.data,
        machine: machine.data,
        fieldsSchema: writeSchema.data,
      };
@@ -312,8 +318,13 @@ export const useMachineGenerators = (
    ],
    queryFn: async () => {
      const call = client.fetch("get_generators", {
-        base_dir: clanUri,
-        machine_name: machineName,
+        machine: {
+          name: machineName,
+          flake: {
+            identifier: clanUri,
+          },
+        },
+        full_closure: true, // TODO: Make this configurable
        // TODO: Make this configurable
        include_previous_values: true,
      });
@@ -30,6 +30,26 @@ export const SectionTags = (props: SectionTags) => {
    return pick(machineQuery.data.machine, ["tags"]) satisfies FormValues;
  };

+  const options = () => {
+    if (!machineQuery.isSuccess) {
+      return [[], []];
+    }
+
+    // these are static values or values which have been configured in nix and
+    // cannot be modified in the UI
+    const readonlyOptions =
+      machineQuery.data.fieldsSchema.tags?.readonly_members || [];
+
+    // filter out the read-only options from the superset of clan-wide options
+    const readonlySet = new Set(readonlyOptions);
+
+    const defaultOptions = (machineQuery.data.tags.options || []).filter(
+      (tag) => !readonlySet.has(tag),
+    );
+
+    return [defaultOptions, readonlyOptions];
+  };
+
  return (
    <Show when={machineQuery.isSuccess}>
      <SidebarSectionForm
@@ -50,6 +70,8 @@ export const SectionTags = (props: SectionTags) => {
              readOnly={!editing}
              orientation="horizontal"
              defaultValue={field.value}
+              defaultOptions={options()[0]}
+              readonlyOptions={options()[1]}
              input={input}
            />
          )}
@@ -99,8 +99,12 @@ const welcome = (props: {
 }) => {
   const navigate = useNavigate();

+  const [loading, setLoading] = createSignal(false);
+
   const selectFolder = async () => {
+    setLoading(true);
     const uri = await selectClanFolder();
+    setLoading(false);
     navigateToClan(navigate, uri);
   };

@@ -136,7 +140,7 @@ const welcome = (props: {
        Start building
      </Button>
      <div class="separator">
-        <Divider orientation="horizontal" inverted={true} />
+        <Divider orientation="horizontal" />
        <Typography
          hierarchy="body"
          size="s"
@@ -146,9 +150,14 @@ const welcome = (props: {
        >
          or
        </Typography>
-        <Divider orientation="horizontal" inverted={true} />
+        <Divider orientation="horizontal" />
      </div>
-      <Button hierarchy="primary" ghost={true} onAction={selectFolder}>
+      <Button
+        hierarchy="primary"
+        ghost={true}
+        loading={loading()}
+        onClick={selectFolder}
+      >
        Select folder
      </Button>
    </div>
@@ -251,6 +260,10 @@ export const Onboarding: Component<RouteSectionProps> = (props) => {
      },
    }).result;

+    await client.fetch("create_secrets_user", {
+      flake_dir: path,
+    }).result;
+
    if (resp.status === "error") {
      setWelcomeError(resp.errors[0].message);
      setState("welcome");
@@ -318,7 +331,7 @@ export const Onboarding: Component<RouteSectionProps> = (props) => {
          />
        )}
      </Field>
-      <Divider inverted={true} />
+      <Divider />
      <Field name="description">
        {(field, input) => (
          <TextArea
@@ -18,29 +18,64 @@ type ResultDataMap = {
   [K in OperationNames]: SuccessQuery<K>["data"];
 };

-export const mockFetcher: Fetcher = <K extends OperationNames>(
+const mockFetcher: Fetcher = <K extends OperationNames>(
   name: K,
   _args: unknown,
 ): ApiCall<K> => {
   // TODO: Make this configurable for every story
   const resultData: Partial<ResultDataMap> = {
-    get_machine_flash_options: {
-      keymaps: ["DE_de", "US_en"],
-      languages: ["en", "de"],
-    },
     get_system_file: ["id_rsa.pub"],
     list_system_storage_devices: {
       blockdevices: [
         {
           name: "sda_bla_bla",
           path: "/dev/sda",
+          id_link: "sda_bla_bla",
         },
         {
           name: "sdb_foo_foo",
           path: "/dev/sdb",
+          id_link: "sdb_foo_foo",
         },
       ] as SuccessQuery<"list_system_storage_devices">["data"]["blockdevices"],
     },
+    get_machine_disk_schemas: {
+      "single-disk": {
+        readme: "This is a single disk installation schema",
+        frontmatter: {
+          description: "Single disk installation schema",
+        },
+        name: "single-disk",
+        placeholders: {
+          mainDisk: {
+            label: "Main Disk",
+            required: true,
+            options: ["disk1", "usb1"],
+          },
+        },
+      },
+    },
+    get_generators: [
+      {
+        name: "funny.gritty",
+        prompts: [
+          {
+            name: "gritty.name",
+            description: "Name of the gritty",
+            prompt_type: "line",
+            display: {
+              label: "Gritty Name",
+              group: "User",
+              required: true,
+            },
+          },
+        ],
+      },
+    ],
+    run_generators: null,
+    get_machine_hardware_summary: {
+      hardware_config: "nixos-facter",
+    },
  };

  return {
@@ -7,7 +7,7 @@ import { StepLayout } from "../../Steps";
 const ChoiceLocalOrRemote = () => {
   const stepSignal = useStepper<InstallSteps>();
   return (
-    <div class="flex flex-col gap-3">
+    <div class="flex flex-col gap-3 size-full">
      <div class="flex flex-col gap-6 rounded-md px-4 py-6 text-fg-def-1 bg-def-2">
        <div class="flex justify-between gap-2">
          <div class="flex flex-col justify-center gap-1 px-1">
@@ -16,10 +16,7 @@ import { Alert } from "@/src/components/Alert/Alert";
 import { LoadingBar } from "@/src/components/LoadingBar/LoadingBar";
 import { Button } from "@/src/components/Button/Button";
 import Icon from "@/src/components/Icon/Icon";
-import {
-  useMachineFlashOptions,
-  useSystemStorageOptions,
-} from "@/src/hooks/queries";
+import { useSystemStorageOptions } from "@/src/hooks/queries";
 import { useApiClient } from "@/src/hooks/ApiClient";
 import { onMount } from "solid-js";

@@ -144,8 +141,6 @@ const ConfigureImage = () => {
    throw new Error("No data returned from api call");
  };

-  const optionsQuery = useMachineFlashOptions();
-
  let content: Node;

  return (
@@ -309,15 +304,17 @@ const FlashProgress = () => {
  });

  const handleCancel = async () => {
-    const progress = store.flash.progress;
-    if (progress) {
-      await progress.cancel();
+    if (store.flash) {
+      const progress = store.flash.progress;
+      if (progress) {
+        await progress.cancel();
+      }
    }
    stepSignal.previous();
  };

  return (
-    <div class="flex size-full h-60 flex-col items-center justify-end bg-inv-4">
+    <div class="flex size-full flex-col items-center justify-center bg-inv-4">
      <div class="mb-6 flex w-full max-w-md flex-col items-center gap-3 fg-inv-1">
        <Typography
          hierarchy="title"
@@ -344,7 +341,7 @@ const FlashDone = () => {
  const stepSignal = useStepper<InstallSteps>();
  return (
    <div class="flex size-full flex-col items-center justify-between bg-inv-4">
-      <div class="flex w-full max-w-md flex-col items-center gap-3 py-6 fg-inv-1">
+      <div class="flex size-full max-w-md flex-col items-center justify-center gap-3 py-6 fg-inv-1">
        <div class="rounded-full bg-semantic-success-4">
          <Icon icon="Checkmark" class="size-9" />
        </div>
@@ -4,6 +4,7 @@ import {
   createForm,
   FieldValues,
   getError,
+  getValue,
   SubmitHandler,
   valiForm,
 } from "@modular-forms/solid";
@@ -13,7 +14,7 @@ import { getStepStore, useStepper } from "@/src/hooks/stepper";
 import { InstallSteps, InstallStoreType, PromptValues } from "../install";
 import { TextInput } from "@/src/components/Form/TextInput";
 import { Alert } from "@/src/components/Alert/Alert";
-import { For, Match, Show, Switch } from "solid-js";
+import { createSignal, For, Match, Show, Switch } from "solid-js";
 import { Divider } from "@/src/components/Divider/Divider";
 import { Orienter } from "@/src/components/Form/Orienter";
 import { Button } from "@/src/components/Button/Button";
@@ -29,6 +30,7 @@ import {
 import { useClanURI } from "@/src/hooks/clan";
 import { useApiClient } from "@/src/hooks/ApiClient";
 import { ProcessMessage, useNotifyOrigin } from "@/src/hooks/notify";
+import { Loader } from "@/src/components/Loader/Loader";

 export const InstallHeader = (props: { machineName: string }) => {
   return (
@@ -58,8 +60,9 @@ const ConfigureAddress = () => {
    },
  });

+  const [isReachable, setIsReachable] = createSignal<string | null>(null);
+
  const client = useApiClient();
-  const clanUri = useClanURI();
  // TODO: push values to the parent form Store
  const handleSubmit: SubmitHandler<ConfigureAdressForm> = async (
    values,
@@ -70,7 +73,24 @@ const ConfigureAddress = () => {

    // Here you would typically trigger the ISO creation process
    stepSignal.next();
-    console.log("Shit doesnt work", values);
+  };
+
+  const tryReachable = async () => {
+    const address = getValue(formStore, "targetHost");
+    if (!address) {
+      return;
+    }
+
+    const call = client.fetch("check_machine_ssh_login", {
+      remote: {
+        address,
+      },
+    });
+    const result = await call.result;
+    console.log("SSH login check result:", result);
+    if (result.status === "success") {
+      setIsReachable(address);
+    }
  };

  return (
@@ -99,12 +119,28 @@ const ConfigureAddress = () => {
            )}
          </Field>
        </Fieldset>
+        <Button
+          disabled={!getValue(formStore, "targetHost")}
+          endIcon="ArrowRight"
+          onClick={tryReachable}
+          hierarchy="secondary"
+        >
+          Test Connection
+        </Button>
      </div>
    }
    footer={
      <div class="flex justify-between">
        <BackButton />
-        <NextButton type="submit">Next</NextButton>
+        <NextButton
+          type="submit"
+          disabled={
+            !isReachable() ||
+            isReachable() !== getValue(formStore, "targetHost")
+          }
+        >
+          Next
+        </NextButton>
      </div>
    }
    />
@@ -158,15 +194,18 @@ const CheckHardware = () => {
        Hardware Report
      </Typography>
      <Button
+        disabled={hardwareQuery.isLoading}
        hierarchy="secondary"
        startIcon="Report"
        onClick={handleUpdateSummary}
+        class="flex gap-3"
+        loading={hardwareQuery.isFetching}
      >
        Update hardware report
      </Button>
    </Orienter>
    <Divider orientation="horizontal" />
-    <Show when={hardwareQuery.isLoading}>Loading...</Show>
    <Show when={hardwareQuery.data}>
      {(d) => (
        <Alert
@@ -318,6 +357,25 @@ interface PromptForm extends FieldValues {
  promptValues: PromptValues;
 }

+const sanitize = (name: string) => {
+  return name.replace(".", "__dot__");
+};
+const restore = (name: string) => {
+  return name.replace("__dot__", ".");
+};
+
+const transformPromptValues = (
+  values: PromptValues,
+  transform: (s: string) => string,
+): PromptValues =>
+  Object.fromEntries(
+    Object.entries(values).map(([key, value]) => [
+      transform(key),
+      Object.fromEntries(
+        Object.entries(value).map(([k, v]) => [transform(k), v]),
+      ),
+    ]),
+  );
 interface PromptsFieldsProps {
   generators: MachineGenerators;
 }
@@ -338,8 +396,11 @@ const PromptsFields = (props: PromptsFieldsProps) => {
    if (!acc[groupName]) acc[groupName] = { fields: [], name: groupName };

    acc[groupName].fields.push({
-      prompt,
-      generator: generator.name,
+      prompt: {
+        ...prompt,
+        name: sanitize(prompt.name),
+      },
+      generator: sanitize(generator.name),
      value: prompt.previous_value,
    });
  }
@@ -351,16 +412,24 @@ const PromptsFields = (props: PromptsFieldsProps) => {

  const [formStore, { Form, Field }] = createForm<PromptForm>({
    initialValues: {
-      promptValues: store.install?.promptValues || {},
+      promptValues: transformPromptValues(
+        store.install?.promptValues || {},
+        sanitize,
+      ),
    },
  });

  console.log(groups);

  const handleSubmit: SubmitHandler<PromptForm> = (values, event) => {
-    console.log("vars submitted", values);
-    set("install", (s) => ({ ...s, promptValues: values.promptValues }));
+    const restoredValues: PromptValues = transformPromptValues(
+      values.promptValues,
+      restore,
+    );
+
+    console.log("vars submitted", restoredValues);
+
+    set("install", (s) => ({ ...s, promptValues: restoredValues }));
    console.log("vars preloaded");
    // Here you would typically trigger the ISO creation process
    stepSignal.next();
@@ -479,8 +548,12 @@ const InstallSummary = () => {

    const runGenerators = client.fetch("run_generators", {
      all_prompt_values: store.install.promptValues,
-      base_dir: clanUri,
-      machine_name: store.install.machineName,
+      machine: {
+        name: store.install.machineName,
+        flake: {
+          identifier: clanUri,
+        },
+      },
    });

    set("install", (s) => ({
@@ -516,13 +589,16 @@ const InstallSummary = () => {
    <StepLayout
      body={
        <div class="flex flex-col gap-4">
-          <Fieldset legend="Address Configuration">
+          <Fieldset legend="Machine">
            <Orienter orientation="horizontal">
-              {/* TOOD: Display the values emited from previous steps */}
-              <Display label="Target" value="flash-installer.local" />
+              <Display label="Name" value={store.install.machineName} />
+            </Orienter>
+            <Divider orientation="horizontal" />
+            <Orienter orientation="horizontal">
+              <Display label="Address" value={store.install.targetHost} />
            </Orienter>
          </Fieldset>
-          <Fieldset legend="Disk Configuration">
+          <Fieldset legend="Disk">
            <Orienter orientation="horizontal">
              <Display label="Disk Schema" value="Single" />
            </Orienter>
@@ -545,7 +621,14 @@ const InstallSummary = () => {
  );
 };

-type InstallTopic = "generators" | "upload-secrets" | "nixos-anywhere";
+type InstallTopic = [
+  "generators",
+  "upload-secrets",
+  "nixos-anywhere",
+  "formatting",
+  "rebooting",
+  "installing",
+][number];

 const InstallProgress = () => {
   const stepSignal = useStepper<InstallSteps>();
@@ -563,7 +646,7 @@ const InstallProgress = () => {
  );

  return (
-    <div class="flex size-full h-60 flex-col items-center justify-end bg-inv-4">
+    <div class="flex size-full flex-col items-center justify-center bg-inv-4">
      <div class="mb-6 flex w-full max-w-md flex-col items-center gap-3 fg-inv-1">
        <Typography
          hierarchy="title"
@@ -599,6 +682,15 @@ const InstallProgress = () => {
            <Match when={installState()?.topic === "nixos-anywhere"}>
              Running nixos-anywhere ...
            </Match>
+            <Match when={installState()?.topic === "formatting"}>
+              Formatting ...
+            </Match>
+            <Match when={installState()?.topic === "installing"}>
+              Installing ...
+            </Match>
+            <Match when={installState()?.topic === "rebooting"}>
+              Rebooting ...
+            </Match>
          </Switch>
        </Match>
      </Switch>
@@ -625,7 +717,7 @@ const InstallDone = (props: InstallDoneProps) => {
  const [store, get] = getStepStore<InstallStoreType>(stepSignal);

  return (
-    <div class="flex w-full flex-col items-center bg-inv-4">
+    <div class="flex size-full flex-col items-center justify-center bg-inv-4">
      <div class="flex w-full max-w-md flex-col items-center gap-3 py-6 fg-inv-1">
        <div class="rounded-full bg-semantic-success-4">
          <Icon icon="Checkmark" class="size-9" />
@@ -6,9 +6,9 @@ from tempfile import TemporaryDirectory
 from clan_lib.flake import require_flake
 from clan_lib.machines.machines import Machine
 from clan_lib.ssh.host import Host
+from clan_lib.ssh.upload import upload

 from clan_cli.completions import add_dynamic_completer, complete_machines
-from clan_cli.ssh.upload import upload

 log = logging.getLogger(__name__)

@@ -1,6 +1,8 @@
 import argparse
+import json
 import logging
 import sys
+from contextlib import ExitStack
 from pathlib import Path
 from typing import get_args

@@ -8,6 +10,7 @@ from clan_lib.errors import ClanError
 from clan_lib.flake import require_flake
 from clan_lib.machines.install import BuildOn, InstallOptions, run_machine_install
 from clan_lib.machines.machines import Machine
+from clan_lib.network.qr_code import read_qr_image, read_qr_json
 from clan_lib.ssh.host_key import HostKeyCheck
 from clan_lib.ssh.remote import Remote

@@ -17,7 +20,6 @@ from clan_cli.completions import (
    complete_target_host,
 )
 from clan_cli.machines.hardware import HardwareConfig
-from clan_cli.ssh.deploy_info import DeployInfo, find_reachable_host, ssh_command_parse

 log = logging.getLogger(__name__)

@@ -27,80 +29,71 @@ def install_command(args: argparse.Namespace) -> None:
        flake = require_flake(args.flake)
        # Only if the caller did not specify a target_host via args.target_host
        # Find a suitable target_host that is reachable
-        target_host_str = args.target_host
-        deploy_info: DeployInfo | None = (
-            ssh_command_parse(args) if target_host_str is None else None
-        )
-
-        use_tor = False
-        if deploy_info:
-            host = find_reachable_host(deploy_info)
-            if host is None or host.socks_port:
-                use_tor = True
-                target_host_str = deploy_info.tor.target
-            else:
-                target_host_str = host.target
-
-        if args.password:
-            password = args.password
-        elif deploy_info and deploy_info.addrs[0].password:
-            password = deploy_info.addrs[0].password
-        else:
-            password = None
-
-        machine = Machine(name=args.machine, flake=flake)
-        host_key_check = args.host_key_check
-
-        if target_host_str is not None:
-            target_host = Remote.from_ssh_uri(
-                machine_name=machine.name, address=target_host_str
-            ).override(host_key_check=host_key_check)
-        else:
-            target_host = machine.target_host().override(host_key_check=host_key_check)
-
-        if args.identity_file:
-            target_host = target_host.override(private_key=args.identity_file)
-
-        if machine._class_ == "darwin":
-            msg = "Installing macOS machines is not yet supported"
-            raise ClanError(msg)
-
-        if not args.yes:
-            while True:
-                ask = (
-                    input(f"Install {args.machine} to {target_host.target}? [y/N] ")
-                    .strip()
-                    .lower()
-                )
-                if ask == "y":
-                    break
-                if ask == "n" or ask == "":
-                    return None
-                print(f"Invalid input '{ask}'. Please enter 'y' for yes or 'n' for no.")
-
-        if args.identity_file:
-            target_host = target_host.override(private_key=args.identity_file)
-
-        if password:
-            target_host = target_host.override(password=password)
-
-        if use_tor:
-            target_host = target_host.override(
-                socks_port=9050, socks_wrapper=["torify"]
-            )
-
-        return run_machine_install(
-            InstallOptions(
-                machine=machine,
-                kexec=args.kexec,
-                phases=args.phases,
-                debug=args.debug,
-                no_reboot=args.no_reboot,
-                build_on=args.build_on if args.build_on is not None else None,
-                update_hardware_config=HardwareConfig(args.update_hardware_config),
-            ),
-            target_host=target_host,
-        )
+        with ExitStack() as stack:
+            remote: Remote
+            if args.target_host:
+                # TODO add network support here with either --network or some url magic
+                remote = Remote.from_ssh_uri(
+                    machine_name=args.machine, address=args.target_host
+                )
+            elif args.png:
+                data = read_qr_image(Path(args.png))
+                qr_code = read_qr_json(data, args.flake)
+                remote = stack.enter_context(qr_code.get_best_remote())
+            elif args.json:
+                json_file = Path(args.json)
+                if json_file.is_file():
+                    data = json.loads(json_file.read_text())
+                else:
+                    data = json.loads(args.json)

+                qr_code = read_qr_json(data, args.flake)
+                remote = stack.enter_context(qr_code.get_best_remote())
+            else:
+                msg = "No --target-host, --json or --png data provided"
+                raise ClanError(msg)
+
+            machine = Machine(name=args.machine, flake=flake)
+            if args.host_key_check:
+                remote.override(host_key_check=args.host_key_check)
+
+            if machine._class_ == "darwin":
+                msg = "Installing macOS machines is not yet supported"
+                raise ClanError(msg)
+
+            if not args.yes:
+                while True:
+                    ask = (
+                        input(f"Install {args.machine} to {remote.target}? [y/N] ")
+                        .strip()
+                        .lower()
+                    )
+                    if ask == "y":
+                        break
+                    if ask == "n" or ask == "":
+                        return None
+                    print(
+                        f"Invalid input '{ask}'. Please enter 'y' for yes or 'n' for no."
+                    )
+
+            if args.identity_file:
+                remote = remote.override(private_key=args.identity_file)
+
+            if args.password:
+                remote = remote.override(password=args.password)
+
+            return run_machine_install(
+                InstallOptions(
+                    machine=machine,
+                    kexec=args.kexec,
+                    phases=args.phases,
+                    debug=args.debug,
+                    no_reboot=args.no_reboot,
+                    build_on=args.build_on if args.build_on is not None else None,
+                    update_hardware_config=HardwareConfig(args.update_hardware_config),
+                ),
+                target_host=remote,
+            )
    except KeyboardInterrupt:
        log.warning("Interrupted by user")
        sys.exit(1)
|||||||
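The rewritten install_command resolves a single Remote from --target-host, a QR image, or inline JSON before handing it to run_machine_install. A minimal sketch of that selection step, assuming the helpers imported above (read_qr_image, read_qr_json, Remote.from_ssh_uri) behave as they are used in the diff; resolve_remote itself is a hypothetical name and not part of clan_cli:

    def resolve_remote(args: argparse.Namespace, stack: ExitStack) -> Remote:
        # Prefer an explicit target host, then a QR image, then inline JSON.
        if args.target_host:
            return Remote.from_ssh_uri(machine_name=args.machine, address=args.target_host)
        if args.png:
            qr_code = read_qr_json(read_qr_image(Path(args.png)), args.flake)
            return stack.enter_context(qr_code.get_best_remote())
        if args.json:
            qr_code = read_qr_json(json.loads(args.json), args.flake)
            return stack.enter_context(qr_code.get_best_remote())
        msg = "No --target-host, --json or --png data provided"
        raise ClanError(msg)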
@@ -144,14 +144,10 @@ def update_command(args: argparse.Namespace) -> None:
         build_host = machine.build_host()
         # Figure out the target host
         if args.target_host:
-            target_host: Host | None = None
-            if args.target_host == "localhost":
-                target_host = LocalHost()
-            else:
-                target_host = Remote.from_ssh_uri(
-                    machine_name=machine.name,
-                    address=args.target_host,
-                ).override(host_key_check=host_key_check)
+            target_host = Remote.from_ssh_uri(
+                machine_name=machine.name,
+                address=args.target_host,
+            ).override(host_key_check=host_key_check)
         else:
             target_host = machine.target_host().override(
                 host_key_check=host_key_check
@@ -16,6 +16,9 @@ def overview_command(args: argparse.Namespace) -> None:
         for peer_name, peer in network["peers"].items():
             print(f"\t{peer_name}: {'[OFFLINE]' if not peer else f'[{peer}]'}")
 
+    if not overview:
+        print("No networks found.")
+
 
 def register_overview_parser(parser: argparse.ArgumentParser) -> None:
     parser.set_defaults(func=overview_command)
@@ -16,8 +16,8 @@ def ping_command(args: argparse.Namespace) -> None:
     networks = networks_from_flake(flake)
 
     if not networks:
-        print("No networks found in the flake")
+        print("No networks found")
+        return
     # If network is specified, only check that network
     if network_name:
         networks_to_check = [(network_name, networks[network_name])]
@@ -1,17 +1,14 @@
 import argparse
-import contextlib
 import json
 import logging
-import textwrap
-from dataclasses import dataclass
+from contextlib import ExitStack
 from pathlib import Path
-from typing import Any, get_args
+from typing import get_args
 
-from clan_lib.cmd import run
 from clan_lib.errors import ClanError
 from clan_lib.machines.machines import Machine
-from clan_lib.network.tor.lib import spawn_tor
-from clan_lib.nix import nix_shell
+from clan_lib.network.network import get_best_remote
+from clan_lib.network.qr_code import read_qr_image, read_qr_json
 from clan_lib.ssh.remote import HostKeyCheck, Remote
 
 from clan_cli.completions import (
@@ -22,180 +19,57 @@ from clan_cli.completions import (
 log = logging.getLogger(__name__)
 
 
-@dataclass
-class DeployInfo:
-    addrs: list[Remote]
-
-    @property
-    def tor(self) -> Remote:
-        """Return a list of Remote objects that are configured for SOCKS5 proxy."""
-        addrs = [addr for addr in self.addrs if addr.socks_port]
-
-        if not addrs:
-            msg = "No socks5 proxy address provided, please provide a socks5 proxy address."
-            raise ClanError(msg)
-
-        if len(addrs) > 1:
-            msg = "Multiple socks5 proxy addresses provided, expected only one."
-            raise ClanError(msg)
-        return addrs[0]
-
-    def overwrite_remotes(
-        self,
-        host_key_check: HostKeyCheck | None = None,
-        private_key: Path | None = None,
-        ssh_options: dict[str, str] | None = None,
-    ) -> "DeployInfo":
-        """Return a new DeployInfo with all Remotes overridden with the given host_key_check."""
-        return DeployInfo(
-            addrs=[
-                addr.override(
-                    host_key_check=host_key_check,
-                    private_key=private_key,
-                    ssh_options=ssh_options,
-                )
-                for addr in self.addrs
-            ]
-        )
-
-    @staticmethod
-    def from_json(data: dict[str, Any], host_key_check: HostKeyCheck) -> "DeployInfo":
-        addrs = []
-        password = data.get("pass")
-
-        for addr in data.get("addrs", []):
-            if isinstance(addr, str):
-                remote = Remote.from_ssh_uri(
-                    machine_name="clan-installer",
-                    address=addr,
-                ).override(host_key_check=host_key_check, password=password)
-                addrs.append(remote)
-            else:
-                msg = f"Invalid address format: {addr}"
-                raise ClanError(msg)
-        if tor_addr := data.get("tor"):
-            remote = Remote.from_ssh_uri(
-                machine_name="clan-installer",
-                address=tor_addr,
-            ).override(
-                host_key_check=host_key_check,
-                socks_port=9050,
-                socks_wrapper=["torify"],
-                password=password,
-            )
-            addrs.append(remote)
-
-        return DeployInfo(addrs=addrs)
-
-    @staticmethod
-    def from_qr_code(picture_file: Path, host_key_check: HostKeyCheck) -> "DeployInfo":
-        cmd = nix_shell(
-            ["zbar"],
-            [
-                "zbarimg",
-                "--quiet",
-                "--raw",
-                str(picture_file),
-            ],
-        )
-        res = run(cmd)
-        data = res.stdout.strip()
-        return DeployInfo.from_json(json.loads(data), host_key_check=host_key_check)
-
-
-def find_reachable_host(deploy_info: DeployInfo) -> Remote | None:
-    # If we only have one address, we have no choice but to use it.
-    if len(deploy_info.addrs) == 1:
-        return deploy_info.addrs[0]
-
-    for addr in deploy_info.addrs:
-        with contextlib.suppress(ClanError):
-            addr.check_machine_ssh_reachable()
-            return addr
-    return None
-
-
-def ssh_shell_from_deploy(
-    deploy_info: DeployInfo, command: list[str] | None = None
-) -> None:
-    if command and len(command) == 1 and command[0].count(" ") > 0:
-        msg = (
-            textwrap.dedent("""
-                It looks like you quoted the remote command.
-                The first argument should be the command to run, not a quoted string.
-            """)
-            .lstrip("\n")
-            .rstrip("\n")
-        )
-        raise ClanError(msg)
-
-    if host := find_reachable_host(deploy_info):
-        host.interactive_ssh(command)
-        return
-
-    log.info("Could not reach host via clearnet 'addrs'")
-    log.info(f"Trying to reach host via tor '{deploy_info}'")
-
-    tor_addrs = [addr for addr in deploy_info.addrs if addr.socks_port]
-    if not tor_addrs:
-        msg = "No tor address provided, please provide a tor address."
-        raise ClanError(msg)
-
-    with spawn_tor():
-        for tor_addr in tor_addrs:
-            log.info(f"Trying to reach host via tor address: {tor_addr}")
-
-            with contextlib.suppress(ClanError):
-                tor_addr.check_machine_ssh_reachable()
-
-                log.info(
-                    "Host reachable via tor address, starting interactive ssh session."
-                )
-                tor_addr.interactive_ssh(command)
-                return
-
-        log.error("Could not reach host via tor address.")
-
-
-def ssh_command_parse(args: argparse.Namespace) -> DeployInfo | None:
-    host_key_check = args.host_key_check
-    deploy = None
-
-    if args.json:
-        json_file = Path(args.json)
-        if json_file.is_file():
-            data = json.loads(json_file.read_text())
-            return DeployInfo.from_json(data, host_key_check)
-        data = json.loads(args.json)
-        deploy = DeployInfo.from_json(data, host_key_check)
-    elif args.png:
-        deploy = DeployInfo.from_qr_code(Path(args.png), host_key_check)
-    elif hasattr(args, "machine") and args.machine:
-        machine = Machine(args.machine, args.flake)
-        target = machine.target_host().override(
-            command_prefix=machine.name, host_key_check=host_key_check
-        )
-        deploy = DeployInfo(addrs=[target])
-    else:
-        return None
-
-    ssh_options = None
-    if hasattr(args, "ssh_option") and args.ssh_option:
-        for name, value in args.ssh_option:
-            ssh_options = {}
-            ssh_options[name] = value
-
-    deploy = deploy.overwrite_remotes(ssh_options=ssh_options)
-
-    return deploy
+def get_tor_remote(remotes: list[Remote]) -> Remote:
+    """Get the Remote configured for SOCKS5 proxy (Tor)."""
+    tor_remotes = [r for r in remotes if r.socks_port]
+
+    if not tor_remotes:
+        msg = "No socks5 proxy address provided, please provide a socks5 proxy address."
+        raise ClanError(msg)
+
+    if len(tor_remotes) > 1:
+        msg = "Multiple socks5 proxy addresses provided, expected only one."
+        raise ClanError(msg)
+
+    return tor_remotes[0]
 
 
 def ssh_command(args: argparse.Namespace) -> None:
-    deploy_info = ssh_command_parse(args)
-    if not deploy_info:
-        msg = "No MACHINE, --json or --png data provided"
-        raise ClanError(msg)
-    ssh_shell_from_deploy(deploy_info, args.remote_command)
+    with ExitStack() as stack:
+        remote: Remote
+        if hasattr(args, "machine") and args.machine:
+            machine = Machine(args.machine, args.flake)
+            remote = stack.enter_context(get_best_remote(machine))
+        elif args.png:
+            data = read_qr_image(Path(args.png))
+            qr_code = read_qr_json(data, args.flake)
+            remote = stack.enter_context(qr_code.get_best_remote())
+        elif args.json:
+            json_file = Path(args.json)
+            if json_file.is_file():
+                data = json.loads(json_file.read_text())
+            else:
+                data = json.loads(args.json)
+
+            qr_code = read_qr_json(data, args.flake)
+            remote = stack.enter_context(qr_code.get_best_remote())
+        else:
+            msg = "No MACHINE, --json or --png data provided"
+            raise ClanError(msg)
+
+        # Convert ssh_option list to dictionary
+        ssh_options = {}
+        if args.ssh_option:
+            for name, value in args.ssh_option:
+                ssh_options[name] = value
+
+        remote = remote.override(
+            host_key_check=args.host_key_check, ssh_options=ssh_options
+        )
+        if args.remote_command:
+            remote.interactive_ssh(args.remote_command)
+        else:
+            remote.interactive_ssh()
 
 
 def register_parser(parser: argparse.ArgumentParser) -> None:
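A short usage sketch for the new get_tor_remote helper, assuming a list of Remote objects in which exactly one entry carries a socks_port (for example the remotes decoded from a QR payload as in the code above):

    remotes = [addr.remote for addr in qr_code.addresses]  # qr_code as returned by read_qr_json
    tor_remote = get_tor_remote(remotes)  # raises ClanError for zero or multiple SOCKS5 remotes
    tor_remote.interactive_ssh()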
@@ -3,15 +3,17 @@ from pathlib import Path
 
 import pytest
 from clan_lib.cmd import RunOpts, run
+from clan_lib.flake import Flake
+from clan_lib.network.qr_code import read_qr_image, read_qr_json
 from clan_lib.nix import nix_shell
 from clan_lib.ssh.remote import Remote
 
-from clan_cli.ssh.deploy_info import DeployInfo, find_reachable_host
 from clan_cli.tests.fixtures_flakes import ClanFlake
 from clan_cli.tests.helpers import cli
 
 
-def test_qrcode_scan(temp_dir: Path) -> None:
+@pytest.mark.with_core
+def test_qrcode_scan(temp_dir: Path, flake: ClanFlake) -> None:
     data = '{"pass":"scabbed-defender-headlock","tor":"qjeerm4r6t55hcfum4pinnvscn5njlw2g3k7ilqfuu7cdt3ahaxhsbid.onion","addrs":["192.168.122.86"]}'
     img_path = temp_dir / "qrcode.png"
     cmd = nix_shell(
@@ -25,63 +27,93 @@ def test_qrcode_scan(temp_dir: Path) -> None:
     run(cmd, RunOpts(input=data.encode()))
 
     # Call the qrcode_scan function
-    deploy_info = DeployInfo.from_qr_code(img_path, "none")
+    json_data = read_qr_image(img_path)
+    qr_code = read_qr_json(json_data, Flake(str(flake.path)))
 
-    host = deploy_info.addrs[0]
-    assert host.address == "192.168.122.86"
-    assert host.user == "root"
-    assert host.password == "scabbed-defender-headlock"
+    # Check addresses
+    addresses = qr_code.addresses
+    assert len(addresses) >= 2  # At least direct and tor
 
-    tor_host = deploy_info.addrs[1]
+    # Find direct connection
+    direct_remote = None
+    for addr in addresses:
+        if addr.network.module_name == "clan_lib.network.direct":
+            direct_remote = addr.remote
+            break
+
+    assert direct_remote is not None
+    assert direct_remote.address == "192.168.122.86"
+    assert direct_remote.user == "root"
+    assert direct_remote.password == "scabbed-defender-headlock"
+
+    # Find tor connection
+    tor_remote = None
+    for addr in addresses:
+        if addr.network.module_name == "clan_lib.network.tor":
+            tor_remote = addr.remote
+            break
+
+    assert tor_remote is not None
     assert (
-        tor_host.address
-        == "qjeerm4r6t55hcfum4pinnvscn5njlw2g3k7ilqfuu7cdt3ahaxhsbid.onion"
-    )
-    assert tor_host.socks_port == 9050
-    assert tor_host.password == "scabbed-defender-headlock"
-    assert tor_host.user == "root"
-    assert (
-        tor_host.address
+        tor_remote.address
         == "qjeerm4r6t55hcfum4pinnvscn5njlw2g3k7ilqfuu7cdt3ahaxhsbid.onion"
     )
+    assert tor_remote.socks_port == 9050
+    assert tor_remote.password == "scabbed-defender-headlock"
+    assert tor_remote.user == "root"
 
 
-def test_from_json() -> None:
+def test_from_json(temp_dir: Path) -> None:
     data = '{"pass":"scabbed-defender-headlock","tor":"qjeerm4r6t55hcfum4pinnvscn5njlw2g3k7ilqfuu7cdt3ahaxhsbid.onion","addrs":["192.168.122.86"]}'
-    deploy_info = DeployInfo.from_json(json.loads(data), "none")
+    flake = Flake(str(temp_dir))
+    qr_code = read_qr_json(json.loads(data), flake)
 
-    host = deploy_info.addrs[0]
-    assert host.password == "scabbed-defender-headlock"
-    assert host.address == "192.168.122.86"
+    # Check addresses
+    addresses = qr_code.addresses
+    assert len(addresses) >= 2  # At least direct and tor
 
-    tor_host = deploy_info.addrs[1]
+    # Find direct connection
+    direct_remote = None
+    for addr in addresses:
+        if addr.network.module_name == "clan_lib.network.direct":
+            direct_remote = addr.remote
+            break
+
+    assert direct_remote is not None
+    assert direct_remote.password == "scabbed-defender-headlock"
+    assert direct_remote.address == "192.168.122.86"
+
+    # Find tor connection
+    tor_remote = None
+    for addr in addresses:
+        if addr.network.module_name == "clan_lib.network.tor":
+            tor_remote = addr.remote
+            break
+
+    assert tor_remote is not None
     assert (
-        tor_host.address
-        == "qjeerm4r6t55hcfum4pinnvscn5njlw2g3k7ilqfuu7cdt3ahaxhsbid.onion"
-    )
-    assert tor_host.socks_port == 9050
-    assert tor_host.password == "scabbed-defender-headlock"
-    assert tor_host.user == "root"
-    assert (
-        tor_host.address
+        tor_remote.address
         == "qjeerm4r6t55hcfum4pinnvscn5njlw2g3k7ilqfuu7cdt3ahaxhsbid.onion"
     )
+    assert tor_remote.socks_port == 9050
+    assert tor_remote.password == "scabbed-defender-headlock"
+    assert tor_remote.user == "root"
 
 
-@pytest.mark.with_core
-def test_find_reachable_host(hosts: list[Remote]) -> None:
-    host = hosts[0]
-
-    uris = ["172.19.1.2", host.ssh_url()]
-    remotes = [Remote.from_ssh_uri(machine_name="some", address=uri) for uri in uris]
-    deploy_info = DeployInfo(addrs=remotes)
-
-    assert deploy_info.addrs[0].address == "172.19.1.2"
-
-    remote = find_reachable_host(deploy_info=deploy_info)
-
-    assert remote is not None
-    assert remote.ssh_url() == host.ssh_url()
+# TODO: This test needs to be updated to use get_best_remote from clan_lib.network.network
+# @pytest.mark.with_core
+# def test_find_reachable_host(hosts: list[Remote]) -> None:
+#     host = hosts[0]
+#
+#     uris = ["172.19.1.2", host.ssh_url()]
+#     remotes = [Remote.from_ssh_uri(machine_name="some", address=uri) for uri in uris]
+#
+#     assert remotes[0].address == "172.19.1.2"
+#
+#     remote = find_reachable_host(remotes=remotes)
+#
+#     assert remote is not None
+#     assert remote.ssh_url() == host.ssh_url()
 
 
 @pytest.mark.with_core
@@ -1,8 +1,8 @@
 from pathlib import Path
 
 import pytest
-from clan_cli.ssh.upload import upload
 from clan_lib.ssh.remote import Remote
+from clan_lib.ssh.upload import upload
 
 
 @pytest.mark.with_core
@@ -699,8 +699,7 @@ def test_api_set_prompts(
     monkeypatch.chdir(flake.path)
 
     run_generators(
-        machine_name="my_machine",
-        base_dir=flake.path,
+        machine=Machine(name="my_machine", flake=Flake(str(flake.path))),
         generators=["my_generator"],
         all_prompt_values={
             "my_generator": {
@@ -714,8 +713,7 @@ def test_api_set_prompts(
     assert store.exists(my_generator, "prompt1")
     assert store.get(my_generator, "prompt1").decode() == "input1"
     run_generators(
-        machine_name="my_machine",
-        base_dir=flake.path,
+        machine=Machine(name="my_machine", flake=Flake(str(flake.path))),
         generators=["my_generator"],
         all_prompt_values={
             "my_generator": {
@@ -725,8 +723,9 @@ def test_api_set_prompts(
     )
     assert store.get(my_generator, "prompt1").decode() == "input2"
 
+    machine = Machine(name="my_machine", flake=Flake(str(flake.path)))
     generators = get_generators(
-        machine_name="my_machine", base_dir=flake.path, include_previous_values=True
+        machine=machine, full_closure=True, include_previous_values=True
     )
     # get_generators should bind the store
     assert generators[0].files[0]._store is not None
@@ -81,6 +81,20 @@ class StoreBase(ABC):
         generators: list["Generator"] | None = None,
         file_name: str | None = None,
     ) -> str | None:
+        """
+        Check the health of the store for the given machine and generators.
+
+        This method detects any issues or inconsistencies in the store that may
+        require fixing (e.g., outdated encryption keys, missing permissions).
+
+        Args:
+            machine: The name of the machine to check
+            generators: List of generators to check. If None, checks all generators for the machine
+            file_name: Optional specific file to check. If provided, only checks that file
+
+        Returns:
+            str | None: An error message describing issues found, or None if everything is healthy
+        """
         return None
 
     def fix(
@@ -89,7 +103,21 @@ class StoreBase(ABC):
         generators: list["Generator"] | None = None,
         file_name: str | None = None,
     ) -> None:
-        return None
+        """
+        Fix any issues with the store for the given machine and generators.
+
+        This method is intended to repair or update the store when inconsistencies
+        are detected (e.g., re-encrypting secrets with new keys, fixing permissions).
+
+        Args:
+            machine: The name of the machine to fix vars for
+            generators: List of generators to fix. If None, fixes all generators for the machine
+            file_name: Optional specific file to fix. If provided, only fixes that file
+
+        Returns:
+            None
+        """
+        return
 
     def backend_collision_error(self, folder: Path) -> None:
         msg = (
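health_check and fix together define a small repair protocol for var stores: health_check reports a problem as a string (or None when everything is fine) and fix applies the corresponding repair. A minimal sketch of a store honouring that contract; DummyStore, _needs_rekey and _rekey are hypothetical names, not part of clan_cli:

    class DummyStore(StoreBase):
        def health_check(self, machine, generators=None, file_name=None):
            # Report a human-readable problem, or None when the store is healthy.
            if self._needs_rekey(machine):
                return f"secrets for {machine} must be re-encrypted"
            return None

        def fix(self, machine, generators=None, file_name=None):
            # Repair whatever health_check reported.
            if self.health_check(machine, generators, file_name) is not None:
                self._rekey(machine)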
@@ -8,7 +8,6 @@ from dataclasses import dataclass, field
 from functools import cached_property
 from pathlib import Path
 from tempfile import TemporaryDirectory
-from typing import TYPE_CHECKING
 
 from clan_cli.completions import (
     add_dynamic_completer,
@@ -23,6 +22,7 @@ from clan_lib.errors import ClanError
 from clan_lib.flake import Flake, require_flake
 from clan_lib.git import commit_files
 from clan_lib.machines.list import list_full_machines
+from clan_lib.machines.machines import Machine
 from clan_lib.nix import nix_config, nix_shell, nix_test_store
 
 from .check import check_vars
@@ -32,10 +32,6 @@ from .var import Var
 
 log = logging.getLogger(__name__)
 
-if TYPE_CHECKING:
-    from clan_lib.flake import Flake
-    from clan_lib.machines.machines import Machine
-
 
 @dataclass(frozen=True)
 class GeneratorKey:
@@ -357,6 +353,11 @@ def _execute_generator(
         if not secret_file.is_file():
             msg = f"did not generate a file for '{file.name}' when running the following command:\n"
             msg += str(final_script)
+            # list all files in the output directory
+            if tmpdir_out.is_dir():
+                msg += "\nOutput files:\n"
+                for f in tmpdir_out.iterdir():
+                    msg += f" - {f.name}\n"
             raise ClanError(msg)
         if file.secret:
             file_path = secret_vars_store.set(
@@ -422,12 +423,25 @@ def _get_previous_value(
     return None
 
 
-def _get_closure(
-    machine: "Machine",
-    generator_name: str | None,
+@API.register
+def get_generators(
+    machine: Machine,
     full_closure: bool,
+    generator_name: str | None = None,
     include_previous_values: bool = False,
 ) -> list[Generator]:
+    """
+    Get generators for a machine, with optional closure computation.
+
+    Args:
+        machine: The machine to get generators for.
+        full_closure: If True, include all dependency generators. If False, only include missing ones.
+        generator_name: Name of a specific generator to get, or None for all generators.
+        include_previous_values: If True, populate prompts with their previous values.
+
+    Returns:
+        List of generators based on the specified selection and closure mode.
+    """
     from . import graph
 
     vars_generators = Generator.get_machine_generators(machine.name, machine.flake)
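Callers of get_generators now pass a Machine and an explicit closure mode instead of machine_name and base_dir. A hedged usage sketch; the flake path is a placeholder:

    machine = Machine(name="my_machine", flake=Flake("/path/to/flake"))  # placeholder path
    generators = get_generators(machine=machine, full_closure=True, include_previous_values=True)
    for generator in generators:
        print(generator.name, [f.name for f in generator.files])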
@@ -457,12 +471,40 @@ def _get_closure(
     return result_closure
 
 
+def _ensure_healthy(
+    machine: "Machine",
+    generators: list[Generator] | None = None,
+) -> None:
+    """
+    Run health checks on the provided generators.
+    Fails if any of the generators' health checks fail.
+    """
+    if generators is None:
+        generators = Generator.get_machine_generators(machine.name, machine.flake)
+
+    pub_healtcheck_msg = machine.public_vars_store.health_check(
+        machine.name, generators
+    )
+    sec_healtcheck_msg = machine.secret_vars_store.health_check(
+        machine.name, generators
+    )
+
+    if pub_healtcheck_msg or sec_healtcheck_msg:
+        msg = f"Health check failed for machine {machine.name}:\n"
+        if pub_healtcheck_msg:
+            msg += f"Public vars store: {pub_healtcheck_msg}\n"
+        if sec_healtcheck_msg:
+            msg += f"Secret vars store: {sec_healtcheck_msg}"
+        raise ClanError(msg)
+
+
 def _generate_vars_for_machine(
     machine: "Machine",
     generators: list[Generator],
     all_prompt_values: dict[str, dict[str, str]],
     no_sandbox: bool = False,
-) -> bool:
+) -> None:
+    _ensure_healthy(machine=machine, generators=generators)
     for generator in generators:
         if check_can_migrate(machine, generator):
             migrate_files(machine, generator)
@@ -475,42 +517,15 @@ def _generate_vars_for_machine(
             prompt_values=all_prompt_values.get(generator.name, {}),
             no_sandbox=no_sandbox,
         )
-    return True
-
-
-@API.register
-def get_generators(
-    machine_name: str,
-    base_dir: Path,
-    include_previous_values: bool = False,
-) -> list[Generator]:
-    """
-    Get the list of generators for a machine, optionally with previous values.
-    If `full_closure` is True, it returns the full closure of generators.
-    If `include_previous_values` is True, it includes the previous values for prompts.
-
-    Args:
-        machine_name (str): The name of the machine.
-        base_dir (Path): The base directory of the flake.
-    Returns:
-        list[Generator]: A list of generators for the machine.
-    """
-
-    return Generator.get_machine_generators(
-        machine_name,
-        Flake(str(base_dir)),
-        include_previous_values,
-    )
 
 
 @API.register
 def run_generators(
-    machine_name: str,
+    machine: Machine,
     all_prompt_values: dict[str, dict[str, str]],
-    base_dir: Path,
     generators: list[str] | None = None,
     no_sandbox: bool = False,
-) -> bool:
+) -> None:
     """Run the specified generators for a machine.
     Args:
         machine_name (str): The name of the machine.
@@ -525,21 +540,19 @@ def run_generators(
         ClanError: If the machine or generator is not found, or if there are issues with
             executing the generator.
     """
-    from clan_lib.machines.machines import Machine
-
-    machine = Machine(name=machine_name, flake=Flake(str(base_dir)))
     if not generators:
         generator_objects = Generator.get_machine_generators(
-            machine_name, machine.flake
+            machine.name, machine.flake
        )
     else:
         generators_set = set(generators)
         generator_objects = [
             g
-            for g in Generator.get_machine_generators(machine_name, machine.flake)
+            for g in Generator.get_machine_generators(machine.name, machine.flake)
             if g.name in generators_set
         ]
-    return _generate_vars_for_machine(
+    _generate_vars_for_machine(
         machine=machine,
         generators=generator_objects,
         all_prompt_values=all_prompt_values,
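run_generators follows the same convention and no longer returns a bool. A sketch of an updated call site, mirroring the test changes earlier in this diff; the flake path and prompt values are placeholders:

    run_generators(
        machine=Machine(name="my_machine", flake=Flake("/path/to/flake")),  # placeholder path
        generators=["my_generator"],
        all_prompt_values={"my_generator": {"prompt1": "input1"}},
    )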
@@ -552,37 +565,14 @@ def create_machine_vars_interactive(
     generator_name: str | None,
     regenerate: bool,
     no_sandbox: bool = False,
-) -> bool:
-    _generator = None
-    if generator_name:
-        generators = Generator.get_machine_generators(machine.name, machine.flake)
-        for generator in generators:
-            if generator.name == generator_name:
-                _generator = generator
-                break
-
-    pub_healtcheck_msg = machine.public_vars_store.health_check(
-        machine.name, [_generator] if _generator else None
-    )
-    sec_healtcheck_msg = machine.secret_vars_store.health_check(
-        machine.name, [_generator] if _generator else None
-    )
-
-    if pub_healtcheck_msg or sec_healtcheck_msg:
-        msg = f"Health check failed for machine {machine.name}:\n"
-        if pub_healtcheck_msg:
-            msg += f"Public vars store: {pub_healtcheck_msg}\n"
-        if sec_healtcheck_msg:
-            msg += f"Secret vars store: {sec_healtcheck_msg}"
-        raise ClanError(msg)
-
-    generators = _get_closure(machine, generator_name, regenerate)
+) -> None:
+    generators = get_generators(machine, regenerate, generator_name)
     if len(generators) == 0:
-        return False
+        return
     all_prompt_values = {}
     for generator in generators:
         all_prompt_values[generator.name] = _ask_prompts(generator)
-    return _generate_vars_for_machine(
+    _generate_vars_for_machine(
         machine,
         generators,
         all_prompt_values,
@@ -596,30 +586,26 @@ def generate_vars(
     regenerate: bool = False,
     no_sandbox: bool = False,
 ) -> None:
-    was_regenerated = False
     for machine in machines:
         errors = []
         try:
-            was_regenerated |= create_machine_vars_interactive(
+            create_machine_vars_interactive(
                 machine,
                 generator_name,
                 regenerate,
                 no_sandbox=no_sandbox,
             )
+            machine.info("All vars are up to date")
         except Exception as exc:
             errors += [(machine, exc)]
     if len(errors) == 1:
         raise errors[0][1]
     if len(errors) > 1:
-        msg = f"Failed to generate facts for {len(errors)} hosts:"
+        msg = f"Failed to generate vars for {len(errors)} hosts:"
         for machine, error in errors:
             msg += f"\n{machine}: {error}"
         raise ClanError(msg) from errors[0][1]
 
-    if not was_regenerated and len(machines) > 0:
-        for machine in machines:
-            machine.info("All vars are already up to date")
-
 
 def generate_command(args: argparse.Namespace) -> None:
     flake = require_flake(args.flake)
@@ -1,6 +1,5 @@
 import argparse
 import logging
-from pathlib import Path
 
 from clan_cli.completions import add_dynamic_completer, complete_machines
 from clan_lib.flake import Flake, require_flake
@@ -20,7 +19,7 @@ def get_machine_vars(base_dir: str, machine_name: str) -> list[Var]:
 
     all_vars = []
 
-    generators = get_generators(base_dir=Path(base_dir), machine_name=machine_name)
+    generators = get_generators(machine=machine, full_closure=True)
     for generator in generators:
         for var in generator.files:
             if var.secret:
@@ -6,11 +6,11 @@ from collections.abc import Iterable
 from pathlib import Path
 from tempfile import TemporaryDirectory
 
-from clan_cli.ssh.upload import upload
 from clan_cli.vars._types import StoreBase
 from clan_cli.vars.generate import Generator, Var
 from clan_lib.flake import Flake
 from clan_lib.ssh.host import Host
+from clan_lib.ssh.upload import upload
 
 log = logging.getLogger(__name__)
 
@@ -22,13 +22,13 @@ from clan_cli.secrets.secrets import (
     has_secret,
 )
 from clan_cli.secrets.sops import load_age_plugins
-from clan_cli.ssh.upload import upload
 from clan_cli.vars._types import StoreBase
 from clan_cli.vars.generate import Generator
 from clan_cli.vars.var import Var
 from clan_lib.errors import ClanError
 from clan_lib.flake import Flake
 from clan_lib.ssh.host import Host
+from clan_lib.ssh.upload import upload
 
 
 @dataclass
@@ -116,8 +116,22 @@ class SecretStore(StoreBase):
         file_name: str | None = None,
     ) -> str | None:
         """
-        Apply local updates to secrets like re-encrypting with missing keys
-        when new users were added.
+        Check if SOPS secrets need to be re-encrypted due to recipient changes.
+
+        This method verifies that all secrets are properly encrypted with the current
+        set of recipient keys. It detects when new users or machines have been added
+        to the clan but secrets haven't been re-encrypted to grant them access.
+
+        Args:
+            machine: The name of the machine to check secrets for
+            generators: List of generators to check. If None, checks all generators for the machine
+            file_name: Optional specific file to check. If provided, only checks that file
+
+        Returns:
+            str | None: A message describing which secrets need updating, or None if all secrets are up-to-date
+
+        Raises:
+            ClanError: If the specified file_name is not found
         """
 
         if generators is None:
@@ -315,6 +329,21 @@ class SecretStore(StoreBase):
         generators: list[Generator] | None = None,
         file_name: str | None = None,
     ) -> None:
+        """
+        Fix sops secrets by re-encrypting them with the current set of recipient keys.
+
+        This method updates secrets when recipients have changed (e.g., new admin users
+        were added to the clan). It ensures all authorized recipients have access to the
+        secrets and removes access from any removed recipients.
+
+        Args:
+            machine: The name of the machine to fix secrets for
+            generators: List of generators to fix. If None, fixes all generators for the machine
+            file_name: Optional specific file to fix. If provided, only fixes that file
+
+        Raises:
+            ClanError: If the specified file_name is not found
+        """
         from clan_cli.secrets.secrets import update_keys
 
         if generators is None:
@@ -4,12 +4,10 @@ import sys
 import urllib.parse
 from enum import Enum
 from pathlib import Path
-from typing import TYPE_CHECKING, Protocol
+from typing import Protocol
 
 from clan_lib.errors import ClanError
-
-if TYPE_CHECKING:
-    from clan_lib.flake import Flake
+from clan_lib.flake import Flake
 
 log = logging.getLogger(__name__)
 
@@ -43,7 +43,7 @@ enable_inhibition() {
 disable_inhibition() {
   local devices=("$@")
   local rules_dir="/run/udev/rules.d"
 
   for device in "${devices[@]}"; do
     local devpath="$device"
     local rule_file="$rules_dir/90-udisks-inhibit-${devpath//\//_}.rules"
@@ -1,12 +0,0 @@
-"""
-DEPRECATED:
-
-Don't use this module anymore
-
-Instead use:
-'clan_lib.persist.inventoryStore'
-
-Which is an abstraction over the inventory
-
-Interacting with 'clan_lib.inventory' is NOT recommended and will be removed
-"""
@@ -22,7 +22,14 @@ log = logging.getLogger(__name__)
 BuildOn = Literal["auto", "local", "remote"]
 
 
-Step = Literal["generators", "upload-secrets", "nixos-anywhere"]
+Step = Literal[
+    "generators",
+    "upload-secrets",
+    "nixos-anywhere",
+    "formatting",
+    "rebooting",
+    "installing",
+]
 
 
 def notify_install_step(current: Step) -> None:
@@ -180,11 +187,13 @@ def run_machine_install(opts: InstallOptions, target_host: Remote) -> None:
     cmd.append(target_host.target)
     if target_host.socks_port:
         # nix copy does not support socks5 proxy, use wrapper command
-        wrapper_cmd = target_host.socks_wrapper or ["torify"]
+        wrapper = target_host.socks_wrapper
+        wrapper_cmd = wrapper.cmd if wrapper else []
+        wrapper_packages = wrapper.packages if wrapper else []
         cmd = nix_shell(
             [
                 "nixos-anywhere",
-                *wrapper_cmd,
+                *wrapper_packages,
             ],
             [*wrapper_cmd, *cmd],
         )
@@ -195,4 +204,22 @@ def run_machine_install(opts: InstallOptions, target_host: Remote) -> None:
         )
 
     notify_install_step("nixos-anywhere")
-    run(cmd, RunOpts(log=Log.BOTH, prefix=machine.name, needs_user_terminal=True))
+    run(
+        [*cmd, "--phases", "kexec"],
+        RunOpts(log=Log.BOTH, prefix=machine.name, needs_user_terminal=True),
+    )
+    notify_install_step("formatting")
+    run(
+        [*cmd, "--phases", "disko"],
+        RunOpts(log=Log.BOTH, prefix=machine.name, needs_user_terminal=True),
+    )
+    notify_install_step("installing")
+    run(
+        [*cmd, "--phases", "install"],
+        RunOpts(log=Log.BOTH, prefix=machine.name, needs_user_terminal=True),
+    )
+    notify_install_step("rebooting")
+    run(
+        [*cmd, "--phases", "reboot"],
+        RunOpts(log=Log.BOTH, prefix=machine.name, needs_user_terminal=True),
+    )
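The installer now drives nixos-anywhere one phase at a time so that each stage can be reported through notify_install_step. The four calls above are equivalent to a loop like this sketch; the loop itself is an illustration, not code from the diff:

    PHASES: list[tuple[str, Step]] = [
        ("kexec", "nixos-anywhere"),
        ("disko", "formatting"),
        ("install", "installing"),
        ("reboot", "rebooting"),
    ]
    for phase, step in PHASES:
        notify_install_step(step)
        run(
            [*cmd, "--phases", phase],
            RunOpts(log=Log.BOTH, prefix=machine.name, needs_user_terminal=True),
        )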
@@ -3,23 +3,19 @@ import logging
 from dataclasses import dataclass
 from functools import cached_property
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, Literal
+from typing import Any, Literal
 
 from clan_cli.facts import public_modules as facts_public_modules
 from clan_cli.facts import secret_modules as facts_secret_modules
 from clan_cli.vars._types import StoreBase
 
 from clan_lib.api import API
-from clan_lib.errors import ClanError
 from clan_lib.flake import ClanSelectError, Flake
 from clan_lib.nix_models.clan import InventoryMachine
 from clan_lib.ssh.remote import Remote
 
 log = logging.getLogger(__name__)
 
-if TYPE_CHECKING:
-    pass
-
 
 @dataclass(frozen=True)
 class Machine:
@@ -125,15 +121,10 @@ class Machine:
         return self.flake.path
 
     def target_host(self) -> Remote:
-        remote = get_machine_host(self.name, self.flake, field="targetHost")
-        if remote is None:
-            msg = f"'targetHost' is not set for machine '{self.name}'"
-            raise ClanError(
-                msg,
-                description="See https://docs.clan.lol/guides/getting-started/update/#setting-the-target-host for more information.",
-            )
-        data = remote.data
-        return data
+        from clan_lib.network.network import get_best_remote
+
+        with get_best_remote(self) as remote:
+            return remote
 
     def build_host(self) -> Remote | None:
         """
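Machine.target_host() now resolves the address through the network layer (get_best_remote) instead of reading targetHost directly and raising when it is unset. A hedged example of the call site; the machine name and flake path are placeholders:

    machine = Machine(name="my_machine", flake=Flake("/path/to/flake"))  # placeholder values
    remote = machine.target_host()  # internally: with get_best_remote(self) as remote: return remote
    print(remote.target)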
Some files were not shown because too many files have changed in this diff.