Compare commits

...

36 Commits

Author SHA1 Message Date
Johannes Kirschbauer
fea4c2dc48 vm_manager: make timesink happy 2025-08-13 20:17:37 +02:00
Johannes Kirschbauer
80bbc6d7a3 cli/clan-inspect: remove unsused command 2025-08-13 19:47:38 +02:00
clan-bot
765bdb262a Merge pull request 'Update clan-core-for-checks in devFlake' (#4731) from update-devFlake-clan-core-for-checks into main 2025-08-13 15:22:38 +00:00
gitea-actions[bot]
05c00fbe82 Update clan-core-for-checks in devFlake 2025-08-13 15:01:35 +00:00
clan-bot
7e97734797 Merge pull request 'Update clan-core-for-checks in devFlake' (#4727) from update-devFlake-clan-core-for-checks into main 2025-08-13 13:57:32 +00:00
gitea-actions[bot]
6384c4654e Update clan-core-for-checks in devFlake 2025-08-13 13:54:09 +00:00
DavHau
72d3ad09a4 vars: refactor - pass Machine objects to run_generators 2025-08-13 12:45:47 +00:00
DavHau
a535450ec0 vars: refactor - unify get_generators and _get_closure 2025-08-13 12:45:47 +00:00
Mic92
aaeb616f82 Merge pull request 'Drop update-private-flake-inputs ci action' (#4730) from init-wireguard-service into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/4730
2025-08-13 12:42:59 +00:00
Jörg Thalheim
434edeaae1 drop update-private-flake-inputs 2025-08-13 14:35:43 +02:00
Mic92
a4efd3cb16 Merge pull request 'update-sops-nix2' (#4719) from update-sops-nix2 into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/4719
2025-08-13 12:34:37 +00:00
Jörg Thalheim
13131ccd6e docs/wireguard: put requirements at the top 2025-08-13 14:34:15 +02:00
hsjobeki
3a8309b01f Merge pull request 'UI/install: add loading animation' (#4723) from install-ui into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/4723
2025-08-13 12:19:23 +00:00
Johannes Kirschbauer
10065a7c8f UI/install: add loading to button 2025-08-13 14:15:52 +02:00
Johannes Kirschbauer
176b54e29d UI/Button: move state out of the button 2025-08-13 14:15:29 +02:00
Jörg Thalheim
be048d8307 morph/flash: use patched clan-core-for-checks
the other one doesn't override flake.lock
2025-08-13 11:41:09 +00:00
gitea-actions[bot]
52fcab30e7 Update sops-nix 2025-08-13 11:41:09 +00:00
Mic92
d3b423328f Merge pull request 'Add wireguard service module' (#3354) from init-wireguard-service into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3354
2025-08-13 10:55:48 +00:00
Jörg Thalheim
1177e84dcc vars/generate: print the files that were found when files are missing
this helps fixing typos in the generator scripts
2025-08-13 12:29:52 +02:00
pinpox
414952dfa3 Add wireguard service module 2025-08-13 12:29:52 +02:00
DavHau
24194011ac vars: refactor - remove unnecessary return values
The boolean return value signaling if anything was ran or not isn't that useful. We are not doing anything with it.
2025-08-13 12:54:05 +07:00
DavHau
4f78a8ff94 Merge pull request 'networking_3' (#4507) from networking_3 into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/4507
2025-08-13 05:20:03 +00:00
DavHau
068b5d4c1e install: fix error message when target host not specified 2025-08-13 12:04:14 +07:00
DavHau
adccef4757 install: fix torify package not available 2025-08-13 12:04:14 +07:00
Qubasa
980d94d47d clan_cli: Improve cli message if no networks present 2025-08-13 12:04:14 +07:00
lassulus
a50b25eea2 clan-cli network: refactor, use new networking in ssh and install commands 2025-08-13 12:04:14 +07:00
lassulus
017989841d refactor: remove DeployInfo class and use Network/Remote directly
- Remove DeployInfo class entirely, replacing with direct Remote usage
- Update parse_qr_json_to_networks to return dict with network and remote
- Refactor all code to work with Remote lists instead of DeployInfo
- Add get_remote_for_machine context manager for network connections
- Update tests to use new Network/Remote structure
2025-08-13 12:04:14 +07:00
lassulus
c14a5fcc69 refactor: move ssh/upload.py from cli to lib
Move the upload module to clan_lib to better organize SSH-related
utilities. Updated all imports across the codebase.
2025-08-13 12:04:14 +07:00
clan-bot
4f60345ba7 Merge pull request 'Update clan-core-for-checks in devFlake' (#4726) from update-devFlake-clan-core-for-checks into main 2025-08-13 00:21:42 +00:00
gitea-actions[bot]
ece48d3b5f Update clan-core-for-checks in devFlake 2025-08-13 00:01:32 +00:00
clan-bot
4eea8d24f0 Merge pull request 'Update clan-core-for-checks in devFlake' (#4725) from update-devFlake-clan-core-for-checks into main 2025-08-12 20:26:23 +00:00
gitea-actions[bot]
49099df3fb Update clan-core-for-checks in devFlake 2025-08-12 20:01:32 +00:00
Johannes Kirschbauer
62ccba9fb5 ui/install: test connection 2025-08-12 21:04:18 +02:00
Johannes Kirschbauer
0b44770f1f UI/install: add loading animation 2025-08-12 20:45:55 +02:00
Jörg Thalheim
88871bea69 clan_lib/flash: remove trailing whitespace 2025-08-12 17:14:52 +02:00
Jörg Thalheim
1006fc755e clanTest/vars-executor: add debugging to finalScript 2025-08-12 12:38:47 +02:00
95 changed files with 1858 additions and 706 deletions

View File

@@ -19,8 +19,7 @@ jobs:
uses: Mic92/update-flake-inputs-gitea@main
with:
# Exclude private flakes and update-clan-core checks flake
exclude-patterns: "checks/impure/flake.nix"
exclude-patterns: "devFlake/private/flake.nix,checks/impure/flake.nix"
auto-merge: true
gitea-token: ${{ secrets.CI_BOT_TOKEN }}
github-token: ${{ secrets.CI_BOT_GITHUB_TOKEN }}

View File

@@ -1,40 +0,0 @@
name: "Update private flake inputs"
on:
repository_dispatch:
workflow_dispatch:
schedule:
- cron: "0 3 * * *" # Run daily at 3 AM
jobs:
update-private-flake:
runs-on: nix
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Update private flake inputs
run: |
# Update the private flake lock file
cd devFlake/private
nix flake update
cd ../..
# Update the narHash
bash ./devFlake/update-private-narhash
- name: Create pull request
env:
CI_BOT_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
run: |
export GIT_AUTHOR_NAME=clan-bot GIT_AUTHOR_EMAIL=clan-bot@clan.lol GIT_COMMITTER_NAME=clan-bot GIT_COMMITTER_EMAIL=clan-bot@clan.lol
# Check if there are any changes
if ! git diff --quiet; then
git add devFlake/private/flake.lock devFlake/private.narHash
git commit -m "Update dev flake"
# Use shared PR creation script
export PR_BRANCH="update-dev-flake"
export PR_TITLE="Update dev flake"
export PR_BODY="This PR updates the dev flake inputs and corresponding narHash."
else
echo "No changes detected in dev flake inputs"
fi

View File

@@ -104,6 +104,7 @@ in
nixos-test-user-firewall-nftables = self.clanLib.test.containerTest ./user-firewall/nftables.nix nixosTestArgs;
service-dummy-test = import ./service-dummy-test nixosTestArgs;
wireguard = import ./wireguard nixosTestArgs;
service-dummy-test-from-flake = import ./service-dummy-test-from-flake nixosTestArgs;
};

View File

@@ -2,7 +2,6 @@
config,
self,
lib,
privateInputs,
...
}:
{
@@ -85,7 +84,7 @@
# Some distros like to automount disks with spaces
machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdc && mount /dev/vdc "/mnt/with spaces"')
machine.succeed("clan flash write --debug --flake ${privateInputs.clan-core-for-checks} --yes --disk main /dev/vdc test-flash-machine-${pkgs.hostPlatform.system}")
machine.succeed("clan flash write --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdc test-flash-machine-${pkgs.hostPlatform.system}")
'';
} { inherit pkgs self; };
};

View File

@@ -208,7 +208,7 @@
# Prepare test flake and Nix store
flake_dir = prepare_test_flake(
temp_dir,
"${privateInputs.clan-core-for-checks}",
"${self.checks.x86_64-linux.clan-core-for-checks}",
"${closureInfo}"
)
@@ -272,7 +272,7 @@
# Prepare test flake and Nix store
flake_dir = prepare_test_flake(
temp_dir,
"${privateInputs.clan-core-for-checks}",
"${self.checks.x86_64-linux.clan-core-for-checks}",
"${closureInfo}"
)

View File

@@ -1,6 +1,5 @@
{
self,
privateInputs,
...
}:
{
@@ -55,7 +54,7 @@
testScript = ''
start_all()
actual.fail("cat /etc/testfile")
actual.succeed("env CLAN_DIR=${privateInputs.clan-core-for-checks} clan machines morph test-morph-template --i-will-be-fired-for-using-this --debug --name test-morph-machine")
actual.succeed("env CLAN_DIR=${self.checks.x86_64-linux.clan-core-for-checks} clan machines morph test-morph-template --i-will-be-fired-for-using-this --debug --name test-morph-machine")
assert actual.succeed("cat /etc/testfile") == "morphed"
'';
} { inherit pkgs self; };

View File

@@ -0,0 +1,115 @@
{
pkgs,
nixosLib,
clan-core,
lib,
...
}:
nixosLib.runTest (
{ ... }:
let
machines = [
"controller1"
"controller2"
"peer1"
"peer2"
"peer3"
];
in
{
imports = [
clan-core.modules.nixosTest.clanTest
];
hostPkgs = pkgs;
name = "wireguard";
clan = {
directory = ./.;
modules."@clan/wireguard" = import ../../clanServices/wireguard/default.nix;
inventory = {
machines = lib.genAttrs machines (_: { });
instances = {
/*
wg-test-one
controller2 controller1
peer2 peer1 peer3
*/
wg-test-one = {
module.name = "@clan/wireguard";
module.input = "self";
roles.controller.machines."controller1".settings = {
endpoint = "192.168.1.1";
};
roles.controller.machines."controller2".settings = {
endpoint = "192.168.1.2";
};
roles.peer.machines = {
peer1.settings.controller = "controller1";
peer2.settings.controller = "controller2";
peer3.settings.controller = "controller1";
};
};
# TODO: Will this actually work with conflicting ports? Can we re-use interfaces?
#wg-test-two = {
# module.name = "@clan/wireguard";
# roles.controller.machines."controller1".settings = {
# endpoint = "192.168.1.1";
# port = 51922;
# };
# roles.peer.machines = {
# peer1 = { };
# };
#};
};
};
};
testScript = ''
start_all()
# Show all addresses
machines = [peer1, peer2, peer3, controller1, controller2]
for m in machines:
m.systemctl("start network-online.target")
for m in machines:
m.wait_for_unit("network-online.target")
m.wait_for_unit("systemd-networkd.service")
print("\n\n" + "="*60)
print("STARTING PING TESTS")
print("="*60)
for m1 in machines:
for m2 in machines:
if m1 != m2:
print(f"\n--- Pinging from {m1.name} to {m2.name}.wg-test-one ---")
m1.wait_until_succeeds(f"ping -c1 {m2.name}.wg-test-one >&2")
'';
}
)

View File

@@ -0,0 +1,6 @@
[
{
"publickey": "age1rnkc2vmrupy9234clyu7fpur5kephuqs3v7qauaw5zeg00jqjdasefn3cc",
"type": "age"
}
]

View File

@@ -0,0 +1,6 @@
[
{
"publickey": "age1t2hhg99d4p2yymuhngcy5ccutp8mvu7qwvg5cdhck303h9e7ha9qnlt635",
"type": "age"
}
]

View File

@@ -0,0 +1,6 @@
[
{
"publickey": "age1jts52rzlqcwjc36jkp56a7fmjn3czr7kl9ta2spkfzhvfama33sqacrzzd",
"type": "age"
}
]

View File

@@ -0,0 +1,6 @@
[
{
"publickey": "age12nqnp0zd435ckp5p0v2fv4p2x4cvur2mnxe8use2sx3fgy883vaq4ae75e",
"type": "age"
}
]

View File

@@ -0,0 +1,6 @@
[
{
"publickey": "age1sglr4zp34drjfydzeweq43fz3uwpul3hkh53lsfa9drhuzwmkqyqn5jegp",
"type": "age"
}
]

View File

@@ -0,0 +1,15 @@
{
"data": "ENC[AES256_GCM,data:zDF0RiBqaawpg+GaFkuLPomJ01Xu+lgY5JfUzaIk2j03XkCzIf8EMrmn6pRtBP3iUjPBm+gQSTQk6GHTONrixA5hRNyETV+UgQw=,iv:zUUCAGZ0cz4Tc2t/HOjVYNsdnrAOtid/Ns5ak7rnyCk=,tag:z43WtNSue4Ddf7AVu21IKA==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBlY1NEdjAzQm5RMFZWY3BJ\nclp6c01FdlZFK3dOSDB4cHc1NTdwMXErMFJFCnIrRVFNZEFYOG1rVUhFd2xsbTJ2\nVkJHNmdOWXlOcHJoQ0QzM1VyZmxmcGcKLS0tIFk1cEx4dFdvNGRwK1FWdDZsb1lR\nV2d1RFZtNzZqVFdtQ1FzNStEcEgyUUkKx8tkxqJz/Ko3xgvhvd6IYiV/lRGmrY13\nUZpYWR9tsQwZAR9dLjCyVU3JRuXeGB1unXC1CO0Ff3R0A/PuuRHh+g==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:19:37Z",
"mac": "ENC[AES256_GCM,data:8RGOUhZ2LGmC9ugULwHDgdMrtdo9vzBm3BJmL4XTuNJKm0NlKfgNLi1E4n9DMQ+kD4hKvcwbiUcwSGE8jZD6sm7Sh3bJi/HZCoiWm/O/OIzstli2NNDBGvQBgyWZA5H+kDjZ6aEi6icNWIlm5gsty7KduABnf5B3p0Bn5Uf5Bio=,iv:sGZp0XF+mgocVzAfHF8ATdlSE/5zyz5WUSRMJqNeDQs=,tag:ymYVBRwF5BOSAu5ONU2qKw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -0,0 +1 @@
../../../users/admin

View File

@@ -0,0 +1,15 @@
{
"data": "ENC[AES256_GCM,data:dHM7zWzqnC1QLRKYpbI2t63kOFnSaQy6ur9zlkLQf17Q03CNrqUsZtdEbwMnLR3llu7eVMhtvVRkXjEkvn3leb9HsNFmtk/DP70=,iv:roEZsBFqRypM106O5sehTzo7SySOJUJgAR738rTtOo8=,tag:VDd9/6uU0SAM7pWRLIUhUQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBKTEVYUmVGbUtOcHZ4cnc3\nKzNETnlxaVRKYTI3eWVHdEoyc3l2SnhsZ1J3CnB2RnZrOXM5Uml6TThDUlZjY25J\nbkJ6eUZ2ckN1NWpNUU9IaE93UDJQdlEKLS0tIC95ZDhkU0R1VHhCdldxdW4zSmps\nN3NqL1cvd05hRTRPdDA3R2pzNUFFajgKS+DJH14fH9AvEAa3PoUC1jEqKAzTmExN\nl32FeHTHbGMo1PKeaFm+Eg0WSpAmFE7beBunc5B73SW30ok6x4FcQw==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:19:47Z",
"mac": "ENC[AES256_GCM,data:77EnuBQyguvkCtobUg8/6zoLHjmeGDrSBZuIXOZBMxdbJjzhRg++qxQjuu6t0FoWATtz7u4Y3/jzUMGffr/N5HegqSq0D2bhv7AqJwBiVaOwd80fRTtM+YiP/zXsCk52Pj/Gadapg208bDPQ1BBDOyz/DrqZ7w//j+ARJjAnugI=,iv:IuTDmJKZEuHXJXjxrBw0gP2t6vpxAYEqbtpnVbavVCY=,tag:4EnpX6rOamtg1O+AaEQahQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -0,0 +1 @@
../../../users/admin

View File

@@ -0,0 +1,15 @@
{
"data": "ENC[AES256_GCM,data:wcSsqxTKiMAnzPwxs5DNjcSdLyjVQ9UOrZxfSbOkVfniwx6F7xz6dLNhaDq7MHQ0vRWpg28yNs7NHrp52bYFnb/+eZsis46WiCw=,iv:B4t1lvS2gC601MtsmZfEiEulLWvSGei3/LSajwFS9Vs=,tag:hnRXlZyYEFfLJUrw1SqbSQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAybUgya2VEdzMvRG1hdkpu\nM2pGNmcyVmcvYVZ1ZjJlY3A1bXFUUUtkMTI0CmJoRFZmejZjN2UxUXNuc1k5WnE2\nNmxIcnpNQ1lJZ3ZKSmhtSlVURXJTSUUKLS0tIGU4Wi9yZ3VYekJkVW9pNWFHblFk\na0gzbTVKUWdSam1sVjRUaUlTdVd5YWMKntRc9yb9VPOTMibp8QM5m57DilP01N/X\nPTQaw8oI40znnHdctTZz7S+W/3Te6sRnkOhFyalWmsKY0CWg/FELlA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:19:58Z",
"mac": "ENC[AES256_GCM,data:8nq+ugkUJxE24lUIySySs/cAF8vnfqr936L/5F0O1QFwNrbpPmKRXkuwa6u0V+187L2952Id20Fym4ke59f3fJJsF840NCKDwDDZhBZ20q9GfOqIKImEom/Nzw6D0WXQLUT3w8EMyJ/F+UaJxnBNPR6f6+Kx4YgStYzCcA6Ahzg=,iv:VBPktEz7qwWBBnXE+xOP/EUVy7/AmNCHPoK56Yt/ZNc=,tag:qXONwOLFAlopymBEf5p4Sw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -0,0 +1 @@
../../../users/admin

View File

@@ -0,0 +1,15 @@
{
"data": "ENC[AES256_GCM,data:4d3ri0EsDmWRtA8vzvpPRLMsSp4MIMKwvtn0n0pRY05uBPXs3KcjnweMPIeTE1nIhqnMR2o2MfLah5TCPpaFax9+wxIt74uacbg=,iv:0LBAldTC/hN4QLCxgXTl6d9UB8WmUTnj4sD2zHQuG2w=,tag:zr/RhG/AU4g9xj9l2BprKw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvV0JnZDhlU1piU1g2cng0\ncytKOEZ6WlZlNGRGUjV3MmVMd2Nzc0ZwelgwCjBGdThCUGlXbVFYdnNoZWpJZ3Vm\nc2xkRXhxS09vdzltSVoxLzhFSVduak0KLS0tIE5DRjJ6cGxiVlB1eElHWXhxN1pJ\nYWtIMDMvb0Z6akJjUzlqeEFsNHkxL2cKpghv/QegnXimeqd9OPFouGM//jYvoVmw\n2d4mLT2JSMkEhpfGcqb6vswhdJfCiKuqr2B4bqwAnPMaykhsm8DFRQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:20:08Z",
"mac": "ENC[AES256_GCM,data:BzlQVAJ7HzcxNPKB3JhabqRX/uU0EElj172YecjmOflHnzz/s9xgfdAfJK/c53hXlX4LtGPnubH7a8jOolRq98zmZeBYE27+WLs2aN7Ufld6mYk90/i7u4CqR+Fh2Kfht04SlUJCjnS5A9bTPwU9XGRHJ0BiOhzTuSMUJTRaPRM=,iv:L50K5zc1o99Ix9nP0pb9PRH+VIN2yvq7JqKeVHxVXmc=,tag:XFLkSCsdbTPxbasDYYxcFQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -0,0 +1 @@
../../../users/admin

View File

@@ -0,0 +1,15 @@
{
"data": "ENC[AES256_GCM,data:qfLm6+g1vYnESCik9uyBeKsY6Ju2Gq3arnn2I8HHNO67Ri5BWbOQTvtz7WT8/q94RwVjv8SGeJ/fsJSpwLSrJSbqTZCPAnYwzzQ=,iv:PnA9Ao8RRELNhNQYbaorstc0KaIXRU7h3+lgDCXZFHk=,tag:VeLgYQYwqthYihIoQTwYiA==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBNWVVQaDJFd0N3WHptRC9Z\nZTgxTWh5bnU1SkpqRWRXZnhPaFhpSVJmVEhrCjFvdHFYenNWaFNrdXlha09iS2xj\nOTZDcUNkcHkvTDUwNjM4Z3gxUkxreUEKLS0tIE5oY3Q2bWhsb2FSQTVGTWVSclJw\nWllrelRwT3duYjJJbTV0d3FwU1VuNlkK2eN3fHFX/sVUWom8TeZC9fddqnSCsC1+\nJRCZsG46uHDxqLcKIfdFWh++2t16XupQYk3kn+NUR/aMc3fR32Uwjw==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:20:18Z",
"mac": "ENC[AES256_GCM,data:nUwsPcP1bsDjAHFjQ1NlVkTwyZY4B+BpzNkMx9gl0rE14j425HVLtlhlLndhRp+XMpnDldQppLAAtSdzMsrw8r5efNgTRl7cu4Fy/b9cHt84k7m0aou5lrGus9SV1bM7/fzC9Xm7CSXBcRzyDGVsKC6UBl1rx+ybh7HyAN05XSo=,iv:It57H+zUUNPkoN1D8sYwyZx5zIFIga7mydhGUHYBCGE=,tag:mBQdYqUpjPknbYa13qESyw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -0,0 +1 @@
../../../users/admin

View File

@@ -0,0 +1,4 @@
{
"publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"type": "age"
}

View File

@@ -0,0 +1 @@
../../../../../../sops/machines/controller1

View File

@@ -0,0 +1,19 @@
{
"data": "ENC[AES256_GCM,data:noe913+28JWkoDkGGMu++cc1+j5NPDoyIhWixdsowoiVO3cTWGkZ88SUGO5D,iv:ynYMljwqMcBdk8RpVcw/2Jflg2RCF28r4fKUgIAF8B4=,tag:+TsXDJgfUhKgg4iQVXKKlQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBhYVRReTZBQ05GYmVBVjhS\nNXM5aFlhVzZRaVl6UHl6S3JnMC9Sb1dwZ1ZjCmVuS2dEVExYZWROVklUZWFCSnM2\nZnlxbVNseTM2c0Q0TjhsT3NzYmtqREUKLS0tIHBRTFpvVGt6d1cxZ2lFclRsUVhZ\nZDlWaG9PcXVrNUZKaEgxWndjUDVpYjgKt0eOhAgcYdkg9JSEakx4FjChLTn3pis+\njOkuGd4JfXMKcwC7vJV5ygQBxzVJSBw+RucP7sYCBPK0m8Voj94ntw==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1rnkc2vmrupy9234clyu7fpur5kephuqs3v7qauaw5zeg00jqjdasefn3cc",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6MFJqNHNraG9DSnJZMFdz\ndU8zVXNTamxROFd1dWtuK2RiekhPdHhleVhFCi8zNWJDNXJMRUlDdjc4Q0UycTIz\nSGFGSmdnNU0wZWlDaTEwTzBqWjh6SFkKLS0tIEJOdjhOMDY2TUFLb3RPczNvMERx\nYkpSeW5VOXZvMlEvdm53MDE3aUFTNjgKyelSTjrTIR9I3rJd3krvzpsrKF1uGs4J\n4MtmQj0/3G+zPYZVBx7b3HF6B3f1Z7LYh05+z7nCnN/duXyPnDjNcg==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:19:37Z",
"mac": "ENC[AES256_GCM,data:+DmIkPG/H6tCtf8CvB98E1QFXv08QfTcCB3CRsi+XWnIRBkryRd/Au9JahViHMdK7MED8WNf84NWTjY2yH4y824/DjI8XXNMF1iVMo0CqY42xbVHtUuhXrYeT+c8CyEw+M6zfy1jC0+Bm3WQWgagz1G6A9SZk3D2ycu0N08+axA=,iv:kwBjTYebIy5i2hagAajSwwuKnSkrM9GyrnbeQXB2e/w=,tag:EgKJ5gVGYj1NGFUduxLGfg==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -0,0 +1 @@
../../../../../../sops/users/admin

View File

@@ -0,0 +1 @@
lQfR7GhivN87XoXruTGOPjVPhNu1Brt//wyc3pdwE20=

View File

@@ -0,0 +1 @@
7470bb5c79df224a9b7f5a2259acd2e46db763c27e24cb3416c8b591cb328077

View File

@@ -0,0 +1 @@
fd51:19c1:3b:f700

View File

@@ -0,0 +1 @@
../../../../../../sops/machines/controller2

View File

@@ -0,0 +1,19 @@
{
"data": "ENC[AES256_GCM,data:2kehACgvNgoYGPwnW7p86BR0yUu689Chth6qZf9zoJtuTY9ATS68dxDyBc5S,iv:qb2iDUtExegTeN3jt6SA8RnU61W5GDDhn56QXiQT4gw=,tag:pSGPICX5p6qlZ1WMVoIEYQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBSTTR5TDY4RE9VYmlCK1dL\nWkVRcVZqVDlsbmQvUlJmdzF2b1Z1S0k3NngwCkFWNzRVaERtSmFsd0o2aFJOb0ZX\nSU9yUnVaNi9IUjJWeGRFcEpDUXo5WkEKLS0tIEczNkxiYnJsTWRoLzFhQVF1M21n\nWnZEdGV1N2N5d1FZQkJUQ1IrdGFLblkKPTpha2bxS8CCAMXWTDKX/WOcdvggaP3Y\nqewyahDNzb4ggP+LNKp55BtwFjdvoPoq4BpYOOgMRbQMMk+H1o9WFw==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1t2hhg99d4p2yymuhngcy5ccutp8mvu7qwvg5cdhck303h9e7ha9qnlt635",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBYcEZ6Tzk3M0pkV0tOdTBj\nenF2a0tHNnhBa0NrazMwV1VBbXBZR3pzSHpvCnBZOEU0VlFHS1FHcVpTTDdPczVV\nV0RFSlZ0VmIzWGoydEdKVXlIUE9OOEkKLS0tIFZ0cWVBR1loeVlWa2c4U3oweXE2\ncm1ja0JCS3U5Nk41dlAzV2NabDc2bDQKdgCDNnpRZlFPnEGlX6fo0SQX4yOB+E6r\ntnSwofR3xxZvkyme/6JJU5qBZXyCXEAhKMRkFyvJANXzMJAUo/Osow==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:19:48Z",
"mac": "ENC[AES256_GCM,data:e3EkL8vwRhLsec83Zi9DE3PKT+4RwgiffpN4QHcJKTgmDW6hzizWc5kAxbNWGJ9Qqe6sso2KY7tc+hg1lHEsmzjCbg153p8h+7lVI2XT6adi/CS8WZ2VpeL+0X9zDQCjqHmrESZAYFBdkLqO4jucdf0Pc3CKKD+N3BDDTwSUvHM=,iv:xvR7dJL8sdYen00ovrYT8PNxhB9XxSWDSRz1IK23I/o=,tag:OyhAvllBgfAp3eGeNpR/Nw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -0,0 +1 @@
../../../../../../sops/users/admin

View File

@@ -0,0 +1 @@
5Z7gbLFbXpEFfomW2pKyZBpZN5xvUtiqrIL0GVfNtQ8=

View File

@@ -0,0 +1 @@
c3672fdb9fb31ddaf6572fc813cf7a8fe50488ef4e9d534c62d4f29da60a1a99

View File

@@ -0,0 +1 @@
fd51:19c1:c1:aa00

View File

@@ -0,0 +1 @@
../../../../../../sops/machines/peer1

View File

@@ -0,0 +1,19 @@
{
"data": "ENC[AES256_GCM,data:b+akw85T3D9xc75CPLHucR//k7inpxKDvgpR8tCNKwNDRVjVHjcABhfZNLXW,iv:g11fZE8UI0MVh9GKdjR6leBlxa4wN7ZubozXG/VlBbw=,tag:0YkzWCW3zJ3Mt3br/jmTYw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1jts52rzlqcwjc36jkp56a7fmjn3czr7kl9ta2spkfzhvfama33sqacrzzd",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBXWkJUR0pIa2xOSEw2dThm\nYlNuOHZCVW93Wkc5LzE4YmpUTHRkZlk3ckc4CnN4M3ZRMWNFVitCT3FyWkxaR0di\nb0NmSXFhRHJmTWg0d05OcWx1LytscEEKLS0tIEtleTFqU3JrRjVsdHpJeTNuVUhF\nWEtnOVlXVXRFamFSak5ia2F2b0JiTzAKlhOBZvZ4AN+QqAYQXvd6YNmgVS4gtkWT\nbV3bLNTgwtrDtet9NDHM8vdF+cn5RZxwFfgmTbDEow6Zm8EXfpxj/g==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6YVYyQkZqMTJYQTlyRG5Y\nbnJ2UkE1TS9FZkpSa2tQbk1hQjViMi9OcGk0CjFaZUdjU3JtNzh0bDFXdTdUVW4x\nanFqZHZjZjdzKzA2MC8vTWh3Uy82UGcKLS0tIDhyOFl3UGs3czdoMlpza3UvMlB1\nSE90MnpGc05sSCtmVWg0UVNVdmRvN2MKHlCr4U+7bsoYb+2fgT4mEseZCEjxrtLu\n55sR/4YH0vqMnIBnLTSA0e+WMrs3tQfseeJM5jY/ZNnpec1LbxkGTg==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:19:58Z",
"mac": "ENC[AES256_GCM,data:gEoEC9D2Z7k5F8egaY1qPXT5/96FFVsyofSBivQ28Ir/9xHX2j40PAQrYRJUWsk/GAUMOyi52Wm7kPuacw+bBcdtQ0+MCDEmjkEnh1V83eZ/baey7iMmg05uO92MYY5o4e7ZkwzXoAeMCMcfO0GqjNvsYJHF1pSNa+UNDj+eflw=,iv:dnIYpvhAdvUDe9md53ll42krb0sxcHy/toqGc7JFxNA=,tag:0WkZU7GeKMD1DQTYaI+1dg==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -0,0 +1 @@
../../../../../../sops/users/admin

View File

@@ -0,0 +1 @@
juK7P/92N2t2t680aLIRobHc3ts49CsZBvfZOyIKpUc=

View File

@@ -0,0 +1 @@
b36142569a74a0de0f9b229f2a040ae33a22d53bef5e62aa6939912d0cda05ba

View File

@@ -0,0 +1 @@
6987:50a0:9b93:4337

View File

@@ -0,0 +1 @@
../../../../../../sops/machines/peer2

View File

@@ -0,0 +1,19 @@
{
"data": "ENC[AES256_GCM,data:apX2sLwtq6iQgLJslFwiRMNBUe0XLzLQbhKfmb2pKiJG7jGNHUgHJz3Ls4Ca,iv:HTDatm3iD5wACTkkd3LdRNvJfnfg75RMtn9G6Q7Fqd4=,tag:Mfehlljnes5CFD1NJdk27A==,type:str]",
"sops": {
"age": [
{
"recipient": "age12nqnp0zd435ckp5p0v2fv4p2x4cvur2mnxe8use2sx3fgy883vaq4ae75e",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBVZzFyMUZsd2V2VWxOUmhP\nZE8yZTc4Q0RkZisxR25NemR1TzVDWmJZVjBVClA1MWhsU0xzSG16aUx3cWFWKzlG\nSkxrT09OTkVqLzlWejVESE1QWHVJaFkKLS0tIGxlaGVuWU43RXErNTB3c3FaUnM3\nT0N5M253anZkbnFkZWw2VHA0eWhxQW8Kd1PMtEX1h0Hd3fDLMi++gKJkzPi9FXUm\n+uYhx+pb+pJM+iLkPwP/q6AWC7T0T4bHfekkdzxrbsKMi73x/GrOiw==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBqVzRIMWdlNjVwTURyMFkv\nSUhiajZkZVNuWklRYit6cno4UzNDa2szOFN3CkQ2TWhHb25pbmR1MlBsRXNLL2lx\ncVZ3c3BsWXN2aS9UUVYvN3I4S0xUSmMKLS0tIE5FV0U5aXVUZk9XL0U0Z2ZSNGd5\nbU9zY3IvMlpSNVFLYkRNQUpUYVZOWFUK7j4Otzb8CJTcT7aAj9/irxHEDXh1HkTg\nzz7Ho8/ZncNtaCVHlHxjTgVW9d5aIx8fSsV9LRCFwHMtNzvwj1Nshg==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:20:08Z",
"mac": "ENC[AES256_GCM,data:e7WNVEz78noHBiz6S3A6qNfop+yBXB3rYN0k4GvaQKz3b99naEHuqIF8Smzzt4XrbbiPKu2iLa5ddLBlqqsi32UQUB8JS9TY7hvW8ol+jpn0VxusGCXW9ThdDEsM/hXiPyr331C73zTvbOYI1hmcGMlJL9cunVRO9rkMtEqhEfo=,iv:6zt7wjIs1y5xDHNK+yLOwoOuUpY7/dOGJGT6UWAFeOg=,tag:gzFTgoxhoLzUV0lvzOhhfg==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -0,0 +1 @@
../../../../../../sops/users/admin

View File

@@ -0,0 +1 @@
XI9uSaQRDBCb82cMnGzGJcbqRfDG/IXZobyeL+kV03k=

View File

@@ -0,0 +1 @@
360f9fce4a984eb87ce2a673eb5341ecb89c0f62126548d45ef25ff5243dd646

View File

@@ -0,0 +1 @@
3b21:3ced:003e:89b3

View File

@@ -0,0 +1 @@
../../../../../../sops/machines/peer3

View File

@@ -0,0 +1,19 @@
{
"data": "ENC[AES256_GCM,data:Gluvjes/3oH5YsDq00JDJyJgoEFcj56smioMArPSt309MDGExYX2QsCzeO1q,iv:oBBJRDdTj/1dWEvzhdFKQ2WfeCKyavKMLmnMbqnU5PM=,tag:2WNFxKz2dWyVcybpm5N4iw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBtQWpjRmhZTFdPa2VSZkFN\nbUczMlY5bDBmMTdoMy8xcWxMaXpWVitMZGdjCnRWb2Y3eGpHU1hmNHRJVFBqbU5w\nVEZGdUIrQXk0U0dUUEZ6bE5EMFpTRHMKLS0tIGpYSmZmQThJUTlvTHpjc05ZVlM4\nQWhTOWxnUHZnYlJ3czE3ZUJ0L3ozWTQK3a7N0Zpzo4sUezYveqvKR49RUdJL23eD\n+cK5lk2xbtj+YHkeG+dg7UlHfDaicj0wnFH1KLuWmNd1ONa6eQp3BQ==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1sglr4zp34drjfydzeweq43fz3uwpul3hkh53lsfa9drhuzwmkqyqn5jegp",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA3a2FOWlVsSkdnendrYmUz\ndEpuL1hZSWNFTUtDYm14S3V1aW9KS3hsazJRCkp2SkFFbi9hbGJpNks1MlNTL0s5\nTk5pcUMxaEJobkcvWmRGeU9jMkdNdzAKLS0tIDR6M0Y5eE1ETHJJejAzVW1EYy9v\nZCtPWHJPUkhuWnRzSGhMUUtTa280UmMKXvtnxyop7PmRvTOFkV80LziDjhGh93Pf\nYwhD/ByD/vMmr21Fd6PVHOX70FFT30BdnMc1/wt7c/0iAw4w4GoQsA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-08-13T09:20:18Z",
"mac": "ENC[AES256_GCM,data:3nXMTma0UYXCco+EM8UW45cth7DVMboFBKyesL86GmaG6OlTkA2/25AeDrtSVO13a5c2jC6yNFK5dE6pSe5R9f0BoDF7d41mgc85zyn+LGECNWKC6hy6gADNSDD6RRuV1S3FisFQl1F1LD8LiSWmg/XNMZzChNlHYsCS8M+I84g=,iv:pu5VVXAVPmVoXy0BJ+hq5Ar8R0pZttKSYa4YS+dhDNc=,tag:xp1S/4qExnxMTGwhfLJrkA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -0,0 +1 @@
../../../../../../sops/users/admin

View File

@@ -0,0 +1 @@
t6qN4VGLR+VMhrBDNKQEXZVyRsEXs1/nGFRs5DI82F8=

View File

@@ -0,0 +1 @@
e3facc99b73fe029d4c295f71829a83f421f38d82361cf412326398175da162a

View File

@@ -0,0 +1 @@
e42b:bf85:33f4:f0b1

View File

@@ -0,0 +1,217 @@
# Wireguard VPN Service
This service provides a Wireguard-based VPN mesh network with automatic IPv6 address allocation and routing between clan machines.
## Overview
The wireguard service creates a secure mesh network between clan machines using two roles:
- **Controllers**: Machines with public endpoints that act as connection points and routers
- **Peers**: Machines that connect through controllers to access the network
## Requirements
- Controllers must have a publicly accessible endpoint (domain name or static IP)
- Peers must be on networks that do not block UDP traffic (port 51820 by default, configurable)
## Features
- Automatic IPv6 address allocation using ULA (Unique Local Address) prefixes
- Full mesh connectivity between all machines
- Automatic key generation and distribution
- IPv6 forwarding on controllers for inter-peer communication
- Support for multiple controllers for redundancy
## Network Architecture
### IPv6 Address Allocation
- Base network: `/40` ULA prefix (deterministically generated from instance name)
- Controllers: Each gets a `/56` subnet from the base `/40`
- Peers: Each gets a unique 64-bit host suffix that is used in ALL controller subnets
### Addressing Design
- Each peer generates a unique host suffix (e.g., `:8750:a09b:0:1`)
- This suffix is appended to each controller's `/56` prefix to create unique addresses
- Example: peer1 with suffix `:8750:a09b:0:1` gets:
- `fd51:19c1:3b:f700:8750:a09b:0:1` in controller1's subnet
- `fd51:19c1:c1:aa00:8750:a09b:0:1` in controller2's subnet
- Controllers allow each peer's `/96` subnet for routing flexibility
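To make the addressing concrete, here is a minimal Python sketch (an editorial illustration, not part of this README or the service code), using the example values from above:

```python
import ipaddress

# Example values from above: controller1's /56 prefix and peer1's 64-bit suffix,
# as produced by the service's vars generators.
controller_prefix = "fd51:19c1:3b:f700"  # controller's generated "prefix" value
peer_suffix = "8750:a09b:0:1"            # peer's generated "suffix" value

# The peer's address inside this controller's subnet is simply prefix + suffix.
peer_ip = ipaddress.IPv6Address(f"{controller_prefix}:{peer_suffix}")
print(peer_ip)  # fd51:19c1:3b:f700:8750:a09b:0:1

# The /96 range a controller allows for this peer contains that address.
peer_route = ipaddress.IPv6Network(f"{peer_ip}/96", strict=False)
print(peer_route)  # fd51:19c1:3b:f700:8750:a09b::/96
```

In the service itself, these two values are read from the `wireguard-network-<instance>` vars generators shown later in this change.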
### Connectivity
- Peers use a single WireGuard interface with multiple IPs (one per controller subnet)
- Controllers connect to ALL other controllers and ALL peers on a single interface
- Controllers have IPv6 forwarding enabled to route traffic between peers
- All traffic between peers flows through controllers
- Symmetric routing is maintained as each peer has consistent IPs across all controllers
### Example Network Topology
```mermaid
graph TB
subgraph Controllers
C1[controller1<br/>endpoint: vpn1.example.com<br/>fd51:19c1:3b:f700::/56]
C2[controller2<br/>endpoint: vpn2.example.com<br/>fd51:19c1:c1:aa00::/56]
end
subgraph Peers
P1[peer1<br/>designated: controller1]
P2[peer2<br/>designated: controller2]
P3[peer3<br/>designated: controller1]
end
%% Controllers connect to each other
C1 <--> C2
%% All peers connect to all controllers
P1 <--> C1
P1 <--> C2
P2 <--> C1
P2 <--> C2
P3 <--> C1
P3 <--> C2
%% Peer-to-peer traffic flows through controllers
P1 -.->|via controllers| P3
P1 -.->|via controllers| P2
P2 -.->|via controllers| P3
classDef controller fill:#f9f,stroke:#333,stroke-width:4px
classDef peer fill:#bbf,stroke:#333,stroke-width:2px
class C1,C2 controller
class P1,P2,P3 peer
```
## Configuration
### Basic Setup with Single Controller
```nix
# In your flake.nix or inventory
{
services.wireguard.server1 = {
roles.controller = {
# Public endpoint where this controller can be reached
endpoint = "vpn.example.com";
# Optional: Change the UDP port (default: 51820)
port = 51820;
};
};
services.wireguard.laptop1 = {
roles.peer = {
# No configuration needed if only one controller exists
};
};
}
```
### Multiple Controllers Setup
```nix
{
services.wireguard.server1 = {
roles.controller = {
endpoint = "vpn1.example.com";
};
};
services.wireguard.server2 = {
roles.controller = {
endpoint = "vpn2.example.com";
};
};
services.wireguard.laptop1 = {
roles.peer = {
# When multiple controllers exist, you must specify which controller's subnet is exposed as the default in /etc/hosts
controller = "server1";
};
};
}
```
### Advanced Options
### Automatic Hostname Resolution
The wireguard service automatically adds entries to `/etc/hosts` for all machines in the network. Each machine is accessible via its hostname in the format `<machine-name>.<instance-name>`.
For example, with an instance named `vpn`:
- `server1.vpn` - resolves to server1's IPv6 address
- `laptop1.vpn` - resolves to laptop1's IPv6 address
This allows machines to communicate using hostnames instead of IPv6 addresses:
```bash
# Ping another machine by hostname
ping6 server1.vpn
# SSH to another machine
ssh user@laptop1.vpn
```
## Troubleshooting
### Check Wireguard Status
```bash
sudo wg show
```
### Verify IP Addresses
```bash
ip addr show dev <instance-name>
```
### Check Routing
```bash
ip -6 route show dev <instance-name>
```
### Interface Fails to Start: "Address already in use"
If you see this error in your logs:
```
wireguard: Could not bring up interface, ignoring: Address already in use
```
This means the configured port (default: 51820) is already in use by another service or wireguard instance. Solutions:
1. **Check for conflicting wireguard instances:**
```bash
sudo wg show
sudo ss -ulnp | grep 51820
```
2. **Use a different port:**
```nix
services.wireguard.myinstance = {
roles.controller = {
endpoint = "vpn.example.com";
port = 51821; # Use a different port
};
};
```
3. **Ensure unique ports across multiple instances:**
If you have multiple wireguard instances on the same machine, each must use a different port.
### Key Management
Keys are automatically generated and stored in the clan vars system. To regenerate keys:
```bash
# Regenerate keys for a specific machine and instance
clan vars generate --service wireguard-keys-<instance-name> --regenerate --machine <machine-name>
# Apply the new keys
clan machines update <machine-name>
```
## Security Considerations
- All traffic is encrypted using Wireguard's modern cryptography
- Private keys never leave the machines they're generated on
- Public keys are distributed through the clan vars system
- Controllers must have publicly accessible endpoints
- Firewall rules are automatically configured for the Wireguard ports

View File

@@ -0,0 +1,456 @@
/*
There are two roles: peers and controllers:
- Every controller has an endpoint set
- There can be multiple peers
- There has to be one or more controllers
- Peers connect to ALL controllers (full mesh)
- If only one controller exists, peers automatically use it for IP allocation
- If multiple controllers exist, peers must specify which controller's subnet to use
- Controllers have IPv6 forwarding enabled, so every peer and controller can reach
everyone else, via extra controller hops if necessary
Example:
controller2 controller1
peer2 peer1 peer3
Network Architecture:
IPv6 Address Allocation:
- Base network: /40 ULA prefix (generated from instance name)
- Controllers: Each gets a /56 subnet from the base /40
- Peers: Each gets a unique host suffix that is used in ALL controller subnets
Address Assignment:
- Each peer generates a unique 64-bit host suffix (e.g., :8750:a09b:0:1)
- This suffix is appended to each controller's /56 prefix
- Example: peer1 with suffix :8750:a09b:0:1 gets:
- fd51:19c1:3b:f700:8750:a09b:0:1 in controller1's subnet
- fd51:19c1:c1:aa00:8750:a09b:0:1 in controller2's subnet
Peers: Use a SINGLE interface that:
- Connects to ALL controllers
- Has multiple IPs, one in each controller's subnet (with /56 prefix)
- Routes to each controller's /56 subnet via that controller
- allowedIPs: Each controller's /56 subnet
- No routing conflicts due to unique IPs per subnet
Controllers: Use a SINGLE interface that:
- Connects to ALL peers and ALL other controllers
- Gets a /56 subnet from the base /40 network
- Has IPv6 forwarding enabled for routing between peers
- allowedIPs:
- For peers: A /96 range containing the peer's address in this controller's subnet
- For other controllers: The controller's /56 subnet
*/
{ ... }:
let
# Shared module for extraHosts configuration
extraHostsModule =
{
instanceName,
settings,
roles,
config,
lib,
...
}:
{
networking.extraHosts =
let
domain = if settings.domain == null then instanceName else settings.domain;
# Controllers use their subnet's ::1 address
controllerHosts = lib.mapAttrsToList (
name: _value:
let
prefix = builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${name}/wireguard-network-${instanceName}/prefix/value"
);
# Controller IP is always ::1 in their subnet
ip = prefix + "::1";
in
"${ip} ${name}.${domain}"
) roles.controller.machines;
# Peers use their suffix in their designated controller's subnet only
peerHosts = lib.mapAttrsToList (
peerName: peerValue:
let
peerSuffix = builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${peerName}/wireguard-network-${instanceName}/suffix/value"
);
# Determine designated controller
designatedController =
if (builtins.length (builtins.attrNames roles.controller.machines) == 1) then
(builtins.head (builtins.attrNames roles.controller.machines))
else
peerValue.settings.controller;
controllerPrefix = builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${designatedController}/wireguard-network-${instanceName}/prefix/value"
);
peerIP = controllerPrefix + ":" + peerSuffix;
in
"${peerIP} ${peerName}.${domain}"
) roles.peer.machines;
in
builtins.concatStringsSep "\n" (controllerHosts ++ peerHosts);
};
# Shared interface options
sharedInterface =
{ lib, ... }:
{
options.port = lib.mkOption {
type = lib.types.int;
example = 51820;
default = 51820;
description = ''
Port for the wireguard interface
'';
};
options.domain = lib.mkOption {
type = lib.types.nullOr lib.types.str;
defaultText = lib.literalExpression "instanceName";
default = null;
description = ''
Domain suffix to use for hostnames in /etc/hosts.
Defaults to the instance name.
'';
};
};
in
{
_class = "clan.service";
manifest.name = "clan-core/wireguard";
manifest.description = "Wireguard-based VPN mesh network with automatic IPv6 address allocation";
manifest.categories = [
"System"
"Network"
];
manifest.readme = builtins.readFile ./README.md;
# Peer options and configuration
roles.peer = {
interface =
{ lib, ... }:
{
imports = [ sharedInterface ];
options.controller = lib.mkOption {
type = lib.types.str;
example = "controller1";
description = ''
Machine name of the controller to attach to
'';
};
};
perInstance =
{
instanceName,
settings,
roles,
machine,
...
}:
{
# Set default domain to instanceName
# Peers connect to all controllers
nixosModule =
{
config,
pkgs,
lib,
...
}:
{
imports = [
(extraHostsModule {
inherit
instanceName
settings
roles
config
lib
;
})
];
# Network allocation generator for this peer - generates host suffix
clan.core.vars.generators."wireguard-network-${instanceName}" = {
files.suffix.secret = false;
runtimeInputs = with pkgs; [
python3
];
# Invalidate on hostname changes
validation.hostname = machine.name;
script = ''
${pkgs.python3}/bin/python3 ${./ipv6_allocator.py} "$out" "${instanceName}" peer "${machine.name}"
'';
};
# Single wireguard interface with multiple IPs
networking.wireguard.interfaces."${instanceName}" = {
ips =
# Get this peer's suffix
let
peerSuffix =
config.clan.core.vars.generators."wireguard-network-${instanceName}".files.suffix.value;
in
# Create an IP in each controller's subnet
lib.mapAttrsToList (
ctrlName: _:
let
controllerPrefix = builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${ctrlName}/wireguard-network-${instanceName}/prefix/value"
);
peerIP = controllerPrefix + ":" + peerSuffix;
in
"${peerIP}/56"
) roles.controller.machines;
privateKeyFile =
config.clan.core.vars.generators."wireguard-keys-${instanceName}".files."privatekey".path;
# Connect to all controllers
peers = lib.mapAttrsToList (name: value: {
publicKey = (
builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${name}/wireguard-keys-${instanceName}/publickey/value"
)
);
# Allow each controller's /56 subnet
allowedIPs = [
"${
builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${name}/wireguard-network-${instanceName}/prefix/value"
)
}::/56"
];
endpoint = "${value.settings.endpoint}:${toString value.settings.port}";
persistentKeepalive = 25;
}) roles.controller.machines;
};
};
};
};
# Controller options and configuration
roles.controller = {
interface =
{ lib, ... }:
{
imports = [ sharedInterface ];
options.endpoint = lib.mkOption {
type = lib.types.str;
example = "vpn.clan.lol";
description = ''
Endpoint where the controller can be reached
'';
};
};
perInstance =
{
settings,
instanceName,
roles,
machine,
...
}:
{
# Controllers connect to all peers and other controllers
nixosModule =
{
config,
pkgs,
lib,
...
}:
let
allOtherControllers = lib.filterAttrs (name: _v: name != machine.name) roles.controller.machines;
allPeers = roles.peer.machines;
in
{
imports = [
(extraHostsModule {
inherit
instanceName
settings
roles
config
lib
;
})
];
# Network allocation generator for this controller
clan.core.vars.generators."wireguard-network-${instanceName}" = {
files.prefix.secret = false;
runtimeInputs = with pkgs; [
python3
];
# Invalidate on network or hostname changes
validation.hostname = machine.name;
script = ''
${pkgs.python3}/bin/python3 ${./ipv6_allocator.py} "$out" "${instanceName}" controller "${machine.name}"
'';
};
# Enable ip forwarding, so wireguard peers can reach each other
boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = 1;
networking.firewall.allowedUDPPorts = [ settings.port ];
# Single wireguard interface
networking.wireguard.interfaces."${instanceName}" = {
listenPort = settings.port;
ips = [
# Controller uses ::1 in its /56 subnet but with /40 prefix for proper routing
"${config.clan.core.vars.generators."wireguard-network-${instanceName}".files.prefix.value}::1/40"
];
privateKeyFile =
config.clan.core.vars.generators."wireguard-keys-${instanceName}".files."privatekey".path;
# Connect to all peers and other controllers
peers = lib.mapAttrsToList (
name: value:
if allPeers ? ${name} then
# For peers: they now have our entire /56 subnet
{
publicKey = (
builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${name}/wireguard-keys-${instanceName}/publickey/value"
)
);
# Allow the peer's /96 range in ALL controller subnets
allowedIPs = lib.mapAttrsToList (
ctrlName: _:
let
controllerPrefix = builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${ctrlName}/wireguard-network-${instanceName}/prefix/value"
);
peerSuffix = builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${name}/wireguard-network-${instanceName}/suffix/value"
);
in
"${controllerPrefix}:${peerSuffix}/96"
) roles.controller.machines;
persistentKeepalive = 25;
}
else
# For other controllers: use their /56 subnet
{
publicKey = (
builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${name}/wireguard-keys-${instanceName}/publickey/value"
)
);
allowedIPs = [
"${
builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${name}/wireguard-network-${instanceName}/prefix/value"
)
}::/56"
];
endpoint = "${value.settings.endpoint}:${toString value.settings.port}";
persistentKeepalive = 25;
}
) (allPeers // allOtherControllers);
};
};
};
};
# Maps over all machines and produces one result per machine, regardless of role
perMachine =
{ instances, machine, ... }:
{
nixosModule =
{ pkgs, lib, ... }:
let
# Check if this machine has conflicting roles across all instances
machineRoleConflicts = lib.flatten (
lib.mapAttrsToList (
instanceName: instanceInfo:
let
isController =
instanceInfo.roles ? controller && instanceInfo.roles.controller.machines ? ${machine.name};
isPeer = instanceInfo.roles ? peer && instanceInfo.roles.peer.machines ? ${machine.name};
in
lib.optional (isController && isPeer) {
inherit instanceName;
machineName = machine.name;
}
) instances
);
in
{
# Add assertions for role conflicts
assertions = lib.forEach machineRoleConflicts (conflict: {
assertion = false;
message = ''
Machine '${conflict.machineName}' cannot have both 'controller' and 'peer' roles in the wireguard instance '${conflict.instanceName}'.
A machine must be either a controller or a peer, not both.
'';
});
# Generate keys for each instance where this machine participates
clan.core.vars.generators = lib.mapAttrs' (
name: _instanceInfo:
lib.nameValuePair "wireguard-keys-${name}" {
files.publickey.secret = false;
files.privatekey = { };
runtimeInputs = with pkgs; [
wireguard-tools
];
script = ''
wg genkey > $out/privatekey
wg pubkey < $out/privatekey > $out/publickey
'';
}
) instances;
};
};
}

View File

@@ -0,0 +1,7 @@
{ lib, ... }:
let
module = lib.modules.importApply ./default.nix { };
in
{
clan.modules.wireguard = module;
}

View File

@@ -0,0 +1,135 @@
#!/usr/bin/env python3
"""
IPv6 address allocator for WireGuard networks.
Network layout:
- Base network: /40 ULA prefix (fd00::/8 + 32 bits from hash)
- Controllers: Each gets a /56 subnet from the base /40 (up to 65,536 controller subnets)
- Peers: Each gets a unique 64-bit host suffix, reused in every controller's /56 subnet (controllers route a /96 per peer)
"""
import hashlib
import ipaddress
import sys
from pathlib import Path
def hash_string(s: str) -> str:
"""Generate SHA256 hash of string."""
return hashlib.sha256(s.encode()).hexdigest()
def generate_ula_prefix(instance_name: str) -> ipaddress.IPv6Network:
"""
Generate a /40 ULA prefix from instance name.
Format: fd{32-bit hash}/40
This gives us fd00:0000:0000::/40 through fdff:ffff:ff00::/40
"""
h = hash_string(instance_name)
# For /40, we need 32 bits after 'fd' (8 hex chars)
# But only the first 32 bits count for the network prefix
# The last 8 bits of the 40-bit prefix must be 0
prefix_bits = int(h[:8], 16)
# Mask to ensure we only use the first 32 bits for /40
# This gives us addresses like fd28:387a::/40
prefix_bits = prefix_bits & 0xFFFFFF00 # Clear last 8 bits
# Format as IPv6 address
prefix = f"fd{prefix_bits:08x}"
prefix_formatted = f"{prefix[:4]}:{prefix[4:8]}::/40"
network = ipaddress.IPv6Network(prefix_formatted)
return network
def generate_controller_subnet(
base_network: ipaddress.IPv6Network, controller_name: str
) -> ipaddress.IPv6Network:
"""
Generate a /56 subnet for a controller from the base /40 network.
We have 16 bits (40 to 56) to allocate controller subnets.
This allows for 65,536 possible controller subnets.
"""
h = hash_string(controller_name)
# Take 16 bits from hash for the controller subnet ID
controller_id = int(h[:4], 16)
# Create the controller subnet by adding the controller ID to the base network
# The controller subnet is at base_prefix:controller_id::/56
base_int = int(base_network.network_address)
controller_subnet_int = base_int | (controller_id << (128 - 56))
controller_subnet = ipaddress.IPv6Network((controller_subnet_int, 56))
return controller_subnet
def generate_peer_suffix(peer_name: str) -> str:
"""
Generate a unique 64-bit host suffix for a peer.
This suffix will be used in all controller subnets to create unique addresses.
Format: :xxxx:xxxx:xxxx:xxxx (64 bits)
"""
h = hash_string(peer_name)
# Take 64 bits (16 hex chars) from hash for the host suffix
suffix_bits = h[:16]
# Format as IPv6 suffix without leading colon
suffix = f"{suffix_bits[0:4]}:{suffix_bits[4:8]}:{suffix_bits[8:12]}:{suffix_bits[12:16]}"
return suffix
def main() -> None:
if len(sys.argv) < 4:
print(
"Usage: ipv6_allocator.py <output_dir> <instance_name> <controller|peer> <machine_name>"
)
sys.exit(1)
output_dir = Path(sys.argv[1])
instance_name = sys.argv[2]
node_type = sys.argv[3]
# Generate base /40 network
base_network = generate_ula_prefix(instance_name)
if node_type == "controller":
if len(sys.argv) < 5:
print("Controller name required")
sys.exit(1)
controller_name = sys.argv[4]
subnet = generate_controller_subnet(base_network, controller_name)
# Extract clean prefix from subnet (e.g. "fd51:19c1:3b:f700::/56" -> "fd51:19c1:3b:f700")
prefix_str = str(subnet).split("/")[0].rstrip(":")
while prefix_str.endswith(":"):
prefix_str = prefix_str.rstrip(":")
# Write file
(output_dir / "prefix").write_text(prefix_str)
elif node_type == "peer":
if len(sys.argv) < 5:
print("Peer name required")
sys.exit(1)
peer_name = sys.argv[4]
# Generate the peer's host suffix
suffix = generate_peer_suffix(peer_name)
# Write file
(output_dir / "suffix").write_text(suffix)
else:
print(f"Unknown node type: {node_type}")
sys.exit(1)
if __name__ == "__main__":
main()
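Because the allocator derives everything from SHA-256 hashes of the instance and machine names, re-running a generator always reproduces the same prefix or suffix. A quick sketch of that property (editorial illustration, assuming the script above is importable as `ipv6_allocator`; the import path is an assumption, not part of this change):

```python
import ipaddress
from ipv6_allocator import generate_ula_prefix, generate_peer_suffix  # assumed import path

# The base network is a valid /40 ULA prefix and is stable across runs.
base = generate_ula_prefix("wg-test-one")
assert isinstance(base, ipaddress.IPv6Network) and base.prefixlen == 40
assert str(base).startswith("fd")
assert base == generate_ula_prefix("wg-test-one")

# Peer suffixes are likewise pure functions of the peer name.
assert generate_peer_suffix("peer1") == generate_peer_suffix("peer1")
assert generate_peer_suffix("peer1") != generate_peer_suffix("peer2")
```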

devFlake/flake.lock (generated)
View File

@@ -3,10 +3,10 @@
"clan-core-for-checks": { "clan-core-for-checks": {
"flake": false, "flake": false,
"locked": { "locked": {
"lastModified": 1755008427, "lastModified": 1755093452,
"narHash": "sha256-obvJA4HxWsxPA7gfT0guabiz4xCn7xQgSbcDbPPkl9w=", "narHash": "sha256-NKBss7QtNnOqYVyJmYCgaCvYZK0mpQTQc9fLgE1mGyk=",
"ref": "main", "ref": "main",
"rev": "ff979eba616d5f1d303881d08bdea3ddefdd79da", "rev": "7e97734797f0c6bd3c2d3a51cf54a2a6b371c222",
"shallow": true, "shallow": true,
"type": "git", "type": "git",
"url": "https://git.clan.lol/clan/clan-core" "url": "https://git.clan.lol/clan/clan-core"

View File

@@ -92,7 +92,6 @@ nav:
- Services:
- Overview:
- reference/clanServices/index.md
- reference/clanServices/admin.md
- reference/clanServices/borgbackup.md
- reference/clanServices/data-mesher.md
@@ -109,6 +108,7 @@ nav:
- reference/clanServices/trusted-nix-caches.md
- reference/clanServices/users.md
- reference/clanServices/wifi.md
- reference/clanServices/wireguard.md
- reference/clanServices/zerotier.md
- API: reference/clanServices/clan-service-author-interface.md

flake.lock (generated)
View File

@@ -146,11 +146,11 @@
]
},
"locked": {
"lastModified": 1754328224,
"lastModified": 1754988908,
"narHash": "sha256-glPK8DF329/dXtosV7YSzRlF4n35WDjaVwdOMEoEXHA=",
"narHash": "sha256-t+voe2961vCgrzPFtZxha0/kmFSHFobzF00sT8p9h0U=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "49021900e69812ba7ddb9e40f9170218a7eca9f4",
"rev": "3223c7a92724b5d804e9988c6b447a0d09017d48",
"type": "github"
},
"original": {

View File

@@ -124,7 +124,7 @@ rec {
]
)
}" \
${pkgs.runtimeShell} ${genInfo.finalScript}
${pkgs.runtimeShell} -x "${genInfo.finalScript}"
# Verify expected outputs were created
${lib.concatStringsSep "\n" (

View File

@@ -22,7 +22,7 @@ export interface ButtonProps
startIcon?: IconVariant;
endIcon?: IconVariant;
class?: string;
onAction?: Action;
loading?: boolean;
}
const iconSizes: Record<Size, string> = {
@@ -40,31 +40,12 @@ export const Button = (props: ButtonProps) => {
"startIcon", "startIcon",
"endIcon", "endIcon",
"class", "class",
"onAction", "loading",
]); ]);
const size = local.size || "default"; const size = local.size || "default";
const hierarchy = local.hierarchy || "primary"; const hierarchy = local.hierarchy || "primary";
const [loading, setLoading] = createSignal(false);
const onClick = async () => {
if (!local.onAction) {
console.error("this should not be possible");
return;
}
setLoading(true);
try {
await local.onAction();
} catch (error) {
console.error("Error while executing action", error);
}
setLoading(false);
};
const iconSize = iconSizes[local.size || "default"]; const iconSize = iconSizes[local.size || "default"];
const loadingClass = const loadingClass =
@@ -81,16 +62,19 @@ export const Button = (props: ButtonProps) => {
hierarchy,
{
icon: local.icon,
loading: loading(),
loading: props.loading,
ghost: local.ghost,
},
)}
onClick={local.onAction ? onClick : undefined}
onClick={props.onClick}
{...other}
>
<Loader
hierarchy={hierarchy}
class={cx({ [idleClass]: !loading(), [loadingClass]: loading() })}
class={cx({
[idleClass]: !props.loading,
[loadingClass]: props.loading,
})}
/>
{local.startIcon && (

View File

@@ -318,8 +318,13 @@ export const useMachineGenerators = (
],
queryFn: async () => {
const call = client.fetch("get_generators", {
base_dir: clanUri,
machine: {
machine_name: machineName,
name: machineName,
flake: {
identifier: clanUri,
},
},
full_closure: true, // TODO: Make this configurable
// TODO: Make this configurable
include_previous_values: true,
});

View File

@@ -99,8 +99,12 @@ const welcome = (props: {
}) => {
const navigate = useNavigate();
const [loading, setLoading] = createSignal(false);
const selectFolder = async () => {
setLoading(true);
const uri = await selectClanFolder();
setLoading(false);
navigateToClan(navigate, uri);
};
@@ -148,7 +152,12 @@ const welcome = (props: {
</Typography>
<Divider orientation="horizontal" />
</div>
<Button hierarchy="primary" ghost={true} onAction={selectFolder}>
<Button
hierarchy="primary"
ghost={true}
loading={loading()}
onClick={selectFolder}
>
Select folder
</Button>
</div>

View File

@@ -72,7 +72,7 @@ const mockFetcher: Fetcher = <K extends OperationNames>(
],
},
],
run_generators: true,
run_generators: null,
get_machine_hardware_summary: {
hardware_config: "nixos-facter",
},

View File

@@ -4,6 +4,7 @@ import {
createForm,
FieldValues,
getError,
getValue,
SubmitHandler,
valiForm,
} from "@modular-forms/solid";
@@ -13,7 +14,7 @@ import { getStepStore, useStepper } from "@/src/hooks/stepper";
import { InstallSteps, InstallStoreType, PromptValues } from "../install";
import { TextInput } from "@/src/components/Form/TextInput";
import { Alert } from "@/src/components/Alert/Alert";
import { For, Match, Show, Switch } from "solid-js";
import { createSignal, For, Match, Show, Switch } from "solid-js";
import { Divider } from "@/src/components/Divider/Divider";
import { Orienter } from "@/src/components/Form/Orienter";
import { Button } from "@/src/components/Button/Button";
@@ -29,6 +30,7 @@ import {
import { useClanURI } from "@/src/hooks/clan";
import { useApiClient } from "@/src/hooks/ApiClient";
import { ProcessMessage, useNotifyOrigin } from "@/src/hooks/notify";
import { Loader } from "@/src/components/Loader/Loader";
export const InstallHeader = (props: { machineName: string }) => {
return (
@@ -58,8 +60,9 @@ const ConfigureAddress = () => {
},
});
const [isReachable, setIsReachable] = createSignal<string | null>(null);
const client = useApiClient();
const clanUri = useClanURI();
// TODO: push values to the parent form Store
const handleSubmit: SubmitHandler<ConfigureAdressForm> = async (
values,
@@ -72,6 +75,24 @@ const ConfigureAddress = () => {
stepSignal.next();
};
const tryReachable = async () => {
const address = getValue(formStore, "targetHost");
if (!address) {
return;
}
const call = client.fetch("check_machine_ssh_login", {
remote: {
address,
},
});
const result = await call.result;
console.log("SSH login check result:", result);
if (result.status === "success") {
setIsReachable(address);
}
};
return (
<Form onSubmit={handleSubmit} class="h-full">
<StepLayout
@@ -98,12 +119,28 @@ const ConfigureAddress = () => {
)}
</Field>
</Fieldset>
<Button
disabled={!getValue(formStore, "targetHost")}
endIcon="ArrowRight"
onClick={tryReachable}
hierarchy="secondary"
>
Test Connection
</Button>
</div>
}
footer={
<div class="flex justify-between">
<BackButton />
<NextButton type="submit">Next</NextButton>
<NextButton
type="submit"
disabled={
!isReachable() ||
isReachable() !== getValue(formStore, "targetHost")
}
>
Next
</NextButton>
</div>
}
/>
@@ -157,15 +194,18 @@ const CheckHardware = () => {
Hardware Report
</Typography>
<Button
disabled={hardwareQuery.isLoading}
hierarchy="secondary"
startIcon="Report"
onClick={handleUpdateSummary}
class="flex gap-3"
loading={hardwareQuery.isFetching}
>
Update hardware report
</Button>
</Orienter>
<Divider orientation="horizontal" />
<Show when={hardwareQuery.isLoading}>Loading...</Show>
<Show when={hardwareQuery.data}>
{(d) => (
<Alert
@@ -508,8 +548,12 @@ const InstallSummary = () => {
const runGenerators = client.fetch("run_generators", { const runGenerators = client.fetch("run_generators", {
all_prompt_values: store.install.promptValues, all_prompt_values: store.install.promptValues,
base_dir: clanUri, machine: {
machine_name: store.install.machineName, name: store.install.machineName,
flake: {
identifier: clanUri,
},
},
}); });
set("install", (s) => ({ set("install", (s) => ({
@@ -545,13 +589,16 @@ const InstallSummary = () => {
<StepLayout <StepLayout
body={ body={
<div class="flex flex-col gap-4"> <div class="flex flex-col gap-4">
<Fieldset legend="Address Configuration"> <Fieldset legend="Machine">
<Orienter orientation="horizontal"> <Orienter orientation="horizontal">
{/* TOOD: Display the values emited from previous steps */} <Display label="Name" value={store.install.machineName} />
<Display label="Target" value="flash-installer.local" /> </Orienter>
<Divider orientation="horizontal" />
<Orienter orientation="horizontal">
<Display label="Address" value={store.install.targetHost} />
</Orienter> </Orienter>
</Fieldset> </Fieldset>
<Fieldset legend="Disk Configuration"> <Fieldset legend="Disk">
<Orienter orientation="horizontal"> <Orienter orientation="horizontal">
<Display label="Disk Schema" value="Single" /> <Display label="Disk Schema" value="Single" />
</Orienter> </Orienter>


@@ -1,8 +1,6 @@
 # !/usr/bin/env python3
 import argparse
-from clan_cli.clan.inspect import register_inspect_parser
 from .create import register_create_parser
@@ -16,5 +14,3 @@ def register_parser(parser: argparse.ArgumentParser) -> None:
     )
     create_parser = subparser.add_parser("create", help="Create a clan")
     register_create_parser(create_parser)
-    inspect_parser = subparser.add_parser("inspect", help="Inspect a clan ")
-    register_inspect_parser(inspect_parser)


@@ -1,143 +0,0 @@
import argparse
from dataclasses import dataclass
from pathlib import Path
from typing import Any
from clan_lib.cmd import run
from clan_lib.dirs import machine_gcroot
from clan_lib.errors import ClanError
from clan_lib.flake import Flake
from clan_lib.machines.actions import list_machines
from clan_lib.machines.machines import Machine
from clan_lib.nix import (
nix_add_to_gcroots,
nix_build,
nix_config,
nix_eval,
nix_metadata,
)
from clan_cli.vms.inspect import VmConfig, inspect_vm
@dataclass
class FlakeConfig:
flake_url: Flake
flake_attr: str
clan_name: str
nar_hash: str
icon: str | None
description: str | None
last_updated: str
revision: str | None
vm: VmConfig
@classmethod
def from_json(cls: type["FlakeConfig"], data: dict[str, Any]) -> "FlakeConfig":
return cls(
flake_url=Flake.from_json(data["flake_url"]),
flake_attr=data["flake_attr"],
clan_name=data["clan_name"],
nar_hash=data["nar_hash"],
icon=data.get("icon"),
description=data.get("description"),
last_updated=data["last_updated"],
revision=data.get("revision"),
vm=VmConfig.from_json(data["vm"]),
)
def run_cmd(cmd: list[str]) -> str:
proc = run(cmd)
return proc.stdout.strip()
def inspect_flake(flake_url: str | Path, machine_name: str) -> FlakeConfig:
config = nix_config()
system = config["system"]
# Check if the machine exists
machines = list_machines(Flake(str(flake_url)))
if machine_name not in machines:
msg = f"Machine {machine_name} not found in {flake_url}. Available machines: {', '.join(machines)}"
raise ClanError(msg)
machine = Machine(machine_name, Flake(str(flake_url)))
vm = inspect_vm(machine)
# Make symlink to gcroots from vm.machine_icon
if vm.machine_icon:
gcroot_icon: Path = machine_gcroot(flake_url=str(flake_url)) / vm.machine_name
nix_add_to_gcroots(vm.machine_icon, gcroot_icon)
# Get the Clan name
cmd = nix_eval(
[
f'{flake_url}#clanInternals.machines."{system}"."{machine_name}".config.clan.core.name'
]
)
res = run_cmd(cmd)
clan_name = res.strip('"')
# Get the clan icon path
cmd = nix_eval(
[
f'{flake_url}#clanInternals.machines."{system}"."{machine_name}".config.clan.core.icon'
]
)
res = run_cmd(cmd)
# If the icon is null, no icon is set for this Clan
if res == "null":
icon_path = None
else:
icon_path = res.strip('"')
cmd = nix_build(
[
f'{flake_url}#clanInternals.machines."{system}"."{machine_name}".config.clan.core.icon'
],
machine_gcroot(flake_url=str(flake_url)) / "icon",
)
run_cmd(cmd)
# Get the flake metadata
meta = nix_metadata(flake_url)
return FlakeConfig(
vm=vm,
flake_url=Flake(str(flake_url)),
clan_name=clan_name,
flake_attr=machine_name,
nar_hash=meta["locked"]["narHash"],
icon=icon_path,
description=meta.get("description"),
last_updated=meta["lastModified"],
revision=meta.get("revision"),
)
@dataclass
class InspectOptions:
machine: str
flake: Flake
def inspect_command(args: argparse.Namespace) -> None:
inspect_options = InspectOptions(
machine=args.machine,
flake=args.flake or Flake(str(Path.cwd())),
)
res = inspect_flake(
flake_url=str(inspect_options.flake), machine_name=inspect_options.machine
)
print("Clan name:", res.clan_name)
print("Icon:", res.icon)
print("Description:", res.description)
print("Last updated:", res.last_updated)
print("Revision:", res.revision)
def register_inspect_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument("--machine", type=str, default="defaultVM")
parser.set_defaults(func=inspect_command)


@@ -6,9 +6,9 @@ from tempfile import TemporaryDirectory
 from clan_lib.flake import require_flake
 from clan_lib.machines.machines import Machine
 from clan_lib.ssh.host import Host
+from clan_lib.ssh.upload import upload
 from clan_cli.completions import add_dynamic_completer, complete_machines
-from clan_cli.ssh.upload import upload
 log = logging.getLogger(__name__)


@@ -1,6 +1,8 @@
 import argparse
+import json
 import logging
 import sys
+from contextlib import ExitStack
 from pathlib import Path
 from typing import get_args
@@ -8,6 +10,7 @@ from clan_lib.errors import ClanError
 from clan_lib.flake import require_flake
 from clan_lib.machines.install import BuildOn, InstallOptions, run_machine_install
 from clan_lib.machines.machines import Machine
+from clan_lib.network.qr_code import read_qr_image, read_qr_json
 from clan_lib.ssh.host_key import HostKeyCheck
 from clan_lib.ssh.remote import Remote
@@ -17,7 +20,6 @@ from clan_cli.completions import (
     complete_target_host,
 )
 from clan_cli.machines.hardware import HardwareConfig
-from clan_cli.ssh.deploy_info import DeployInfo, find_reachable_host, ssh_command_parse
 log = logging.getLogger(__name__)
@@ -27,80 +29,71 @@ def install_command(args: argparse.Namespace) -> None:
         flake = require_flake(args.flake)
         # Only if the caller did not specify a target_host via args.target_host
         # Find a suitable target_host that is reachable
-        target_host_str = args.target_host
-        deploy_info: DeployInfo | None = (
-            ssh_command_parse(args) if target_host_str is None else None
-        )
-        use_tor = False
-        if deploy_info:
-            host = find_reachable_host(deploy_info)
-            if host is None or host.socks_port:
-                use_tor = True
-                target_host_str = deploy_info.tor.target
-            else:
-                target_host_str = host.target
-        if args.password:
-            password = args.password
-        elif deploy_info and deploy_info.addrs[0].password:
-            password = deploy_info.addrs[0].password
-        else:
-            password = None
-        machine = Machine(name=args.machine, flake=flake)
-        host_key_check = args.host_key_check
-        if target_host_str is not None:
-            target_host = Remote.from_ssh_uri(
-                machine_name=machine.name, address=target_host_str
-            ).override(host_key_check=host_key_check)
-        else:
-            target_host = machine.target_host().override(host_key_check=host_key_check)
-        if args.identity_file:
-            target_host = target_host.override(private_key=args.identity_file)
-        if machine._class_ == "darwin":
-            msg = "Installing macOS machines is not yet supported"
-            raise ClanError(msg)
-        if not args.yes:
-            while True:
-                ask = (
-                    input(f"Install {args.machine} to {target_host.target}? [y/N] ")
-                    .strip()
-                    .lower()
-                )
-                if ask == "y":
-                    break
-                if ask == "n" or ask == "":
-                    return None
-                print(f"Invalid input '{ask}'. Please enter 'y' for yes or 'n' for no.")
-        if args.identity_file:
-            target_host = target_host.override(private_key=args.identity_file)
-        if password:
-            target_host = target_host.override(password=password)
-        if use_tor:
-            target_host = target_host.override(
-                socks_port=9050, socks_wrapper=["torify"]
-            )
-        return run_machine_install(
-            InstallOptions(
-                machine=machine,
-                kexec=args.kexec,
-                phases=args.phases,
-                debug=args.debug,
-                no_reboot=args.no_reboot,
-                build_on=args.build_on if args.build_on is not None else None,
-                update_hardware_config=HardwareConfig(args.update_hardware_config),
-            ),
-            target_host=target_host,
-        )
+        with ExitStack() as stack:
+            remote: Remote
+            if args.target_host:
+                # TODO add network support here with either --network or some url magic
+                remote = Remote.from_ssh_uri(
+                    machine_name=args.machine, address=args.target_host
+                )
+            elif args.png:
+                data = read_qr_image(Path(args.png))
+                qr_code = read_qr_json(data, args.flake)
+                remote = stack.enter_context(qr_code.get_best_remote())
+            elif args.json:
+                json_file = Path(args.json)
+                if json_file.is_file():
+                    data = json.loads(json_file.read_text())
+                else:
+                    data = json.loads(args.json)
+                qr_code = read_qr_json(data, args.flake)
+                remote = stack.enter_context(qr_code.get_best_remote())
+            else:
+                msg = "No --target-host, --json or --png data provided"
+                raise ClanError(msg)
+            machine = Machine(name=args.machine, flake=flake)
+            if args.host_key_check:
+                remote.override(host_key_check=args.host_key_check)
+            if machine._class_ == "darwin":
+                msg = "Installing macOS machines is not yet supported"
+                raise ClanError(msg)
+            if not args.yes:
+                while True:
+                    ask = (
+                        input(f"Install {args.machine} to {remote.target}? [y/N] ")
+                        .strip()
+                        .lower()
+                    )
+                    if ask == "y":
+                        break
+                    if ask == "n" or ask == "":
+                        return None
+                    print(
+                        f"Invalid input '{ask}'. Please enter 'y' for yes or 'n' for no."
+                    )
+            if args.identity_file:
+                remote = remote.override(private_key=args.identity_file)
+            if args.password:
+                remote = remote.override(password=args.password)
+            return run_machine_install(
+                InstallOptions(
+                    machine=machine,
+                    kexec=args.kexec,
+                    phases=args.phases,
+                    debug=args.debug,
+                    no_reboot=args.no_reboot,
+                    build_on=args.build_on if args.build_on is not None else None,
+                    update_hardware_config=HardwareConfig(args.update_hardware_config),
+                ),
+                target_host=remote,
+            )
     except KeyboardInterrupt:
         log.warning("Interrupted by user")
         sys.exit(1)
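The refactored install command resolves its Remote from one of three inputs (--target-host, --png, --json). A minimal, hedged sketch of that selection logic as a standalone helper — resolve_remote is hypothetical, only the imports and calls shown in the diff above are assumed to exist:

import json
from contextlib import ExitStack
from pathlib import Path

from clan_lib.network.qr_code import read_qr_image, read_qr_json
from clan_lib.ssh.remote import Remote


def resolve_remote(machine_name, flake, target_host=None, png=None, json_arg=None):
    """Hypothetical helper mirroring the branch order used by install_command."""
    with ExitStack() as stack:
        if target_host:
            remote = Remote.from_ssh_uri(machine_name=machine_name, address=target_host)
        elif png:
            qr_code = read_qr_json(read_qr_image(Path(png)), flake)
            remote = stack.enter_context(qr_code.get_best_remote())
        elif json_arg:
            raw = Path(json_arg)
            data = json.loads(raw.read_text()) if raw.is_file() else json.loads(json_arg)
            qr_code = read_qr_json(data, flake)
            remote = stack.enter_context(qr_code.get_best_remote())
        else:
            raise ValueError("no --target-host, --json or --png data provided")
        # The connection (e.g. a spawned tor daemon) stays open only inside the ExitStack.
        print(remote.target)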


@@ -16,6 +16,9 @@ def overview_command(args: argparse.Namespace) -> None:
         for peer_name, peer in network["peers"].items():
             print(f"\t{peer_name}: {'[OFFLINE]' if not peer else f'[{peer}]'}")
+    if not overview:
+        print("No networks found.")
 def register_overview_parser(parser: argparse.ArgumentParser) -> None:
     parser.set_defaults(func=overview_command)


@@ -16,8 +16,8 @@ def ping_command(args: argparse.Namespace) -> None:
     networks = networks_from_flake(flake)
     if not networks:
-        print("No networks found in the flake")
+        print("No networks found")
         return
     # If network is specified, only check that network
     if network_name:
         networks_to_check = [(network_name, networks[network_name])]


@@ -1,17 +1,14 @@
 import argparse
-import contextlib
 import json
 import logging
-import textwrap
+from contextlib import ExitStack
-from dataclasses import dataclass
 from pathlib import Path
-from typing import Any, get_args
+from typing import get_args
-from clan_lib.cmd import run
 from clan_lib.errors import ClanError
 from clan_lib.machines.machines import Machine
-from clan_lib.network.tor.lib import spawn_tor
-from clan_lib.nix import nix_shell
+from clan_lib.network.network import get_best_remote
+from clan_lib.network.qr_code import read_qr_image, read_qr_json
 from clan_lib.ssh.remote import HostKeyCheck, Remote
 from clan_cli.completions import (
@@ -22,180 +19,57 @@ from clan_cli.completions import (
 log = logging.getLogger(__name__)
-@dataclass
-class DeployInfo:
-    addrs: list[Remote]
-    @property
-    def tor(self) -> Remote:
-        """Return a list of Remote objects that are configured for SOCKS5 proxy."""
-        addrs = [addr for addr in self.addrs if addr.socks_port]
-        if not addrs:
-            msg = "No socks5 proxy address provided, please provide a socks5 proxy address."
-            raise ClanError(msg)
-        if len(addrs) > 1:
-            msg = "Multiple socks5 proxy addresses provided, expected only one."
-            raise ClanError(msg)
-        return addrs[0]
-    def overwrite_remotes(
-        self,
-        host_key_check: HostKeyCheck | None = None,
-        private_key: Path | None = None,
-        ssh_options: dict[str, str] | None = None,
-    ) -> "DeployInfo":
-        """Return a new DeployInfo with all Remotes overridden with the given host_key_check."""
-        return DeployInfo(
-            addrs=[
-                addr.override(
-                    host_key_check=host_key_check,
-                    private_key=private_key,
-                    ssh_options=ssh_options,
-                )
-                for addr in self.addrs
-            ]
-        )
-    @staticmethod
-    def from_json(data: dict[str, Any], host_key_check: HostKeyCheck) -> "DeployInfo":
-        addrs = []
-        password = data.get("pass")
-        for addr in data.get("addrs", []):
-            if isinstance(addr, str):
-                remote = Remote.from_ssh_uri(
-                    machine_name="clan-installer",
-                    address=addr,
-                ).override(host_key_check=host_key_check, password=password)
-                addrs.append(remote)
-            else:
-                msg = f"Invalid address format: {addr}"
-                raise ClanError(msg)
-        if tor_addr := data.get("tor"):
-            remote = Remote.from_ssh_uri(
-                machine_name="clan-installer",
-                address=tor_addr,
-            ).override(
-                host_key_check=host_key_check,
-                socks_port=9050,
-                socks_wrapper=["torify"],
-                password=password,
-            )
-            addrs.append(remote)
-        return DeployInfo(addrs=addrs)
-    @staticmethod
-    def from_qr_code(picture_file: Path, host_key_check: HostKeyCheck) -> "DeployInfo":
-        cmd = nix_shell(
-            ["zbar"],
-            [
-                "zbarimg",
-                "--quiet",
-                "--raw",
-                str(picture_file),
-            ],
-        )
-        res = run(cmd)
-        data = res.stdout.strip()
-        return DeployInfo.from_json(json.loads(data), host_key_check=host_key_check)
-def find_reachable_host(deploy_info: DeployInfo) -> Remote | None:
-    # If we only have one address, we have no choice but to use it.
-    if len(deploy_info.addrs) == 1:
-        return deploy_info.addrs[0]
-    for addr in deploy_info.addrs:
-        with contextlib.suppress(ClanError):
-            addr.check_machine_ssh_reachable()
-            return addr
-    return None
-def ssh_shell_from_deploy(
-    deploy_info: DeployInfo, command: list[str] | None = None
-) -> None:
-    if command and len(command) == 1 and command[0].count(" ") > 0:
-        msg = (
-            textwrap.dedent("""
-            It looks like you quoted the remote command.
-            The first argument should be the command to run, not a quoted string.
-            """)
-            .lstrip("\n")
-            .rstrip("\n")
-        )
-        raise ClanError(msg)
-    if host := find_reachable_host(deploy_info):
-        host.interactive_ssh(command)
-        return
-    log.info("Could not reach host via clearnet 'addrs'")
-    log.info(f"Trying to reach host via tor '{deploy_info}'")
-    tor_addrs = [addr for addr in deploy_info.addrs if addr.socks_port]
-    if not tor_addrs:
-        msg = "No tor address provided, please provide a tor address."
-        raise ClanError(msg)
-    with spawn_tor():
-        for tor_addr in tor_addrs:
-            log.info(f"Trying to reach host via tor address: {tor_addr}")
-            with contextlib.suppress(ClanError):
-                tor_addr.check_machine_ssh_reachable()
-                log.info(
-                    "Host reachable via tor address, starting interactive ssh session."
-                )
-                tor_addr.interactive_ssh(command)
-                return
-    log.error("Could not reach host via tor address.")
-def ssh_command_parse(args: argparse.Namespace) -> DeployInfo | None:
-    host_key_check = args.host_key_check
-    deploy = None
-    if args.json:
-        json_file = Path(args.json)
-        if json_file.is_file():
-            data = json.loads(json_file.read_text())
-            return DeployInfo.from_json(data, host_key_check)
-        data = json.loads(args.json)
-        deploy = DeployInfo.from_json(data, host_key_check)
-    elif args.png:
-        deploy = DeployInfo.from_qr_code(Path(args.png), host_key_check)
-    elif hasattr(args, "machine") and args.machine:
-        machine = Machine(args.machine, args.flake)
-        target = machine.target_host().override(
-            command_prefix=machine.name, host_key_check=host_key_check
-        )
-        deploy = DeployInfo(addrs=[target])
-    else:
-        return None
-    ssh_options = None
-    if hasattr(args, "ssh_option") and args.ssh_option:
-        for name, value in args.ssh_option:
-            ssh_options = {}
-            ssh_options[name] = value
-    deploy = deploy.overwrite_remotes(ssh_options=ssh_options)
-    return deploy
+def get_tor_remote(remotes: list[Remote]) -> Remote:
+    """Get the Remote configured for SOCKS5 proxy (Tor)."""
+    tor_remotes = [r for r in remotes if r.socks_port]
+    if not tor_remotes:
+        msg = "No socks5 proxy address provided, please provide a socks5 proxy address."
+        raise ClanError(msg)
+    if len(tor_remotes) > 1:
+        msg = "Multiple socks5 proxy addresses provided, expected only one."
+        raise ClanError(msg)
+    return tor_remotes[0]
 def ssh_command(args: argparse.Namespace) -> None:
-    deploy_info = ssh_command_parse(args)
-    if not deploy_info:
-        msg = "No MACHINE, --json or --png data provided"
-        raise ClanError(msg)
-    ssh_shell_from_deploy(deploy_info, args.remote_command)
+    with ExitStack() as stack:
+        remote: Remote
+        if hasattr(args, "machine") and args.machine:
+            machine = Machine(args.machine, args.flake)
+            remote = stack.enter_context(get_best_remote(machine))
+        elif args.png:
+            data = read_qr_image(Path(args.png))
+            qr_code = read_qr_json(data, args.flake)
+            remote = stack.enter_context(qr_code.get_best_remote())
+        elif args.json:
+            json_file = Path(args.json)
+            if json_file.is_file():
+                data = json.loads(json_file.read_text())
+            else:
+                data = json.loads(args.json)
+            qr_code = read_qr_json(data, args.flake)
+            remote = stack.enter_context(qr_code.get_best_remote())
+        else:
+            msg = "No MACHINE, --json or --png data provided"
+            raise ClanError(msg)
+        # Convert ssh_option list to dictionary
+        ssh_options = {}
+        if args.ssh_option:
+            for name, value in args.ssh_option:
+                ssh_options[name] = value
+        remote = remote.override(
+            host_key_check=args.host_key_check, ssh_options=ssh_options
+        )
+        if args.remote_command:
+            remote.interactive_ssh(args.remote_command)
+        else:
+            remote.interactive_ssh()
 def register_parser(parser: argparse.ArgumentParser) -> None:
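The new get_tor_remote helper simply filters a list of Remotes down to the single SOCKS-enabled one. A minimal hedged sketch, assuming get_tor_remote from the module above is in scope; the addresses are taken from the test data in this patch set:

from clan_lib.ssh.remote import Remote

remotes = [
    Remote.from_ssh_uri(machine_name="installer", address="192.168.122.86"),
    Remote.from_ssh_uri(
        machine_name="installer",
        address="qjeerm4r6t55hcfum4pinnvscn5njlw2g3k7ilqfuu7cdt3ahaxhsbid.onion",
    ).override(socks_port=9050),
]
# Picks the onion Remote; raises ClanError if zero or more than one SOCKS remote is present.
tor_remote = get_tor_remote(remotes)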


@@ -3,15 +3,17 @@ from pathlib import Path
 import pytest
 from clan_lib.cmd import RunOpts, run
+from clan_lib.flake import Flake
+from clan_lib.network.qr_code import read_qr_image, read_qr_json
 from clan_lib.nix import nix_shell
 from clan_lib.ssh.remote import Remote
-from clan_cli.ssh.deploy_info import DeployInfo, find_reachable_host
 from clan_cli.tests.fixtures_flakes import ClanFlake
 from clan_cli.tests.helpers import cli
-def test_qrcode_scan(temp_dir: Path) -> None:
+@pytest.mark.with_core
+def test_qrcode_scan(temp_dir: Path, flake: ClanFlake) -> None:
     data = '{"pass":"scabbed-defender-headlock","tor":"qjeerm4r6t55hcfum4pinnvscn5njlw2g3k7ilqfuu7cdt3ahaxhsbid.onion","addrs":["192.168.122.86"]}'
     img_path = temp_dir / "qrcode.png"
     cmd = nix_shell(
@@ -25,63 +27,93 @@ def test_qrcode_scan(temp_dir: Path) -> None:
     run(cmd, RunOpts(input=data.encode()))
     # Call the qrcode_scan function
-    deploy_info = DeployInfo.from_qr_code(img_path, "none")
-    host = deploy_info.addrs[0]
-    assert host.address == "192.168.122.86"
-    assert host.user == "root"
-    assert host.password == "scabbed-defender-headlock"
-    tor_host = deploy_info.addrs[1]
-    assert (
-        tor_host.address
-        == "qjeerm4r6t55hcfum4pinnvscn5njlw2g3k7ilqfuu7cdt3ahaxhsbid.onion"
-    )
-    assert tor_host.socks_port == 9050
-    assert tor_host.password == "scabbed-defender-headlock"
-    assert tor_host.user == "root"
-    assert (
-        tor_host.address
-        == "qjeerm4r6t55hcfum4pinnvscn5njlw2g3k7ilqfuu7cdt3ahaxhsbid.onion"
-    )
+    json_data = read_qr_image(img_path)
+    qr_code = read_qr_json(json_data, Flake(str(flake.path)))
+    # Check addresses
+    addresses = qr_code.addresses
+    assert len(addresses) >= 2  # At least direct and tor
+    # Find direct connection
+    direct_remote = None
+    for addr in addresses:
+        if addr.network.module_name == "clan_lib.network.direct":
+            direct_remote = addr.remote
+            break
+    assert direct_remote is not None
+    assert direct_remote.address == "192.168.122.86"
+    assert direct_remote.user == "root"
+    assert direct_remote.password == "scabbed-defender-headlock"
+    # Find tor connection
+    tor_remote = None
+    for addr in addresses:
+        if addr.network.module_name == "clan_lib.network.tor":
+            tor_remote = addr.remote
+            break
+    assert tor_remote is not None
+    assert (
+        tor_remote.address
+        == "qjeerm4r6t55hcfum4pinnvscn5njlw2g3k7ilqfuu7cdt3ahaxhsbid.onion"
+    )
+    assert tor_remote.socks_port == 9050
+    assert tor_remote.password == "scabbed-defender-headlock"
+    assert tor_remote.user == "root"
-def test_from_json() -> None:
+def test_from_json(temp_dir: Path) -> None:
     data = '{"pass":"scabbed-defender-headlock","tor":"qjeerm4r6t55hcfum4pinnvscn5njlw2g3k7ilqfuu7cdt3ahaxhsbid.onion","addrs":["192.168.122.86"]}'
-    deploy_info = DeployInfo.from_json(json.loads(data), "none")
-    host = deploy_info.addrs[0]
-    assert host.password == "scabbed-defender-headlock"
-    assert host.address == "192.168.122.86"
-    tor_host = deploy_info.addrs[1]
-    assert (
-        tor_host.address
-        == "qjeerm4r6t55hcfum4pinnvscn5njlw2g3k7ilqfuu7cdt3ahaxhsbid.onion"
-    )
-    assert tor_host.socks_port == 9050
-    assert tor_host.password == "scabbed-defender-headlock"
-    assert tor_host.user == "root"
-    assert (
-        tor_host.address
-        == "qjeerm4r6t55hcfum4pinnvscn5njlw2g3k7ilqfuu7cdt3ahaxhsbid.onion"
-    )
+    flake = Flake(str(temp_dir))
+    qr_code = read_qr_json(json.loads(data), flake)
+    # Check addresses
+    addresses = qr_code.addresses
+    assert len(addresses) >= 2  # At least direct and tor
+    # Find direct connection
+    direct_remote = None
+    for addr in addresses:
+        if addr.network.module_name == "clan_lib.network.direct":
+            direct_remote = addr.remote
+            break
+    assert direct_remote is not None
+    assert direct_remote.password == "scabbed-defender-headlock"
+    assert direct_remote.address == "192.168.122.86"
+    # Find tor connection
+    tor_remote = None
+    for addr in addresses:
+        if addr.network.module_name == "clan_lib.network.tor":
+            tor_remote = addr.remote
+            break
+    assert tor_remote is not None
+    assert (
+        tor_remote.address
+        == "qjeerm4r6t55hcfum4pinnvscn5njlw2g3k7ilqfuu7cdt3ahaxhsbid.onion"
+    )
+    assert tor_remote.socks_port == 9050
+    assert tor_remote.password == "scabbed-defender-headlock"
+    assert tor_remote.user == "root"
-@pytest.mark.with_core
-def test_find_reachable_host(hosts: list[Remote]) -> None:
-    host = hosts[0]
-    uris = ["172.19.1.2", host.ssh_url()]
-    remotes = [Remote.from_ssh_uri(machine_name="some", address=uri) for uri in uris]
-    deploy_info = DeployInfo(addrs=remotes)
-    assert deploy_info.addrs[0].address == "172.19.1.2"
-    remote = find_reachable_host(deploy_info=deploy_info)
-    assert remote is not None
-    assert remote.ssh_url() == host.ssh_url()
+# TODO: This test needs to be updated to use get_best_remote from clan_lib.network.network
+# @pytest.mark.with_core
+# def test_find_reachable_host(hosts: list[Remote]) -> None:
+#     host = hosts[0]
+#
+#     uris = ["172.19.1.2", host.ssh_url()]
+#     remotes = [Remote.from_ssh_uri(machine_name="some", address=uri) for uri in uris]
+#
+#     assert remotes[0].address == "172.19.1.2"
+#
+#     remote = find_reachable_host(remotes=remotes)
+#
+#     assert remote is not None
+#     assert remote.ssh_url() == host.ssh_url()
 @pytest.mark.with_core


@@ -1,27 +0,0 @@
from typing import TYPE_CHECKING
import pytest
from clan_cli.tests.fixtures_flakes import FlakeForTest
from clan_cli.tests.helpers import cli
from clan_cli.tests.stdout import CaptureOutput
if TYPE_CHECKING:
pass
@pytest.mark.impure
def test_flakes_inspect(
test_flake_with_core: FlakeForTest, capture_output: CaptureOutput
) -> None:
with capture_output as output:
cli.run(
[
"flakes",
"inspect",
"--flake",
str(test_flake_with_core.path),
"--machine",
"vm1",
]
)
assert "Icon" in output.out


@@ -1,8 +1,8 @@
 from pathlib import Path
 import pytest
-from clan_cli.ssh.upload import upload
 from clan_lib.ssh.remote import Remote
+from clan_lib.ssh.upload import upload
 @pytest.mark.with_core


@@ -699,8 +699,7 @@ def test_api_set_prompts(
     monkeypatch.chdir(flake.path)
     run_generators(
-        machine_name="my_machine",
-        base_dir=flake.path,
+        machine=Machine(name="my_machine", flake=Flake(str(flake.path))),
         generators=["my_generator"],
         all_prompt_values={
             "my_generator": {
@@ -714,8 +713,7 @@ def test_api_set_prompts(
     assert store.exists(my_generator, "prompt1")
     assert store.get(my_generator, "prompt1").decode() == "input1"
     run_generators(
-        machine_name="my_machine",
-        base_dir=flake.path,
+        machine=Machine(name="my_machine", flake=Flake(str(flake.path))),
         generators=["my_generator"],
         all_prompt_values={
             "my_generator": {
@@ -725,8 +723,9 @@ def test_api_set_prompts(
     )
     assert store.get(my_generator, "prompt1").decode() == "input2"
+    machine = Machine(name="my_machine", flake=Flake(str(flake.path)))
     generators = get_generators(
-        machine_name="my_machine", base_dir=flake.path, include_previous_values=True
+        machine=machine, full_closure=True, include_previous_values=True
     )
     # get_generators should bind the store
     assert generators[0].files[0]._store is not None


@@ -23,6 +23,7 @@ from clan_lib.errors import ClanError
 from clan_lib.flake import Flake, require_flake
 from clan_lib.git import commit_files
 from clan_lib.machines.list import list_full_machines
+from clan_lib.machines.machines import Machine
 from clan_lib.nix import nix_config, nix_shell, nix_test_store
 from .check import check_vars
@@ -34,7 +35,6 @@ log = logging.getLogger(__name__)
 if TYPE_CHECKING:
     from clan_lib.flake import Flake
-    from clan_lib.machines.machines import Machine
 @dataclass(frozen=True)
@@ -357,6 +357,11 @@ def _execute_generator(
         if not secret_file.is_file():
             msg = f"did not generate a file for '{file.name}' when running the following command:\n"
             msg += str(final_script)
+            # list all files in the output directory
+            if tmpdir_out.is_dir():
+                msg += "\nOutput files:\n"
+                for f in tmpdir_out.iterdir():
+                    msg += f" - {f.name}\n"
             raise ClanError(msg)
         if file.secret:
             file_path = secret_vars_store.set(
@@ -422,12 +427,25 @@ def _get_previous_value(
     return None
-def _get_closure(
-    machine: "Machine",
-    generator_name: str | None,
+@API.register
+def get_generators(
+    machine: Machine,
     full_closure: bool,
+    generator_name: str | None = None,
     include_previous_values: bool = False,
 ) -> list[Generator]:
+    """
+    Get generators for a machine, with optional closure computation.
+    Args:
+        machine: The machine to get generators for.
+        full_closure: If True, include all dependency generators. If False, only include missing ones.
+        generator_name: Name of a specific generator to get, or None for all generators.
+        include_previous_values: If True, populate prompts with their previous values.
+    Returns:
+        List of generators based on the specified selection and closure mode.
+    """
     from . import graph
     vars_generators = Generator.get_machine_generators(machine.name, machine.flake)
@@ -489,7 +507,7 @@ def _generate_vars_for_machine(
     generators: list[Generator],
     all_prompt_values: dict[str, dict[str, str]],
     no_sandbox: bool = False,
-) -> bool:
+) -> None:
     _ensure_healthy(machine=machine, generators=generators)
     for generator in generators:
         if check_can_migrate(machine, generator):
@@ -503,42 +521,15 @@ def _generate_vars_for_machine(
             prompt_values=all_prompt_values.get(generator.name, {}),
             no_sandbox=no_sandbox,
         )
-    return True
-@API.register
-def get_generators(
-    machine_name: str,
-    base_dir: Path,
-    include_previous_values: bool = False,
-) -> list[Generator]:
-    """
-    Get the list of generators for a machine, optionally with previous values.
-    If `full_closure` is True, it returns the full closure of generators.
-    If `include_previous_values` is True, it includes the previous values for prompts.
-    Args:
-        machine_name (str): The name of the machine.
-        base_dir (Path): The base directory of the flake.
-    Returns:
-        list[Generator]: A list of generators for the machine.
-    """
-    return Generator.get_machine_generators(
-        machine_name,
-        Flake(str(base_dir)),
-        include_previous_values,
-    )
 @API.register
 def run_generators(
-    machine_name: str,
+    machine: Machine,
     all_prompt_values: dict[str, dict[str, str]],
-    base_dir: Path,
     generators: list[str] | None = None,
     no_sandbox: bool = False,
-) -> bool:
+) -> None:
     """Run the specified generators for a machine.
     Args:
         machine_name (str): The name of the machine.
@@ -553,21 +544,19 @@ def run_generators(
         ClanError: If the machine or generator is not found, or if there are issues with
         executing the generator.
     """
-    from clan_lib.machines.machines import Machine
-    machine = Machine(name=machine_name, flake=Flake(str(base_dir)))
     if not generators:
         generator_objects = Generator.get_machine_generators(
-            machine_name, machine.flake
+            machine.name, machine.flake
         )
     else:
         generators_set = set(generators)
         generator_objects = [
             g
-            for g in Generator.get_machine_generators(machine_name, machine.flake)
+            for g in Generator.get_machine_generators(machine.name, machine.flake)
             if g.name in generators_set
         ]
-    return _generate_vars_for_machine(
+    _generate_vars_for_machine(
         machine=machine,
         generators=generator_objects,
         all_prompt_values=all_prompt_values,
@@ -580,14 +569,14 @@ def create_machine_vars_interactive(
     generator_name: str | None,
     regenerate: bool,
     no_sandbox: bool = False,
-) -> bool:
-    generators = _get_closure(machine, generator_name, regenerate)
+) -> None:
+    generators = get_generators(machine, regenerate, generator_name)
     if len(generators) == 0:
-        return False
+        return
     all_prompt_values = {}
     for generator in generators:
         all_prompt_values[generator.name] = _ask_prompts(generator)
-    return _generate_vars_for_machine(
+    _generate_vars_for_machine(
         machine,
         generators,
         all_prompt_values,
@@ -601,30 +590,26 @@ def generate_vars(
     regenerate: bool = False,
     no_sandbox: bool = False,
 ) -> None:
-    was_regenerated = False
     for machine in machines:
         errors = []
         try:
-            was_regenerated |= create_machine_vars_interactive(
+            create_machine_vars_interactive(
                 machine,
                 generator_name,
                 regenerate,
                 no_sandbox=no_sandbox,
             )
+            machine.info("All vars are up to date")
         except Exception as exc:
            errors += [(machine, exc)]
    if len(errors) == 1:
        raise errors[0][1]
    if len(errors) > 1:
-        msg = f"Failed to generate facts for {len(errors)} hosts:"
+        msg = f"Failed to generate vars for {len(errors)} hosts:"
        for machine, error in errors:
            msg += f"\n{machine}: {error}"
        raise ClanError(msg) from errors[0][1]
-    if not was_regenerated and len(machines) > 0:
-        for machine in machines:
-            machine.info("All vars are already up to date")
 def generate_command(args: argparse.Namespace) -> None:
     flake = require_flake(args.flake)
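With this refactor both entry points take a Machine object instead of a (machine_name, base_dir) pair. A minimal calling sketch, assuming get_generators and run_generators stay importable from clan_cli.vars.generate and that a flake at ./my-clan with a machine named "server" exists:

from clan_lib.flake import Flake
from clan_lib.machines.machines import Machine
from clan_cli.vars.generate import get_generators, run_generators

machine = Machine(name="server", flake=Flake("./my-clan"))
# Full closure: include dependency generators even if their outputs already exist.
generators = get_generators(machine=machine, full_closure=True)
# Re-run a subset; prompt answers are keyed by generator name, then prompt name.
run_generators(
    machine=machine,
    generators=[g.name for g in generators],
    all_prompt_values={},
)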


@@ -1,6 +1,5 @@
 import argparse
 import logging
-from pathlib import Path
 from clan_cli.completions import add_dynamic_completer, complete_machines
 from clan_lib.flake import Flake, require_flake
@@ -20,7 +19,7 @@ def get_machine_vars(base_dir: str, machine_name: str) -> list[Var]:
     all_vars = []
-    generators = get_generators(base_dir=Path(base_dir), machine_name=machine_name)
+    generators = get_generators(machine=machine, full_closure=True)
     for generator in generators:
         for var in generator.files:
             if var.secret:


@@ -6,11 +6,11 @@ from collections.abc import Iterable
 from pathlib import Path
 from tempfile import TemporaryDirectory
-from clan_cli.ssh.upload import upload
 from clan_cli.vars._types import StoreBase
 from clan_cli.vars.generate import Generator, Var
 from clan_lib.flake import Flake
 from clan_lib.ssh.host import Host
+from clan_lib.ssh.upload import upload
 log = logging.getLogger(__name__)


@@ -22,13 +22,13 @@ from clan_cli.secrets.secrets import (
     has_secret,
 )
 from clan_cli.secrets.sops import load_age_plugins
-from clan_cli.ssh.upload import upload
 from clan_cli.vars._types import StoreBase
 from clan_cli.vars.generate import Generator
 from clan_cli.vars.var import Var
 from clan_lib.errors import ClanError
 from clan_lib.flake import Flake
 from clan_lib.ssh.host import Host
+from clan_lib.ssh.upload import upload
 @dataclass


@@ -187,11 +187,13 @@ def run_machine_install(opts: InstallOptions, target_host: Remote) -> None:
     cmd.append(target_host.target)
     if target_host.socks_port:
         # nix copy does not support socks5 proxy, use wrapper command
-        wrapper_cmd = target_host.socks_wrapper or ["torify"]
+        wrapper = target_host.socks_wrapper
+        wrapper_cmd = wrapper.cmd if wrapper else []
+        wrapper_packages = wrapper.packages if wrapper else []
         cmd = nix_shell(
             [
                 "nixos-anywhere",
-                *wrapper_cmd,
+                *wrapper_packages,
             ],
             [*wrapper_cmd, *cmd],
         )
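With socks_wrapper now a structured value, call sites unpack the wrapper command and its Nix packages separately instead of hard-coding ["torify"]. A hedged illustration of that unpacking, using the tor_wrapper constant introduced in this patch set:

from clan_lib.ssh.socks_wrapper import tor_wrapper

wrapper = tor_wrapper  # SocksWrapper(cmd=["torify"], packages=["tor", "torsocks"])
wrapper_cmd = wrapper.cmd if wrapper else []
wrapper_packages = wrapper.packages if wrapper else []
# packages extend the nix shell environment; cmd prefixes the command that is executed.
print(wrapper_packages, wrapper_cmd)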


@@ -10,7 +10,6 @@ from clan_cli.facts import secret_modules as facts_secret_modules
 from clan_cli.vars._types import StoreBase
 from clan_lib.api import API
-from clan_lib.errors import ClanError
 from clan_lib.flake import ClanSelectError, Flake
 from clan_lib.nix_models.clan import InventoryMachine
 from clan_lib.ssh.remote import Remote
@@ -125,15 +124,10 @@ class Machine:
         return self.flake.path
     def target_host(self) -> Remote:
-        remote = get_machine_host(self.name, self.flake, field="targetHost")
-        if remote is None:
-            msg = f"'targetHost' is not set for machine '{self.name}'"
-            raise ClanError(
-                msg,
-                description="See https://docs.clan.lol/guides/getting-started/update/#setting-the-target-host for more information.",
-            )
-        data = remote.data
-        return data
+        from clan_lib.network.network import get_best_remote
+        with get_best_remote(self) as remote:
+            return remote
     def build_host(self) -> Remote | None:
         """


@@ -19,12 +19,10 @@ class NetworkTechnology(NetworkTechnologyBase):
         """Direct connections are always 'running' as they don't require a daemon"""
         return True
-    def ping(self, peer: Peer) -> None | float:
+    def ping(self, remote: Remote) -> None | float:
         if self.is_running():
             try:
-                # Parse the peer's host address to create a Remote object, use peer here since we don't have the machine_name here
-                remote = Remote.from_ssh_uri(machine_name="peer", address=peer.host)
                 # Use the existing SSH reachability check
                 now = time.time()
@@ -33,7 +31,7 @@ class NetworkTechnology(NetworkTechnologyBase):
                 return (time.time() - now) * 1000
             except ClanError as e:
-                log.debug(f"Error checking peer {peer.host}: {e}")
+                log.debug(f"Error checking peer {remote}: {e}")
                 return None
         return None


@@ -12,9 +12,10 @@ from clan_cli.vars.get import get_machine_var
 from clan_lib.errors import ClanError
 from clan_lib.flake import Flake
 from clan_lib.import_utils import ClassSource, import_with_source
+from clan_lib.ssh.remote import Remote
 if TYPE_CHECKING:
-    from clan_lib.ssh.remote import Remote
+    from clan_lib.machines.machines import Machine
 log = logging.getLogger(__name__)
@@ -51,7 +52,7 @@ class Peer:
                 .lstrip("\n")
             )
             raise ClanError(msg)
-            return var.value.decode()
+            return var.value.decode().strip()
         msg = f"Unknown Var Type {self._host}"
         raise ClanError(msg)
@@ -75,7 +76,7 @@ class Network:
         return self.module.is_running()
     def ping(self, peer: str) -> float | None:
-        return self.module.ping(self.peers[peer])
+        return self.module.ping(self.remote(peer))
     def remote(self, peer: str) -> "Remote":
         # TODO raise exception if peer is not in peers
@@ -95,7 +96,7 @@ class NetworkTechnologyBase(ABC):
         pass
     @abstractmethod
-    def ping(self, peer: Peer) -> None | float:
+    def ping(self, remote: "Remote") -> None | float:
         pass
     @contextmanager
@@ -108,12 +109,18 @@ def networks_from_flake(flake: Flake) -> dict[str, Network]:
     # TODO more precaching, for example for vars
     flake.precache(
         [
-            "clan.exports.instances.*.networking",
+            "clan.?exports.instances.*.networking",
         ]
     )
     networks: dict[str, Network] = {}
-    networks_ = flake.select("clan.exports.instances.*.networking")
-    for network_name, network in networks_.items():
+    networks_ = flake.select("clan.?exports.instances.*.networking")
+    if "exports" not in networks_:
+        msg = """You are not exporting the clan exports through your flake.
+        Please add exports next to clanInternals and nixosConfiguration into the global flake.
+        """
+        log.warning(msg)
+        return {}
+    for network_name, network in networks_["exports"].items():
         if network:
             peers: dict[str, Peer] = {}
             for _peer in network["peers"].values():
@@ -128,15 +135,103 @@ def networks_from_flake(flake: Flake) -> dict[str, Network]:
     return networks
-def get_best_network(machine_name: str, networks: dict[str, Network]) -> Network | None:
-    for network_name, network in sorted(
-        networks.items(), key=lambda network: -network[1].priority
-    ):
-        if machine_name in network.peers:
-            if network.is_running() and network.ping(machine_name):
-                print(f"connecting via {network_name}")
-                return network
-    return None
+@contextmanager
+def get_best_remote(machine: "Machine") -> Iterator["Remote"]:
+    """
+    Context manager that yields the best remote connection for a machine following this priority:
+    1. If machine has targetHost in inventory, return a direct connection
+    2. Return the highest priority network where machine is reachable
+    3. If no network works, try to get targetHost from machine nixos config
+    Args:
+        machine: Machine instance to connect to
+    Yields:
+        Remote object for connecting to the machine
+    Raises:
+        ClanError: If no connection method works
+    """
+    # Step 1: Check if targetHost is set in inventory
+    inv_machine = machine.get_inv_machine()
+    target_host = inv_machine.get("deploy", {}).get("targetHost")
+    if target_host:
+        log.debug(f"Using targetHost from inventory for {machine.name}: {target_host}")
+        # Create a direct network with just this machine
+        try:
+            remote = Remote.from_ssh_uri(machine_name=machine.name, address=target_host)
+            yield remote
+            return
+        except Exception as e:
+            log.debug(f"Inventory targetHost not reachable for {machine.name}: {e}")
+    # Step 2: Try existing networks by priority
+    try:
+        networks = networks_from_flake(machine.flake)
+        sorted_networks = sorted(networks.items(), key=lambda x: -x[1].priority)
+        for network_name, network in sorted_networks:
+            if machine.name not in network.peers:
+                continue
+            # Check if network is running and machine is reachable
+            log.debug(f"trying to connect via {network_name}")
+            if network.is_running():
+                try:
+                    ping_time = network.ping(machine.name)
+                    if ping_time is not None:
+                        log.info(
+                            f"Machine {machine.name} reachable via {network_name} network"
+                        )
+                        yield network.remote(machine.name)
+                        return
+                except Exception as e:
+                    log.debug(f"Failed to reach {machine.name} via {network_name}: {e}")
+            else:
+                try:
+                    log.debug(f"Establishing connection for network {network_name}")
+                    with network.module.connection(network) as connected_network:
+                        ping_time = connected_network.ping(machine.name)
+                        if ping_time is not None:
+                            log.info(
+                                f"Machine {machine.name} reachable via {network_name} network after connection"
+                            )
+                            yield connected_network.remote(machine.name)
+                            return
+                except Exception as e:
+                    log.debug(
+                        f"Failed to establish connection to {machine.name} via {network_name}: {e}"
+                    )
+    except Exception as e:
+        log.debug(f"Failed to use networking modules to determine machines remote: {e}")
+    # Step 3: Try targetHost from machine nixos config
+    try:
+        target_host = machine.select('config.clan.core.networking."targetHost"')
+        if target_host:
+            log.debug(
+                f"Using targetHost from machine config for {machine.name}: {target_host}"
+            )
+            # Check if reachable
+            try:
+                remote = Remote.from_ssh_uri(
+                    machine_name=machine.name, address=target_host
+                )
+                yield remote
+                return
+            except Exception as e:
+                log.debug(
+                    f"Machine config targetHost not reachable for {machine.name}: {e}"
+                )
+    except Exception as e:
+        log.debug(f"Could not get targetHost from machine config: {e}")
+    # No connection method found
+    msg = f"Could not find any way to connect to machine '{machine.name}'. No targetHost configured and machine not reachable via any network."
+    raise ClanError(msg)
 def get_network_overview(networks: dict[str, Network]) -> dict:
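get_best_remote is a context manager, so callers hold the underlying connection (for example a spawned tor daemon) only for the duration of the with block. A short usage sketch, assuming a machine named "server" exists in the local flake:

from clan_lib.flake import Flake
from clan_lib.machines.machines import Machine
from clan_lib.network.network import get_best_remote

machine = Machine(name="server", flake=Flake("."))
with get_best_remote(machine) as remote:
    # Resolution order: inventory targetHost, then reachable networks by priority,
    # then the nixos config targetHost, as documented in the function above.
    print(f"connecting to {remote.target}")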


@@ -26,46 +26,48 @@ def test_networks_from_flake(mock_get_machine_var: MagicMock) -> None:
     # Define the expected return value from flake.select
     mock_networking_data = {
-        "vpn-network": {
-            "peers": {
-                "machine1": {
-                    "name": "machine1",
-                    "host": {
-                        "var": {
-                            "machine": "machine1",
-                            "generator": "wireguard",
-                            "file": "address",
-                        }
-                    },
-                },
-                "machine2": {
-                    "name": "machine2",
-                    "host": {
-                        "var": {
-                            "machine": "machine2",
-                            "generator": "wireguard",
-                            "file": "address",
-                        }
-                    },
-                },
-            },
-            "module": "clan_lib.network.tor",
-            "priority": 1000,
-        },
-        "local-network": {
-            "peers": {
-                "machine1": {
-                    "name": "machine1",
-                    "host": {"plain": "10.0.0.10"},
-                },
-                "machine3": {
-                    "name": "machine3",
-                    "host": {"plain": "10.0.0.12"},
-                },
-            },
-            "module": "clan_lib.network.direct",
-            "priority": 500,
-        },
+        "exports": {
+            "vpn-network": {
+                "peers": {
+                    "machine1": {
+                        "name": "machine1",
+                        "host": {
+                            "var": {
+                                "machine": "machine1",
+                                "generator": "wireguard",
+                                "file": "address",
+                            }
+                        },
+                    },
+                    "machine2": {
+                        "name": "machine2",
+                        "host": {
+                            "var": {
+                                "machine": "machine2",
+                                "generator": "wireguard",
+                                "file": "address",
+                            }
+                        },
+                    },
+                },
+                "module": "clan_lib.network.tor",
+                "priority": 1000,
+            },
+            "local-network": {
+                "peers": {
+                    "machine1": {
+                        "name": "machine1",
+                        "host": {"plain": "10.0.0.10"},
+                    },
+                    "machine3": {
+                        "name": "machine3",
+                        "host": {"plain": "10.0.0.12"},
+                    },
+                },
+                "module": "clan_lib.network.direct",
+                "priority": 500,
+            },
+        }
     }
     # Mock the select method
@@ -75,7 +77,7 @@ def test_networks_from_flake(mock_get_machine_var: MagicMock) -> None:
     networks = networks_from_flake(flake)
     # Verify the flake.select was called with the correct pattern
-    flake.select.assert_called_once_with("clan.exports.instances.*.networking")
+    flake.select.assert_called_once_with("clan.?exports.instances.*.networking")
     # Verify the returned networks
     assert len(networks) == 2


@@ -0,0 +1,166 @@
import json
import logging
from collections.abc import Iterator
from contextlib import contextmanager
from dataclasses import dataclass
from pathlib import Path
from typing import Any
from clan_lib.cmd import run
from clan_lib.errors import ClanError
from clan_lib.flake import Flake
from clan_lib.network.network import Network, Peer
from clan_lib.nix import nix_shell
from clan_lib.ssh.remote import Remote
from clan_lib.ssh.socks_wrapper import tor_wrapper
log = logging.getLogger(__name__)
@dataclass(frozen=True)
class RemoteWithNetwork:
network: Network
remote: Remote
@dataclass(frozen=True)
class QRCodeData:
addresses: list[RemoteWithNetwork]
@contextmanager
def get_best_remote(self) -> Iterator[Remote]:
for address in self.addresses:
try:
log.debug(f"Establishing connection via {address}")
with address.network.module.connection(
address.network
) as connected_network:
ping_time = connected_network.module.ping(address.remote)
if ping_time is not None:
log.info(f"reachable via {address} after connection")
yield address.remote
except Exception as e:
log.debug(f"Failed to establish connection via {address}: {e}")
def read_qr_json(qr_data: dict[str, Any], flake: Flake) -> QRCodeData:
"""
Parse QR code JSON contents and output a dict of networks with remotes.
Args:
qr_data: JSON data from QR code containing network information
flake: Flake instance for creating peers
Returns:
Dictionary mapping network type to dict with "network" and "remote" keys
Example input:
{
"pass": "password123",
"tor": "ssh://user@hostname.onion",
"addrs": ["ssh://user@192.168.1.100", "ssh://user@example.com"]
}
Example output:
{
"direct": {
"network": Network(...),
"remote": Remote(...)
},
"tor": {
"network": Network(...),
"remote": Remote(...)
}
}
"""
addresses: list[RemoteWithNetwork] = []
password = qr_data.get("pass")
# Process clearnet addresses
clearnet_addrs = qr_data.get("addrs", [])
if clearnet_addrs:
for addr in clearnet_addrs:
if isinstance(addr, str):
peer = Peer(name="installer", _host={"plain": addr}, flake=flake)
network = Network(
peers={"installer": peer},
module_name="clan_lib.network.direct",
priority=1000,
)
# Create the remote with password
remote = Remote.from_ssh_uri(
machine_name="installer",
address=addr,
).override(password=password)
addresses.append(RemoteWithNetwork(network=network, remote=remote))
else:
msg = f"Invalid address format: {addr}"
raise ClanError(msg)
# Process tor address
if tor_addr := qr_data.get("tor"):
peer = Peer(name="installer-tor", _host={"plain": tor_addr}, flake=flake)
network = Network(
peers={"installer-tor": peer},
module_name="clan_lib.network.tor",
priority=500,
)
# Create the remote with password and tor settings
remote = Remote.from_ssh_uri(
machine_name="installer-tor",
address=tor_addr,
).override(
password=password,
socks_port=9050,
socks_wrapper=tor_wrapper,
)
addresses.append(RemoteWithNetwork(network=network, remote=remote))
return QRCodeData(addresses=addresses)
def read_qr_image(image_path: Path) -> dict[str, Any]:
"""
Parse a QR code image and extract the JSON data.
Args:
image_path: Path to the QR code image file
Returns:
Parsed JSON data from the QR code
Raises:
ClanError: If the QR code cannot be read or contains invalid JSON
"""
if not image_path.exists():
msg = f"QR code image file not found: {image_path}"
raise ClanError(msg)
cmd = nix_shell(
["zbar"],
[
"zbarimg",
"--quiet",
"--raw",
str(image_path),
],
)
try:
res = run(cmd)
data = res.stdout.strip()
if not data:
msg = f"No QR code found in image: {image_path}"
raise ClanError(msg)
return json.loads(data)
except json.JSONDecodeError as e:
msg = f"Invalid JSON in QR code: {e}"
raise ClanError(msg) from e
except Exception as e:
msg = f"Failed to read QR code from {image_path}: {e}"
raise ClanError(msg) from e
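Taken together, read_qr_image and read_qr_json replace the old DeployInfo.from_qr_code path: the image is decoded once, the JSON is turned into prioritized RemoteWithNetwork entries, and get_best_remote yields the first one that answers. A hedged end-to-end sketch (the image path is made up):

from pathlib import Path

from clan_lib.flake import Flake
from clan_lib.network.qr_code import read_qr_image, read_qr_json

data = read_qr_image(Path("./installer-qr.png"))
qr_code = read_qr_json(data, Flake("."))
with qr_code.get_best_remote() as remote:
    # Direct (clearnet) addresses are tried before the tor address because of their higher priority.
    remote.interactive_ssh()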


@@ -8,6 +8,8 @@ from typing import TYPE_CHECKING
 from clan_lib.errors import ClanError
 from clan_lib.network import Network, NetworkTechnologyBase, Peer
 from clan_lib.network.tor.lib import is_tor_running, spawn_tor
+from clan_lib.ssh.remote import Remote
+from clan_lib.ssh.socks_wrapper import tor_wrapper
 if TYPE_CHECKING:
     from clan_lib.ssh.remote import Remote
@@ -27,11 +29,9 @@ class NetworkTechnology(NetworkTechnologyBase):
         """Check if Tor is running by sending HTTP request to SOCKS port."""
         return is_tor_running(self.proxy)
-    def ping(self, peer: Peer) -> None | float:
+    def ping(self, remote: Remote) -> None | float:
         if self.is_running():
             try:
-                remote = self.remote(peer)
                 # Use the existing SSH reachability check
                 now = time.time()
                 remote.check_machine_ssh_reachable()
@@ -39,7 +39,7 @@ class NetworkTechnology(NetworkTechnologyBase):
                 return (time.time() - now) * 1000
             except ClanError as e:
-                log.debug(f"Error checking peer {peer.host}: {e}")
+                log.debug(f"Error checking peer {remote}: {e}")
                 return None
         return None
@@ -58,5 +58,5 @@ class NetworkTechnology(NetworkTechnologyBase):
             address=peer.host,
             command_prefix=peer.name,
             socks_port=self.proxy,
-            socks_wrapper=["torify"],
+            socks_wrapper=tor_wrapper,
         )


@@ -28,6 +28,7 @@
   "sops",
   "sshpass",
   "tor",
+  "torsocks",
   "util-linux",
   "virt-viewer",
   "virtiofsd",


@@ -18,6 +18,9 @@ from clan_lib.errors import ClanError, indent_command  # Assuming these are avai
 from clan_lib.nix import nix_shell
 from clan_lib.ssh.host_key import HostKeyCheck, hostkey_to_ssh_opts
 from clan_lib.ssh.parse import parse_ssh_uri
+from clan_lib.ssh.socks_wrapper import SocksWrapper
 from clan_lib.ssh.sudo_askpass_proxy import SudoAskpassProxy
 if TYPE_CHECKING:
@@ -42,7 +43,7 @@ class Remote:
     verbose_ssh: bool = False
     ssh_options: dict[str, str] = field(default_factory=dict)
     socks_port: int | None = None
-    socks_wrapper: list[str] | None = None
+    socks_wrapper: SocksWrapper | None = None
     _control_path_dir: Path | None = None
     _askpass_path: str | None = None
@@ -63,7 +64,7 @@ class Remote:
         private_key: Path | None = None,
         password: str | None = None,
         socks_port: int | None = None,
-        socks_wrapper: list[str] | None = None,
+        socks_wrapper: SocksWrapper | None = None,
         command_prefix: str | None = None,
         port: int | None = None,
         ssh_options: dict[str, str] | None = None,


@@ -0,0 +1,16 @@
from dataclasses import dataclass
@dataclass(frozen=True)
class SocksWrapper:
"""Configuration for SOCKS proxy wrapper commands."""
# The command to execute for wrapping network connections through SOCKS (e.g., ["torify"])
cmd: list[str]
# Nix packages required to provide the wrapper command (e.g., ["tor", "torsocks"])
packages: list[str]
# Pre-configured Tor wrapper instance
tor_wrapper = SocksWrapper(cmd=["torify"], packages=["tor", "torsocks"])
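The frozen SocksWrapper dataclass keeps the wrapper argv and the Nix packages that provide it together, so call sites no longer hard-code ["torify"]. A small hedged sketch of defining an alternative wrapper next to the bundled tor_wrapper — the proxychains wrapper here is purely illustrative, not part of clan-core:

from clan_lib.ssh.socks_wrapper import SocksWrapper, tor_wrapper

proxychains_wrapper = SocksWrapper(cmd=["proxychains4"], packages=["proxychains-ng"])

for wrapper in (tor_wrapper, proxychains_wrapper):
    # cmd prefixes the remote command, packages are added to the nix shell that runs it.
    print(wrapper.cmd, wrapper.packages)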

View File

@@ -222,7 +222,7 @@ def test_clan_create_api(
     # Invalidate cache because of new inventory
     clan_dir_flake.invalidate_cache()
-    generators = get_generators(machine.name, machine.flake.path)
+    generators = get_generators(machine=machine, full_closure=True)
     all_prompt_values = {}
     for generator in generators:
         prompt_values = {}
@@ -236,8 +236,7 @@ def test_clan_create_api(
         all_prompt_values[generator.name] = prompt_values

     run_generators(
-        machine_name=machine.name,
-        base_dir=machine.flake.path,
+        machine=machine,
         generators=[gen.name for gen in generators],
         all_prompt_values=all_prompt_values,
     )
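
To make the signature change concrete, a self-contained stand-in (these are not the real clan_cli/clan_lib APIs): callers now hand over a single Machine object instead of separate machine_name and base_dir arguments:

from dataclasses import dataclass
from pathlib import Path


@dataclass
class Machine:  # stand-in for clan_lib.machines.machines.Machine
    name: str
    flake_path: Path


def run_generators(*, machine: Machine, generators: list[str],
                   all_prompt_values: dict[str, dict[str, str]]) -> None:
    # Stand-in: the real implementation resolves name and flake from the Machine.
    print(f"running {generators} for {machine.name} in {machine.flake_path}")


machine = Machine(name="jon", flake_path=Path("/tmp/my-clan"))
run_generators(machine=machine, generators=["root-password"], all_prompt_values={})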

View File

@@ -5,10 +5,10 @@ import json
 import logging
 from typing import Any

-from clan_cli.clan.inspect import FlakeConfig, inspect_flake
 from clan_lib.dirs import user_history_file
 from clan_lib.errors import ClanError
 from clan_lib.flake import Flake
+from clan_lib.machines.machines import Machine
 from clan_lib.locked_open import read_history_file, write_history_file
 from clan_lib.machines.list import list_machines
@@ -20,14 +20,15 @@ log = logging.getLogger(__name__)
 @dataclasses.dataclass
 class HistoryEntry:
     last_used: str
-    flake: FlakeConfig
+    machine: Machine
     settings: dict[str, Any] = dataclasses.field(default_factory=dict)

     @classmethod
     def from_json(cls: type["HistoryEntry"], data: dict[str, Any]) -> "HistoryEntry":
         return cls(
             last_used=data["last_used"],
-            flake=FlakeConfig.from_json(data["flake"]),
+            # TODO: This needs to be fixed, if we every spin up the vm Manager again
+            machine=Machine(data["name"], Flake(data["flake"])),
             settings=data.get("settings", {}),
         )
@@ -66,9 +67,8 @@ def list_history() -> list[HistoryEntry]:
 def new_history_entry(url: str, machine: str) -> HistoryEntry:
-    flake = inspect_flake(url, machine)
     return HistoryEntry(
-        flake=flake,
+        machine=Machine(machine, Flake(url)),
         last_used=datetime.datetime.now(tz=datetime.UTC).isoformat(),
     )
@@ -96,8 +96,8 @@ def _add_maschine_to_history_list(
 ) -> HistoryEntry:
     for new_entry in entries:
         if (
-            new_entry.flake.flake_url == str(uri_path)
-            and new_entry.flake.flake_attr == uri_machine
+            new_entry.machine.flake.path == str(uri_path)
+            and new_entry.machine.flake.path == uri_machine
         ):
             new_entry.last_used = datetime.datetime.now(tz=datetime.UTC).isoformat()
             return new_entry
@@ -117,7 +117,7 @@ def add_history_command(args: argparse.Namespace) -> None:
 def list_history_command(args: argparse.Namespace) -> None:
     res: dict[str, list[HistoryEntry]] = {}
     for history_entry in list_history():
-        url = str(history_entry.flake.flake_url)
+        url = str(history_entry.machine.flake.path)
         if res.get(url) is None:
             res[url] = []
         res[url].append(history_entry)
@@ -127,7 +127,7 @@ def list_history_command(args: argparse.Namespace) -> None:
     for entry in entries:
         d = datetime.datetime.fromisoformat(entry.last_used)
         last_used = d.strftime("%d/%m/%Y %H:%M:%S")
-        print(f" {entry.flake.flake_attr} ({last_used})")
+        print(f" {entry.machine} ({last_used})")


 def parse_args() -> argparse.Namespace:
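
Finally, a self-contained stand-in for the reshaped history entry and the lookup it enables; StubMachine and HistoryEntry here only mimic the real clan types, and the name comparison mirrors the old flake_attr check (the diff itself compares machine.flake.path in both conditions):

import datetime
from dataclasses import dataclass, field
from typing import Any


@dataclass
class StubMachine:  # stand-in for clan_lib.machines.machines.Machine
    name: str
    flake_path: str


@dataclass
class HistoryEntry:  # mirrors the reshaped entry: a Machine instead of FlakeConfig
    last_used: str
    machine: StubMachine
    settings: dict[str, Any] = field(default_factory=dict)


def find_entry(entries: list[HistoryEntry], uri_path: str, uri_machine: str) -> HistoryEntry | None:
    for entry in entries:
        if entry.machine.flake_path == uri_path and entry.machine.name == uri_machine:
            return entry
    return None


now = datetime.datetime.now(tz=datetime.UTC).isoformat()
entries = [HistoryEntry(now, StubMachine("jon", "/home/user/my-clan"))]
print(find_entry(entries, "/home/user/my-clan", "jon"))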