Compare commits
337 Commits
12 .gitea/PULL_REQUEST_TEMPLATE.md Normal file
@@ -0,0 +1,12 @@
## Description of the change

<!-- Brief summary of the change if not already clear from the title -->

## Checklist

- [ ] Updated Documentation
- [ ] Added tests
- [ ] Doesn't affect backwards compatibility - or check the next points
- [ ] Add the breaking change and migration details to docs/release-notes.md
- !!! Review from another person is required *BEFORE* merge !!!
- [ ] Add introduction of major feature to docs/release-notes.md
@@ -17,4 +17,4 @@ jobs:

- name: Build clan-app for x86_64-darwin
run: |
nix build .#packages.x86_64-darwin.clan-app --system x86_64-darwin --log-format bar-with-logs
nix build .#packages.x86_64-darwin.clan-app --log-format bar-with-logs
@@ -1,4 +1,4 @@
Copyright 2023-2024 Clan contributors
Copyright 2023-2025 Clan contributors

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
@@ -19,28 +19,19 @@ let
nixosLib = import (self.inputs.nixpkgs + "/nixos/lib") { };
in
{
imports =
let
clanCoreModulesDir = ../nixosModules/clanCore;
getClanCoreTestModules =
let
moduleNames = attrNames (builtins.readDir clanCoreModulesDir);
testPaths = map (
moduleName: clanCoreModulesDir + "/${moduleName}/tests/flake-module.nix"
) moduleNames;
in
filter pathExists testPaths;
in
getClanCoreTestModules
++ filter pathExists [
./devshell/flake-module.nix
./flash/flake-module.nix
./installation/flake-module.nix
./update/flake-module.nix
./morph/flake-module.nix
./nixos-documentation/flake-module.nix
./dont-depend-on-repo-root.nix
];
imports = filter pathExists [
./devshell/flake-module.nix
./flash/flake-module.nix
./installation/flake-module.nix
./update/flake-module.nix
./morph/flake-module.nix
./nixos-documentation/flake-module.nix
./dont-depend-on-repo-root.nix
# clan core submodule tests
../nixosModules/clanCore/machine-id/tests/flake-module.nix
../nixosModules/clanCore/postgresql/tests/flake-module.nix
../nixosModules/clanCore/state-version/tests/flake-module.nix
];
flake.check = genAttrs [ "x86_64-linux" "aarch64-darwin" ] (
system:
let
@@ -95,11 +86,12 @@ in

# Container Tests
nixos-test-container = self.clanLib.test.containerTest ./container nixosTestArgs;
nixos-systemd-abstraction = self.clanLib.test.containerTest ./systemd-abstraction nixosTestArgs;
nixos-test-user-firewall-iptables = self.clanLib.test.containerTest ./user-firewall/iptables.nix nixosTestArgs;
nixos-test-user-firewall-nftables = self.clanLib.test.containerTest ./user-firewall/nftables.nix nixosTestArgs;
nixos-test-extra-python-packages = self.clanLib.test.containerTest ./test-extra-python-packages nixosTestArgs;

service-dummy-test = import ./service-dummy-test nixosTestArgs;
wireguard = import ./wireguard nixosTestArgs;
service-dummy-test-from-flake = import ./service-dummy-test-from-flake nixosTestArgs;
};
@@ -120,7 +112,7 @@ in
) (self.darwinConfigurations or { })
// lib.mapAttrs' (n: lib.nameValuePair "package-${n}") (
if system == "aarch64-darwin" then
lib.filterAttrs (n: _: n != "docs" && n != "deploy-docs" && n != "docs-options") packagesToBuild
lib.filterAttrs (n: _: n != "docs" && n != "deploy-docs" && n != "option-search") packagesToBuild
else
packagesToBuild
)
@@ -15,7 +15,6 @@ let
networking.useNetworkd = true;
services.openssh.enable = true;
services.openssh.settings.UseDns = false;
services.openssh.settings.PasswordAuthentication = false;
system.nixos.variant_id = "installer";
environment.systemPackages = [
pkgs.nixos-facter
@@ -27,6 +27,7 @@
modules.new-service = {
_class = "clan.service";
manifest.name = "new-service";
manifest.readme = "Just a sample readme to not trigger the warning.";
roles.peer = {
description = "A peer that uses the new-service to generate some files.";
};
@@ -34,6 +34,7 @@ nixosLib.runTest (
modules.new-service = {
_class = "clan.service";
manifest.name = "new-service";
manifest.readme = "Just a sample readme to not trigger the warning.";
roles.peer = {
description = "A peer that uses the new-service to generate some files.";
};
67 checks/systemd-abstraction/default.nix Normal file
@@ -0,0 +1,67 @@
{ self, pkgs, ... }:

let

cli = self.packages.${pkgs.hostPlatform.system}.clan-cli-full;
in
{
name = "systemd-abstraction";

nodes = {
peer1 = {

users.users.text-user = {
isNormalUser = true;
linger = true;
uid = 1000;
extraGroups = [ "systemd-journal" ];
};

# Set environment variables for user systemd
environment.extraInit = ''
if [ "$(id -u)" = "1000" ]; then
export XDG_RUNTIME_DIR="/run/user/1000"
export DBUS_SESSION_BUS_ADDRESS="unix:path=/run/user/1000/bus"
fi
'';

# Enable PAM for user systemd sessions
security.pam.services.systemd-user = {
startSession = true;
# Workaround for containers - use pam_permit to avoid helper binary issues
text = pkgs.lib.mkForce ''
account required pam_permit.so
session required pam_permit.so
session required pam_env.so conffile=/etc/pam/environment readenv=0
session required ${pkgs.systemd}/lib/security/pam_systemd.so
'';
};

environment.systemPackages = [
cli
(cli.pythonRuntime.withPackages (
ps: with ps; [
pytest
pytest-xdist
]
))
];
};
};

testScript =
{ ... }:
''
start_all()

peer1.wait_for_unit("multi-user.target")
peer1.wait_for_unit("user@1000.service")

# Fix user journal permissions so text-user can read their own logs
peer1.succeed("chown text-user:systemd-journal /var/log/journal/*/user-1000.journal*")
peer1.succeed("chmod 640 /var/log/journal/*/user-1000.journal*")

# Run tests as text-user (environment variables are set automatically)
peer1.succeed("su - text-user -c 'pytest -s -n0 ${cli}/${cli.pythonRuntime.sitePackages}/clan_lib/service_runner'")
'';
}
26 checks/test-extra-python-packages/default.nix Normal file
@@ -0,0 +1,26 @@
(
{ ... }:
{
name = "test-extra-python-packages";

extraPythonPackages = ps: [ ps.numpy ];

nodes.machine =
{ ... }:
{
networking.hostName = "machine";
};

testScript = ''
import numpy as np

start_all()
machine.wait_for_unit("multi-user.target")

# Test availability of numpy
arr = np.array([1, 2, 3])
print(f"Numpy array: {arr}")
assert len(arr) == 3
'';
}
)
@@ -1,115 +0,0 @@
{
pkgs,
nixosLib,
clan-core,
lib,
...
}:
nixosLib.runTest (
{ ... }:

let
machines = [
"controller1"
"controller2"
"peer1"
"peer2"
"peer3"
];
in
{
imports = [
clan-core.modules.nixosTest.clanTest
];

hostPkgs = pkgs;

name = "wireguard";

clan = {
directory = ./.;
modules."@clan/wireguard" = import ../../clanServices/wireguard/default.nix;
inventory = {

machines = lib.genAttrs machines (_: { });

instances = {

/*
wg-test-one
┌───────────────────────────────┐
│ ◄───────────── │
│ controller2 controller1
│ ▲ ─────────────► ▲ ▲
│ │ │ │ │ │ │ │ │
│ │ │ │ │ │ │ │ │
│ │ │ │ │ │ │ │ │
│ │ │ │ └───────────────┐ │ │ │ │
│ │ │ └──────────────┐ │ │ │ │ │
│ ▼ │ ▼ ▼ ▼
└─► peer2 │ peer1 peer3
│ ▲
└──────────┘
*/

wg-test-one = {

module.name = "@clan/wireguard";
module.input = "self";

roles.controller.machines."controller1".settings = {
endpoint = "192.168.1.1";
};

roles.controller.machines."controller2".settings = {
endpoint = "192.168.1.2";
};

roles.peer.machines = {
peer1.settings.controller = "controller1";
peer2.settings.controller = "controller2";
peer3.settings.controller = "controller1";
};
};

# TODO: Will this actually work with conflicting ports? Can we re-use interfaces?
#wg-test-two = {
# module.name = "@clan/wireguard";

# roles.controller.machines."controller1".settings = {
# endpoint = "192.168.1.1";
# port = 51922;
# };

# roles.peer.machines = {
# peer1 = { };
# };
#};
};
};
};

testScript = ''
start_all()

# Show all addresses
machines = [peer1, peer2, peer3, controller1, controller2]
for m in machines:
m.systemctl("start network-online.target")

for m in machines:
m.wait_for_unit("network-online.target")
m.wait_for_unit("systemd-networkd.service")

print("\n\n" + "="*60)
print("STARTING PING TESTS")
print("="*60)

for m1 in machines:
for m2 in machines:
if m1 != m2:
print(f"\n--- Pinging from {m1.name} to {m2.name}.wg-test-one ---")
m1.wait_until_succeeds(f"ping -c1 {m2.name}.wg-test-one >&2")
'';
}
)
25 clanServices/admin/README.md Normal file
@@ -0,0 +1,25 @@
The admin service aggregates components that allow an administrator to log in to and manage the machine.

The following configuration:

1. Enables OpenSSH with root login and adds an SSH public key named`myusersKey` to the machine's authorized_keys via the `allowedKeys` setting.

2. Automatically generates a password for the root user.

```nix
instances = {
admin = {
roles.default.tags = {
all = { };
};
roles.default.settings = {
allowedKeys = {
myusersKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEFDNnynMbFWatSFdANzbJ8iiEKL7+9ZpDaMLrWRQjyH lhebendanz@wintux";
};
};
};
};
```
@@ -3,6 +3,7 @@
manifest.name = "clan-core/admin";
manifest.description = "Adds a root user with ssh access";
manifest.categories = [ "Utility" ];
manifest.readme = builtins.readFile ./README.md;

roles.default = {
description = "Placeholder role to apply the admin service";
@@ -2,7 +2,7 @@ let
public-key = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII6zj7ubTg6z/aDwRNwvM/WlQdUocMprQ8E92NWxl6t+ test@test";
in
{
name = "service-admin";
name = "admin";

clan = {
directory = ./.;
@@ -0,0 +1 @@
25.11
@@ -0,0 +1 @@
25.11
@@ -3,7 +3,7 @@
...
}:
{
name = "service-borgbackup";
name = "borgbackup";

clan = {
directory = ./.;
@@ -0,0 +1 @@
25.11
@@ -0,0 +1 @@
25.11
@@ -9,7 +9,7 @@ in
perSystem =
{ ... }:
{
clan.nixosTests.service-data-mesher = {
clan.nixosTests.data-mesher = {
imports = [ ./tests/vm/default.nix ];
clan.modules."@clan/data-mesher" = module;
};
@@ -2,7 +2,7 @@
...
}:
{
name = "service-data-mesher";
name = "data-mesher";

clan = {
directory = ./.;
@@ -0,0 +1 @@
25.11
@@ -0,0 +1 @@
25.11
@@ -0,0 +1 @@
25.11
@@ -3,7 +3,7 @@
...
}:
{
name = "service-dyndns";
name = "dyndns";

clan = {
directory = ./.;
6 clanServices/dyndns/tests/vm/sops/machines/server/key.json Executable file
@@ -0,0 +1,6 @@
[
{
"publickey": "age164wrhlnake7f7duhzs936lq6w49dtg53hcdyxqwxj0agad6tqg2s2u4yta",
"type": "age"
}
]
@@ -0,0 +1,14 @@
{
"data": "ENC[AES256_GCM,data:seLxbv590dO0KvMJmtN7WVvUcH27VYwAc3rmyD7q6ZmwCgswOKx55LFnh0stRDKSZa8K7Dq1x7D9adhZtPAMWX8tbJswBeNMPt8=,iv:G52eugxfTi0tTzH4EN4CWmpyv6feSL34++UVSjb0aAo=,tag:6r10/a7kD2hBAmae0nz2OQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBHVC8wZUZJYUl5MXVNa2k5\ndGV1MnFWbUNLNVdxeEtCVUc3MTd0ck9aeFFBCnFhZW40amVYc3FlN1FPRTFSWTJR\nQzhNOERKbnRnSlJVeElNSEM5ZUJsZGsKLS0tIG1uNnlNN3MweHlYczNRTW9xSytu\neThzUmxKZTJBT2lCcTdiNUI4N3paTVEKgS9j2/GVt1KBoggUj9d6UK/mIlK4niLQ\nzVq2BHt3irxQpkpGUogXH2b86zSAOEJFzsL1Rk8HM1mogTG8jqf0qA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-10-19T12:49:11Z",
"mac": "ENC[AES256_GCM,data:T/2xw2mvUi8YALyxz78qG/g/xguoUTeHNzcZfXwwSyCXMg9ircsGGLO9SOVWy/QNkibnw3Yp80tXNJyr4oJH28PhFH7RrRp8jzNdopF49ZNJb2IqJ3C7xNYRZMHfjOCd/raka+ehZq8YGilEpXUWLRk1ere9lbBMh1ycL7jJS3c=,iv:FZbY/jTNPM+p4qD41FD0K7B9zoppGuvnUY5hL/EkmYM=,tag:IF5QTyUkHXWthlAGBn9R8w==,type:str]",
"version": "3.11.0"
}
}
@@ -0,0 +1 @@
25.11
@@ -0,0 +1 @@
../../../../../sops/machines/server
@@ -0,0 +1,18 @@
{
"data": "ENC[AES256_GCM,data:Zu+n+DDYP7rQRTS17PJ6Apo=,iv:5WOs81Pj+S85kdC1AlOXSyPMGDfwM5UD8x7nyRZtRYQ=,tag:2JYkGnLugAni49Upv43o2g==,type:str]",
"sops": {
"age": [
{
"recipient": "age164wrhlnake7f7duhzs936lq6w49dtg53hcdyxqwxj0agad6tqg2s2u4yta",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBlR3RGQ2ZLTkR3ZWxNVCsv\naXJHRjBiVUVYZVRIY2swY2xubGhmb3pLRkNvCldhQUV2WDlqYjZ4ZUFWYXkvUEEw\nZi9XRWw0Mi9mRENDcnI0aENDR2Z4MHcKLS0tIGFQU3Q4WEErbnBjOHpNR1BSR2cr\nRFg0anE1cHExT0sySmxuUks1R05nczAKZO3R6+f9co2+YGO8HPufoq1fLqqrdTWD\n4zqemMmG2BjMRDumxtcKp8CLaZWlJoP4e/+tonfdoe42qmNF5NJcFw==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBzZWo4WGh1cWxKeDhDdlBm\nTVFjVFBIUU9xaGRkanNHaUVUUHN1czNRSUhNCkp5MmwzSGdycmsrZGhaRUhEbXBF\nNUhtdEF6bHZQOGJYUVhFVHlYc3FPODAKLS0tIDBRQ2VGT2IvU1F4MEVabzhYSFJq\nOWZmbGpkQmNSMnNKa0s4K2JXdGgwRlkKUQRREpG5H1mNHSc/cZrdMiSz0veJFR4N\n+W49XL/wQUZwajykwYj++G+dWDO7DQ+fpbB9w4mzbsAmCsXirseTLA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-10-19T12:49:11Z",
"mac": "ENC[AES256_GCM,data:0msda7WbQQxXQ+juT7yErgT7NADgnzqEZLTQw+4JPuAE4xcqRIYwrrAALaA0GCCM2aIWlICzJigLCuzQUfSUbIzeP79tEHiKez+NOt/xgSM9ljz7GlsmLd0vzkxdt3WSxP+sHxy0S866N2sLMUkLqPGdqeTjB+Jji5ghGhzk9ys=,iv:8UU7iA4SdR6ZlVolm708l2Iea0sQYRT+5wPBBP5tpS0=,tag:VQXslAlqLqs1QEkwW6x6qg==,type:str]",
"version": "3.11.0"
}
}
@@ -0,0 +1 @@
../../../../../sops/users/admin
12 clanServices/garage/README.md Normal file
@@ -0,0 +1,12 @@
[Garage](https://garagehq.deuxfleurs.fr/) is an open-source, S3-compatible distributed object storage service for self-hosting.

This module provisions a single-instance S3 bucket. To customize its behavior, set `services.garage.settings` in your Nix configuration.

Example configuration:
```
instances = {
garage = {
roles.default.machines."server" = {};
};
};
```
@@ -4,6 +4,7 @@
manifest.name = "clan-core/garage";
manifest.description = "S3-compatible object store for small self-hosted geo-distributed deployments";
manifest.categories = [ "System" ];
manifest.readme = builtins.readFile ./README.md;

roles.default = {
description = "Placeholder role to apply the garage service";
@@ -3,7 +3,7 @@
...
}:
{
name = "service-garage";
name = "garage";

clan = {
directory = ./.;
@@ -0,0 +1 @@
25.11
1 clanServices/hello-world/README.md Normal file
@@ -0,0 +1 @@
This a test README just to appease the eval warnings if we don't have one
@@ -9,6 +9,7 @@
_class = "clan.service";
manifest.name = "clan-core/hello-word";
manifest.description = "This is a test";
manifest.readme = builtins.readFile ./README.md;

# This service provides two roles: "morning" and "evening". Roles can be
# defined in this file directly (e.g. the "morning" role) or split up into a
@@ -34,10 +35,13 @@
settings,

# The name of this instance of the service
instanceName,

# The current machine
machine,

# All roles of this service, with their assigned machines
roles,
...
}:
{

@@ -1,5 +1,5 @@
{
name = "service-hello-service";
name = "hello-service";

clan = {
directory = ./.;
27 clanServices/internet/README.md Normal file
@@ -0,0 +1,27 @@
🚧🚧🚧 Experimental 🚧🚧🚧

Use at your own risk.

We are still refining its interfaces, instability and breakages are expected.

---

This module is part of Clan's [networking interface](https://docs.clan.lol/guides/networking/networking/).

Clan's networking module automatically manages connections across available network transports and falls back intelligently. When you run `clan ssh` or `clan machines update`, Clan attempts each configured network in priority order until a connection succeeds.

The example below shows how to configure a domain so server1 is reachable over the clearnet. By default, the `internet` module has the highest priority among networks.

```nix
inventory.instances = {
# Direct SSH with fallback support
internet = {
roles.default.machines.server1 = {
settings.host = "server1.example.com";
};
roles.default.machines.server2 = {
settings.host = "192.168.1.100";
};
};
};
```
@@ -7,6 +7,7 @@
"System"
"Network"
];
manifest.readme = builtins.readFile ./README.md;
roles.default = {
description = "Placeholder role to apply the internet service";
interface =
28 clanServices/kde/README.md Normal file
@@ -0,0 +1,28 @@
This module sets up the [KDE Plasma](https://kde.org) Desktop environment.

!!! Note "Customisation"
    This service intentionally does not provide any settings or customisation
    options, as desktop preferences are highly subjective. Clan currently
    supports only this default desktop configuration. Any additional
    customisation can be done via the `extraModules` option. Furthermore, if you
    want to use a different desktop environment or compositor (e.g. Gnome or
    sway), we encourage you to to build your own
    [Clan Service](https://docs.clan.lol/guides/services/community/) or have a
    look at the [Community Services](https://docs.clan.lol/services/community/).

## Example Usage

```nix
inventory = {
instances = {
kde = {

# Deploy on all machines
roles.default.tags.all = { };

# Or individual hosts
roles.default.machines.laptop = { };
};
};
};
```
19 clanServices/kde/default.nix Normal file
@@ -0,0 +1,19 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "clan-core/kde";
manifest.description = "Sets up a graphical desktop environment";
manifest.categories = [ "Desktop" ];
manifest.readme = builtins.readFile ./README.md;

roles.default = {
description = "KDE/Plasma (wayland): Full-featured desktop environment with modern Qt-based interface";
perInstance.nixosModule = {
services = {
displayManager.sddm.enable = true;
displayManager.sddm.wayland.enable = true;
desktopManager.plasma6.enable = true;
};
};
};
}
24 clanServices/kde/flake-module.nix Normal file
@@ -0,0 +1,24 @@
{
self,
lib,
...
}:
let
module = lib.modules.importApply ./default.nix {
inherit (self) packages;
};
in
{
clan.modules = {
kde = module;
};
perSystem =
{ ... }:
{
clan.nixosTests.kde = {
imports = [ ./tests/vm/default.nix ];

clan.modules.kde = module;
};
};
}
30 clanServices/kde/tests/vm/default.nix Normal file
@@ -0,0 +1,30 @@
{
name = "kde";

clan = {
directory = ./.;
inventory = {

machines.client = { };

instances = {
kde = {
module.name = "kde";
module.input = "self";
roles.default.machines."client" = { };
};
};
};
};

testScript = ''
start_all()

client.systemctl("start network-online.target")
client.wait_for_unit("network-online.target")

client.wait_for_unit("graphical.target")
client.wait_for_unit("display-manager.service")
client.succeed("systemctl status display-manager.service")
'';
}
4 clanServices/kde/tests/vm/sops/users/admin/key.json Normal file
@@ -0,0 +1,4 @@
{
"publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"type": "age"
}
@@ -0,0 +1 @@
25.11
@@ -1,6 +1,6 @@
{ ... }:
{
name = "service-localbackup";
name = "localbackup";

clan = {
directory = ./.;
@@ -0,0 +1,4 @@
{
"publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"type": "age"
}
@@ -0,0 +1 @@
25.11
23 clanServices/matrix-synapse/README.md Normal file
@@ -0,0 +1,23 @@
This NixOS module installs and configures Synapse — a federated Matrix homeserver with end-to-end encryption — and optionally provides the Element web client.

The example below demonstrates a minimal setup that includes:

- Element web client.
- Synapse backed by PostgreSQL and nginx.
- An admin user and an additional regular user.

Example configuration:

```nix
instances = {
matrix-synapse = {
roles.default.machines."jon".settings = {
acmeEmail = "admins@clan.lol";
server_tld = "clan.test";
app_domain = "matrix.clan.test";
users.admin.admin = true;
users.someuser = { };
};
};
};
```
@@ -4,6 +4,7 @@
manifest.name = "clan-core/matrix-synapese";
manifest.description = "A federated messaging server with end-to-end encryption.";
manifest.categories = [ "Social" ];
manifest.readme = builtins.readFile ./README.md;

roles.default = {
description = "Placeholder role to apply the matrix-synapse service";
20 clanServices/mycelium/README.md Normal file
@@ -0,0 +1,20 @@

[Mycelium](https://github.com/threefoldtech/mycelium) is an end-to-end encrypted IPv6 overlay network that spans the globe.

## Features
- Locality-aware routing: finds the shortest path between nodes.
- All traffic is end-to-end encrypted.
- Can route traffic via friend nodes and is location-aware.
- Automatic rerouting if a physical link goes down.
- IPv6 addresses are derived from private keys.
- A simple, reliable message bus is implemented on top of Mycelium.
- Supports multiple transports (QUIC, TCP, …). Hole punching for QUIC is in progress to enable true P2P connectivity behind NATs.
- Designed for planetary-scale scalability; previous overlay networks reached practical limits, and Mycelium focuses on scaling.
- Can run without a TUN device and be used solely as a reliable message bus.

Example configuration below connects all your machines to the Mycelium network:
```nix
mycelium = {
roles.peer.tags.all = {};
};
```
@@ -7,6 +7,7 @@
"System"
"Network"
];
manifest.readme = builtins.readFile ./README.md;

roles.peer = {
description = "A peer in the mycelium network";
@@ -3,7 +3,7 @@
...
}:
{
name = "service-mycelium";
name = "mycelium";

clan = {
test.useContainers = false;
@@ -0,0 +1 @@
25.11
11 clanServices/packages/README.md Normal file
@@ -0,0 +1,11 @@
This service is meant to be consumed by the UI / API, and exposes a JSON serializable interface to add packages to a machine over the inventory.

The example below demonstrates installing the "cbonsai" application to a machine named "server.

```
instances.packages = {
roles.default.machines."server".settings = {
packages = [ "cbonsai" ];
};
};
```
@@ -6,6 +6,7 @@
manifest.categories = [
"System"
];
manifest.readme = builtins.readFile ./README.md;

roles.default = {
description = "Placeholder role to apply the packages service";
@@ -1,5 +1,5 @@
{
name = "service-packages";
name = "packages";

clan = {
directory = ./.;
@@ -0,0 +1 @@
25.11
@@ -1,16 +1,16 @@
# Clan service: sshd
What it does

## What it does
- Generates and persists SSH host keys via `vars`.
- Optionally issues CA‑signed host certificates for servers.
- Installs the `server` CA public key into `clients` `known_hosts` for TOFU‑less verification.
- Optionally issues CA-signed host certificates for servers.
- Installs the `server` CA public key into `clients` `known_hosts` for TOFU-less verification.


When to use it
- Zero‑TOFU SSH for dynamic fleets: admins/CI can connect to frequently rebuilt hosts (e.g., server-1.example.com) without prompts or per‑host `known_hosts` churn.
## When to use it
- Zero-TOFU SSH for dynamic fleets: admins/CI can connect to frequently rebuilt hosts (e.g., server-1.example.com) without prompts or per-host `known_hosts` churn.

Roles
- Server: runs sshd, presents a CA‑signed host certificate for `<machine>.<domain>`.
- Client: trusts the CA for the given domains to verify servers’ certificates.
### Roles
- Server: runs sshd, presents a CA-signed host certificate for `<machine>.<domain>`.
- Client: trusts the CA for the given domains to verify servers' certificates.
Tip: assign both roles to a machine if it should both present a cert and verify others.

Quick start (with host certificates)
@@ -80,12 +80,13 @@ Admins should trust only production; CI should trust prod and staging. Servers a
};
}
```
- Admin -> server1.prod.example.com: zero‑TOFU (verified via cert).
### Explanation
- Admin -> server1.prod.example.com: zero-TOFU (verified via cert).
- Admin -> server1.staging.example.com: falls back to TOFU (or is blocked by policy).
- CI -> either prod or staging: zero‑TOFU for both.
Note: server and client searchDomains don’t have to be identical; they only need to overlap for the hostnames you actually use.
- CI -> either prod or staging: zero-TOFU for both.
Note: server and client searchDomains don't have to be identical; they only need to overlap for the hostnames you actually use.

Notes
### Notes
- Connect using a name that matches a cert principal (e.g., `server1.example.com`); wildcards are not allowed inside the certificate.
- CA private key stays in `vars` (not deployed); only the CA public key is distributed.
- Logins still require your user SSH keys on the server (passwords are disabled).
@@ -2,7 +2,7 @@
{
_class = "clan.service";
manifest.name = "clan-core/sshd";
manifest.description = "Enables secure remote access to the machine over SSH";
manifest.description = "Enables secure remote access to the machine over SSH with automatic host key management and optional CA-signed host certificates.";
manifest.categories = [
"System"
"Network"
@@ -180,7 +180,9 @@
settings.PasswordAuthentication = false;

settings.HostCertificate = lib.mkIf (
# this check needs to go first, as otherwise generators.openssh-cert does not exist
settings.certificate.searchDomains != [ ]
&& config.clan.core.vars.generators.openssh-cert.files."ssh.id_ed25519-cert.pub".exists
) config.clan.core.vars.generators.openssh-cert.files."ssh.id_ed25519-cert.pub".path;

hostKeys = [
@@ -13,6 +13,11 @@ in
clan.nixosTests.sshd = {
imports = [ ./tests/vm/default.nix ];

clan.modules."@clan/sshd" = module;
};
clan.nixosTests.sshd-no-search-domains = {
imports = [ ./tests/vm/no-search-domains.nix ];

clan.modules."@clan/sshd" = module;
};
};
@@ -3,7 +3,7 @@
...
}:
{
name = "service-sshd";
name = "sshd";

clan = {
directory = ./.;
45 clanServices/sshd/tests/vm/no-search-domains.nix Normal file
@@ -0,0 +1,45 @@
/*
This is a regression test for the following error:
error: attribute 'openssh-cert' missing
at /nix/store/y1k4bqwjql6bhlry456cs4marpamiqlr-source/clanServices/sshd/default.nix:184:17:
183| # this check needs to go first, as otherwise generators.openssh-cert does not exist
184| config.clan.core.vars.generators.openssh-cert.files."ssh.id_ed25519-cert.pub".exists
| ^
185| && settings.certificate.searchDomains != [ ]
*/
{
...
}:
{
name = "sshd";

clan = {
directory = ./.;
inventory = {
machines.server = { };
machines.client = { };

instances = {
sshd-test = {
module.name = "@clan/sshd";
module.input = "self";
roles.server.machines."server".settings = {
hostKeys.rsa.enable = true;
};
roles.client.machines."client".settings = {
};
};
};
};
};

nodes = {
server = { };
client = { };
};

testScript = ''
# don't do anything, just evaluate the machines
exit(0)
'';
}
@@ -0,0 +1 @@
25.11
@@ -0,0 +1 @@
25.11
@@ -0,0 +1 @@
../../../../../sops/machines/client
@@ -0,0 +1 @@
../../../../../sops/machines/server
@@ -2,9 +2,17 @@
"data": "ENC[AES256_GCM,data:Qje3bXRHcAiFCslFfAeUTcOn2woc06e1sLAoH16x1sZ7N0i07rHqwsBjn7nKvMee6tktIjLMGTPOQL9TLgYI+wDgU5MHqlZlVBnYLk+VXYEKhymDlS3RAg5pbrmJzkucl/Vw5VBlDK+n8qnrgFG2pgpiC6Lzb5f4I7pxUl4zcz9jCf0Stj10lVQgkbvnr8UMUvcb1vUF/EAhF6WhhhgrhpbYrK+PkNW6EXmxIXdUdXvSqVWvJby2NgBaGOM1lwnWv4LOOiJ1BNXnOnLE0a8+8SjqahzqroeFvUCtoFxJ45/LqFTtgS9cQ44FaqrMRSWlOgsNR2BkeT449fKUCwYfEW1g/R47I9hAm/bcKrOBpHdAji/hpcDy73Qb9ZKrY6/0kRXl/ECI3YVX8nrHIjEJpoZ7a/dxJu7z8OOdb7gP2mVPTiWcbpD/KwpefDWLKnYhBrXUNaqSajf95Ahln2Vn6sALbbjcUzs/x5OqchYe1/kYy7MaGbEFNPeINnGid73c7xfUoyI90Ho4azLW8Q/n,iv:9heXX8g1P8/4gGT3+RYYmz6rJ5EnIDr5w1OAbGybL+I=,tag:p6mHZ8+EaJ+Nyn59n2TQ4g==,type:str]",
"sops": {
"age": [
{
"recipient": "age124l8cfswl97ck0e0qw8l47usf375srn69e4mhxr3gr40erxw7pesftxshx",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA5b0Y3aldQWXJyZzU3aGZU\naWpOK1VXdUVlSUVvZzJqSDVCekQ0Z2JxblRvCmFWd0c1SC9jckMweTIrNmg0VnN3\ncTNwMkVTL0ZyT3RGeUs2NnlNb21GWTQKLS0tIEpGM2FacUNoakJ0dlJwMWZMNU0x\nVWhmV1pDOTRFNExpVHBTRUx2L2ZaMVUK+ka9oqcvoLjwwUqIol0rU1VsJHhs4S1P\nWpKKPetUPEF4xxWj0OdeMc40XCTjl6CBdbtcrslH3tuZHjeSWQ6QCg==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBkNlpCNGR0SHVtVzJ2K0hM\ncng1ZWlTSGQzWndXVWhlbEZMN25TUklXV1I4Cm9PZkJ3Zi81YXoxdzVPTFI3dE1a\nUnJnUmJPbi9CYmdFL2ltaTRSQ1MreFUKLS0tIDE4Y05IamJjL0huY2l2YU4zbDJs\ndTl1b0wzaTM4MndlcFZYVThqbWtVeFUKuSZLJpUrccuusJPU2xWHw19wTN8mKZW3\n1GJJjlb79rZp/RbSMxFxkyVHgu+F9kbpRgViICJSWkeR495786oArA==\n-----END AGE ENCRYPTED FILE-----\n"
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBLM3lNL1UyUjhRdFBvenlv\naXBTYXJva0hCTS8vK1hCYmdQdlpBNGFNTkNrCmVTbFROTWVWRzZyc2E0SjVDT1hs\nMm9qT0plcHNJaWczcWJTd0llYldGMVkKLS0tIGh6M04yQ3RiMGVYbTNYMEdCVTNK\nUDhaS3FSVWl4ZGlPYTRodW80VElsZ1UKxi48UZpxuu7gkRtQrCi4//suOpuFY6sl\n8b1xcN/tMn2MWW9hvx4K4qJLXTWOm+9GiZqJ51JBb0hihRh7fC3SfA==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1f39qxz84yv272wk636el0kdyagzudcs99ucpkjarsj2rey6yvccse9lwet",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBGK2Z5OGFsVjBJcGFITENH\nL205TDBaNkRvQ3ZBbGVDU0wrWnl5QkJaSFZNCjhNSXpMTTd1ejlqbVNBUDZ2TlFn\neUhNdEh4M1RhQnFabHpaMVd5eG11THcKLS0tIE9PS3JlVFVPNG5sM3hpWG95V1ho\nSXBySU1SUExkVHNHZElmQWExTVN3cHMKvzdlCWP8/9xviu/9AMxw/4ZyXo4O3AE9\n84IQBDO4GYrqnXvOroxjsNCDrCBDH0WPuYAphctJvyI5SSAtL4uHhQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-06-09T19:51:28Z",
@@ -1,20 +1,24 @@
## Usage
This service configures Syncthing to continuously synchronize a folder peer-to-peer across your machines.

Example configuration:

```nix
{
instances.syncthing = {
roles.peer.tags.all = { };
roles.peer.settings.folders = {
documents = {
path = "~/syncthing/documents";
};
};
instances.syncthing = {
roles.peer.tags.all = { };
roles.peer.settings.folders = {
documents = {
path = "/home/youruser/syncthing/documents";
};
};
};
}
```

Now the folder `~/syncthing/documents` will be shared and kept in sync with all your machines.
Notes:
- Each key under `folders` is a folder ID (an arbitrary identifier for Syncthing).
- Prefer absolute paths (example shown). `~` may work in some environments but can be ambiguous in service contexts.


## Documentation
Extensive documentation is available on the [Syncthing](https://docs.syncthing.net/) website.
See the official Syncthing docs: https://docs.syncthing.net/
@@ -1,5 +1,5 @@
{
name = "service-syncthing-service";
name = "syncthing-service";

clan = {
directory = ./.;
22 clanServices/tor/README.md Normal file
@@ -0,0 +1,22 @@
🚧🚧🚧 Experimental 🚧🚧🚧

Use at your own risk.

We are still refining its interfaces, instability and breakages are expected.

---

This module is part of Clan's [networking interface](https://docs.clan.lol/guides/networking/networking/).

Clan's networking module automatically manages connections across available network transports and falls back intelligently. When you run `clan ssh` or `clan machines update`, Clan attempts each configured network in priority order until a connection succeeds.

The example below configures all your nixos machines to be reachable over the Tor network. By default, the `tor` module has the lowest priority among networks, as it's the slowest.

```nix
inventory.instances = {
# Fallback: Secure connections via Tor
tor = {
roles.server.tags.nixos = { };
};
};
```
@@ -7,6 +7,7 @@
"System"
"Network"
];
manifest.readme = builtins.readFile ./README.md;

roles.client = {
description = ''
@@ -1,5 +1,5 @@
{
name = "service-trusted-nix-caches";
name = "trusted-nix-caches";

clan = {
directory = ./.;
@@ -0,0 +1 @@
25.11
@@ -1,5 +1,5 @@
{
name = "service-users";
name = "users";

clan = {
directory = ./.;
@@ -0,0 +1 @@
25.11
@@ -1,5 +1,5 @@
{
name = "service-wifi";
name = "wifi";

clan = {
directory = ./.;
6 clanServices/wifi/tests/vm/sops/machines/second/key.json Executable file
@@ -0,0 +1,6 @@
[
{
"publickey": "age1afr59kwlpuapv2g9m0sa4k9yc22ulj7zcway538z7nnsgvf2re5qd7k03a",
"type": "age"
}
]
@@ -0,0 +1,14 @@
{
"data": "ENC[AES256_GCM,data:xAQ6TcwttQNb1BXlq2j+xZX+vGbqV5XKZyZMBAnQ00hWLu6Ba6pN2qR4HItKQ1KWza8WGhzgGdcwZv6Qobgmp9wAcwWlAubS7FU=,iv:KalRsDqWxqscJOeZcnQfFkP7QTBVZpP8XAdPHvikfaM=,tag:EAndFTqRYRcCgD5/ixniBA==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBjZnpWd0grazhjak0ra3NK\nSEpVTWVEaXEzc3ZIeloySXBqVk01SVNsVmtzCk5weWJCSGNlb2FRM0RWVEYwTHlC\nTVFBd1cvYlZrNlQ4TFRydFNzWXNicE0KLS0tIFZ5WllRNEI0Q3hQaVU2WjhGNDFr\nLy9pSXU5Y2FINy9LbXg4dzdHREQveE0KyxWDDyRpo0eTIXj0lHQtOunLtAP/Q+70\n+GvfjW7WXHXvXyg6CXzpCy6F6UWie4LHO9VrJM2mTjoh+q4l5DT6CA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-10-19T12:53:35Z",
"mac": "ENC[AES256_GCM,data:P6Wcm7daPMe6laceFIiIzhcGa9k37Fo5ZnBFhdVmkATuR2oqMZp9Ke5r73SbC5B95QoPnWVNNnrxkn/oiVQmSiiDaf718isLAfU+7zGkV0BZCtfCrqe82JzH2iQ0tKSxsVJqklCijAfUBGpt/EYN6c4QnM2IFulbiWs2kqWMi4I=,iv:mRuTg2RblZCNX3yQNFKCtuwjwIptZ5VGpSyLV6Ah5NI=,tag:Xl8/jrtIhxfAi+FrnwKh7Q==,type:str]",
"version": "3.11.0"
}
}
@@ -0,0 +1 @@
25.11
@@ -0,0 +1 @@
25.11
@@ -0,0 +1 @@
../../../../../sops/machines/second
@@ -0,0 +1,18 @@
{
"data": "ENC[AES256_GCM,data:B08uqk2DxB8QJ93QBM71,iv:XawCB0nwWxso5+yC8az33cFnt/qKzITOwUP0ZFI20Ho=,tag:mZg5U4t9beHch+Oic2VsVw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1afr59kwlpuapv2g9m0sa4k9yc22ulj7zcway538z7nnsgvf2re5qd7k03a",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBLWEV0STNnMlRreElVSk1y\nRTB6cCtWc0tkNUEycklSU0x5cHZwN3FIQkcwCkJQemRZOWZFSit3QmZ2a3RrOGFh\nUEhLSEZLRW9lWkN2b1VKK2EybUYvcTQKLS0tIGt6VWFmVHpNc2c0T3E0TjBYeWp1\ncjFCcEd3MTlYVGRsRlpXWWRsWlU0dkEKe5NUijC+GVxzj8bMEY6v+qHw9iylpwQz\nFLKneLikKVcRYoTYecimaQdUbYCiEwUB7KCpENcNmjZx6eVmTvGeMQ==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6T1hheHNaZ2h6Mlc5WEJq\nUk8zTC82N2ZHUUJQK0VLdXBkNEFnNlRHamh3CitrWGpVVGQreDRZdTF5L2VXNENn\neThUd0tjUWpCNW01QklZakx6NnNoT28KLS0tIGVOLytqUGtsaWhFbi9mU2ttMmow\nOFhwVmkyVDdZK09ramZOYktFdkV5R1EKWGseGbOtLS45gb6fb8LFzlfdsRGC7x9B\ncn03oAcfC2Yo/kqoT4tjQF/COn408SBkytwZ3GbL8Bnul/RyjcHJNQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-10-19T12:53:38Z",
"mac": "ENC[AES256_GCM,data:MneMlobCnJZVn5QEwxL1os48TOJhrfETry39G2Si1kT5OKq6SlNB2mtVOshmGv21Cue/JNZWr9kwO0SS4egs4CsWaW8MMoR+84KIGIu9uol3vieEayd8nOjJfCKp0fRQKi1ElJRUtjLApA1KQ05WjG+vlb2JxNl67NWRqtykA44=,iv:f91eVJiSfmvM9ym8a3VYlijNZW5SoGUCUqAgFEax6Bo=,tag:ciANv3ov6p0gCJTIQwBoJg==,type:str]",
"version": "3.11.0"
}
}
@@ -0,0 +1 @@
../../../../../sops/users/admin
@@ -0,0 +1 @@
../../../../../sops/machines/second
@@ -0,0 +1,18 @@
{
"data": "ENC[AES256_GCM,data:PCpXPkFa5FzHArW1/g3QF9A=,iv:yKGtX4ZqiFYE/bvMAEIUmGQB7Oklo++vbed+K2JPxuA=,tag:UiiuchttMIv+T2TuXcpjZg==,type:str]",
"sops": {
"age": [
{
"recipient": "age1afr59kwlpuapv2g9m0sa4k9yc22ulj7zcway538z7nnsgvf2re5qd7k03a",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvcGJEM3ZLYUJleStRWkpy\nSmwzZXVXWUZTY2htbzk0UGlxV2gwWExyT21nCjRGYmFyRnNBYnJQa0hSRGtCZDdG\nTjdHMG94Q3diZnVsRy84TXJXKzJrTW8KLS0tIEF2dk1uSW5NQ0JxRFdmczNSYmJF\nRThyTnY5QVlOTEZ4eThpdEJmOFhvVFEKA7oltxLLS3w3LOOS2S4RZNJreZftJD2m\niXtDAfWOKLGYTdhfttzWWfxFHt/72OCL1WKm1COgmzBznJJ+RBehEw==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB5ZmZ3d0xNRkQycnFaM2pB\nL1ZLeVRFQXozeGFpMGtnc1FBR20ycXl2UmxvCk42QVVieC8waDBwenhVdVFTc2M0\nL2lSM0QvUzhDMXFjTVVnZGY2YXcxakUKLS0tIHZiTGZsMEZ3elVkK2p2Q1kyNXFZ\nTmJybzJaSnZyZE5sb2ZPUjZyNjRhak0KjIyzfPw1BHYJ5REWRSoKG9cY2b23dbdt\ndHioloXv3tW5bTSWytuDSQ5+xzruJqr1w099EZ6Gl6aeeo0oQQkpyw==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-10-19T12:53:38Z",
"mac": "ENC[AES256_GCM,data:jtj+zGk6D+a+R45wmWD/g2lNWfAPCAAXtwPxZLOOYVZ0IizwqOhsQ6IYtGvKqty0nsr//pCihhazJGFSiGT4fDv6eYOO83+JLTWdvHbfJjMCPsD+Shbjenu/m8dbifsEcnrLkAgzMin2nOeUdRombz91K7O8hbDrcIkS0AXh5G4=,iv:G0l+5SkAj5ii4Us+WG1/xWspglD0xa7viKyZQt2nF9Q=,tag:F48Ry3rmB/Y6nHyikB5Pjw==,type:str]",
"version": "3.11.0"
}
}
@@ -0,0 +1 @@
../../../../../sops/users/admin
@@ -0,0 +1 @@
../../../../../sops/machines/second
@@ -3,12 +3,16 @@
"sops": {
"age": [
{
"recipient": "age13ahclyps97532zt2sfta5zrfx976d3r2jmctj8d36vj9x5v5ffqq304fqf",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBxN2EwVHN3SENVTjdjZGRi\nQmJOWlNGYmpmM1BnZnpYWGhaSlRaUVJIODFRCkhhMUhyZzVWWk53SDBwSVBVZGVY\nVUpMTm9qWTIzc3VwdGJHcUVWVzFlV0UKLS0tIDBBVXdlS1FFbzNPSnlZWWtEaDJi\nK215OWQvMVRCRUZyQjFZckJFbHBZeDQK2cqgDnGM5uIm834dbQ3bi3nQA5nPq6Bf\n0+sezXuY55GdFS6OxIgI5/KcitHzDE0WHOvklIGDCSysoXIQ3QXanA==\n-----END AGE ENCRYPTED FILE-----\n"
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBUdDNFSE8zdm00QTc1L3kx\nUkZYMFRwVTZkZDQxZTZEWlZBV2pSZ1NidGxvCk5MRE5Da0I1R0MxRzBFYVpNNWwz\nNGdDazh4cll5MHlDUGZ4N1lZb242UlUKLS0tIFBwWVhpS1JEc2JBWDZKdFBCUUF3\nRlVRZTR4YW93SXZlNXhjWlFDYnd5UEEKcZ7sbVO4ZhhTV4pNinJhk7+qWk4nr2E9\nvjQCXZvAMhXP485S+Dbiuvc426cOOL+KrOelMFRJgZg3sDtuN4AFLw==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA0NDB5SVcrU0V6akYwbDlv\na1BuSm5XbjYwN2ZkZWtIcnhBVHBTWGFxd24wCnZTVGlPRm5uZEd3QXYwdFRMS09K\nWWw5N2RJZ3d4N0VDMWZmM2lkYVM4VncKLS0tIGplTDVka1VoUVdXMU9VS3hYSlZ1\nRjZGL25hQWxHWEx3OXdQamJiNG9KaDgKk94uXPuCE/M4Hz/7hVKJPHuzQfbOQi/9\nVfR2i17Hjcq08l68Xzn+DllQEAFdts2fS96Pu4FFKfiLK7INl/fUOg==\n-----END AGE ENCRYPTED FILE-----\n"
"recipient": "age1afr59kwlpuapv2g9m0sa4k9yc22ulj7zcway538z7nnsgvf2re5qd7k03a",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6RHhiQVFKMCs2NkZ6NENl\nRzUxVWRXWmxzcGhRSXNxNjBIQ1RkN2tpN0RRCmF3QzRvQXdzSVFFcnpBOHdmRFpL\nVzBoSWZKQjJLVDRkZDdlbEVZR2ZzRTAKLS0tIHlOR1dmWEk0UTNYa05nbVR6Q2pn\nVXFDMnNxZ0xJd1ZnVWhJSVlJYmZxQUkKnWc72eUjsVpeipP76ZoiPSGockLXMR/p\nUswkQR01sCcvuPV0sJLBUK+PRMEKBjUH7O9opOvzCsTTzIVaJyLpRg==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age13ahclyps97532zt2sfta5zrfx976d3r2jmctj8d36vj9x5v5ffqq304fqf",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBSVXUxNGpGYnFEdlpNNUwv\nTEo3a25sSnhHNmlZb0h0QTBwUlpjTFBtZXljCngxcUpxTEZRZlJoTHAxeFRGWXpL\nUzM0eTFOMXQzS0ZEak9NOEJzbHlGS1kKLS0tIFpkWGcwVnVIVUZEYkNlcWVMUTdE\nR21qaTgxT3BpUzVoRUlQODlyRUxodFkKYQPKrDaogCO3m9JfFHc8IrGauipcWPRF\n4P51UqhAUtWwZLdiUIoY5ucG+NaDzBxxUJqJYlUqkQMbW/OvHihu9g==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-05-29T13:15:02Z",
@@ -0,0 +1 @@
../../../../../sops/machines/second
@@ -3,12 +3,16 @@
"sops": {
"age": [
{
"recipient": "age13ahclyps97532zt2sfta5zrfx976d3r2jmctj8d36vj9x5v5ffqq304fqf",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBPdkQyYnQ1UzlCWEFtdnJh\nMWlBK0RGcENTMmRITWM5SSs2Mkt2N0ZKdm5VClNTS0NuR05OVHY3QkFLZWt6bTUx\nMzJLc2Vib1ZUbW1VM0lhYXFFeEhOaEEKLS0tIHVoODVOK3BUU2JDZkJkN2I2Wm1L\nMWM0TUNQazljZS9uWXRKRFlxWmd0clUKg1YhJoRea05c24hCuZKYvqyvjuu965KD\nr4GLtyqQ6wt9sn50Rzx5cAY/Ac684DNFJVZ1RwG1NTB2kmXcVP8SJA==\n-----END AGE ENCRYPTED FILE-----\n"
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBBYS83dDhMV1JINWtyT3Rp\ndlRBcjIyNEZuOTBaY24zMGZYdGQwRkIzTzBFCm1VVEN5bllBbklpcDBsNzZ3V1dy\nQ3owdm5RUWlEQzI0Y21NQ1lqcjR1UWcKLS0tIFpwYlVHTUtUR0tDL1BZcWtNMUo5\nUzVwcFh3MDVZTWJCRTkzUm5pc0d6UnMKlCvwvAvS2tvZel8VrHpU1B76owilCgnV\narlc+s7i2lBIyEW5npkZGkn8RgI558C34SPRhM2+c+ennSVIYZM56g==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBoZTA5QXpsOXR3L2FKcnJD\neUxzNVp3M2VQMFFaUUxwNXQ4UTlXa01rR0IwCjkyU2hmdlVYbWY4WUpVK0J1ZC9Q\nRjVkYWlGTlh1MFY3R3FxMEZHODZXMmcKLS0tIFV3bGdvUEtnT21wRWJveEQwdTBV\nbGFUUExBZWR1enQ0c0l0dUY3TnErM3cKutl5cv8dSlpQA7SXUYWJq1M0yLmko/Bx\nUvxxGGLQaK0Mp81Z5mOsjNhcVQrY160AyVnWJ0z39cqOJq9PpXRP+A==\n-----END AGE ENCRYPTED FILE-----\n"
"recipient": "age1afr59kwlpuapv2g9m0sa4k9yc22ulj7zcway538z7nnsgvf2re5qd7k03a",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvUTlBZ3NKNFVQVzdWMnFv\neHVsTStkdHJ0dmVCRFVxODhxbWZwdHlOMEZNCjUrRENVNUxDWnZJQWhKYzJUVnNK\ncXNqSWdvajlUYkdjNEVYaDdxTVZKQ0EKLS0tIFM3WWxSOGU4Yk1Wb3B3VHBtRFlj\nemFIZ0xNTnNiaSttcU1lR0xMSzVXWDQKhY4zo/aoePu1kZ1uvvu+za2vkZVNFqO0\nBYYt88gOBS7Wb2N4+54w6CIKZy7oYljY+MRwlifArFIMpCt/EF473Q==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age13ahclyps97532zt2sfta5zrfx976d3r2jmctj8d36vj9x5v5ffqq304fqf",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBHalgveTJwYWEyZDRMZVB0\naVFWTnM3K1VQQzBYckVzbVVqREIxUXBsOUN3ClVRRzl5c2dqbkdTdFRUdmxtRmh0\nV1NCYURxT1d1MFdCUVRWV1hmNVh2L2MKLS0tIEdGQkFoc3FtbWQxUW43WTVwUDJr\nYlZrTmlXNVBhN3dSajlCaWxGb3JCQjAKDWYtDWKDoK6FybakbUOz1X82egHlkHte\n4noQjZ4yESGCWr9Pi3S+14IYItFObP1zh//Sab4e3uR9uVFBWLiVEw==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-05-29T13:15:02Z",
@@ -85,22 +85,27 @@ graph TB
### Basic Setup with Single Controller

```nix
# In your flake.nix or inventory
# In your clan.nix
{
services.wireguard.server1 = {
roles.controller = {
# Public endpoint where this controller can be reached
endpoint = "vpn.example.com";
# Optional: Change the UDP port (default: 51820)
port = 51820;
instances = {
wireguard = {
module.name = "wireguard";
module.input = "clan-core";
roles.controller = {
machines.server1 = {};
settings = {
# Public endpoint where this controller can be reached
endpoint = "vpn.example.com";
# Optional: Change the UDP port (default: 51820)
port = 51820;
};
};
roles.peer = {
# No configuration needed if only one controller exists
machines.laptop1 = {};
};
};
};

services.wireguard.laptop1 = {
roles.peer = {
# No configuration needed if only one controller exists
};
};
}
}
```
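For reference, here is the new-style configuration from this hunk in one piece (the hunk interleaves the removed `services.wireguard.*` form with the added `instances.*` form); machine names are the ones used in the example:

```nix
# In your clan.nix
{
  instances = {
    wireguard = {
      module.name = "wireguard";
      module.input = "clan-core";
      roles.controller = {
        machines.server1 = { };
        settings = {
          # Public endpoint where this controller can be reached
          endpoint = "vpn.example.com";
          # Optional: Change the UDP port (default: 51820)
          port = 51820;
        };
      };
      roles.peer = {
        # No configuration needed if only one controller exists
        machines.laptop1 = { };
      };
    };
  };
}
```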

@@ -108,29 +113,95 @@ graph TB

```nix
{
services.wireguard.server1 = {
roles.controller = {
endpoint = "vpn1.example.com";
instances = {
wireguard = {
module.name = "wireguard";
module.input = "clan-core";
roles.controller.machines = {
server1.settings.endpoint = "vpn1.example.com";
server2.settings.endpoint = "vpn2.example.com";
server3.settings.endpoint = "vpn3.example.com";
};
roles.peer.machines.laptop1 = {
# Must specify which controller subnet is exposed as the default in /etc/hosts, when multiple controllers exist
settings.controller = "server1";
};
};
};

services.wireguard.server2 = {
roles.controller = {
endpoint = "vpn2.example.com";
};
};

services.wireguard.laptop1 = {
roles.peer = {
# Must specify which controller subnet is exposed as the default in /etc/hosts, when multiple controllers exist
controller = "server1";
};
};
}
}
```
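Likewise, a consolidated view of the new-style multi-controller setup from this hunk, using the same machine names:

```nix
{
  instances = {
    wireguard = {
      module.name = "wireguard";
      module.input = "clan-core";
      roles.controller.machines = {
        server1.settings.endpoint = "vpn1.example.com";
        server2.settings.endpoint = "vpn2.example.com";
        server3.settings.endpoint = "vpn3.example.com";
      };
      roles.peer.machines.laptop1 = {
        # Must specify which controller subnet is exposed as the default
        # in /etc/hosts when multiple controllers exist
        settings.controller = "server1";
      };
    };
  };
}
```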

### Advanced Options

#### External Peers

External peers are devices outside of your clan (like phones, laptops, etc.) that can connect to the mesh network through controllers. Each external peer gets its own keypair and can be configured with specific options.

##### IPv6-only external peers

```nix
{
instances = {
wireguard = {
module.name = "wireguard";
module.input = "clan-core";
roles.controller.machines.server1.settings = {
endpoint = "vpn.example.com";
# Define external peers with configuration options
externalPeers = {
dave = {
# No internet access - can only reach clan mesh
allowInternetAccess = false;
};
moms-phone = {
# Internet access enabled - IPv6 traffic routed through VPN
allowInternetAccess = true;
};
};
};
roles.peer.machines.laptop1 = {};
};
}
}
```

##### IPv4 support for external peers

If you need IPv4 internet access for external peers, you can enable IPv4 on the controller and assign IPv4 addresses to external peers:

```nix
{
instances = {
wireguard = {
module.name = "wireguard";
module.input = "clan-core";
roles.controller.machines.server1.settings = {
endpoint = "vpn.example.com";
# Enable IPv4 with controller's address
ipv4.enable = true;
ipv4.address = "10.42.1.1/24";
externalPeers = {
dave = {
# No internet access - can only reach clan mesh
allowInternetAccess = false;
ipv4.address = "10.42.1.50/32";
};
moms-phone = {
# Internet access enabled - IPv4 and IPv6 traffic routed through VPN
allowInternetAccess = true;
ipv4.address = "10.42.1.51/32";
};
};
};
roles.peer.machines.laptop1 = {};
};
}
}
```

**Note:** IPv4 addresses for external peers are only used for internet access through the controller, not for mesh communication (which uses IPv6).

External peers can connect to multiple controllers by adding the same peer name to multiple controllers' `externalPeers` configuration.
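A minimal sketch of that pattern, assuming two controllers named server1 and server2 and an external peer called dave (names are illustrative; the VM test below does the same with external1):

```nix
{
  instances = {
    wireguard = {
      module.name = "wireguard";
      module.input = "clan-core";
      roles.controller.machines = {
        server1.settings = {
          endpoint = "vpn1.example.com";
          externalPeers.dave = { };
        };
        server2.settings = {
          endpoint = "vpn2.example.com";
          # Same peer name again: dave keeps one shared keypair and can
          # reach the mesh through either controller
          externalPeers.dave = { };
        };
      };
    };
  };
}
```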

### Automatic Hostname Resolution

@@ -105,9 +105,31 @@ let
peerIP = controllerPrefix + ":" + peerSuffix;
in
"${peerIP} ${peerName}.${domain}"
) roles.peer.machines;
) roles.peer.machines or { };

# External peers
externalPeerHosts = lib.flatten (
lib.mapAttrsToList (
ctrlName: _ctrlValue:
lib.mapAttrsToList (
peer: _peerSettings:
let
peerSuffix = builtins.readFile (
config.clan.core.settings.directory
+ "/vars/shared/wireguard-network-${instanceName}-external-peer-${peer}/suffix/value"
);
controllerPrefix = builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${ctrlName}/wireguard-network-${instanceName}/prefix/value"
);
peerIP = controllerPrefix + ":" + peerSuffix;
in
"${peerIP} ${peer}.${domain}"
) (roles.controller.machines.${ctrlName}.settings.externalPeers)
) roles.controller.machines
);
in
builtins.concatStringsSep "\n" (controllerHosts ++ peerHosts);
builtins.concatStringsSep "\n" (controllerHosts ++ peerHosts ++ externalPeerHosts);
};
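As a rough illustration of what this generator produces, assuming the domain is the instance name (here `wg-test-one`, as in the VM test below); the IPv6 prefixes and suffixes are placeholders for the values read from the generated vars:

```nix
{
  # Hypothetical rendered /etc/hosts entries for controllers, peers, and external peers
  networking.extraHosts = ''
    fd42:aaaa:bbbb:cc00::1 controller1.wg-test-one
    fd42:aaaa:bbbb:cc00:8000::1 peer1.wg-test-one
    fd42:aaaa:bbbb:cc00:8000::2 dave.wg-test-one
  '';
}
```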

# Shared interface options
@@ -268,12 +290,89 @@ in
{
imports = [ sharedInterface ];

options.endpoint = lib.mkOption {
type = lib.types.str;
example = "vpn.clan.lol";
description = ''
Endpoint where the controller can be reached
'';
options = {
endpoint = lib.mkOption {
type = lib.types.str;
example = "vpn.clan.lol";
description = ''
Endpoint where the controller can be reached
'';
};
ipv4 = {
enable = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
Enable IPv4 support for external peers on this controller.
When enabled, the controller will have an IPv4 address and can route IPv4 traffic.

IPv4 is only used for internet access, not for mesh communication (which uses IPv6).
'';
};
address = lib.mkOption {
type = lib.types.str;
example = "10.42.1.1/24";
description = ''
IPv4 address for this controller in CIDR notation.
External peers with IPv4 addresses must be within the same subnet.

IPv4 is only used for internet access, not for mesh communication (which uses IPv6).
'';
};
};
externalPeers = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule {
options = {
allowInternetAccess = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
Whether to allow this external peer to access the internet through the controller.
When enabled, the controller will route internet traffic for this peer.

IPv4 is only used for internet access, not for mesh communication (which uses IPv6).
'';
};
ipv4.address = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
example = "10.42.1.50/32";
description = ''
IPv4 address for this external peer in CIDR notation.
The peer must be within the controller's IPv4 subnet.
Only used when the controller has IPv4 enabled.

IPv4 is only used for internet access, not for mesh communication (which uses IPv6).
'';
};
};
}
);
default = { };
example = {
dave = {
allowInternetAccess = false;
};
"moms-phone" = {
allowInternetAccess = true;
ipv4.address = "10.42.1.51/32";
};
};
description = ''
External peers that are not part of the clan.

For every entry here, a key pair for an external device will be generated.
This key pair can then be displayed via `clan vars get` and inserted into an external device, like a phone or laptop.

Each external peer can connect to the mesh through one or more controllers.
To connect to multiple controllers, add the same peer name to multiple controllers' `externalPeers`, or simply set `roles.controller.settings.externalPeers`.

The external peer names must not collide with machine names in the clan.
The machines which are part of the clan will be able to resolve the external peers via their host names, but not vice versa.
External peers can still reach machines from within the clan via their IPv6 addresses.
'';
};
};
};
perInstance =
@@ -296,7 +395,37 @@ in
}:
let
allOtherControllers = lib.filterAttrs (name: _v: name != machine.name) roles.controller.machines;
allPeers = roles.peer.machines;
allPeers = roles.peer.machines or { };
# Collect all external peers from all controllers
allExternalPeers = lib.unique (
lib.flatten (
lib.mapAttrsToList (_: ctrl: lib.attrNames ctrl.settings.externalPeers) roles.controller.machines
)
);

controllerPrefix =
controllerName:
builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${controllerName}/wireguard-network-${instanceName}/prefix/value"
);

peerSuffix =
peerName:
builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${peerName}/wireguard-network-${instanceName}/suffix/value"
);

externalPeerSuffix =
externalName:
builtins.readFile (
config.clan.core.settings.directory
+ "/vars/shared/wireguard-network-${instanceName}-external-peer-${externalName}/suffix/value"
);

thisControllerPrefix =
config.clan.core.vars.generators."wireguard-network-${instanceName}".files.prefix.value;
in
{
imports = [
@@ -310,93 +439,172 @@ in
;
})
];
# Network allocation generator for this controller
clan.core.vars.generators."wireguard-network-${instanceName}" = {
files.prefix.secret = false;
# Network prefix allocation generator for this controller
clan.core.vars.generators = {
"wireguard-network-${instanceName}" = {
files.prefix.secret = false;

runtimeInputs = with pkgs; [
python3
];
runtimeInputs = with pkgs; [
python3
];

# Invalidate on network or hostname changes
validation.hostname = machine.name;
# Invalidate on network or hostname changes
validation.hostname = machine.name;

script = ''
${pkgs.python3}/bin/python3 ${./ipv6_allocator.py} "$out" "${instanceName}" controller "${machine.name}"
'';
script = ''
${pkgs.python3}/bin/python3 ${./ipv6_allocator.py} "$out" "${instanceName}" controller "${machine.name}"
'';
};
}
# For external peers, generate: suffix, public key, private key
// lib.genAttrs' (lib.attrNames settings.externalPeers) (peer: {
name = "wireguard-network-${instanceName}-external-peer-${peer}";
value = {
files.suffix.secret = false;
files.publickey.secret = false;
files.privatekey.secret = true;
files.privatekey.deploy = false;

# The external peers keys are not deployed and are globally unique.
# Even if an external peer is connected to more than one controller,
# its private keys will remain the same.
share = true;

runtimeInputs = with pkgs; [
python3
wireguard-tools
];

# Invalidate on hostname changes
validation.hostname = peer;

script = ''
${pkgs.python3}/bin/python3 ${./ipv6_allocator.py} "$out" "${instanceName}" peer "${peer}"
wg genkey > $out/privatekey
wg pubkey < $out/privatekey > $out/publickey
'';
};
});

# Enable ip forwarding, so wireguard peers can reach each other
boot.kernel.sysctl = {
"net.ipv6.conf.all.forwarding" = 1;
}
// lib.optionalAttrs settings.ipv4.enable {
"net.ipv4.conf.all.forwarding" = 1;
};

# Enable ip forwarding, so wireguard peers can reach eachother
boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = 1;

networking.firewall.allowedUDPPorts = [ settings.port ];

networking.firewall.extraCommands =
let
peersWithInternetAccess = lib.filterAttrs (
_: peerConfig: peerConfig.allowInternetAccess
) settings.externalPeers;

peerInfo = lib.mapAttrs (
peer: peerConfig:
let
ipv6Address = "${thisControllerPrefix}:${externalPeerSuffix peer}";
ipv4Address =
if settings.ipv4.enable && peerConfig.ipv4.address != null then
lib.head (lib.splitString "/" peerConfig.ipv4.address)
else
null;
in
{
inherit ipv6Address ipv4Address;
}
) peersWithInternetAccess;

in
lib.concatStringsSep "\n" (
(lib.mapAttrsToList (_peer: info: ''
ip6tables -t nat -A POSTROUTING -s ${info.ipv6Address}/128 ! -o '${instanceName}' -j MASQUERADE
'') peerInfo)
++ (lib.mapAttrsToList (
_peer: info:
lib.optionalString (info.ipv4Address != null) ''
iptables -t nat -A POSTROUTING -s ${info.ipv4Address} ! -o '${instanceName}' -j MASQUERADE
''
) peerInfo)
);

# Single wireguard interface
networking.wireguard.interfaces."${instanceName}" = {
listenPort = settings.port;

ips = [
# Controller uses ::1 in its /56 subnet but with /40 prefix for proper routing
"${config.clan.core.vars.generators."wireguard-network-${instanceName}".files.prefix.value}::1/40"
];
"${thisControllerPrefix}::1/40"
]
++ lib.optional settings.ipv4.enable settings.ipv4.address;

privateKeyFile =
config.clan.core.vars.generators."wireguard-keys-${instanceName}".files."privatekey".path;

# Connect to all peers and other controllers
peers = lib.mapAttrsToList (
name: value:
if allPeers ? ${name} then
# For peers: they now have our entire /56 subnet
{
publicKey = (
builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${name}/wireguard-keys-${instanceName}/publickey/value"
)
);
peers =
# Peers configuration
(lib.mapAttrsToList (name: _value: {
publicKey = (
builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${name}/wireguard-keys-${instanceName}/publickey/value"
)
);

# Allow the peer's /96 range in ALL controller subnets
allowedIPs = lib.mapAttrsToList (
ctrlName: _:
let
controllerPrefix = builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${ctrlName}/wireguard-network-${instanceName}/prefix/value"
);
peerSuffix = builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${name}/wireguard-network-${instanceName}/suffix/value"
);
in
"${controllerPrefix}:${peerSuffix}/96"
) roles.controller.machines;
# Allow the peer's /96 range in ALL controller subnets
allowedIPs = lib.mapAttrsToList (
ctrlName: _: "${controllerPrefix ctrlName}:${peerSuffix name}/96"
) roles.controller.machines;

persistentKeepalive = 25;
}
else
# For other controllers: use their /56 subnet
{
publicKey = (
builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${name}/wireguard-keys-${instanceName}/publickey/value"
)
);
persistentKeepalive = 25;
}) allPeers)
++
# External peers configuration - includes all external peers from all controllers
(map (
peer:
let
# IPv6 allowed IPs for mesh communication
ipv6AllowedIPs = lib.mapAttrsToList (
ctrlName: _: "${controllerPrefix ctrlName}:${externalPeerSuffix peer}/96"
) roles.controller.machines;

allowedIPs = [
"${
# IPv4 allowed IP (only if this controller manages this peer and has IPv4 enabled)
ipv4AllowedIPs = lib.optional (
settings.ipv4.enable
&& settings.externalPeers ? ${peer}
&& settings.externalPeers.${peer}.ipv4.address != null
) settings.externalPeers.${peer}.ipv4.address;
in
{
publicKey = (
builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${name}/wireguard-network-${instanceName}/prefix/value"
+ "/vars/shared/wireguard-network-${instanceName}-external-peer-${peer}/publickey/value"
)
}::/56"
];
);

allowedIPs = ipv6AllowedIPs ++ ipv4AllowedIPs;

persistentKeepalive = 25;
}
) allExternalPeers)
++
# Other controllers configuration
(lib.mapAttrsToList (name: value: {
publicKey = (
builtins.readFile (
config.clan.core.settings.directory
+ "/vars/per-machine/${name}/wireguard-keys-${instanceName}/publickey/value"
)
);

allowedIPs = [ "${controllerPrefix name}::/56" ];

endpoint = "${value.settings.endpoint}:${toString value.settings.port}";
persistentKeepalive = 25;
}
) (allPeers // allOtherControllers);
}) allOtherControllers);
};
};
};
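For context, a sketch of what the matching configuration on an external device could look like, modeled on the VM test below (NixOS syntax; a phone or laptop would use the equivalent wg-quick configuration). Interface name, prefix, key and endpoint values are placeholders; the real private key and suffix come from `clan vars get`:

```nix
{
  networking.wireguard.interfaces."wg0" = {
    # One IP in each controller's /56 subnet (only one controller shown here)
    ips = [ "fd42:aaaa:bbbb:cc00:9000::1/56" ];
    privateKeyFile = "/path/to/privatekey"; # generated external-peer key
    peers = [
      {
        publicKey = "<controller public key>";
        # Route the controller's whole /56 subnet through it
        allowedIPs = [ "fd42:aaaa:bbbb:cc00::/56" ];
        endpoint = "vpn.example.com:51820";
        persistentKeepalive = 25;
      }
    ];
  };
}
```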
@@ -416,7 +624,7 @@ in
let
isController =
instanceInfo.roles ? controller && instanceInfo.roles.controller.machines ? ${machine.name};
isPeer = instanceInfo.roles ? peer && instanceInfo.roles.peer.machines ? ${machine.name};
isPeer = instanceInfo.roles ? peer && instanceInfo.roles.peer.machines or { } ? ${machine.name};
in
lib.optional (isController && isPeer) {
inherit instanceName;

@@ -4,4 +4,12 @@ let
in
{
clan.modules.wireguard = module;
perSystem =
{ ... }:
{
clan.nixosTests.wireguard = {
imports = [ ./tests/vm/default.nix ];
clan.modules."@clan/wireguard" = module;
};
};
}

clanServices/wireguard/tests/vm/default.nix (new file, 228 lines)
@@ -0,0 +1,228 @@
{
lib,
config,
...
}:

let
machines = [
"controller1"
"controller2"
"peer1"
"peer2"
"peer3"
# external machine for external peer testing
"external1"
];

controllerPrefix =
controllerName:
builtins.readFile (
config.clan.directory
+ "/vars/per-machine/${controllerName}/wireguard-network-wg-test-one/prefix/value"
);
peerSuffix =
peerName:
builtins.readFile (
config.clan.directory + "/vars/per-machine/${peerName}/wireguard-network-wg-test-one/suffix/value"
);
# external peer suffixes are stored via shared vars
externalPeerSuffix =
externalName:
builtins.readFile (
config.clan.directory
+ "/vars/shared/wireguard-network-wg-test-one-external-peer-${externalName}/suffix/value"
);
in
{
name = "wireguard";

clan = {
directory = ./.;
inventory = {

machines = lib.genAttrs machines (_: { });

instances = {

/*
wg-test-one topology: controller1 and controller2 are the controllers;
peer1, peer2 and peer3 connect to both of them, with peer1 and peer3
defaulting to controller1 and peer2 defaulting to controller2.
*/

wg-test-one = {

module.name = "@clan/wireguard";
module.input = "self";

roles.controller.machines."controller1".settings = {
endpoint = "192.168.1.1";
# Enable IPv4 for external peers
ipv4.enable = true;
ipv4.address = "10.42.1.1/24";
# add an external peer to controller1 with IPv4
externalPeers.external1 = {
ipv4.address = "10.42.1.50/32";
};
};

roles.controller.machines."controller2".settings = {
endpoint = "192.168.1.2";
# add the same external peer to controller2 to test multi-controller connection
externalPeers.external1 = { };
};

roles.peer.machines = {
peer1.settings.controller = "controller1";
peer2.settings.controller = "controller2";
peer3.settings.controller = "controller1";
};
};

# TODO: Will this actually work with conflicting ports? Can we re-use interfaces?
#wg-test-two = {
# module.name = "@clan/wireguard";

# roles.controller.machines."controller1".settings = {
# endpoint = "192.168.1.1";
# port = 51922;
# };

# roles.peer.machines = {
# peer1 = { };
# };
#};
};
};
};

nodes.external1 =
let
controller1Prefix = controllerPrefix "controller1";
controller2Prefix = controllerPrefix "controller2";
external1Suffix = externalPeerSuffix "external1";
in
{
networking.extraHosts = ''
${controller1Prefix}::1 controller1.wg-test-one
${controller2Prefix}::1 controller2.wg-test-one
'';
networking.wireguard.interfaces."wg0" = {

# Multiple IPs, one in each controller's subnet (IPv6) plus IPv4
ips = [
"${controller1Prefix + ":" + external1Suffix}/56"
"${controller2Prefix + ":" + external1Suffix}/56"
"10.42.1.50/32" # IPv4 address for controller1
];

privateKeyFile =
builtins.toFile "wg-priv-key"
# This needs to be updated whenever update-vars was executed
# Get the value from the generated vars via this command:
# echo "AGE-SECRET-KEY-1PL0M9CWRCG3PZ9DXRTTLMCVD57U6JDFE8K7DNVQ35F4JENZ6G3MQ0RQLRV" | SOPS_AGE_KEY_FILE=/dev/stdin nix run nixpkgs#sops decrypt clanServices/wireguard/tests/vm/vars/shared/wireguard-network-wg-test-one-external-peer-external1/privatekey/secret
"wO8dl3JWgV5J+0D/2UDcLsxTD25IWTvd5ed6vv2Nikk=";

# Connect to both controllers
peers = [
# Controller 1
{
publicKey = (
builtins.readFile (
config.clan.directory + "/vars/per-machine/controller1/wireguard-keys-wg-test-one/publickey/value"
)
);

# Allow controller1's /56 subnet (IPv6) and IPv4 subnet
allowedIPs = [
"${controller1Prefix}::/56"
"10.42.1.0/24" # IPv4 subnet for internet access
];

endpoint = "controller1:51820";

persistentKeepalive = 25;
}
# Controller 2
{
publicKey = (
builtins.readFile (
config.clan.directory + "/vars/per-machine/controller2/wireguard-keys-wg-test-one/publickey/value"
)
);

# Allow controller2's /56 subnet
allowedIPs = [ "${controller2Prefix}::/56" ];

endpoint = "controller2:51820";

persistentKeepalive = 25;
}
];
};
};

testScript = ''
start_all()

# Start network on all machines including external1
machines = [peer1, peer2, peer3, controller1, controller2, external1]
for m in machines:
m.systemctl("start network-online.target")

for m in machines:
m.wait_for_unit("network-online.target")
m.wait_for_unit("systemd-networkd.service")

print("\n\n" + "="*60)
print("STARTING PING TESTS")
print("="*60)

# Test mesh connectivity between regular clan machines
clan_machines = [peer1, peer2, peer3, controller1, controller2]
for m1 in clan_machines:
for m2 in clan_machines:
if m1 != m2:
print(f"\n--- Pinging from {m1.name} to {m2.name}.wg-test-one ---")
m1.wait_until_succeeds(f"ping -c1 {m2.name}.wg-test-one >&2")

# Test that external peer can reach both controllers (multi-controller connection)
print("\n--- Testing external1 -> controller1 (direct connection) ---")
external1.wait_until_succeeds("ping -c1 controller1.wg-test-one >&2")

print("\n--- Testing external1 -> controller2 (direct connection) ---")
external1.wait_until_succeeds("ping -c1 controller2.wg-test-one >&2")

# Test IPv4 connectivity
print("\n--- Testing external1 -> controller1 (IPv4) ---")
external1.wait_until_succeeds("ping -c1 10.42.1.1 >&2")

# Test that all clan machines can reach the external peer
for m in clan_machines:
print(f"\n--- Pinging from {m.name} to external1.wg-test-one ---")
m.wait_until_succeeds("ping -c1 external1.wg-test-one >&2")

# Test that external peer can reach a regular peer via controller1
print("\n--- Testing external1 -> peer1 (via controller1) ---")
external1.wait_until_succeeds("ping -c1 ${controllerPrefix "controller1"}:${peerSuffix "peer1"} >&2")

# Test controller failover
print("\n--- Shutting down controller1 ---")
controller1.shutdown()
print("\n--- Testing external1 -> peer1 (via controller2 after controller1 shutdown) ---")
external1.wait_until_succeeds("ping -c1 ${controllerPrefix "controller2"}:${peerSuffix "peer1"} >&2")

'';
}
Some files were not shown because too many files have changed in this diff