Compare commits
193 Commits
flake-comp...cli-clan-i
| Author | SHA1 | Date |
|---|---|---|
| | fea4c2dc48 | |
| | 80bbc6d7a3 | |
| | 765bdb262a | |
| | 05c00fbe82 | |
| | 7e97734797 | |
| | 6384c4654e | |
| | 72d3ad09a4 | |
| | a535450ec0 | |
| | aaeb616f82 | |
| | 434edeaae1 | |
| | a4efd3cb16 | |
| | 13131ccd6e | |
| | 3a8309b01f | |
| | 10065a7c8f | |
| | 176b54e29d | |
| | be048d8307 | |
| | 52fcab30e7 | |
| | d3b423328f | |
| | 1177e84dcc | |
| | 414952dfa3 | |
| | 24194011ac | |
| | 4f78a8ff94 | |
| | 068b5d4c1e | |
| | adccef4757 | |
| | 980d94d47d | |
| | a50b25eea2 | |
| | 017989841d | |
| | c14a5fcc69 | |
| | 4f60345ba7 | |
| | ece48d3b5f | |
| | 4eea8d24f0 | |
| | 49099df3fb | |
| | 62ccba9fb5 | |
| | 0b44770f1f | |
| | 61c3d7284a | |
| | 44b1be5ed4 | |
| | 88871bea69 | |
| | 5141ea047c | |
| | ff6a03a646 | |
| | bc379c985d | |
| | 69d8b029d6 | |
| | f3617b0407 | |
| | a5205681cc | |
| | 9880847d43 | |
| | 8aa88b22ab | |
| | ff979eba61 | |
| | 5d1abbd303 | |
| | 92e9bb2ed8 | |
| | ea75c9bfa9 | |
| | 2adf65482d | |
| | 5684ddf104 | |
| | f74e444120 | |
| | 0ef57bfc8e | |
| | 8f43af3c48 | |
| | eeaec583cb | |
| | a9d1ff83f2 | |
| | 89cb22147c | |
| | 1006fc755e | |
| | f100177df3 | |
| | cbd3b08296 | |
| | 2608bee30a | |
| | a29459a384 | |
| | 1abdd45821 | |
| | b058fcc8eb | |
| | 24ae95a007 | |
| | 39510b613f | |
| | dcdab61d13 | |
| | f86fe07b63 | |
| | 84bf9fe3cf | |
| | 9737ce51b1 | |
| | 3c1c6c1942 | |
| | 0904c9da60 | |
| | 73e03c21f7 | |
| | 743f05a1b5 | |
| | e28b8dc944 | |
| | 8d871aafe8 | |
| | 26559ff88b | |
| | b39aead1db | |
| | caae6c7a55 | |
| | df90fb20a0 | |
| | d6577ec05d | |
| | 9a66170aa1 | |
| | 3effd8fd9a | |
| | 86d6b42f4c | |
| | 5e2ffa9491 | |
| | ba58d6d91a | |
| | 38e2d00bbc | |
| | 4f29f2e2ca | |
| | 5c23e24315 | |
| | 1009c61c9f | |
| | 0817f83b0b | |
| | 4f191f3ebe | |
| | 9f48b7a2fa | |
| | b17466c84b | |
| | d2814efcde | |
| | 6a5a83f57a | |
| | 9e9ab22c37 | |
| | 2840d3a5fc | |
| | a305f98586 | |
| | 96fe387399 | |
| | d022f26c2c | |
| | d1f5a8e263 | |
| | a6a5c1e21d | |
| | f1867bdd7a | |
| | ee8e44d255 | |
| | f730f4fa06 | |
| | 567570e89c | |
| | 54797dd5f5 | |
| | c70c588c1c | |
| | bb6fab1168 | |
| | 0859a86ce0 | |
| | 1524dc963e | |
| | eebd3fa4ec | |
| | a1ff794d57 | |
| | 6693cda465 | |
| | bf0691587d | |
| | deecb966ce | |
| | 2d2d9c9dca | |
| | e0e16de144 | |
| | 75c60a6103 | |
| | 1373670dfc | |
| | 03b13e9ed4 | |
| | a79027c312 | |
| | bdcdf4e788 | |
| | e3ed9d7b4b | |
| | ddf2b57b3f | |
| | 5ab3a164c8 | |
| | 073027f7c6 | |
| | d0374c0d7c | |
| | 6137701532 | |
| | 1560c5f8cf | |
| | 5d884cecc2 | |
| | 8a3cade082 | |
| | 10b4389309 | |
| | 2879c72a89 | |
| | 547e912c4e | |
| | 87125f1ff7 | |
| | 804f606384 | |
| | 997c7de942 | |
| | e7323999f2 | |
| | 31d3997358 | |
| | 62b748624d | |
| | 29f440a482 | |
| | f15fd1be52 | |
| | beaacf81c6 | |
| | 1ae023f4bf | |
| | 9becd4e0c4 | |
| | 454b09a67e | |
| | 787781c2ad | |
| | 469c6ba42d | |
| | 21f335fa15 | |
| | d98b76e734 | |
| | e2cb1fd83f | |
| | 91646b323a | |
| | 121548ffb7 | |
| | 966a3ee919 | |
| | d007b0f1b3 | |
| | 034982bff2 | |
| | 7c37bddeea | |
| | c1a87e5c6a | |
| | e5bea3d49a | |
| | 40682972ef | |
| | 6035455cba | |
| | 9be53a9a63 | |
| | 29ec9dbe26 | |
| | a1874c940e | |
| | d115705cb4 | |
| | eceb6eb999 | |
| | d25cace522 | |
| | 3c6567e67d | |
| | 628e45293e | |
| | 954c14513a | |
| | cb8a01d448 | |
| | 8e53e42b74 | |
| | ed596a57f0 | |
| | b6bccd218a | |
| | 1df9b6e97d | |
| | 58fa7ac32b | |
| | 110d1d4921 | |
| | 46aee098c4 | |
| | 9d6735e8c4 | |
| | 47c94c51b6 | |
| | 1eb567682c | |
| | fcd83e7a60 | |
| | af4b00408a | |
| | aaff3b9b38 | |
| | c13741602c | |
| | 6cbe221f44 | |
| | 3cf8f605d5 | |
| | cc07e0ea44 | |
| | ccb9340478 | |
| | df096fe53b | |
| | 1129862293 | |
@@ -1,28 +0,0 @@
|
|||||||
name: "Update pinned clan-core for checks"
|
|
||||||
on:
|
|
||||||
repository_dispatch:
|
|
||||||
workflow_dispatch:
|
|
||||||
schedule:
|
|
||||||
- cron: "51 2 * * *"
|
|
||||||
jobs:
|
|
||||||
update-pinned-clan-core:
|
|
||||||
runs-on: nix
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
submodules: true
|
|
||||||
- name: Update clan-core for checks
|
|
||||||
run: nix run .#update-clan-core-for-checks
|
|
||||||
- name: Create pull request
|
|
||||||
env:
|
|
||||||
CI_BOT_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
|
|
||||||
run: |
|
|
||||||
export GIT_AUTHOR_NAME=clan-bot GIT_AUTHOR_EMAIL=clan-bot@clan.lol GIT_COMMITTER_NAME=clan-bot GIT_COMMITTER_EMAIL=clan-bot@clan.lol
|
|
||||||
git commit -am "Update pinned clan-core for checks"
|
|
||||||
|
|
||||||
# Use shared PR creation script
|
|
||||||
export PR_BRANCH="update-clan-core-for-checks"
|
|
||||||
export PR_TITLE="Update Clan Core for Checks"
|
|
||||||
export PR_BODY="This PR updates the pinned clan-core flake input that is used for checks."
|
|
||||||
|
|
||||||
./.gitea/workflows/create-pr.sh
|
|
||||||
@@ -19,8 +19,7 @@ jobs:
         uses: Mic92/update-flake-inputs-gitea@main
         with:
           # Exclude private flakes and update-clan-core checks flake
-          exclude-patterns: "devFlake/private/flake.nix,checks/impure/flake.nix"
+          exclude-patterns: "checks/impure/flake.nix"
           auto-merge: true
           gitea-token: ${{ secrets.CI_BOT_TOKEN }}
           github-token: ${{ secrets.CI_BOT_GITHUB_TOKEN }}
@@ -1,40 +0,0 @@
|
|||||||
name: "Update private flake inputs"
|
|
||||||
on:
|
|
||||||
repository_dispatch:
|
|
||||||
workflow_dispatch:
|
|
||||||
schedule:
|
|
||||||
- cron: "0 3 * * *" # Run daily at 3 AM
|
|
||||||
jobs:
|
|
||||||
update-private-flake:
|
|
||||||
runs-on: nix
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
submodules: true
|
|
||||||
- name: Update private flake inputs
|
|
||||||
run: |
|
|
||||||
# Update the private flake lock file
|
|
||||||
cd devFlake/private
|
|
||||||
nix flake update
|
|
||||||
cd ../..
|
|
||||||
|
|
||||||
# Update the narHash
|
|
||||||
bash ./devFlake/update-private-narhash
|
|
||||||
- name: Create pull request
|
|
||||||
env:
|
|
||||||
CI_BOT_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
|
|
||||||
run: |
|
|
||||||
export GIT_AUTHOR_NAME=clan-bot GIT_AUTHOR_EMAIL=clan-bot@clan.lol GIT_COMMITTER_NAME=clan-bot GIT_COMMITTER_EMAIL=clan-bot@clan.lol
|
|
||||||
|
|
||||||
# Check if there are any changes
|
|
||||||
if ! git diff --quiet; then
|
|
||||||
git add devFlake/private/flake.lock devFlake/private.narHash
|
|
||||||
git commit -m "Update dev flake"
|
|
||||||
|
|
||||||
# Use shared PR creation script
|
|
||||||
export PR_BRANCH="update-dev-flake"
|
|
||||||
export PR_TITLE="Update dev flake"
|
|
||||||
export PR_BODY="This PR updates the dev flake inputs and corresponding narHash."
|
|
||||||
else
|
|
||||||
echo "No changes detected in dev flake inputs"
|
|
||||||
fi
|
|
||||||
@@ -1,208 +0,0 @@
|
|||||||
{ self, ... }:
|
|
||||||
{
|
|
||||||
clan.machines.test-backup = {
|
|
||||||
imports = [ self.nixosModules.test-backup ];
|
|
||||||
fileSystems."/".device = "/dev/null";
|
|
||||||
boot.loader.grub.device = "/dev/null";
|
|
||||||
};
|
|
||||||
clan.inventory.services = {
|
|
||||||
borgbackup.test-backup = {
|
|
||||||
roles.client.machines = [ "test-backup" ];
|
|
||||||
roles.server.machines = [ "test-backup" ];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
flake.nixosModules = {
|
|
||||||
test-backup =
|
|
||||||
{
|
|
||||||
pkgs,
|
|
||||||
lib,
|
|
||||||
...
|
|
||||||
}:
|
|
||||||
let
|
|
||||||
dependencies = [
|
|
||||||
pkgs.stdenv.drvPath
|
|
||||||
]
|
|
||||||
++ builtins.map (i: i.outPath) (builtins.attrValues (builtins.removeAttrs self.inputs [ "self" ]));
|
|
||||||
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
|
|
||||||
in
|
|
||||||
{
|
|
||||||
imports = [
|
|
||||||
# Do not import inventory modules. They should be configured via 'clan.inventory'
|
|
||||||
#
|
|
||||||
# TODO: Configure localbackup via inventory
|
|
||||||
self.clanModules.localbackup
|
|
||||||
];
|
|
||||||
# Borgbackup overrides
|
|
||||||
services.borgbackup.repos.test-backups = {
|
|
||||||
path = "/var/lib/borgbackup/test-backups";
|
|
||||||
authorizedKeys = [ (builtins.readFile ../assets/ssh/pubkey) ];
|
|
||||||
};
|
|
||||||
clan.borgbackup.destinations.test-backup.repo = lib.mkForce "borg@machine:.";
|
|
||||||
|
|
||||||
clan.core.networking.targetHost = "machine";
|
|
||||||
networking.hostName = "machine";
|
|
||||||
|
|
||||||
programs.ssh.knownHosts = {
|
|
||||||
machine.hostNames = [ "machine" ];
|
|
||||||
machine.publicKey = builtins.readFile ../assets/ssh/pubkey;
|
|
||||||
};
|
|
||||||
|
|
||||||
services.openssh = {
|
|
||||||
enable = true;
|
|
||||||
settings.UsePAM = false;
|
|
||||||
settings.UseDns = false;
|
|
||||||
hostKeys = [
|
|
||||||
{
|
|
||||||
path = "/root/.ssh/id_ed25519";
|
|
||||||
type = "ed25519";
|
|
||||||
}
|
|
||||||
];
|
|
||||||
};
|
|
||||||
|
|
||||||
users.users.root.openssh.authorizedKeys.keyFiles = [ ../assets/ssh/pubkey ];
|
|
||||||
|
|
||||||
# This is needed to unlock the user for sshd
|
|
||||||
# Because we use sshd without setuid binaries
|
|
||||||
users.users.borg.initialPassword = "hello";
|
|
||||||
|
|
||||||
systemd.tmpfiles.settings."vmsecrets" = {
|
|
||||||
"/root/.ssh/id_ed25519" = {
|
|
||||||
C.argument = "${../assets/ssh/privkey}";
|
|
||||||
z = {
|
|
||||||
mode = "0400";
|
|
||||||
user = "root";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
"/etc/secrets/ssh.id_ed25519" = {
|
|
||||||
C.argument = "${../assets/ssh/privkey}";
|
|
||||||
z = {
|
|
||||||
mode = "0400";
|
|
||||||
user = "root";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
"/etc/secrets/borgbackup/borgbackup.ssh" = {
|
|
||||||
C.argument = "${../assets/ssh/privkey}";
|
|
||||||
z = {
|
|
||||||
mode = "0400";
|
|
||||||
user = "root";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
"/etc/secrets/borgbackup/borgbackup.repokey" = {
|
|
||||||
C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
|
|
||||||
z = {
|
|
||||||
mode = "0400";
|
|
||||||
user = "root";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
clan.core.facts.secretStore = "vm";
|
|
||||||
clan.core.vars.settings.secretStore = "vm";
|
|
||||||
|
|
||||||
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
|
|
||||||
environment.etc.install-closure.source = "${closureInfo}/store-paths";
|
|
||||||
nix.settings = {
|
|
||||||
substituters = lib.mkForce [ ];
|
|
||||||
hashed-mirrors = null;
|
|
||||||
connect-timeout = lib.mkForce 3;
|
|
||||||
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
|
|
||||||
};
|
|
||||||
system.extraDependencies = dependencies;
|
|
||||||
clan.core.state.test-backups.folders = [ "/var/test-backups" ];
|
|
||||||
|
|
||||||
clan.core.state.test-service = {
|
|
||||||
preBackupScript = ''
|
|
||||||
touch /var/test-service/pre-backup-command
|
|
||||||
'';
|
|
||||||
preRestoreScript = ''
|
|
||||||
touch /var/test-service/pre-restore-command
|
|
||||||
'';
|
|
||||||
postRestoreScript = ''
|
|
||||||
touch /var/test-service/post-restore-command
|
|
||||||
'';
|
|
||||||
folders = [ "/var/test-service" ];
|
|
||||||
};
|
|
||||||
|
|
||||||
fileSystems."/mnt/external-disk" = {
|
|
||||||
device = "/dev/vdb"; # created in tests with virtualisation.emptyDisks
|
|
||||||
autoFormat = true;
|
|
||||||
fsType = "ext4";
|
|
||||||
options = [
|
|
||||||
"defaults"
|
|
||||||
"noauto"
|
|
||||||
];
|
|
||||||
};
|
|
||||||
|
|
||||||
clan.localbackup.targets.hdd = {
|
|
||||||
directory = "/mnt/external-disk";
|
|
||||||
preMountHook = ''
|
|
||||||
touch /run/mount-external-disk
|
|
||||||
'';
|
|
||||||
postUnmountHook = ''
|
|
||||||
touch /run/unmount-external-disk
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
perSystem =
|
|
||||||
{ pkgs, ... }:
|
|
||||||
let
|
|
||||||
clanCore = self.checks.x86_64-linux.clan-core-for-checks;
|
|
||||||
in
|
|
||||||
{
|
|
||||||
checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
|
|
||||||
nixos-test-backups = self.clanLib.test.containerTest {
|
|
||||||
name = "nixos-test-backups";
|
|
||||||
nodes.machine = {
|
|
||||||
imports = [
|
|
||||||
self.nixosModules.clanCore
|
|
||||||
# Some custom overrides for the backup tests
|
|
||||||
self.nixosModules.test-backup
|
|
||||||
]
|
|
||||||
++
|
|
||||||
# import the inventory generated nixosModules
|
|
||||||
self.clan.clanInternals.inventoryClass.machines.test-backup.machineImports;
|
|
||||||
clan.core.settings.directory = ./.;
|
|
||||||
};
|
|
||||||
|
|
||||||
testScript = ''
|
|
||||||
import json
|
|
||||||
start_all()
|
|
||||||
|
|
||||||
# dummy data
|
|
||||||
machine.succeed("mkdir -p /var/test-backups /var/test-service")
|
|
||||||
machine.succeed("echo testing > /var/test-backups/somefile")
|
|
||||||
|
|
||||||
# create
|
|
||||||
machine.succeed("clan backups create --debug --flake ${clanCore} test-backup")
|
|
||||||
machine.wait_until_succeeds("! systemctl is-active borgbackup-job-test-backup >&2")
|
|
||||||
machine.succeed("test -f /run/mount-external-disk")
|
|
||||||
machine.succeed("test -f /run/unmount-external-disk")
|
|
||||||
|
|
||||||
# list
|
|
||||||
backup_id = json.loads(machine.succeed("borg-job-test-backup list --json"))["archives"][0]["archive"]
|
|
||||||
out = machine.succeed("clan backups list --debug --flake ${clanCore} test-backup").strip()
|
|
||||||
print(out)
|
|
||||||
assert backup_id in out, f"backup {backup_id} not found in {out}"
|
|
||||||
localbackup_id = "hdd::/mnt/external-disk/snapshot.0"
|
|
||||||
assert localbackup_id in out, "localbackup not found in {out}"
|
|
||||||
|
|
||||||
## borgbackup restore
|
|
||||||
machine.succeed("rm -f /var/test-backups/somefile")
|
|
||||||
machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup borgbackup 'test-backup::borg@machine:.::{backup_id}' >&2")
|
|
||||||
assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
|
|
||||||
machine.succeed("test -f /var/test-service/pre-restore-command")
|
|
||||||
machine.succeed("test -f /var/test-service/post-restore-command")
|
|
||||||
machine.succeed("test -f /var/test-service/pre-backup-command")
|
|
||||||
|
|
||||||
## localbackup restore
|
|
||||||
machine.succeed("rm -rf /var/test-backups/somefile /var/test-service/ && mkdir -p /var/test-service")
|
|
||||||
machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup localbackup '{localbackup_id}' >&2")
|
|
||||||
assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
|
|
||||||
machine.succeed("test -f /var/test-service/pre-restore-command")
|
|
||||||
machine.succeed("test -f /var/test-service/post-restore-command")
|
|
||||||
machine.succeed("test -f /var/test-service/pre-backup-command")
|
|
||||||
'';
|
|
||||||
} { inherit pkgs self; };
|
|
||||||
};
|
|
||||||
};
|
|
||||||
}
|
|
||||||
@@ -1,6 +1,6 @@
 { fetchgit }:
 fetchgit {
   url = "https://git.clan.lol/clan/clan-core.git";
-  rev = "d0ebc75135b125fd509558c7680fa2459af91195";
-  sha256 = "1k9wpy661dhwas7z05jkn8157pgmr408dn67zd9px9iphmrf7bry";
+  rev = "5d884cecc2585a29b6a3596681839d081b4de192";
+  sha256 = "09is1afmncamavb2q88qac37vmsijxzsy1iz1vr6gsyjq2rixaxc";
 }
@@ -2,6 +2,7 @@
   self,
   lib,
   inputs,
+  privateInputs ? { },
   ...
 }:
 let
@@ -33,7 +34,6 @@ in
   in
   getClanCoreTestModules
   ++ filter pathExists [
-    ./backups/flake-module.nix
     ./devshell/flake-module.nix
     ./flash/flake-module.nix
     ./impure/flake-module.nix
@@ -104,6 +104,7 @@ in
       nixos-test-user-firewall-nftables = self.clanLib.test.containerTest ./user-firewall/nftables.nix nixosTestArgs;

       service-dummy-test = import ./service-dummy-test nixosTestArgs;
+      wireguard = import ./wireguard nixosTestArgs;
       service-dummy-test-from-flake = import ./service-dummy-test-from-flake nixosTestArgs;
     };

@@ -157,7 +158,7 @@ in
       '';

       clan-core-for-checks = pkgs.runCommand "clan-core-for-checks" { } ''
-        cp -r ${pkgs.callPackage ./clan-core-for-checks.nix { }} $out
+        cp -r ${privateInputs.clan-core-for-checks} $out
         chmod -R +w $out
         cp ${../flake.lock} $out/flake.lock

@@ -61,6 +61,10 @@
       nodes.target = {
         virtualisation.emptyDiskImages = [ 4096 ];
         virtualisation.memorySize = 4096;
+
+        virtualisation.useNixStoreImage = true;
+        virtualisation.writableStore = true;
+
         environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
         environment.etc."install-closure".source = "${closureInfo}/store-paths";

@@ -79,8 +83,8 @@
         start_all()

         # Some distros like to automount disks with spaces
-        machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdb && mount /dev/vdb "/mnt/with spaces"')
-        machine.succeed("clan flash write --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdb test-flash-machine-${pkgs.hostPlatform.system}")
+        machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdc && mount /dev/vdc "/mnt/with spaces"')
+        machine.succeed("clan flash write --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdc test-flash-machine-${pkgs.hostPlatform.system}")
       '';
     } { inherit pkgs self; };
   };
@@ -1,6 +1,7 @@
 {
   self,
   lib,
+  privateInputs,

   ...
 }:
@@ -151,7 +152,7 @@
       let
         closureInfo = pkgs.closureInfo {
           rootPaths = [
-            self.checks.x86_64-linux.clan-core-for-checks
+            privateInputs.clan-core-for-checks
             self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
             self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.initialRamdisk
             self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
115 checks/wireguard/default.nix Normal file
@@ -0,0 +1,115 @@
{
  pkgs,
  nixosLib,
  clan-core,
  lib,
  ...
}:
nixosLib.runTest (
  { ... }:

  let
    machines = [
      "controller1"
      "controller2"
      "peer1"
      "peer2"
      "peer3"
    ];
  in
  {
    imports = [
      clan-core.modules.nixosTest.clanTest
    ];

    hostPkgs = pkgs;

    name = "wireguard";

    clan = {
      directory = ./.;
      modules."@clan/wireguard" = import ../../clanServices/wireguard/default.nix;
      inventory = {

        machines = lib.genAttrs machines (_: { });

        instances = {

          /*
            wg-test-one
            [ASCII topology diagram, not recoverable from the extraction:
             controller1 and controller2 are linked to each other and to
             peer1, peer2 and peer3]
          */

          wg-test-one = {

            module.name = "@clan/wireguard";
            module.input = "self";

            roles.controller.machines."controller1".settings = {
              endpoint = "192.168.1.1";
            };

            roles.controller.machines."controller2".settings = {
              endpoint = "192.168.1.2";
            };

            roles.peer.machines = {
              peer1.settings.controller = "controller1";
              peer2.settings.controller = "controller2";
              peer3.settings.controller = "controller1";
            };
          };

          # TODO: Will this actually work with conflicting ports? Can we re-use interfaces?
          #wg-test-two = {
          #  module.name = "@clan/wireguard";

          #  roles.controller.machines."controller1".settings = {
          #    endpoint = "192.168.1.1";
          #    port = 51922;
          #  };

          #  roles.peer.machines = {
          #    peer1 = { };
          #  };
          #};
        };
      };
    };

    testScript = ''
      start_all()

      # Show all addresses
      machines = [peer1, peer2, peer3, controller1, controller2]
      for m in machines:
          m.systemctl("start network-online.target")

      for m in machines:
          m.wait_for_unit("network-online.target")
          m.wait_for_unit("systemd-networkd.service")

      print("\n\n" + "="*60)
      print("STARTING PING TESTS")
      print("="*60)

      for m1 in machines:
          for m2 in machines:
              if m1 != m2:
                  print(f"\n--- Pinging from {m1.name} to {m2.name}.wg-test-one ---")
                  m1.wait_until_succeeds(f"ping -c1 {m2.name}.wg-test-one >&2")
    '';
  }
)
6 checks/wireguard/sops/machines/controller1/key.json Executable file
@@ -0,0 +1,6 @@
[
  {
    "publickey": "age1rnkc2vmrupy9234clyu7fpur5kephuqs3v7qauaw5zeg00jqjdasefn3cc",
    "type": "age"
  }
]
6 checks/wireguard/sops/machines/controller2/key.json Executable file
@@ -0,0 +1,6 @@
[
  {
    "publickey": "age1t2hhg99d4p2yymuhngcy5ccutp8mvu7qwvg5cdhck303h9e7ha9qnlt635",
    "type": "age"
  }
]
6 checks/wireguard/sops/machines/peer1/key.json Executable file
@@ -0,0 +1,6 @@
[
  {
    "publickey": "age1jts52rzlqcwjc36jkp56a7fmjn3czr7kl9ta2spkfzhvfama33sqacrzzd",
    "type": "age"
  }
]
6 checks/wireguard/sops/machines/peer2/key.json Executable file
@@ -0,0 +1,6 @@
[
  {
    "publickey": "age12nqnp0zd435ckp5p0v2fv4p2x4cvur2mnxe8use2sx3fgy883vaq4ae75e",
    "type": "age"
  }
]
6 checks/wireguard/sops/machines/peer3/key.json Executable file
@@ -0,0 +1,6 @@
[
  {
    "publickey": "age1sglr4zp34drjfydzeweq43fz3uwpul3hkh53lsfa9drhuzwmkqyqn5jegp",
    "type": "age"
  }
]
15 checks/wireguard/sops/secrets/controller1-age.key/secret Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
{
|
||||||
|
"data": "ENC[AES256_GCM,data:zDF0RiBqaawpg+GaFkuLPomJ01Xu+lgY5JfUzaIk2j03XkCzIf8EMrmn6pRtBP3iUjPBm+gQSTQk6GHTONrixA5hRNyETV+UgQw=,iv:zUUCAGZ0cz4Tc2t/HOjVYNsdnrAOtid/Ns5ak7rnyCk=,tag:z43WtNSue4Ddf7AVu21IKA==,type:str]",
|
||||||
|
"sops": {
|
||||||
|
"age": [
|
||||||
|
{
|
||||||
|
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||||
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBlY1NEdjAzQm5RMFZWY3BJ\nclp6c01FdlZFK3dOSDB4cHc1NTdwMXErMFJFCnIrRVFNZEFYOG1rVUhFd2xsbTJ2\nVkJHNmdOWXlOcHJoQ0QzM1VyZmxmcGcKLS0tIFk1cEx4dFdvNGRwK1FWdDZsb1lR\nV2d1RFZtNzZqVFdtQ1FzNStEcEgyUUkKx8tkxqJz/Ko3xgvhvd6IYiV/lRGmrY13\nUZpYWR9tsQwZAR9dLjCyVU3JRuXeGB1unXC1CO0Ff3R0A/PuuRHh+g==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"lastmodified": "2025-08-13T09:19:37Z",
|
||||||
|
"mac": "ENC[AES256_GCM,data:8RGOUhZ2LGmC9ugULwHDgdMrtdo9vzBm3BJmL4XTuNJKm0NlKfgNLi1E4n9DMQ+kD4hKvcwbiUcwSGE8jZD6sm7Sh3bJi/HZCoiWm/O/OIzstli2NNDBGvQBgyWZA5H+kDjZ6aEi6icNWIlm5gsty7KduABnf5B3p0Bn5Uf5Bio=,iv:sGZp0XF+mgocVzAfHF8ATdlSE/5zyz5WUSRMJqNeDQs=,tag:ymYVBRwF5BOSAu5ONU2qKw==,type:str]",
|
||||||
|
"unencrypted_suffix": "_unencrypted",
|
||||||
|
"version": "3.10.2"
|
||||||
|
}
|
||||||
|
}
|
||||||
1 checks/wireguard/sops/secrets/controller1-age.key/users/admin Symbolic link
@@ -0,0 +1 @@
../../../users/admin
15 checks/wireguard/sops/secrets/controller2-age.key/secret Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
{
|
||||||
|
"data": "ENC[AES256_GCM,data:dHM7zWzqnC1QLRKYpbI2t63kOFnSaQy6ur9zlkLQf17Q03CNrqUsZtdEbwMnLR3llu7eVMhtvVRkXjEkvn3leb9HsNFmtk/DP70=,iv:roEZsBFqRypM106O5sehTzo7SySOJUJgAR738rTtOo8=,tag:VDd9/6uU0SAM7pWRLIUhUQ==,type:str]",
|
||||||
|
"sops": {
|
||||||
|
"age": [
|
||||||
|
{
|
||||||
|
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||||
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBKTEVYUmVGbUtOcHZ4cnc3\nKzNETnlxaVRKYTI3eWVHdEoyc3l2SnhsZ1J3CnB2RnZrOXM5Uml6TThDUlZjY25J\nbkJ6eUZ2ckN1NWpNUU9IaE93UDJQdlEKLS0tIC95ZDhkU0R1VHhCdldxdW4zSmps\nN3NqL1cvd05hRTRPdDA3R2pzNUFFajgKS+DJH14fH9AvEAa3PoUC1jEqKAzTmExN\nl32FeHTHbGMo1PKeaFm+Eg0WSpAmFE7beBunc5B73SW30ok6x4FcQw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"lastmodified": "2025-08-13T09:19:47Z",
|
||||||
|
"mac": "ENC[AES256_GCM,data:77EnuBQyguvkCtobUg8/6zoLHjmeGDrSBZuIXOZBMxdbJjzhRg++qxQjuu6t0FoWATtz7u4Y3/jzUMGffr/N5HegqSq0D2bhv7AqJwBiVaOwd80fRTtM+YiP/zXsCk52Pj/Gadapg208bDPQ1BBDOyz/DrqZ7w//j+ARJjAnugI=,iv:IuTDmJKZEuHXJXjxrBw0gP2t6vpxAYEqbtpnVbavVCY=,tag:4EnpX6rOamtg1O+AaEQahQ==,type:str]",
|
||||||
|
"unencrypted_suffix": "_unencrypted",
|
||||||
|
"version": "3.10.2"
|
||||||
|
}
|
||||||
|
}
|
||||||
1 checks/wireguard/sops/secrets/controller2-age.key/users/admin Symbolic link
@@ -0,0 +1 @@
../../../users/admin
15 checks/wireguard/sops/secrets/peer1-age.key/secret Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
{
|
||||||
|
"data": "ENC[AES256_GCM,data:wcSsqxTKiMAnzPwxs5DNjcSdLyjVQ9UOrZxfSbOkVfniwx6F7xz6dLNhaDq7MHQ0vRWpg28yNs7NHrp52bYFnb/+eZsis46WiCw=,iv:B4t1lvS2gC601MtsmZfEiEulLWvSGei3/LSajwFS9Vs=,tag:hnRXlZyYEFfLJUrw1SqbSQ==,type:str]",
|
||||||
|
"sops": {
|
||||||
|
"age": [
|
||||||
|
{
|
||||||
|
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||||
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAybUgya2VEdzMvRG1hdkpu\nM2pGNmcyVmcvYVZ1ZjJlY3A1bXFUUUtkMTI0CmJoRFZmejZjN2UxUXNuc1k5WnE2\nNmxIcnpNQ1lJZ3ZKSmhtSlVURXJTSUUKLS0tIGU4Wi9yZ3VYekJkVW9pNWFHblFk\na0gzbTVKUWdSam1sVjRUaUlTdVd5YWMKntRc9yb9VPOTMibp8QM5m57DilP01N/X\nPTQaw8oI40znnHdctTZz7S+W/3Te6sRnkOhFyalWmsKY0CWg/FELlA==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"lastmodified": "2025-08-13T09:19:58Z",
|
||||||
|
"mac": "ENC[AES256_GCM,data:8nq+ugkUJxE24lUIySySs/cAF8vnfqr936L/5F0O1QFwNrbpPmKRXkuwa6u0V+187L2952Id20Fym4ke59f3fJJsF840NCKDwDDZhBZ20q9GfOqIKImEom/Nzw6D0WXQLUT3w8EMyJ/F+UaJxnBNPR6f6+Kx4YgStYzCcA6Ahzg=,iv:VBPktEz7qwWBBnXE+xOP/EUVy7/AmNCHPoK56Yt/ZNc=,tag:qXONwOLFAlopymBEf5p4Sw==,type:str]",
|
||||||
|
"unencrypted_suffix": "_unencrypted",
|
||||||
|
"version": "3.10.2"
|
||||||
|
}
|
||||||
|
}
|
||||||
1 checks/wireguard/sops/secrets/peer1-age.key/users/admin Symbolic link
@@ -0,0 +1 @@
../../../users/admin
15 checks/wireguard/sops/secrets/peer2-age.key/secret Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
{
|
||||||
|
"data": "ENC[AES256_GCM,data:4d3ri0EsDmWRtA8vzvpPRLMsSp4MIMKwvtn0n0pRY05uBPXs3KcjnweMPIeTE1nIhqnMR2o2MfLah5TCPpaFax9+wxIt74uacbg=,iv:0LBAldTC/hN4QLCxgXTl6d9UB8WmUTnj4sD2zHQuG2w=,tag:zr/RhG/AU4g9xj9l2BprKw==,type:str]",
|
||||||
|
"sops": {
|
||||||
|
"age": [
|
||||||
|
{
|
||||||
|
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||||
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvV0JnZDhlU1piU1g2cng0\ncytKOEZ6WlZlNGRGUjV3MmVMd2Nzc0ZwelgwCjBGdThCUGlXbVFYdnNoZWpJZ3Vm\nc2xkRXhxS09vdzltSVoxLzhFSVduak0KLS0tIE5DRjJ6cGxiVlB1eElHWXhxN1pJ\nYWtIMDMvb0Z6akJjUzlqeEFsNHkxL2cKpghv/QegnXimeqd9OPFouGM//jYvoVmw\n2d4mLT2JSMkEhpfGcqb6vswhdJfCiKuqr2B4bqwAnPMaykhsm8DFRQ==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"lastmodified": "2025-08-13T09:20:08Z",
|
||||||
|
"mac": "ENC[AES256_GCM,data:BzlQVAJ7HzcxNPKB3JhabqRX/uU0EElj172YecjmOflHnzz/s9xgfdAfJK/c53hXlX4LtGPnubH7a8jOolRq98zmZeBYE27+WLs2aN7Ufld6mYk90/i7u4CqR+Fh2Kfht04SlUJCjnS5A9bTPwU9XGRHJ0BiOhzTuSMUJTRaPRM=,iv:L50K5zc1o99Ix9nP0pb9PRH+VIN2yvq7JqKeVHxVXmc=,tag:XFLkSCsdbTPxbasDYYxcFQ==,type:str]",
|
||||||
|
"unencrypted_suffix": "_unencrypted",
|
||||||
|
"version": "3.10.2"
|
||||||
|
}
|
||||||
|
}
|
||||||
1 checks/wireguard/sops/secrets/peer2-age.key/users/admin Symbolic link
@@ -0,0 +1 @@
../../../users/admin
15 checks/wireguard/sops/secrets/peer3-age.key/secret Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
{
|
||||||
|
"data": "ENC[AES256_GCM,data:qfLm6+g1vYnESCik9uyBeKsY6Ju2Gq3arnn2I8HHNO67Ri5BWbOQTvtz7WT8/q94RwVjv8SGeJ/fsJSpwLSrJSbqTZCPAnYwzzQ=,iv:PnA9Ao8RRELNhNQYbaorstc0KaIXRU7h3+lgDCXZFHk=,tag:VeLgYQYwqthYihIoQTwYiA==,type:str]",
|
||||||
|
"sops": {
|
||||||
|
"age": [
|
||||||
|
{
|
||||||
|
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||||
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBNWVVQaDJFd0N3WHptRC9Z\nZTgxTWh5bnU1SkpqRWRXZnhPaFhpSVJmVEhrCjFvdHFYenNWaFNrdXlha09iS2xj\nOTZDcUNkcHkvTDUwNjM4Z3gxUkxreUEKLS0tIE5oY3Q2bWhsb2FSQTVGTWVSclJw\nWllrelRwT3duYjJJbTV0d3FwU1VuNlkK2eN3fHFX/sVUWom8TeZC9fddqnSCsC1+\nJRCZsG46uHDxqLcKIfdFWh++2t16XupQYk3kn+NUR/aMc3fR32Uwjw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"lastmodified": "2025-08-13T09:20:18Z",
|
||||||
|
"mac": "ENC[AES256_GCM,data:nUwsPcP1bsDjAHFjQ1NlVkTwyZY4B+BpzNkMx9gl0rE14j425HVLtlhlLndhRp+XMpnDldQppLAAtSdzMsrw8r5efNgTRl7cu4Fy/b9cHt84k7m0aou5lrGus9SV1bM7/fzC9Xm7CSXBcRzyDGVsKC6UBl1rx+ybh7HyAN05XSo=,iv:It57H+zUUNPkoN1D8sYwyZx5zIFIga7mydhGUHYBCGE=,tag:mBQdYqUpjPknbYa13qESyw==,type:str]",
|
||||||
|
"unencrypted_suffix": "_unencrypted",
|
||||||
|
"version": "3.10.2"
|
||||||
|
}
|
||||||
|
}
|
||||||
1 checks/wireguard/sops/secrets/peer3-age.key/users/admin Symbolic link
@@ -0,0 +1 @@
../../../users/admin
4 checks/wireguard/sops/users/admin/key.json Normal file
@@ -0,0 +1,4 @@
{
  "publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
  "type": "age"
}
@@ -0,0 +1 @@
../../../../../../sops/machines/controller1
@@ -0,0 +1,19 @@
|
|||||||
|
{
|
||||||
|
"data": "ENC[AES256_GCM,data:noe913+28JWkoDkGGMu++cc1+j5NPDoyIhWixdsowoiVO3cTWGkZ88SUGO5D,iv:ynYMljwqMcBdk8RpVcw/2Jflg2RCF28r4fKUgIAF8B4=,tag:+TsXDJgfUhKgg4iQVXKKlQ==,type:str]",
|
||||||
|
"sops": {
|
||||||
|
"age": [
|
||||||
|
{
|
||||||
|
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||||
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBhYVRReTZBQ05GYmVBVjhS\nNXM5aFlhVzZRaVl6UHl6S3JnMC9Sb1dwZ1ZjCmVuS2dEVExYZWROVklUZWFCSnM2\nZnlxbVNseTM2c0Q0TjhsT3NzYmtqREUKLS0tIHBRTFpvVGt6d1cxZ2lFclRsUVhZ\nZDlWaG9PcXVrNUZKaEgxWndjUDVpYjgKt0eOhAgcYdkg9JSEakx4FjChLTn3pis+\njOkuGd4JfXMKcwC7vJV5ygQBxzVJSBw+RucP7sYCBPK0m8Voj94ntw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"recipient": "age1rnkc2vmrupy9234clyu7fpur5kephuqs3v7qauaw5zeg00jqjdasefn3cc",
|
||||||
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6MFJqNHNraG9DSnJZMFdz\ndU8zVXNTamxROFd1dWtuK2RiekhPdHhleVhFCi8zNWJDNXJMRUlDdjc4Q0UycTIz\nSGFGSmdnNU0wZWlDaTEwTzBqWjh6SFkKLS0tIEJOdjhOMDY2TUFLb3RPczNvMERx\nYkpSeW5VOXZvMlEvdm53MDE3aUFTNjgKyelSTjrTIR9I3rJd3krvzpsrKF1uGs4J\n4MtmQj0/3G+zPYZVBx7b3HF6B3f1Z7LYh05+z7nCnN/duXyPnDjNcg==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"lastmodified": "2025-08-13T09:19:37Z",
|
||||||
|
"mac": "ENC[AES256_GCM,data:+DmIkPG/H6tCtf8CvB98E1QFXv08QfTcCB3CRsi+XWnIRBkryRd/Au9JahViHMdK7MED8WNf84NWTjY2yH4y824/DjI8XXNMF1iVMo0CqY42xbVHtUuhXrYeT+c8CyEw+M6zfy1jC0+Bm3WQWgagz1G6A9SZk3D2ycu0N08+axA=,iv:kwBjTYebIy5i2hagAajSwwuKnSkrM9GyrnbeQXB2e/w=,tag:EgKJ5gVGYj1NGFUduxLGfg==,type:str]",
|
||||||
|
"unencrypted_suffix": "_unencrypted",
|
||||||
|
"version": "3.10.2"
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1 @@
../../../../../../sops/users/admin
@@ -0,0 +1 @@
lQfR7GhivN87XoXruTGOPjVPhNu1Brt//wyc3pdwE20=
@@ -0,0 +1 @@
7470bb5c79df224a9b7f5a2259acd2e46db763c27e24cb3416c8b591cb328077
@@ -0,0 +1 @@
fd51:19c1:3b:f700
@@ -0,0 +1 @@
../../../../../../sops/machines/controller2
@@ -0,0 +1,19 @@
|
|||||||
|
{
|
||||||
|
"data": "ENC[AES256_GCM,data:2kehACgvNgoYGPwnW7p86BR0yUu689Chth6qZf9zoJtuTY9ATS68dxDyBc5S,iv:qb2iDUtExegTeN3jt6SA8RnU61W5GDDhn56QXiQT4gw=,tag:pSGPICX5p6qlZ1WMVoIEYQ==,type:str]",
|
||||||
|
"sops": {
|
||||||
|
"age": [
|
||||||
|
{
|
||||||
|
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||||
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBSTTR5TDY4RE9VYmlCK1dL\nWkVRcVZqVDlsbmQvUlJmdzF2b1Z1S0k3NngwCkFWNzRVaERtSmFsd0o2aFJOb0ZX\nSU9yUnVaNi9IUjJWeGRFcEpDUXo5WkEKLS0tIEczNkxiYnJsTWRoLzFhQVF1M21n\nWnZEdGV1N2N5d1FZQkJUQ1IrdGFLblkKPTpha2bxS8CCAMXWTDKX/WOcdvggaP3Y\nqewyahDNzb4ggP+LNKp55BtwFjdvoPoq4BpYOOgMRbQMMk+H1o9WFw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"recipient": "age1t2hhg99d4p2yymuhngcy5ccutp8mvu7qwvg5cdhck303h9e7ha9qnlt635",
|
||||||
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBYcEZ6Tzk3M0pkV0tOdTBj\nenF2a0tHNnhBa0NrazMwV1VBbXBZR3pzSHpvCnBZOEU0VlFHS1FHcVpTTDdPczVV\nV0RFSlZ0VmIzWGoydEdKVXlIUE9OOEkKLS0tIFZ0cWVBR1loeVlWa2c4U3oweXE2\ncm1ja0JCS3U5Nk41dlAzV2NabDc2bDQKdgCDNnpRZlFPnEGlX6fo0SQX4yOB+E6r\ntnSwofR3xxZvkyme/6JJU5qBZXyCXEAhKMRkFyvJANXzMJAUo/Osow==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"lastmodified": "2025-08-13T09:19:48Z",
|
||||||
|
"mac": "ENC[AES256_GCM,data:e3EkL8vwRhLsec83Zi9DE3PKT+4RwgiffpN4QHcJKTgmDW6hzizWc5kAxbNWGJ9Qqe6sso2KY7tc+hg1lHEsmzjCbg153p8h+7lVI2XT6adi/CS8WZ2VpeL+0X9zDQCjqHmrESZAYFBdkLqO4jucdf0Pc3CKKD+N3BDDTwSUvHM=,iv:xvR7dJL8sdYen00ovrYT8PNxhB9XxSWDSRz1IK23I/o=,tag:OyhAvllBgfAp3eGeNpR/Nw==,type:str]",
|
||||||
|
"unencrypted_suffix": "_unencrypted",
|
||||||
|
"version": "3.10.2"
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1 @@
../../../../../../sops/users/admin
@@ -0,0 +1 @@
5Z7gbLFbXpEFfomW2pKyZBpZN5xvUtiqrIL0GVfNtQ8=
@@ -0,0 +1 @@
c3672fdb9fb31ddaf6572fc813cf7a8fe50488ef4e9d534c62d4f29da60a1a99
@@ -0,0 +1 @@
fd51:19c1:c1:aa00
@@ -0,0 +1 @@
../../../../../../sops/machines/peer1
@@ -0,0 +1,19 @@
|
|||||||
|
{
|
||||||
|
"data": "ENC[AES256_GCM,data:b+akw85T3D9xc75CPLHucR//k7inpxKDvgpR8tCNKwNDRVjVHjcABhfZNLXW,iv:g11fZE8UI0MVh9GKdjR6leBlxa4wN7ZubozXG/VlBbw=,tag:0YkzWCW3zJ3Mt3br/jmTYw==,type:str]",
|
||||||
|
"sops": {
|
||||||
|
"age": [
|
||||||
|
{
|
||||||
|
"recipient": "age1jts52rzlqcwjc36jkp56a7fmjn3czr7kl9ta2spkfzhvfama33sqacrzzd",
|
||||||
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBXWkJUR0pIa2xOSEw2dThm\nYlNuOHZCVW93Wkc5LzE4YmpUTHRkZlk3ckc4CnN4M3ZRMWNFVitCT3FyWkxaR0di\nb0NmSXFhRHJmTWg0d05OcWx1LytscEEKLS0tIEtleTFqU3JrRjVsdHpJeTNuVUhF\nWEtnOVlXVXRFamFSak5ia2F2b0JiTzAKlhOBZvZ4AN+QqAYQXvd6YNmgVS4gtkWT\nbV3bLNTgwtrDtet9NDHM8vdF+cn5RZxwFfgmTbDEow6Zm8EXfpxj/g==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||||
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6YVYyQkZqMTJYQTlyRG5Y\nbnJ2UkE1TS9FZkpSa2tQbk1hQjViMi9OcGk0CjFaZUdjU3JtNzh0bDFXdTdUVW4x\nanFqZHZjZjdzKzA2MC8vTWh3Uy82UGcKLS0tIDhyOFl3UGs3czdoMlpza3UvMlB1\nSE90MnpGc05sSCtmVWg0UVNVdmRvN2MKHlCr4U+7bsoYb+2fgT4mEseZCEjxrtLu\n55sR/4YH0vqMnIBnLTSA0e+WMrs3tQfseeJM5jY/ZNnpec1LbxkGTg==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"lastmodified": "2025-08-13T09:19:58Z",
|
||||||
|
"mac": "ENC[AES256_GCM,data:gEoEC9D2Z7k5F8egaY1qPXT5/96FFVsyofSBivQ28Ir/9xHX2j40PAQrYRJUWsk/GAUMOyi52Wm7kPuacw+bBcdtQ0+MCDEmjkEnh1V83eZ/baey7iMmg05uO92MYY5o4e7ZkwzXoAeMCMcfO0GqjNvsYJHF1pSNa+UNDj+eflw=,iv:dnIYpvhAdvUDe9md53ll42krb0sxcHy/toqGc7JFxNA=,tag:0WkZU7GeKMD1DQTYaI+1dg==,type:str]",
|
||||||
|
"unencrypted_suffix": "_unencrypted",
|
||||||
|
"version": "3.10.2"
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1 @@
../../../../../../sops/users/admin
@@ -0,0 +1 @@
juK7P/92N2t2t680aLIRobHc3ts49CsZBvfZOyIKpUc=
@@ -0,0 +1 @@
b36142569a74a0de0f9b229f2a040ae33a22d53bef5e62aa6939912d0cda05ba
@@ -0,0 +1 @@
6987:50a0:9b93:4337
@@ -0,0 +1 @@
../../../../../../sops/machines/peer2
@@ -0,0 +1,19 @@
|
|||||||
|
{
|
||||||
|
"data": "ENC[AES256_GCM,data:apX2sLwtq6iQgLJslFwiRMNBUe0XLzLQbhKfmb2pKiJG7jGNHUgHJz3Ls4Ca,iv:HTDatm3iD5wACTkkd3LdRNvJfnfg75RMtn9G6Q7Fqd4=,tag:Mfehlljnes5CFD1NJdk27A==,type:str]",
|
||||||
|
"sops": {
|
||||||
|
"age": [
|
||||||
|
{
|
||||||
|
"recipient": "age12nqnp0zd435ckp5p0v2fv4p2x4cvur2mnxe8use2sx3fgy883vaq4ae75e",
|
||||||
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBVZzFyMUZsd2V2VWxOUmhP\nZE8yZTc4Q0RkZisxR25NemR1TzVDWmJZVjBVClA1MWhsU0xzSG16aUx3cWFWKzlG\nSkxrT09OTkVqLzlWejVESE1QWHVJaFkKLS0tIGxlaGVuWU43RXErNTB3c3FaUnM3\nT0N5M253anZkbnFkZWw2VHA0eWhxQW8Kd1PMtEX1h0Hd3fDLMi++gKJkzPi9FXUm\n+uYhx+pb+pJM+iLkPwP/q6AWC7T0T4bHfekkdzxrbsKMi73x/GrOiw==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||||
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBqVzRIMWdlNjVwTURyMFkv\nSUhiajZkZVNuWklRYit6cno4UzNDa2szOFN3CkQ2TWhHb25pbmR1MlBsRXNLL2lx\ncVZ3c3BsWXN2aS9UUVYvN3I4S0xUSmMKLS0tIE5FV0U5aXVUZk9XL0U0Z2ZSNGd5\nbU9zY3IvMlpSNVFLYkRNQUpUYVZOWFUK7j4Otzb8CJTcT7aAj9/irxHEDXh1HkTg\nzz7Ho8/ZncNtaCVHlHxjTgVW9d5aIx8fSsV9LRCFwHMtNzvwj1Nshg==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"lastmodified": "2025-08-13T09:20:08Z",
|
||||||
|
"mac": "ENC[AES256_GCM,data:e7WNVEz78noHBiz6S3A6qNfop+yBXB3rYN0k4GvaQKz3b99naEHuqIF8Smzzt4XrbbiPKu2iLa5ddLBlqqsi32UQUB8JS9TY7hvW8ol+jpn0VxusGCXW9ThdDEsM/hXiPyr331C73zTvbOYI1hmcGMlJL9cunVRO9rkMtEqhEfo=,iv:6zt7wjIs1y5xDHNK+yLOwoOuUpY7/dOGJGT6UWAFeOg=,tag:gzFTgoxhoLzUV0lvzOhhfg==,type:str]",
|
||||||
|
"unencrypted_suffix": "_unencrypted",
|
||||||
|
"version": "3.10.2"
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1 @@
../../../../../../sops/users/admin
@@ -0,0 +1 @@
XI9uSaQRDBCb82cMnGzGJcbqRfDG/IXZobyeL+kV03k=
@@ -0,0 +1 @@
360f9fce4a984eb87ce2a673eb5341ecb89c0f62126548d45ef25ff5243dd646
@@ -0,0 +1 @@
3b21:3ced:003e:89b3
@@ -0,0 +1 @@
../../../../../../sops/machines/peer3
@@ -0,0 +1,19 @@
|
|||||||
|
{
|
||||||
|
"data": "ENC[AES256_GCM,data:Gluvjes/3oH5YsDq00JDJyJgoEFcj56smioMArPSt309MDGExYX2QsCzeO1q,iv:oBBJRDdTj/1dWEvzhdFKQ2WfeCKyavKMLmnMbqnU5PM=,tag:2WNFxKz2dWyVcybpm5N4iw==,type:str]",
|
||||||
|
"sops": {
|
||||||
|
"age": [
|
||||||
|
{
|
||||||
|
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
|
||||||
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBtQWpjRmhZTFdPa2VSZkFN\nbUczMlY5bDBmMTdoMy8xcWxMaXpWVitMZGdjCnRWb2Y3eGpHU1hmNHRJVFBqbU5w\nVEZGdUIrQXk0U0dUUEZ6bE5EMFpTRHMKLS0tIGpYSmZmQThJUTlvTHpjc05ZVlM4\nQWhTOWxnUHZnYlJ3czE3ZUJ0L3ozWTQK3a7N0Zpzo4sUezYveqvKR49RUdJL23eD\n+cK5lk2xbtj+YHkeG+dg7UlHfDaicj0wnFH1KLuWmNd1ONa6eQp3BQ==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"recipient": "age1sglr4zp34drjfydzeweq43fz3uwpul3hkh53lsfa9drhuzwmkqyqn5jegp",
|
||||||
|
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA3a2FOWlVsSkdnendrYmUz\ndEpuL1hZSWNFTUtDYm14S3V1aW9KS3hsazJRCkp2SkFFbi9hbGJpNks1MlNTL0s5\nTk5pcUMxaEJobkcvWmRGeU9jMkdNdzAKLS0tIDR6M0Y5eE1ETHJJejAzVW1EYy9v\nZCtPWHJPUkhuWnRzSGhMUUtTa280UmMKXvtnxyop7PmRvTOFkV80LziDjhGh93Pf\nYwhD/ByD/vMmr21Fd6PVHOX70FFT30BdnMc1/wt7c/0iAw4w4GoQsA==\n-----END AGE ENCRYPTED FILE-----\n"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"lastmodified": "2025-08-13T09:20:18Z",
|
||||||
|
"mac": "ENC[AES256_GCM,data:3nXMTma0UYXCco+EM8UW45cth7DVMboFBKyesL86GmaG6OlTkA2/25AeDrtSVO13a5c2jC6yNFK5dE6pSe5R9f0BoDF7d41mgc85zyn+LGECNWKC6hy6gADNSDD6RRuV1S3FisFQl1F1LD8LiSWmg/XNMZzChNlHYsCS8M+I84g=,iv:pu5VVXAVPmVoXy0BJ+hq5Ar8R0pZttKSYa4YS+dhDNc=,tag:xp1S/4qExnxMTGwhfLJrkA==,type:str]",
|
||||||
|
"unencrypted_suffix": "_unencrypted",
|
||||||
|
"version": "3.10.2"
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1 @@
../../../../../../sops/users/admin
@@ -0,0 +1 @@
t6qN4VGLR+VMhrBDNKQEXZVyRsEXs1/nGFRs5DI82F8=
@@ -0,0 +1 @@
e3facc99b73fe029d4c295f71829a83f421f38d82361cf412326398175da162a
@@ -0,0 +1 @@
e42b:bf85:33f4:f0b1
@@ -41,25 +41,13 @@
       };
     };
   };

-    perInstance =
-      { settings, ... }:
-      {
-        nixosModule =
-          { ... }:
-          {
-            imports = [
-              # We don't have a good way to specify dependencies between
-              # clanServices for now. When it get's implemtende, we should just
-              # use the ssh and users modules here.
-              ./ssh.nix
-              ./root-password.nix
-            ];
-
-            _module.args = { inherit settings; };
-
-            users.users.root.openssh.authorizedKeys.keys = builtins.attrValues settings.allowedKeys;
-          };
-      };
   };

+  # We don't have a good way to specify dependencies between
+  # clanServices for now. When it get's implemtende, we should just
+  # use the ssh and users modules here.
+  imports = [
+    ./ssh.nix
+    ./root-password.nix
+  ];
 }
@@ -1,39 +1,55 @@
 # We don't have a way of specifying dependencies between clanServices for now.
 # When it get's added this file should be removed and the users module used instead.
 {
-  config,
-  pkgs,
-  ...
-}:
-{
+  roles.default.perInstance =
+    { ... }:
+    {
+      nixosModule =
+        {
+          config,
+          pkgs,
+          ...
+        }:
+        {
           users.mutableUsers = false;
           users.users.root.hashedPasswordFile =
             config.clan.core.vars.generators.root-password.files.password-hash.path;

           clan.core.vars.generators.root-password = {
             files.password-hash.neededFor = "users";

             files.password.deploy = false;

             runtimeInputs = [
               pkgs.coreutils
               pkgs.mkpasswd
               pkgs.xkcdpass
             ];

+            prompts.password.display = {
+              group = "Root User";
+              label = "Password";
+              required = false;
+              helperText = ''
+                Your password will be encrypted and stored securely using the secret store you've configured.
+              '';
+            };
+
             prompts.password.type = "hidden";
             prompts.password.persist = true;
             prompts.password.description = "Leave empty to generate automatically";

             script = ''
               prompt_value="$(cat "$prompts"/password)"
               if [[ -n "''${prompt_value-}" ]]; then
                 echo "$prompt_value" | tr -d "\n" > "$out"/password
               else
                 xkcdpass --numwords 5 --delimiter - --count 1 | tr -d "\n" > "$out"/password
               fi
               mkpasswd -s -m sha-512 < "$out"/password | tr -d "\n" > "$out"/password-hash
             '';
           };
+        };
+    };
 }
@@ -1,115 +1,124 @@
 {
-  config,
-  pkgs,
-  lib,
-  settings,
-  ...
-}:
-let
-  stringSet = list: builtins.attrNames (builtins.groupBy lib.id list);
-
-  domains = stringSet settings.certificateSearchDomains;
-in
-{
+  roles.default.perInstance =
+    { settings, ... }:
+    {
+      nixosModule =
+        {
+          config,
+          pkgs,
+          lib,
+          ...
+        }:
+        let
+          stringSet = list: builtins.attrNames (builtins.groupBy lib.id list);
+
+          domains = stringSet settings.certificateSearchDomains;
+        in
+        {
+          users.users.root.openssh.authorizedKeys.keys = builtins.attrValues settings.allowedKeys;

           services.openssh = {
             enable = true;
             settings.PasswordAuthentication = false;

             settings.HostCertificate = lib.mkIf (
               settings.certificateSearchDomains != [ ]
             ) config.clan.core.vars.generators.openssh-cert.files."ssh.id_ed25519-cert.pub".path;

             hostKeys = [
               {
                 path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
                 type = "ed25519";
               }
             ]
             ++ lib.optional settings.rsaHostKey.enable {
               path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
               type = "rsa";
             };
           };

           clan.core.vars.generators.openssh = {
             files."ssh.id_ed25519" = { };
             files."ssh.id_ed25519.pub".secret = false;
             migrateFact = "openssh";
             runtimeInputs = [
               pkgs.coreutils
               pkgs.openssh
             ];
             script = ''
               ssh-keygen -t ed25519 -N "" -C "" -f "$out"/ssh.id_ed25519
             '';
           };

           programs.ssh.knownHosts.clan-sshd-self-ed25519 = {
             hostNames = [
               "localhost"
               config.networking.hostName
             ]
             ++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
             publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
           };

           clan.core.vars.generators.openssh-rsa = lib.mkIf settings.rsaHostKey.enable {
             files."ssh.id_rsa" = { };
             files."ssh.id_rsa.pub".secret = false;
             runtimeInputs = [
               pkgs.coreutils
               pkgs.openssh
             ];
             script = ''
               ssh-keygen -t rsa -b 4096 -N "" -C "" -f "$out"/ssh.id_rsa
             '';
           };

           clan.core.vars.generators.openssh-cert = lib.mkIf (settings.certificateSearchDomains != [ ]) {
             files."ssh.id_ed25519-cert.pub".secret = false;
             dependencies = [
               "openssh"
               "openssh-ca"
             ];
             validation = {
               name = config.clan.core.settings.machine.name;
               domains = lib.genAttrs settings.certificateSearchDomains lib.id;
             };
             runtimeInputs = [
               pkgs.openssh
               pkgs.jq
             ];
             script = ''
               ssh-keygen \
                 -s $in/openssh-ca/id_ed25519 \
                 -I ${config.clan.core.settings.machine.name} \
                 -h \
                 -n ${lib.concatMapStringsSep "," (d: "${config.clan.core.settings.machine.name}.${d}") domains} \
                 $in/openssh/ssh.id_ed25519.pub
               mv $in/openssh/ssh.id_ed25519-cert.pub "$out"/ssh.id_ed25519-cert.pub
             '';
           };

           clan.core.vars.generators.openssh-ca = lib.mkIf (settings.certificateSearchDomains != [ ]) {
             share = true;
             files.id_ed25519.deploy = false;
             files."id_ed25519.pub" = {
               deploy = false;
               secret = false;
             };
             runtimeInputs = [
               pkgs.openssh
             ];
             script = ''
               ssh-keygen -t ed25519 -N "" -C "" -f "$out"/id_ed25519
             '';
           };

           programs.ssh.knownHosts.ssh-ca = lib.mkIf (settings.certificateSearchDomains != [ ]) {
             certAuthority = true;
             extraHostNames = builtins.map (domain: "*.${domain}") settings.certificateSearchDomains;
             publicKey = config.clan.core.vars.generators.openssh-ca.files."id_ed25519.pub".value;
           };
+        };
+    };
 }
86
clanServices/dyndns/README.md
Normal file
86
clanServices/dyndns/README.md
Normal file
@@ -0,0 +1,86 @@
A Dynamic-DNS (DDNS) service continuously keeps one or more DNS records in sync with the current public IP address of your machine.
In *clan* this service is backed by [qdm12/ddns-updater](https://github.com/qdm12/ddns-updater).

> Info
> ddns-updater itself is **heavily opinionated and version-specific**. Whenever you need the exhaustive list of flags or
> provider-specific fields, refer to its *versioned* documentation – **not** the GitHub README.

---

# 1. Configuration model

Internally ddns-updater consumes a single file named `config.json`.
A minimal configuration for the registrar *Namecheap* looks like:

```json
{
  "settings": [
    {
      "provider": "namecheap",
      "domain": "sub.example.com",
      "password": "e5322165c1d74692bfa6d807100c0310"
    }
  ]
}
```

Another example for *Porkbun*:

```json
{
  "settings": [
    {
      "provider": "porkbun",
      "domain": "domain.com",
      "api_key": "pk1_…",
      "secret_api_key": "sk1_…",
      "ip_version": "ipv4",
      "ipv6_suffix": ""
    }
  ]
}
```

When you write a `clan.nix`, the **common** fields (`provider`, `domain`, `period`, …) are already exposed as typed
*Nix options*.
Registrar-specific or very new keys can be passed through an open attribute set called **extraSettings**.

---

# 2. Full Porkbun example

Manage three records – `@`, `home` and `test` – of the domain
`jon.blog` and refresh them every 15 minutes:

```nix title="clan.nix" hl_lines="10-11"
inventory.instances = {
  dyndns = {
    roles.default.machines."jon" = { };
    roles.default.settings = {
      period = 15; # minutes
      settings = {
        "all-jon-blog" = {
          provider = "porkbun";
          domain = "jon.blog";

          # (1) tell the secret-manager which key we are going to store
          secret_field_name = "secret_api_key";

          # everything below is copied verbatim into config.json
          extraSettings = {
            host = "@,home,test"; # (2) comma-separated list of sub-domains
            ip_version = "ipv4";
            ipv6_suffix = "";
            api_key = "pk1_4bb2b231275a02fdc23b7e6f3552s01S213S"; # (3) public – safe to commit
          };
        };
      };
    };
  };
};
```

1. `secret_field_name` tells the *vars-generator* to store the entered secret under the specified JSON field name in the configuration.
2. ddns-updater allows multiple hosts by separating them with a comma.
3. The `api_key` above is *public*; the corresponding **private key** is retrieved through `secret_field_name`.
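For comparison, a minimal sketch of how the *Namecheap* configuration from section 1 could look in `clan.nix` (the machine name `jon` and the attribute name `sub-example-com` are illustrative assumptions; the password itself is never written down here, it is entered through the clan vars prompt):

```nix
inventory.instances = {
  dyndns = {
    roles.default.machines."jon" = { };
    roles.default.settings = {
      settings = {
        "sub-example-com" = {
          provider = "namecheap";
          domain = "sub.example.com";
          # "password" is the default secret_field_name, so this line could be omitted;
          # the secret is prompted for and stored by the vars generator, not committed.
          secret_field_name = "password";
        };
      };
    };
  };
};
```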
277
clanServices/dyndns/default.nix
Normal file
@@ -0,0 +1,277 @@
|
|||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
_class = "clan.service";
|
||||||
|
manifest.name = "clan-core/dyndns";
|
||||||
|
manifest.description = "A dynamic DNS service to update domain IPs";
|
||||||
|
manifest.categories = [ "Network" ];
|
||||||
|
manifest.readme = builtins.readFile ./README.md;
|
||||||
|
|
||||||
|
roles.default = {
|
||||||
|
interface =
|
||||||
|
{ lib, ... }:
|
||||||
|
{
|
||||||
|
options = {
|
||||||
|
server = {
|
||||||
|
enable = lib.mkEnableOption "dyndns webserver";
|
||||||
|
domain = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
description = "Domain to serve the webservice on";
|
||||||
|
};
|
||||||
|
port = lib.mkOption {
|
||||||
|
type = lib.types.int;
|
||||||
|
default = 54805;
|
||||||
|
description = "Port to listen on";
|
||||||
|
};
|
||||||
|
acmeEmail = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
description = ''
|
||||||
|
Email address for account creation and correspondence from the CA.
|
||||||
|
It is recommended to use the same email for all certs to avoid account
|
||||||
|
creation limits.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
period = lib.mkOption {
|
||||||
|
type = lib.types.int;
|
||||||
|
default = 5;
|
||||||
|
description = "Domain update period in minutes";
|
||||||
|
};
|
||||||
|
|
||||||
|
settings = lib.mkOption {
|
||||||
|
type = lib.types.attrsOf (
|
||||||
|
lib.types.submodule (
|
||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
options = {
|
||||||
|
provider = lib.mkOption {
|
||||||
|
example = "namecheap";
|
||||||
|
type = lib.types.str;
|
||||||
|
description = "The dyndns provider to use";
|
||||||
|
};
|
||||||
|
domain = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
example = "example.com";
|
||||||
|
description = "The top level domain to update.";
|
||||||
|
};
|
||||||
|
secret_field_name = lib.mkOption {
|
||||||
|
example = "api_key";
|
||||||
|
|
||||||
|
type = lib.types.enum [
|
||||||
|
"password"
|
||||||
|
"token"
|
||||||
|
"api_key"
|
||||||
|
"secret_api_key"
|
||||||
|
];
|
||||||
|
default = "password";
|
||||||
|
description = "The field name for the secret";
|
||||||
|
};
|
||||||
|
extraSettings = lib.mkOption {
|
||||||
|
type = lib.types.attrsOf lib.types.str;
|
||||||
|
default = { };
|
||||||
|
description = ''
|
||||||
|
Extra settings for the provider.
|
||||||
|
Provider specific settings: https://github.com/qdm12/ddns-updater#configuration
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
)
|
||||||
|
);
|
||||||
|
default = { };
|
||||||
|
description = "Configuration for which domains to update";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
perInstance =
|
||||||
|
{ settings, ... }:
|
||||||
|
{
|
||||||
|
nixosModule =
|
||||||
|
{
|
||||||
|
config,
|
||||||
|
lib,
|
||||||
|
pkgs,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
let
|
||||||
|
name = "dyndns";
|
||||||
|
cfg = settings;
|
||||||
|
|
||||||
|
# We dedup secrets if they have the same provider + base domain
|
||||||
|
secret_id = opt: "${name}-${opt.provider}-${opt.domain}";
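# Illustrative example: provider "porkbun" with domain "jon.blog" yields the id "dyndns-porkbun-jon.blog".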
|
||||||
|
secret_path =
|
||||||
|
opt: config.clan.core.vars.generators."${secret_id opt}".files."${secret_id opt}".path;
|
||||||
|
|
||||||
|
# We check that a secret has not been set in extraSettings.
|
||||||
|
extraSettingsSafe =
|
||||||
|
opt:
|
||||||
|
if (builtins.hasAttr opt.secret_field_name opt.extraSettings) then
|
||||||
|
throw "Please do not set ${opt.secret_field_name} in extraSettings, it is automatically set by the dyndns module."
|
||||||
|
else
|
||||||
|
opt.extraSettings;
|
||||||
|
|
||||||
|
service_config = {
|
||||||
|
settings = builtins.catAttrs "value" (
|
||||||
|
builtins.attrValues (
|
||||||
|
lib.mapAttrs (_: opt: {
|
||||||
|
value =
|
||||||
|
(extraSettingsSafe opt)
|
||||||
|
// {
|
||||||
|
domain = opt.domain;
|
||||||
|
provider = opt.provider;
|
||||||
|
}
|
||||||
|
// {
|
||||||
|
"${opt.secret_field_name}" = secret_id opt;
|
||||||
|
};
|
||||||
|
}) cfg.settings
|
||||||
|
)
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
secret_generator = _: opt: {
|
||||||
|
name = secret_id opt;
|
||||||
|
value = {
|
||||||
|
share = true;
|
||||||
|
migrateFact = "${secret_id opt}";
|
||||||
|
prompts.${secret_id opt} = {
|
||||||
|
type = "hidden";
|
||||||
|
persist = true;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
in
|
||||||
|
{
|
||||||
|
imports = lib.optional cfg.server.enable (
|
||||||
|
lib.modules.importApply ./nginx.nix {
|
||||||
|
inherit config;
|
||||||
|
inherit settings;
|
||||||
|
inherit lib;
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
clan.core.vars.generators = lib.mkIf (cfg.settings != { }) (
|
||||||
|
lib.mapAttrs' secret_generator cfg.settings
|
||||||
|
);
|
||||||
|
|
||||||
|
users.groups.${name} = lib.mkIf (cfg.settings != { }) { };
|
||||||
|
users.users.${name} = lib.mkIf (cfg.settings != { }) {
|
||||||
|
group = name;
|
||||||
|
isSystemUser = true;
|
||||||
|
description = "User for ${name} service";
|
||||||
|
home = "/var/lib/${name}";
|
||||||
|
createHome = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
services.nginx = lib.mkIf cfg.server.enable {
|
||||||
|
virtualHosts = {
|
||||||
|
"${cfg.server.domain}" = {
|
||||||
|
forceSSL = true;
|
||||||
|
enableACME = true;
|
||||||
|
locations."/" = {
|
||||||
|
proxyPass = "http://localhost:${toString cfg.server.port}";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
systemd.services.${name} = lib.mkIf (cfg.settings != { }) {
|
||||||
|
path = [ ];
|
||||||
|
description = "Dynamic DNS updater";
|
||||||
|
after = [ "network.target" ];
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
environment = {
|
||||||
|
MYCONFIG = "${builtins.toJSON service_config}";
|
||||||
|
SERVER_ENABLED = if cfg.server.enable then "yes" else "no";
|
||||||
|
PERIOD = "${toString cfg.period}m";
|
||||||
|
LISTENING_ADDRESS = ":${toString cfg.server.port}";
|
||||||
|
GODEBUG = "netdns=go"; # We need to set this until this has been merged: https://github.com/NixOS/nixpkgs/pull/432758
|
||||||
|
};
|
||||||
|
|
||||||
|
serviceConfig =
|
||||||
|
let
|
||||||
|
pyscript =
|
||||||
|
pkgs.writers.writePython3Bin "generate_secret_config.py"
|
||||||
|
{
|
||||||
|
libraries = [ ];
|
||||||
|
doCheck = false;
|
||||||
|
}
|
||||||
|
''
|
||||||
|
import json
|
||||||
|
from pathlib import Path
|
||||||
|
import os
|
||||||
|
|
||||||
|
cred_dir = Path(os.getenv("CREDENTIALS_DIRECTORY"))
|
||||||
|
config_str = os.getenv("MYCONFIG")
|
||||||
|
|
||||||
|
|
||||||
|
def get_credential(name):
|
||||||
|
secret_p = cred_dir / name
|
||||||
|
with open(secret_p, 'r') as f:
|
||||||
|
return f.read().strip()
|
||||||
|
|
||||||
|
|
||||||
|
config = json.loads(config_str)
|
||||||
|
print(f"Config: {config}")
|
||||||
|
for attrset in config["settings"]:
|
||||||
|
if "password" in attrset:
|
||||||
|
attrset['password'] = get_credential(attrset['password'])
|
||||||
|
elif "token" in attrset:
|
||||||
|
attrset['token'] = get_credential(attrset['token'])
|
||||||
|
elif "secret_api_key" in attrset:
|
||||||
|
attrset['secret_api_key'] = get_credential(attrset['secret_api_key'])
|
||||||
|
elif "api_key" in attrset:
|
||||||
|
attrset['api_key'] = get_credential(attrset['api_key'])
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Missing secret field in {attrset}")
|
||||||
|
|
||||||
|
# create directory data if it does not exist
|
||||||
|
data_dir = Path('data')
|
||||||
|
data_dir.mkdir(mode=0o770, exist_ok=True)
|
||||||
|
|
||||||
|
# Create a temporary config file
|
||||||
|
# with appropriate permissions
|
||||||
|
tmp_config_path = data_dir / '.config.json'
|
||||||
|
tmp_config_path.touch(mode=0o660, exist_ok=False)
|
||||||
|
|
||||||
|
# Write the config with secrets back
|
||||||
|
with open(tmp_config_path, 'w') as f:
|
||||||
|
f.write(json.dumps(config, indent=4))
|
||||||
|
|
||||||
|
# Move config into place
|
||||||
|
config_path = data_dir / 'config.json'
|
||||||
|
tmp_config_path.rename(config_path)
|
||||||
|
|
||||||
|
# Set file permissions to read
|
||||||
|
# and write only by the user and group
|
||||||
|
for file in data_dir.iterdir():
|
||||||
|
file.chmod(0o660)
|
||||||
|
'';
|
||||||
|
in
|
||||||
|
{
|
||||||
|
ExecStartPre = lib.getExe pyscript;
|
||||||
|
ExecStart = lib.getExe pkgs.ddns-updater;
|
||||||
|
LoadCredential = lib.mapAttrsToList (_: opt: "${secret_id opt}:${secret_path opt}") cfg.settings;
|
||||||
|
User = name;
|
||||||
|
Group = name;
|
||||||
|
NoNewPrivileges = true;
|
||||||
|
PrivateTmp = true;
|
||||||
|
ProtectSystem = "strict";
|
||||||
|
ReadOnlyPaths = "/";
|
||||||
|
PrivateDevices = "yes";
|
||||||
|
ProtectKernelModules = "yes";
|
||||||
|
ProtectKernelTunables = "yes";
|
||||||
|
WorkingDirectory = "/var/lib/${name}";
|
||||||
|
ReadWritePaths = [
|
||||||
|
"/proc/self"
|
||||||
|
"/var/lib/${name}"
|
||||||
|
];
|
||||||
|
|
||||||
|
Restart = "always";
|
||||||
|
RestartSec = 60;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
19
clanServices/dyndns/flake-module.nix
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
{ lib, ... }:
|
||||||
|
let
|
||||||
|
module = lib.modules.importApply ./default.nix { };
|
||||||
|
in
|
||||||
|
{
|
||||||
|
clan.modules = {
|
||||||
|
dyndns = module;
|
||||||
|
};
|
||||||
|
|
||||||
|
perSystem =
|
||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
clan.nixosTests.dyndns = {
|
||||||
|
imports = [ ./tests/vm/default.nix ];
|
||||||
|
|
||||||
|
clan.modules."@clan/dyndns" = module;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
50
clanServices/dyndns/nginx.nix
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
{
|
||||||
|
config,
|
||||||
|
lib,
|
||||||
|
settings,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
security.acme.acceptTerms = true;
|
||||||
|
security.acme.defaults.email = settings.server.acmeEmail;
|
||||||
|
|
||||||
|
networking.firewall.allowedTCPPorts = [
|
||||||
|
443
|
||||||
|
80
|
||||||
|
];
|
||||||
|
|
||||||
|
services.nginx = {
|
||||||
|
enable = true;
|
||||||
|
|
||||||
|
statusPage = lib.mkDefault true;
|
||||||
|
recommendedBrotliSettings = lib.mkDefault true;
|
||||||
|
recommendedGzipSettings = lib.mkDefault true;
|
||||||
|
recommendedOptimisation = lib.mkDefault true;
|
||||||
|
recommendedProxySettings = lib.mkDefault true;
|
||||||
|
recommendedTlsSettings = lib.mkDefault true;
|
||||||
|
|
||||||
|
# Nginx sends all access logs to /var/log/nginx/access.log by default
# instead of to the journal, so redirect them to syslog here.
|
||||||
|
commonHttpConfig = "access_log syslog:server=unix:/dev/log;";
|
||||||
|
|
||||||
|
resolver.addresses =
|
||||||
|
let
|
||||||
|
isIPv6 = addr: builtins.match ".*:.*:.*" addr != null;
|
||||||
|
escapeIPv6 = addr: if isIPv6 addr then "[${addr}]" else addr;
|
||||||
|
cloudflare = [
|
||||||
|
"1.1.1.1"
|
||||||
|
"2606:4700:4700::1111"
|
||||||
|
];
|
||||||
|
resolvers =
|
||||||
|
if config.networking.nameservers == [ ] then cloudflare else config.networking.nameservers;
|
||||||
|
in
|
||||||
|
map escapeIPv6 resolvers;
|
||||||
|
|
||||||
|
sslDhparam = config.security.dhparams.params.nginx.path;
|
||||||
|
};
|
||||||
|
|
||||||
|
security.dhparams = {
|
||||||
|
enable = true;
|
||||||
|
params.nginx = { };
|
||||||
|
};
|
||||||
|
}
|
||||||
77
clanServices/dyndns/tests/vm/default.nix
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
{
|
||||||
|
pkgs,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
name = "service-dyndns";
|
||||||
|
|
||||||
|
clan = {
|
||||||
|
directory = ./.;
|
||||||
|
inventory = {
|
||||||
|
machines.server = { };
|
||||||
|
|
||||||
|
instances = {
|
||||||
|
dyndns-test = {
|
||||||
|
module.name = "@clan/dyndns";
|
||||||
|
module.input = "self";
|
||||||
|
roles.default.machines."server".settings = {
|
||||||
|
server = {
|
||||||
|
enable = true;
|
||||||
|
domain = "test.example.com";
|
||||||
|
port = 54805;
|
||||||
|
acmeEmail = "test@example.com";
|
||||||
|
};
|
||||||
|
period = 1;
|
||||||
|
settings = {
|
||||||
|
"test.example.com" = {
|
||||||
|
provider = "namecheap";
|
||||||
|
domain = "example.com";
|
||||||
|
secret_field_name = "password";
|
||||||
|
extraSettings = {
|
||||||
|
host = "test";
|
||||||
|
server = "dynamicdns.park-your-domain.com";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
nodes = {
|
||||||
|
server = {
|
||||||
|
# Disable firewall for testing
|
||||||
|
networking.firewall.enable = false;
|
||||||
|
|
||||||
|
# Mock ACME for testing (avoid real certificate requests)
|
||||||
|
security.acme.defaults.server = "https://localhost:14000/dir";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
testScript = ''
|
||||||
|
start_all()
|
||||||
|
|
||||||
|
# Test that dyndns service starts (will fail without secrets, but that's expected)
|
||||||
|
server.wait_for_unit("multi-user.target")
|
||||||
|
|
||||||
|
# Test that nginx service is running
|
||||||
|
server.wait_for_unit("nginx.service")
|
||||||
|
|
||||||
|
# Test that nginx is listening on expected ports
|
||||||
|
server.wait_for_open_port(80)
|
||||||
|
server.wait_for_open_port(443)
|
||||||
|
|
||||||
|
# Test that the dyndns user was created
|
||||||
|
# server.succeed("getent passwd dyndns")
|
||||||
|
# server.succeed("getent group dyndns")
|
||||||
|
#
|
||||||
|
# Test that the home directory was created
|
||||||
|
server.succeed("test -d /var/lib/dyndns")
|
||||||
|
|
||||||
|
# Test that nginx configuration includes our domain
|
||||||
|
server.succeed("${pkgs.nginx}/bin/nginx -t")
|
||||||
|
|
||||||
|
print("All tests passed!")
|
||||||
|
'';
|
||||||
|
}
|
||||||
@@ -1,3 +1,9 @@
|
|||||||
|
# Example clan service. See https://docs.clan.lol/guides/services/community/
|
||||||
|
# for more details
|
||||||
|
|
||||||
|
# The test for this module in ./tests/vm/default.nix shows an example of how
|
||||||
|
# the service is used.
|
||||||
|
|
||||||
{ packages }:
|
{ packages }:
|
||||||
{ ... }:
|
{ ... }:
|
||||||
{
|
{
|
||||||
@@ -5,30 +11,94 @@
|
|||||||
manifest.name = "clan-core/hello-word";
|
manifest.name = "clan-core/hello-word";
|
||||||
manifest.description = "This is a test";
|
manifest.description = "This is a test";
|
||||||
|
|
||||||
roles.peer = {
|
# This service provides two roles: "morning" and "evening". Roles can be
|
||||||
|
# defined in this file directly (e.g. the "morning" role) or split up into a
|
||||||
|
# separate file (e.g. the "evening" role)
|
||||||
|
roles.morning = {
|
||||||
interface =
|
interface =
|
||||||
{ lib, ... }:
|
{ lib, ... }:
|
||||||
{
|
{
|
||||||
options.foo = lib.mkOption {
|
# Here we define the settings for this role. They will be accessible
|
||||||
|
# via `roles.morning.settings` in the role
|
||||||
|
|
||||||
|
options.greeting = lib.mkOption {
|
||||||
type = lib.types.str;
|
type = lib.types.str;
|
||||||
# default = "";
|
default = "Good morning";
|
||||||
description = "Some option";
|
description = "The greeting to use";
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
# Maps over all instances and produces one result per instance.
|
||||||
|
perInstance =
|
||||||
|
{
|
||||||
|
# Role settings for this machine/instance
|
||||||
|
settings,
|
||||||
|
|
||||||
|
# The name of this instance of the service
|
||||||
|
instanceName,
|
||||||
|
|
||||||
|
# The current machine
|
||||||
|
machine,
|
||||||
|
|
||||||
|
# All roles of this service, with their assigned machines
|
||||||
|
roles,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
# Analog to 'perSystem' of flake-parts.
|
||||||
|
# For every instance of this service we will add a nixosModule to a morning-machine
|
||||||
|
nixosModule =
|
||||||
|
{ config, ... }:
|
||||||
|
{
|
||||||
|
# Interaction examples what you could do here:
|
||||||
|
# - Get some settings of this machine
|
||||||
|
# settings.ipRanges
|
||||||
|
#
|
||||||
|
# - Get all evening names:
|
||||||
|
# allEveningNames = lib.attrNames roles.evening.machines
|
||||||
|
#
|
||||||
|
# - Get all roles of the machine:
|
||||||
|
# machine.roles
|
||||||
|
#
|
||||||
|
# - Get the settings that were applied to a specific evening machine:
|
||||||
|
# roles.evening.machines.peer1.settings
|
||||||
|
imports = [ ];
|
||||||
|
environment.etc.hello.text = "${settings.greeting} World!";
|
||||||
|
};
|
||||||
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
# The implementation of the evening role is in a separate file. We have kept
|
||||||
|
# the interface here, so we can see all settings of the service in one place,
|
||||||
|
# but you can also move it to the respective file
|
||||||
|
roles.evening = {
|
||||||
|
interface =
|
||||||
|
{ lib, ... }:
|
||||||
|
{
|
||||||
|
options.greeting = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "Good evening";
|
||||||
|
description = "The greeting to use";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
imports = [ ./evening.nix ];
|
||||||
|
|
||||||
|
# This part gets applied to all machines, regardless of their role.
|
||||||
perMachine =
|
perMachine =
|
||||||
{ machine, ... }:
|
{ machine, ... }:
|
||||||
{
|
{
|
||||||
nixosModule = {
|
nixosModule =
|
||||||
clan.core.vars.generators.hello = {
|
{ pkgs, ... }:
|
||||||
files.hello = {
|
{
|
||||||
secret = false;
|
environment.systemPackages = [
|
||||||
};
|
(pkgs.writeShellScriptBin "greet-world" ''
|
||||||
script = ''
|
#!${pkgs.bash}/bin/bash
|
||||||
echo "Hello world from ${machine.name}" > $out/hello
|
set -euo pipefail
|
||||||
'';
|
|
||||||
|
cat /etc/hello
|
||||||
|
echo " I'm ${machine.name}"
|
||||||
|
'')
|
||||||
|
];
|
||||||
};
|
};
|
||||||
};
|
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|||||||
12
clanServices/hello-world/evening.nix
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
{
|
||||||
|
roles.evening.perInstance =
|
||||||
|
{ settings, ... }:
|
||||||
|
{
|
||||||
|
nixosModule =
|
||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
imports = [ ];
|
||||||
|
environment.etc.hello.text = "${settings.greeting} World!";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -27,20 +27,10 @@ let
|
|||||||
module.name = "hello-world";
|
module.name = "hello-world";
|
||||||
module.input = "self";
|
module.input = "self";
|
||||||
|
|
||||||
roles.peer.machines.jon = { };
|
roles.evening.machines.jon = { };
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
# NOTE:
|
|
||||||
# If you wonder why 'self-zerotier-redux':
|
|
||||||
# A local module has prefix 'self', otherwise it is the name of the 'input'
|
|
||||||
# The rest is the name of the service as in the instance 'module.name';
|
|
||||||
#
|
|
||||||
# -> ${module.input}-${module.name}
|
|
||||||
# In this case it is 'self-zerotier-redux'
|
|
||||||
# This is usually only used internally, but we can use it to test the evaluation of service module in isolation
|
|
||||||
# evaluatedService =
|
|
||||||
# testFlake.clanInternals.inventoryClass.distributedServices.importedModulesEvaluated.self-zerotier-redux.config;
|
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
test_simple = {
|
test_simple = {
|
||||||
|
|||||||
@@ -5,22 +5,35 @@
|
|||||||
directory = ./.;
|
directory = ./.;
|
||||||
inventory = {
|
inventory = {
|
||||||
machines.peer1 = { };
|
machines.peer1 = { };
|
||||||
|
machines.peer2 = { };
|
||||||
|
|
||||||
instances."test" = {
|
instances."test" = {
|
||||||
module.name = "hello-service";
|
module.name = "hello-service";
|
||||||
module.input = "self";
|
module.input = "self";
|
||||||
roles.peer.machines.peer1 = { };
|
|
||||||
|
# Assign the roles to the two machines
|
||||||
|
roles.morning.machines.peer1 = { };
|
||||||
|
|
||||||
|
roles.evening.machines.peer2 = {
|
||||||
|
# Set roles settings for the peers, where we want to differ from
|
||||||
|
# the role defaults
|
||||||
|
settings = {
|
||||||
|
greeting = "Good night";
|
||||||
|
};
|
||||||
|
};
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
testScript =
|
testScript =
|
||||||
{ nodes, ... }:
|
{ ... }:
|
||||||
''
|
''
|
||||||
start_all()
|
start_all()
|
||||||
|
|
||||||
# peer1 should have the 'hello' file
|
value = peer1.succeed("greet-world")
|
||||||
value = peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.hello.files.hello.path}")
|
assert value.strip() == "Good morning World! I'm peer1", value
|
||||||
assert value.strip() == "Hello world from peer1", value
|
|
||||||
|
value = peer2.succeed("greet-world")
|
||||||
|
assert value.strip() == "Good night World! I'm peer2", value
|
||||||
'';
|
'';
|
||||||
}
|
}
|
||||||
|
|||||||
35
clanServices/localbackup/README.md
Normal file
@@ -0,0 +1,35 @@
## Features

- Creates incremental snapshots using rsnapshot
- Supports multiple backup targets
- Mount/unmount hooks for external storage
- Pre/post backup hooks for custom scripts
- Configurable snapshot retention
- Automatic state folder detection

## Usage

Enable the localbackup service and configure backup targets:

```nix
instances = {
  localbackup = {
    module.name = "@clan/localbackup";
    module.input = "self";
    roles.default.machines."machine".settings = {
      targets.external = {
        directory = "/mnt/backup";
        mountpoint = "/mnt/backup";
      };
    };
  };
};
```

## Commands

The service provides these commands:

- `localbackup-create`: Create a new backup
- `localbackup-list`: List available backups
- `localbackup-restore`: Restore from backup (requires NAME and FOLDERS environment variables)
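The mount/unmount and pre/post hooks listed under Features attach to individual targets. A minimal sketch, assuming an external disk mounted at `/mnt/backup`; the hook bodies and the reduced `snapshots` count are illustrative, not part of the example above:

```nix
roles.default.machines."machine".settings = {
  targets.external = {
    directory = "/mnt/backup";
    mountpoint = "/mnt/backup";
    # Illustrative hooks: run before the target is mounted and after it is unmounted.
    preMountHook = ''
      echo "about to mount the external backup disk"
    '';
    postUnmountHook = ''
      echo "external backup disk unmounted"
    '';
  };
  # Keep only the last 10 snapshots instead of the default 20.
  snapshots = 10;
};
```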
267
clanServices/localbackup/default.nix
Normal file
@@ -0,0 +1,267 @@
|
|||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
_class = "clan.service";
|
||||||
|
manifest.name = "localbackup";
|
||||||
|
manifest.description = "Automatically backups current machine to local directory.";
|
||||||
|
manifest.categories = [ "System" ];
|
||||||
|
manifest.readme = builtins.readFile ./README.md;
|
||||||
|
|
||||||
|
roles.default = {
|
||||||
|
interface =
|
||||||
|
{ lib, ... }:
|
||||||
|
{
|
||||||
|
|
||||||
|
options = {
|
||||||
|
|
||||||
|
targets = lib.mkOption {
|
||||||
|
type = lib.types.attrsOf (
|
||||||
|
lib.types.submodule (
|
||||||
|
{ name, ... }:
|
||||||
|
{
|
||||||
|
options = {
|
||||||
|
name = lib.mkOption {
|
||||||
|
type = lib.types.strMatching "^[a-zA-Z0-9._-]+$";
|
||||||
|
default = name;
|
||||||
|
description = "the name of the backup job";
|
||||||
|
};
|
||||||
|
directory = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
description = "the directory to backup";
|
||||||
|
};
|
||||||
|
mountpoint = lib.mkOption {
|
||||||
|
type = lib.types.nullOr lib.types.str;
|
||||||
|
default = null;
|
||||||
|
description = "mountpoint of the directory to backup. If set, the directory will be mounted before the backup and unmounted afterwards";
|
||||||
|
};
|
||||||
|
preMountHook = lib.mkOption {
|
||||||
|
type = lib.types.nullOr lib.types.lines;
|
||||||
|
default = null;
|
||||||
|
description = "Shell commands to run before the directory is mounted";
|
||||||
|
};
|
||||||
|
postMountHook = lib.mkOption {
|
||||||
|
type = lib.types.nullOr lib.types.lines;
|
||||||
|
default = null;
|
||||||
|
description = "Shell commands to run after the directory is mounted";
|
||||||
|
};
|
||||||
|
preUnmountHook = lib.mkOption {
|
||||||
|
type = lib.types.nullOr lib.types.lines;
|
||||||
|
default = null;
|
||||||
|
description = "Shell commands to run before the directory is unmounted";
|
||||||
|
};
|
||||||
|
postUnmountHook = lib.mkOption {
|
||||||
|
type = lib.types.nullOr lib.types.lines;
|
||||||
|
default = null;
|
||||||
|
description = "Shell commands to run after the directory is unmounted";
|
||||||
|
};
|
||||||
|
preBackupHook = lib.mkOption {
|
||||||
|
type = lib.types.nullOr lib.types.lines;
|
||||||
|
default = null;
|
||||||
|
description = "Shell commands to run before the backup";
|
||||||
|
};
|
||||||
|
postBackupHook = lib.mkOption {
|
||||||
|
type = lib.types.nullOr lib.types.lines;
|
||||||
|
default = null;
|
||||||
|
description = "Shell commands to run after the backup";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
)
|
||||||
|
);
|
||||||
|
# default = { };
|
||||||
|
description = "List of directories where backups are stored";
|
||||||
|
};
|
||||||
|
|
||||||
|
snapshots = lib.mkOption {
|
||||||
|
type = lib.types.int;
|
||||||
|
default = 20;
|
||||||
|
description = "Number of snapshots to keep";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
|
perInstance =
|
||||||
|
{
|
||||||
|
settings,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
nixosModule =
|
||||||
|
{
|
||||||
|
config,
|
||||||
|
lib,
|
||||||
|
pkgs,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
|
||||||
|
let
|
||||||
|
mountHook = target: ''
|
||||||
|
if [[ -x /run/current-system/sw/bin/localbackup-mount-${target.name} ]]; then
|
||||||
|
/run/current-system/sw/bin/localbackup-mount-${target.name}
|
||||||
|
fi
|
||||||
|
if [[ -x /run/current-system/sw/bin/localbackup-unmount-${target.name} ]]; then
|
||||||
|
trap "/run/current-system/sw/bin/localbackup-unmount-${target.name}" EXIT
|
||||||
|
fi
|
||||||
|
'';
|
||||||
|
|
||||||
|
uniqueFolders = lib.unique (
|
||||||
|
lib.flatten (lib.mapAttrsToList (_name: state: state.folders) config.clan.core.state)
|
||||||
|
);
|
||||||
|
|
||||||
|
rsnapshotConfig = target: ''
|
||||||
|
config_version 1.2
|
||||||
|
snapshot_root ${target.directory}
|
||||||
|
sync_first 1
|
||||||
|
cmd_cp ${pkgs.coreutils}/bin/cp
|
||||||
|
cmd_rm ${pkgs.coreutils}/bin/rm
|
||||||
|
cmd_rsync ${pkgs.rsync}/bin/rsync
|
||||||
|
cmd_ssh ${pkgs.openssh}/bin/ssh
|
||||||
|
cmd_logger ${pkgs.inetutils}/bin/logger
|
||||||
|
cmd_du ${pkgs.coreutils}/bin/du
|
||||||
|
cmd_rsnapshot_diff ${pkgs.rsnapshot}/bin/rsnapshot-diff
|
||||||
|
|
||||||
|
${lib.optionalString (target.postBackupHook != null) ''
|
||||||
|
cmd_postexec ${pkgs.writeShellScript "postexec.sh" ''
|
||||||
|
set -efu -o pipefail
|
||||||
|
${target.postBackupHook}
|
||||||
|
''}
|
||||||
|
''}
|
||||||
|
retain snapshot ${builtins.toString settings.snapshots}
|
||||||
|
${lib.concatMapStringsSep "\n" (folder: ''
|
||||||
|
backup ${folder} ${config.networking.hostName}/
|
||||||
|
'') uniqueFolders}
|
||||||
|
'';
|
||||||
|
in
|
||||||
|
|
||||||
|
{
|
||||||
|
|
||||||
|
environment.systemPackages = [
|
||||||
|
(pkgs.writeShellScriptBin "localbackup-create" ''
|
||||||
|
set -efu -o pipefail
|
||||||
|
export PATH=${
|
||||||
|
lib.makeBinPath [
|
||||||
|
pkgs.rsnapshot
|
||||||
|
pkgs.coreutils
|
||||||
|
pkgs.util-linux
|
||||||
|
]
|
||||||
|
}
|
||||||
|
${lib.concatMapStringsSep "\n" (target: ''
|
||||||
|
${mountHook target}
|
||||||
|
echo "Creating backup '${target.name}'"
|
||||||
|
|
||||||
|
${lib.optionalString (target.preBackupHook != null) ''
|
||||||
|
(
|
||||||
|
${target.preBackupHook}
|
||||||
|
)
|
||||||
|
''}
|
||||||
|
|
||||||
|
declare -A preCommandErrors
|
||||||
|
${lib.concatMapStringsSep "\n" (
|
||||||
|
state:
|
||||||
|
lib.optionalString (state.preBackupCommand != null) ''
|
||||||
|
echo "Running pre-backup command for ${state.name}"
|
||||||
|
if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
|
||||||
|
preCommandErrors["${state.name}"]=1
|
||||||
|
fi
|
||||||
|
''
|
||||||
|
) (builtins.attrValues config.clan.core.state)}
|
||||||
|
|
||||||
|
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" sync
|
||||||
|
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" snapshot
|
||||||
|
'') (builtins.attrValues settings.targets)}'')
|
||||||
|
(pkgs.writeShellScriptBin "localbackup-list" ''
|
||||||
|
set -efu -o pipefail
|
||||||
|
export PATH=${
|
||||||
|
lib.makeBinPath [
|
||||||
|
pkgs.jq
|
||||||
|
pkgs.findutils
|
||||||
|
pkgs.coreutils
|
||||||
|
pkgs.util-linux
|
||||||
|
]
|
||||||
|
}
|
||||||
|
(${
|
||||||
|
lib.concatMapStringsSep "\n" (target: ''
|
||||||
|
(
|
||||||
|
${mountHook target}
|
||||||
|
find ${lib.escapeShellArg target.directory} -mindepth 1 -maxdepth 1 -name "snapshot.*" -print0 -type d \
|
||||||
|
| jq -Rs 'split("\u0000") | .[] | select(. != "") | { "name": ("${target.name}::" + .)}'
|
||||||
|
)
|
||||||
|
'') (builtins.attrValues settings.targets)
|
||||||
|
}) | jq -s .
|
||||||
|
'')
|
||||||
|
(pkgs.writeShellScriptBin "localbackup-restore" ''
|
||||||
|
set -efu -o pipefail
|
||||||
|
export PATH=${
|
||||||
|
lib.makeBinPath [
|
||||||
|
pkgs.rsync
|
||||||
|
pkgs.coreutils
|
||||||
|
pkgs.util-linux
|
||||||
|
pkgs.gawk
|
||||||
|
]
|
||||||
|
}
|
||||||
|
if [[ "''${NAME:-}" == "" ]]; then
|
||||||
|
echo "No backup name given via NAME environment variable"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [[ "''${FOLDERS:-}" == "" ]]; then
|
||||||
|
echo "No folders given via FOLDERS environment variable"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
name=$(awk -F'::' '{print $1}' <<< $NAME)
|
||||||
|
backupname=''${NAME#$name::}
|
||||||
|
|
||||||
|
if command -v localbackup-mount-$name; then
|
||||||
|
localbackup-mount-$name
|
||||||
|
fi
|
||||||
|
if command -v localbackup-unmount-$name; then
|
||||||
|
trap "localbackup-unmount-$name" EXIT
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ ! -d $backupname ]]; then
|
||||||
|
echo "No backup found $backupname"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
IFS=':' read -ra FOLDER <<< "''$FOLDERS"
|
||||||
|
for folder in "''${FOLDER[@]}"; do
|
||||||
|
mkdir -p "$folder"
|
||||||
|
rsync -a "$backupname/${config.networking.hostName}$folder/" "$folder"
|
||||||
|
done
|
||||||
|
'')
|
||||||
|
]
|
||||||
|
++ (lib.mapAttrsToList (
|
||||||
|
name: target:
|
||||||
|
pkgs.writeShellScriptBin ("localbackup-mount-" + name) ''
|
||||||
|
set -efu -o pipefail
|
||||||
|
${lib.optionalString (target.preMountHook != null) target.preMountHook}
|
||||||
|
${lib.optionalString (target.mountpoint != null) ''
|
||||||
|
if ! ${pkgs.util-linux}/bin/mountpoint -q ${lib.escapeShellArg target.mountpoint}; then
|
||||||
|
${pkgs.util-linux}/bin/mount -o X-mount.mkdir ${lib.escapeShellArg target.mountpoint}
|
||||||
|
fi
|
||||||
|
''}
|
||||||
|
${lib.optionalString (target.postMountHook != null) target.postMountHook}
|
||||||
|
''
|
||||||
|
) settings.targets)
|
||||||
|
++ lib.mapAttrsToList (
|
||||||
|
name: target:
|
||||||
|
pkgs.writeShellScriptBin ("localbackup-unmount-" + name) ''
|
||||||
|
set -efu -o pipefail
|
||||||
|
${lib.optionalString (target.preUnmountHook != null) target.preUnmountHook}
|
||||||
|
${lib.optionalString (
|
||||||
|
target.mountpoint != null
|
||||||
|
) "${pkgs.util-linux}/bin/umount ${lib.escapeShellArg target.mountpoint}"}
|
||||||
|
${lib.optionalString (target.postUnmountHook != null) target.postUnmountHook}
|
||||||
|
''
|
||||||
|
) settings.targets;
|
||||||
|
|
||||||
|
clan.core.backups.providers.localbackup = {
|
||||||
|
# TODO list needs to run locally or on the remote machine
|
||||||
|
list = "localbackup-list";
|
||||||
|
create = "localbackup-create";
|
||||||
|
restore = "localbackup-restore";
|
||||||
|
};
|
||||||
|
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
16
clanServices/localbackup/flake-module.nix
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
{ lib, ... }:
|
||||||
|
let
|
||||||
|
module = lib.modules.importApply ./default.nix { };
|
||||||
|
in
|
||||||
|
{
|
||||||
|
clan.modules.localbackup = module;
|
||||||
|
perSystem =
|
||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
clan.nixosTests.localbackup = {
|
||||||
|
imports = [ ./tests/vm/default.nix ];
|
||||||
|
|
||||||
|
clan.modules."@clan/localbackup" = module;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
62
clanServices/localbackup/tests/vm/default.nix
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
{ ... }:
|
||||||
|
{
|
||||||
|
name = "service-localbackup";
|
||||||
|
|
||||||
|
clan = {
|
||||||
|
directory = ./.;
|
||||||
|
test.useContainers = true;
|
||||||
|
inventory = {
|
||||||
|
|
||||||
|
machines.machine = { };
|
||||||
|
|
||||||
|
instances = {
|
||||||
|
localbackup = {
|
||||||
|
module.name = "@clan/localbackup";
|
||||||
|
module.input = "self";
|
||||||
|
roles.default.machines."machine".settings = {
|
||||||
|
|
||||||
|
targets.hdd = {
|
||||||
|
directory = "/mnt/external-disk";
|
||||||
|
preMountHook = ''
|
||||||
|
touch /run/mount-external-disk
|
||||||
|
'';
|
||||||
|
postUnmountHook = ''
|
||||||
|
touch /run/unmount-external-disk
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
nodes.machine = {
|
||||||
|
clan.core.state.test-backups.folders = [ "/var/test-backups" ];
|
||||||
|
};
|
||||||
|
|
||||||
|
testScript = ''
|
||||||
|
import json
|
||||||
|
start_all()
|
||||||
|
|
||||||
|
machine.systemctl("start network-online.target")
|
||||||
|
machine.wait_for_unit("network-online.target")
|
||||||
|
|
||||||
|
# dummy data
|
||||||
|
machine.succeed("mkdir -p /var/test-backups")
|
||||||
|
machine.succeed("echo testing > /var/test-backups/somefile")
|
||||||
|
|
||||||
|
# create
|
||||||
|
machine.succeed("localbackup-create >&2")
|
||||||
|
machine.wait_until_succeeds("! systemctl is-active localbackup-job-serverone >&2")
|
||||||
|
|
||||||
|
# list
|
||||||
|
snapshot_list = machine.succeed("localbackup-list").strip()
|
||||||
|
assert json.loads(snapshot_list)[0]["name"].strip() == "hdd::/mnt/external-disk/snapshot.0"
|
||||||
|
|
||||||
|
# localbackup restore
|
||||||
|
machine.succeed("rm -f /var/test-backups/somefile")
|
||||||
|
|
||||||
|
machine.succeed("NAME=/mnt/external-disk/snapshot.0 FOLDERS=/var/test-backups /run/current-system/sw/bin/localbackup-restore >&2")
|
||||||
|
assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
|
||||||
|
'';
|
||||||
|
}
|
||||||
217
clanServices/wireguard/README.md
Normal file
@@ -0,0 +1,217 @@
|
|||||||
|
# Wireguard VPN Service
|
||||||
|
|
||||||
|
This service provides a Wireguard-based VPN mesh network with automatic IPv6 address allocation and routing between clan machines.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The wireguard service creates a secure mesh network between clan machines using two roles:
|
||||||
|
- **Controllers**: Machines with public endpoints that act as connection points and routers
|
||||||
|
- **Peers**: Machines that connect through controllers to access the network
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
- Controllers must have a publicly accessible endpoint (domain name or static IP)
|
||||||
|
- Peers must be in networks where UDP traffic is not blocked (uses port 51820 by default, configurable)
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Automatic IPv6 address allocation using ULA (Unique Local Address) prefixes
|
||||||
|
- Full mesh connectivity between all machines
|
||||||
|
- Automatic key generation and distribution
|
||||||
|
- IPv6 forwarding on controllers for inter-peer communication
|
||||||
|
- Support for multiple controllers for redundancy
|
||||||
|
|
||||||
|
## Network Architecture
|
||||||
|
|
||||||
|
### IPv6 Address Allocation
|
||||||
|
- Base network: `/40` ULA prefix (deterministically generated from instance name)
|
||||||
|
- Controllers: Each gets a `/56` subnet from the base `/40`
|
||||||
|
- Peers: Each gets a unique 64-bit host suffix that is used in ALL controller subnets
|
||||||
|
|
||||||
|
### Addressing Design
|
||||||
|
- Each peer generates a unique host suffix (e.g., `:8750:a09b:0:1`)
|
||||||
|
- This suffix is appended to each controller's `/56` prefix to create unique addresses
|
||||||
|
- Example: peer1 with suffix `:8750:a09b:0:1` gets:
|
||||||
|
- `fd51:19c1:3b:f700:8750:a09b:0:1` in controller1's subnet
|
||||||
|
- `fd51:19c1:c1:aa00:8750:a09b:0:1` in controller2's subnet
|
||||||
|
- Controllers allow each peer's `/96` subnet for routing flexibility
|
||||||
|
|
||||||
|
### Connectivity
|
||||||
|
- Peers use a single WireGuard interface with multiple IPs (one per controller subnet)
|
||||||
|
- Controllers connect to ALL other controllers and ALL peers on a single interface
|
||||||
|
- Controllers have IPv6 forwarding enabled to route traffic between peers
|
||||||
|
- All traffic between peers flows through controllers
|
||||||
|
- Symmetric routing is maintained as each peer has consistent IPs across all controllers
|
||||||
|
|
||||||
|
### Example Network Topology
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
graph TB
|
||||||
|
subgraph Controllers
|
||||||
|
C1[controller1<br/>endpoint: vpn1.example.com<br/>fd51:19c1:3b:f700::/56]
|
||||||
|
C2[controller2<br/>endpoint: vpn2.example.com<br/>fd51:19c1:c1:aa00::/56]
|
||||||
|
end
|
||||||
|
|
||||||
|
subgraph Peers
|
||||||
|
P1[peer1<br/>designated: controller1]
|
||||||
|
P2[peer2<br/>designated: controller2]
|
||||||
|
P3[peer3<br/>designated: controller1]
|
||||||
|
end
|
||||||
|
|
||||||
|
%% Controllers connect to each other
|
||||||
|
C1 <--> C2
|
||||||
|
|
||||||
|
%% All peers connect to all controllers
|
||||||
|
P1 <--> C1
|
||||||
|
P1 <--> C2
|
||||||
|
P2 <--> C1
|
||||||
|
P2 <--> C2
|
||||||
|
P3 <--> C1
|
||||||
|
P3 <--> C2
|
||||||
|
|
||||||
|
%% Peer-to-peer traffic flows through controllers
|
||||||
|
P1 -.->|via controllers| P3
|
||||||
|
P1 -.->|via controllers| P2
|
||||||
|
P2 -.->|via controllers| P3
|
||||||
|
|
||||||
|
classDef controller fill:#f9f,stroke:#333,stroke-width:4px
|
||||||
|
classDef peer fill:#bbf,stroke:#333,stroke-width:2px
|
||||||
|
class C1,C2 controller
|
||||||
|
class P1,P2,P3 peer
|
||||||
|
```
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
### Basic Setup with Single Controller
|
||||||
|
|
||||||
|
```nix
|
||||||
|
# In your flake.nix or inventory
|
||||||
|
{
|
||||||
|
services.wireguard.server1 = {
|
||||||
|
roles.controller = {
|
||||||
|
# Public endpoint where this controller can be reached
|
||||||
|
endpoint = "vpn.example.com";
|
||||||
|
# Optional: Change the UDP port (default: 51820)
|
||||||
|
port = 51820;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
services.wireguard.laptop1 = {
|
||||||
|
roles.peer = {
|
||||||
|
# No configuration needed if only one controller exists
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Multiple Controllers Setup
|
||||||
|
|
||||||
|
```nix
|
||||||
|
{
|
||||||
|
services.wireguard.server1 = {
|
||||||
|
roles.controller = {
|
||||||
|
endpoint = "vpn1.example.com";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
services.wireguard.server2 = {
|
||||||
|
roles.controller = {
|
||||||
|
endpoint = "vpn2.example.com";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
services.wireguard.laptop1 = {
|
||||||
|
roles.peer = {
|
||||||
|
# Must specify which controller's subnet is exposed as the default in /etc/hosts when multiple controllers exist
|
||||||
|
controller = "server1";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Advanced Options
|
||||||
|
|
||||||
|
|
||||||
|
### Automatic Hostname Resolution
|
||||||
|
|
||||||
|
The wireguard service automatically adds entries to `/etc/hosts` for all machines in the network. Each machine is accessible via its hostname in the format `<machine-name>.<instance-name>`.
|
||||||
|
|
||||||
|
For example, with an instance named `vpn`:
|
||||||
|
- `server1.vpn` - resolves to server1's IPv6 address
|
||||||
|
- `laptop1.vpn` - resolves to laptop1's IPv6 address
|
||||||
|
|
||||||
|
This allows machines to communicate using hostnames instead of IPv6 addresses:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Ping another machine by hostname
|
||||||
|
ping6 server1.vpn
|
||||||
|
|
||||||
|
# SSH to another machine
|
||||||
|
ssh user@laptop1.vpn
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Check Wireguard Status
|
||||||
|
```bash
|
||||||
|
sudo wg show
|
||||||
|
```
|
||||||
|
|
||||||
|
### Verify IP Addresses
|
||||||
|
```bash
|
||||||
|
ip addr show dev <instance-name>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Check Routing
|
||||||
|
```bash
|
||||||
|
ip -6 route show dev <instance-name>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Interface Fails to Start: "Address already in use"
|
||||||
|
|
||||||
|
If you see this error in your logs:
|
||||||
|
```
|
||||||
|
wireguard: Could not bring up interface, ignoring: Address already in use
|
||||||
|
```
|
||||||
|
|
||||||
|
This means the configured port (default: 51820) is already in use by another service or wireguard instance. Solutions:
|
||||||
|
|
||||||
|
1. **Check for conflicting wireguard instances:**
|
||||||
|
```bash
|
||||||
|
sudo wg show
|
||||||
|
sudo ss -ulnp | grep 51820
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Use a different port:**
|
||||||
|
```nix
|
||||||
|
services.wireguard.myinstance = {
|
||||||
|
roles.controller = {
|
||||||
|
endpoint = "vpn.example.com";
|
||||||
|
port = 51821; # Use a different port
|
||||||
|
};
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Ensure unique ports across multiple instances:**
|
||||||
|
If you have multiple wireguard instances on the same machine, each must use a different port.
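   For example, a minimal sketch of two instances on one controller, each with its own port (the instance names `mesh-a` and `mesh-b` are illustrative; the option format follows the examples above):

   ```nix
   {
     services.wireguard.mesh-a = {
       roles.controller = {
         endpoint = "vpn.example.com";
         port = 51820; # default port, used by the first instance
       };
     };

     services.wireguard.mesh-b = {
       roles.controller = {
         endpoint = "vpn.example.com";
         port = 51821; # second instance on the same machine needs its own port
       };
     };
   }
   ```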
|
||||||
|
|
||||||
|
### Key Management
|
||||||
|
|
||||||
|
Keys are automatically generated and stored in the clan vars system. To regenerate keys:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Regenerate keys for a specific machine and instance
|
||||||
|
clan vars generate --service wireguard-keys-<instance-name> --regenerate --machine <machine-name>
|
||||||
|
|
||||||
|
# Apply the new keys
|
||||||
|
clan machines update <machine-name>
|
||||||
|
```
|
||||||
|
|
||||||
|
## Security Considerations
|
||||||
|
|
||||||
|
- All traffic is encrypted using Wireguard's modern cryptography
|
||||||
|
- Private keys never leave the machines they're generated on
|
||||||
|
- Public keys are distributed through the clan vars system
|
||||||
|
- Controllers must have publicly accessible endpoints
|
||||||
|
- Firewall rules are automatically configured for the Wireguard ports
|
||||||
|
|
||||||
456
clanServices/wireguard/default.nix
Normal file
@@ -0,0 +1,456 @@
|
|||||||
|
/*
|
||||||
|
There are two roles: peers and controllers:
|
||||||
|
- Every controller has an endpoint set
|
||||||
|
- There can be multiple peers
|
||||||
|
- There has to be one or more controllers
|
||||||
|
- Peers connect to ALL controllers (full mesh)
|
||||||
|
- If only one controller exists, peers automatically use it for IP allocation
|
||||||
|
- If multiple controllers exist, peers must specify which controller's subnet to use
|
||||||
|
- Controllers have IPv6 forwarding enabled, every peer and controller can reach
|
||||||
|
everyone else, via extra controller hops if necessary
|
||||||
|
|
||||||
|
Example:
|
||||||
|
┌───────────────────────────────┐
|
||||||
|
│ ◄───────────── │
|
||||||
|
│ controller2 controller1
|
||||||
|
│ ▲ ─────────────► ▲ ▲
|
||||||
|
│ │ │ │ │ │ │ │ │
|
||||||
|
│ │ │ │ │ │ │ │ │
|
||||||
|
│ │ │ │ │ │ │ │ │
|
||||||
|
│ │ │ │ └───────────────┐ │ │ │ │
|
||||||
|
│ │ │ └──────────────┐ │ │ │ │ │
|
||||||
|
│ ▼ │ ▼ ▼ ▼
|
||||||
|
└─► peer2 │ peer1 peer3
|
||||||
|
│ ▲
|
||||||
|
└──────────┘
|
||||||
|
|
||||||
|
Network Architecture:
|
||||||
|
|
||||||
|
IPv6 Address Allocation:
|
||||||
|
- Base network: /40 ULA prefix (generated from instance name)
|
||||||
|
- Controllers: Each gets a /56 subnet from the base /40
|
||||||
|
- Peers: Each gets a unique host suffix that is used in ALL controller subnets
|
||||||
|
|
||||||
|
Address Assignment:
|
||||||
|
- Each peer generates a unique 64-bit host suffix (e.g., :8750:a09b:0:1)
|
||||||
|
- This suffix is appended to each controller's /56 prefix
|
||||||
|
- Example: peer1 with suffix :8750:a09b:0:1 gets:
|
||||||
|
- fd51:19c1:3b:f700:8750:a09b:0:1 in controller1's subnet
|
||||||
|
- fd51:19c1:c1:aa00:8750:a09b:0:1 in controller2's subnet
|
||||||
|
|
||||||
|
Peers: Use a SINGLE interface that:
|
||||||
|
- Connects to ALL controllers
|
||||||
|
- Has multiple IPs, one in each controller's subnet (with /56 prefix)
|
||||||
|
- Routes to each controller's /56 subnet via that controller
|
||||||
|
- allowedIPs: Each controller's /56 subnet
|
||||||
|
- No routing conflicts due to unique IPs per subnet
|
||||||
|
|
||||||
|
Controllers: Use a SINGLE interface that:
|
||||||
|
- Connects to ALL peers and ALL other controllers
|
||||||
|
- Gets a /56 subnet from the base /40 network
|
||||||
|
- Has IPv6 forwarding enabled for routing between peers
|
||||||
|
- allowedIPs:
|
||||||
|
- For peers: A /96 range containing the peer's address in this controller's subnet
|
||||||
|
- For other controllers: The controller's /56 subnet
|
||||||
|
*/
|
||||||
|
|
||||||
|
{ ... }:
|
||||||
|
let
|
||||||
|
# Shared module for extraHosts configuration
|
||||||
|
extraHostsModule =
|
||||||
|
{
|
||||||
|
instanceName,
|
||||||
|
settings,
|
||||||
|
roles,
|
||||||
|
config,
|
||||||
|
lib,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
networking.extraHosts =
|
||||||
|
let
|
||||||
|
domain = if settings.domain == null then instanceName else settings.domain;
|
||||||
|
# Controllers use their subnet's ::1 address
|
||||||
|
controllerHosts = lib.mapAttrsToList (
|
||||||
|
name: _value:
|
||||||
|
let
|
||||||
|
prefix = builtins.readFile (
|
||||||
|
config.clan.core.settings.directory
|
||||||
|
+ "/vars/per-machine/${name}/wireguard-network-${instanceName}/prefix/value"
|
||||||
|
);
|
||||||
|
# Controller IP is always ::1 in their subnet
|
||||||
|
ip = prefix + "::1";
|
||||||
|
in
|
||||||
|
"${ip} ${name}.${domain}"
|
||||||
|
) roles.controller.machines;
|
||||||
|
|
||||||
|
# Peers use their suffix in their designated controller's subnet only
|
||||||
|
peerHosts = lib.mapAttrsToList (
|
||||||
|
peerName: peerValue:
|
||||||
|
let
|
||||||
|
peerSuffix = builtins.readFile (
|
||||||
|
config.clan.core.settings.directory
|
||||||
|
+ "/vars/per-machine/${peerName}/wireguard-network-${instanceName}/suffix/value"
|
||||||
|
);
|
||||||
|
# Determine designated controller
|
||||||
|
designatedController =
|
||||||
|
if (builtins.length (builtins.attrNames roles.controller.machines) == 1) then
|
||||||
|
(builtins.head (builtins.attrNames roles.controller.machines))
|
||||||
|
else
|
||||||
|
peerValue.settings.controller;
|
||||||
|
controllerPrefix = builtins.readFile (
|
||||||
|
config.clan.core.settings.directory
|
||||||
|
+ "/vars/per-machine/${designatedController}/wireguard-network-${instanceName}/prefix/value"
|
||||||
|
);
|
||||||
|
peerIP = controllerPrefix + ":" + peerSuffix;
|
||||||
|
in
|
||||||
|
"${peerIP} ${peerName}.${domain}"
|
||||||
|
) roles.peer.machines;
|
||||||
|
in
|
||||||
|
builtins.concatStringsSep "\n" (controllerHosts ++ peerHosts);
|
||||||
|
};
|
||||||
|
|
||||||
|
# Shared interface options
|
||||||
|
sharedInterface =
|
||||||
|
{ lib, ... }:
|
||||||
|
{
|
||||||
|
options.port = lib.mkOption {
|
||||||
|
type = lib.types.int;
|
||||||
|
example = 51820;
|
||||||
|
default = 51820;
|
||||||
|
description = ''
|
||||||
|
Port for the wireguard interface
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
options.domain = lib.mkOption {
|
||||||
|
type = lib.types.nullOr lib.types.str;
|
||||||
|
defaultText = lib.literalExpression "instanceName";
|
||||||
|
default = null;
|
||||||
|
description = ''
|
||||||
|
Domain suffix to use for hostnames in /etc/hosts.
|
||||||
|
Defaults to the instance name.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
in
|
||||||
|
{
|
||||||
|
_class = "clan.service";
|
||||||
|
manifest.name = "clan-core/wireguard";
|
||||||
|
manifest.description = "Wireguard-based VPN mesh network with automatic IPv6 address allocation";
|
||||||
|
manifest.categories = [
|
||||||
|
"System"
|
||||||
|
"Network"
|
||||||
|
];
|
||||||
|
manifest.readme = builtins.readFile ./README.md;
|
||||||
|
|
||||||
|
# Peer options and configuration
|
||||||
|
roles.peer = {
|
||||||
|
interface =
|
||||||
|
{ lib, ... }:
|
||||||
|
{
|
||||||
|
imports = [ sharedInterface ];
|
||||||
|
|
||||||
|
options.controller = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
example = "controller1";
|
||||||
|
description = ''
|
||||||
|
Machine name of the controller to attach to
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
perInstance =
|
||||||
|
{
|
||||||
|
instanceName,
|
||||||
|
settings,
|
||||||
|
roles,
|
||||||
|
machine,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
# Set default domain to instanceName
|
||||||
|
|
||||||
|
# Peers connect to all controllers
|
||||||
|
nixosModule =
|
||||||
|
{
|
||||||
|
config,
|
||||||
|
pkgs,
|
||||||
|
lib,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
(extraHostsModule {
|
||||||
|
inherit
|
||||||
|
instanceName
|
||||||
|
settings
|
||||||
|
roles
|
||||||
|
config
|
||||||
|
lib
|
||||||
|
;
|
||||||
|
})
|
||||||
|
];
|
||||||
|
# Network allocation generator for this peer - generates host suffix
|
||||||
|
clan.core.vars.generators."wireguard-network-${instanceName}" = {
|
||||||
|
files.suffix.secret = false;
|
||||||
|
|
||||||
|
runtimeInputs = with pkgs; [
|
||||||
|
python3
|
              ];

              # Invalidate on hostname changes
              validation.hostname = machine.name;

              script = ''
                ${pkgs.python3}/bin/python3 ${./ipv6_allocator.py} "$out" "${instanceName}" peer "${machine.name}"
              '';
            };

            # Single wireguard interface with multiple IPs
            networking.wireguard.interfaces."${instanceName}" = {
              ips =
                # Get this peer's suffix
                let
                  peerSuffix =
                    config.clan.core.vars.generators."wireguard-network-${instanceName}".files.suffix.value;
                in
                # Create an IP in each controller's subnet
                lib.mapAttrsToList (
                  ctrlName: _:
                  let
                    controllerPrefix = builtins.readFile (
                      config.clan.core.settings.directory
                      + "/vars/per-machine/${ctrlName}/wireguard-network-${instanceName}/prefix/value"
                    );
                    peerIP = controllerPrefix + ":" + peerSuffix;
                  in
                  "${peerIP}/56"
                ) roles.controller.machines;

              privateKeyFile =
                config.clan.core.vars.generators."wireguard-keys-${instanceName}".files."privatekey".path;

              # Connect to all controllers
              peers = lib.mapAttrsToList (name: value: {
                publicKey = (
                  builtins.readFile (
                    config.clan.core.settings.directory
                    + "/vars/per-machine/${name}/wireguard-keys-${instanceName}/publickey/value"
                  )
                );

                # Allow each controller's /56 subnet
                allowedIPs = [
                  "${
                    builtins.readFile (
                      config.clan.core.settings.directory
                      + "/vars/per-machine/${name}/wireguard-network-${instanceName}/prefix/value"
                    )
                  }::/56"
                ];

                endpoint = "${value.settings.endpoint}:${toString value.settings.port}";

                persistentKeepalive = 25;
              }) roles.controller.machines;
            };
          };
      };
  };

  # Controller options and configuration
  roles.controller = {
    interface =
      { lib, ... }:
      {
        imports = [ sharedInterface ];

        options.endpoint = lib.mkOption {
          type = lib.types.str;
          example = "vpn.clan.lol";
          description = ''
            Endpoint where the controller can be reached
          '';
        };
      };

    perInstance =
      {
        settings,
        instanceName,
        roles,
        machine,
        ...
      }:
      {
        # Controllers connect to all peers and other controllers
        nixosModule =
          {
            config,
            pkgs,
            lib,
            ...
          }:
          let
            allOtherControllers = lib.filterAttrs (name: _v: name != machine.name) roles.controller.machines;
            allPeers = roles.peer.machines;
          in
          {
            imports = [
              (extraHostsModule {
                inherit
                  instanceName
                  settings
                  roles
                  config
                  lib
                  ;
              })
            ];

            # Network allocation generator for this controller
            clan.core.vars.generators."wireguard-network-${instanceName}" = {
              files.prefix.secret = false;

              runtimeInputs = with pkgs; [
                python3
              ];

              # Invalidate on network or hostname changes
              validation.hostname = machine.name;

              script = ''
                ${pkgs.python3}/bin/python3 ${./ipv6_allocator.py} "$out" "${instanceName}" controller "${machine.name}"
              '';
            };

            # Enable ip forwarding, so wireguard peers can reach eachother
            boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = 1;

            networking.firewall.allowedUDPPorts = [ settings.port ];

            # Single wireguard interface
            networking.wireguard.interfaces."${instanceName}" = {
              listenPort = settings.port;

              ips = [
                # Controller uses ::1 in its /56 subnet but with /40 prefix for proper routing
                "${config.clan.core.vars.generators."wireguard-network-${instanceName}".files.prefix.value}::1/40"
              ];

              privateKeyFile =
                config.clan.core.vars.generators."wireguard-keys-${instanceName}".files."privatekey".path;

              # Connect to all peers and other controllers
              peers = lib.mapAttrsToList (
                name: value:
                if allPeers ? ${name} then
                  # For peers: they now have our entire /56 subnet
                  {
                    publicKey = (
                      builtins.readFile (
                        config.clan.core.settings.directory
                        + "/vars/per-machine/${name}/wireguard-keys-${instanceName}/publickey/value"
                      )
                    );

                    # Allow the peer's /96 range in ALL controller subnets
                    allowedIPs = lib.mapAttrsToList (
                      ctrlName: _:
                      let
                        controllerPrefix = builtins.readFile (
                          config.clan.core.settings.directory
                          + "/vars/per-machine/${ctrlName}/wireguard-network-${instanceName}/prefix/value"
                        );
                        peerSuffix = builtins.readFile (
                          config.clan.core.settings.directory
                          + "/vars/per-machine/${name}/wireguard-network-${instanceName}/suffix/value"
                        );
                      in
                      "${controllerPrefix}:${peerSuffix}/96"
                    ) roles.controller.machines;

                    persistentKeepalive = 25;
                  }
                else
                  # For other controllers: use their /56 subnet
                  {
                    publicKey = (
                      builtins.readFile (
                        config.clan.core.settings.directory
                        + "/vars/per-machine/${name}/wireguard-keys-${instanceName}/publickey/value"
                      )
                    );

                    allowedIPs = [
                      "${
                        builtins.readFile (
                          config.clan.core.settings.directory
                          + "/vars/per-machine/${name}/wireguard-network-${instanceName}/prefix/value"
                        )
                      }::/56"
                    ];

                    endpoint = "${value.settings.endpoint}:${toString value.settings.port}";
                    persistentKeepalive = 25;
                  }
              ) (allPeers // allOtherControllers);
            };
          };
      };
  };

  # Maps over all machines and produces one result per machine, regardless of role
  perMachine =
    { instances, machine, ... }:
    {
      nixosModule =
        { pkgs, lib, ... }:
        let
          # Check if this machine has conflicting roles across all instances
          machineRoleConflicts = lib.flatten (
            lib.mapAttrsToList (
              instanceName: instanceInfo:
              let
                isController =
                  instanceInfo.roles ? controller && instanceInfo.roles.controller.machines ? ${machine.name};
                isPeer = instanceInfo.roles ? peer && instanceInfo.roles.peer.machines ? ${machine.name};
              in
              lib.optional (isController && isPeer) {
                inherit instanceName;
                machineName = machine.name;
              }
            ) instances
          );
        in
        {
          # Add assertions for role conflicts
          assertions = lib.forEach machineRoleConflicts (conflict: {
            assertion = false;
            message = ''
              Machine '${conflict.machineName}' cannot have both 'controller' and 'peer' roles in the wireguard instance '${conflict.instanceName}'.
              A machine must be either a controller or a peer, not both.
            '';
          });

          # Generate keys for each instance where this machine participates
          clan.core.vars.generators = lib.mapAttrs' (
            name: _instanceInfo:
            lib.nameValuePair "wireguard-keys-${name}" {
              files.publickey.secret = false;
              files.privatekey = { };

              runtimeInputs = with pkgs; [
                wireguard-tools
              ];

              script = ''
                wg genkey > $out/privatekey
                wg pubkey < $out/privatekey > $out/publickey
              '';
            }
          ) instances;

        };
    };
}
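The addressing that falls out of the module above can be easier to see with concrete strings. The following is an illustration only, not part of the diff: the controller names, the second prefix, and the suffix value are made up, while the composition rules (`prefix + ":" + suffix + "/56"` for peer interface IPs, `prefix:suffix/96` for the routes a controller allows back to that peer) are taken directly from the Nix code above.

```ts
// Hypothetical values: prefixes come from vars/per-machine/<controller>/wireguard-network-<instance>/prefix/value,
// the suffix from vars/per-machine/<peer>/wireguard-network-<instance>/suffix/value.
const controllerPrefixes: Record<string, string> = {
  controller1: "fd51:19c1:3b:f700",
  controller2: "fd51:19c1:3b:0200",
};
const peerSuffix = "8f3a:1b2c:4d5e:6f70";

// Peer side: one interface IP per controller subnet (mirrors the `ips = lib.mapAttrsToList ...` above)
const peerIps = Object.values(controllerPrefixes).map(
  (prefix) => `${prefix}:${peerSuffix}/56`,
);

// Controller side: the /96 route it allows for that peer in every controller subnet
const peerAllowedIps = Object.values(controllerPrefixes).map(
  (prefix) => `${prefix}:${peerSuffix}/96`,
);

console.log(peerIps, peerAllowedIps);
// e.g. ["fd51:19c1:3b:f700:8f3a:1b2c:4d5e:6f70/56", "fd51:19c1:3b:0200:8f3a:1b2c:4d5e:6f70/56"]
```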
clanServices/wireguard/flake-module.nix (new file, 7 lines)
@@ -0,0 +1,7 @@
{ lib, ... }:
let
  module = lib.modules.importApply ./default.nix { };
in
{
  clan.modules.wireguard = module;
}
clanServices/wireguard/ipv6_allocator.py (new executable file, 135 lines)
@@ -0,0 +1,135 @@
#!/usr/bin/env python3
"""
IPv6 address allocator for WireGuard networks.

Network layout:
- Base network: /40 ULA prefix (fd00::/8 + 32 bits from hash)
- Controllers: Each gets a /56 subnet from the base /40 (256 controllers max)
- Peers: Each gets a /96 subnet from their controller's /56
"""

import hashlib
import ipaddress
import sys
from pathlib import Path


def hash_string(s: str) -> str:
    """Generate SHA256 hash of string."""
    return hashlib.sha256(s.encode()).hexdigest()


def generate_ula_prefix(instance_name: str) -> ipaddress.IPv6Network:
    """
    Generate a /40 ULA prefix from instance name.

    Format: fd{32-bit hash}/40
    This gives us fd00:0000:0000::/40 through fdff:ffff:ff00::/40
    """
    h = hash_string(instance_name)

    # For /40, we need 32 bits after 'fd' (8 hex chars)
    # But only the first 32 bits count for the network prefix
    # The last 8 bits of the 40-bit prefix must be 0
    prefix_bits = int(h[:8], 16)

    # Mask to ensure we only use the first 32 bits for /40
    # This gives us addresses like fd28:387a::/40
    prefix_bits = prefix_bits & 0xFFFFFF00  # Clear last 8 bits

    # Format as IPv6 address
    prefix = f"fd{prefix_bits:08x}"
    prefix_formatted = f"{prefix[:4]}:{prefix[4:8]}::/40"

    network = ipaddress.IPv6Network(prefix_formatted)
    return network


def generate_controller_subnet(
    base_network: ipaddress.IPv6Network, controller_name: str
) -> ipaddress.IPv6Network:
    """
    Generate a /56 subnet for a controller from the base /40 network.

    We have 16 bits (40 to 56) to allocate controller subnets.
    This allows for 65,536 possible controller subnets.
    """
    h = hash_string(controller_name)
    # Take 16 bits from hash for the controller subnet ID
    controller_id = int(h[:4], 16)

    # Create the controller subnet by adding the controller ID to the base network
    # The controller subnet is at base_prefix:controller_id::/56
    base_int = int(base_network.network_address)
    controller_subnet_int = base_int | (controller_id << (128 - 56))
    controller_subnet = ipaddress.IPv6Network((controller_subnet_int, 56))

    return controller_subnet


def generate_peer_suffix(peer_name: str) -> str:
    """
    Generate a unique 64-bit host suffix for a peer.

    This suffix will be used in all controller subnets to create unique addresses.
    Format: :xxxx:xxxx:xxxx:xxxx (64 bits)
    """
    h = hash_string(peer_name)
    # Take 64 bits (16 hex chars) from hash for the host suffix
    suffix_bits = h[:16]

    # Format as IPv6 suffix without leading colon
    suffix = f"{suffix_bits[0:4]}:{suffix_bits[4:8]}:{suffix_bits[8:12]}:{suffix_bits[12:16]}"
    return suffix


def main() -> None:
    if len(sys.argv) < 4:
        print(
            "Usage: ipv6_allocator.py <output_dir> <instance_name> <controller|peer> <machine_name>"
        )
        sys.exit(1)

    output_dir = Path(sys.argv[1])
    instance_name = sys.argv[2]
    node_type = sys.argv[3]

    # Generate base /40 network
    base_network = generate_ula_prefix(instance_name)

    if node_type == "controller":
        if len(sys.argv) < 5:
            print("Controller name required")
            sys.exit(1)

        controller_name = sys.argv[4]
        subnet = generate_controller_subnet(base_network, controller_name)

        # Extract clean prefix from subnet (e.g. "fd51:19c1:3b:f700::/56" -> "fd51:19c1:3b:f700")
        prefix_str = str(subnet).split("/")[0].rstrip(":")
        while prefix_str.endswith(":"):
            prefix_str = prefix_str.rstrip(":")

        # Write file
        (output_dir / "prefix").write_text(prefix_str)

    elif node_type == "peer":
        if len(sys.argv) < 5:
            print("Peer name required")
            sys.exit(1)

        peer_name = sys.argv[4]

        # Generate the peer's host suffix
        suffix = generate_peer_suffix(peer_name)

        # Write file
        (output_dir / "suffix").write_text(suffix)

    else:
        print(f"Unknown node type: {node_type}")
        sys.exit(1)


if __name__ == "__main__":
    main()
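For readers who want to preview which base prefix an instance name will hash to without running the generator, here is a small sketch that mirrors `generate_ula_prefix()` above. It is not part of the commit; the function name and the Node `crypto` dependency are my own choices for illustration.

```ts
import { createHash } from "node:crypto";

// Mirror of generate_ula_prefix() from ipv6_allocator.py (illustrative only).
function ulaPrefix(instanceName: string): string {
  const h = createHash("sha256").update(instanceName).digest("hex");
  // take 32 bits after "fd" and clear the last 8, so only the first 40 bits matter
  const bits = (parseInt(h.slice(0, 8), 16) & 0xffffff00) >>> 0;
  const hex = `fd${bits.toString(16).padStart(8, "0")}`; // 10 hex chars = 40 bits
  return `${hex.slice(0, 4)}:${hex.slice(4, 8)}::/40`; // e.g. "fd28:387a::/40"
}

console.log(ulaPrefix("my-wireguard-instance"));
```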
@@ -9,14 +9,7 @@
   let
     controllerMachine = builtins.head (lib.attrNames roles.controller.machines or { });
     networkIdPath = "${config.clan.core.settings.directory}/vars/per-machine/${controllerMachine}/zerotier/zerotier-network-id/value";
-    networkId =
-      if builtins.pathExists networkIdPath then
-        builtins.readFile networkIdPath
-      else
-        builtins.throw ''
-          No zerotier network id found for ${controllerMachine}.
-          Please run `clan vars generate ${controllerMachine}` first.
-        '';
+    networkId = if builtins.pathExists networkIdPath then builtins.readFile networkIdPath else null;
     moons = lib.attrNames (roles.moon.machines or { });
     moonIps = builtins.foldl' (
       ips: name:
devFlake/flake.lock (generated, 25 changed lines)
@@ -1,5 +1,23 @@
 {
   "nodes": {
+    "clan-core-for-checks": {
+      "flake": false,
+      "locked": {
+        "lastModified": 1755093452,
+        "narHash": "sha256-NKBss7QtNnOqYVyJmYCgaCvYZK0mpQTQc9fLgE1mGyk=",
+        "ref": "main",
+        "rev": "7e97734797f0c6bd3c2d3a51cf54a2a6b371c222",
+        "shallow": true,
+        "type": "git",
+        "url": "https://git.clan.lol/clan/clan-core"
+      },
+      "original": {
+        "ref": "main",
+        "shallow": true,
+        "type": "git",
+        "url": "https://git.clan.lol/clan/clan-core"
+      }
+    },
     "flake-utils": {
       "inputs": {
         "systems": [
@@ -104,6 +122,7 @@
     },
     "root": {
       "inputs": {
+        "clan-core-for-checks": "clan-core-for-checks",
         "flake-utils": "flake-utils",
         "nixpkgs-dev": "nixpkgs-dev",
         "nuschtos": "nuschtos",
@@ -146,11 +165,11 @@
         "nixpkgs": []
       },
       "locked": {
-        "lastModified": 1752055615,
-        "narHash": "sha256-19m7P4O/Aw/6+CzncWMAJu89JaKeMh3aMle1CNQSIwM=",
+        "lastModified": 1754847726,
+        "narHash": "sha256-2vX8QjO5lRsDbNYvN9hVHXLU6oMl+V/PsmIiJREG4rE=",
         "owner": "numtide",
         "repo": "treefmt-nix",
-        "rev": "c9d477b5d5bd7f26adddd3f96cfd6a904768d4f9",
+        "rev": "7d81f6fb2e19bf84f1c65135d1060d829fae2408",
         "type": "github"
       },
       "original": {
@@ -15,5 +15,8 @@
 
   inputs.systems.url = "github:nix-systems/default";
 
+  inputs.clan-core-for-checks.url = "git+https://git.clan.lol/clan/clan-core?ref=main&shallow=1";
+  inputs.clan-core-for-checks.flake = false;
+
   outputs = inputs: inputs;
 }
@@ -92,10 +92,10 @@ nav:
     - Services:
       - Overview:
         - reference/clanServices/index.md
-
       - reference/clanServices/admin.md
       - reference/clanServices/borgbackup.md
       - reference/clanServices/data-mesher.md
+      - reference/clanServices/dyndns.md
       - reference/clanServices/emergency-access.md
       - reference/clanServices/garage.md
      - reference/clanServices/hello-world.md
@@ -108,6 +108,7 @@ nav:
       - reference/clanServices/trusted-nix-caches.md
       - reference/clanServices/users.md
       - reference/clanServices/wifi.md
+      - reference/clanServices/wireguard.md
       - reference/clanServices/zerotier.md
       - API: reference/clanServices/clan-service-author-interface.md
 
@@ -144,6 +145,7 @@ nav:
       - reference/clanModules/heisenbridge.md
       - reference/clanModules/importer.md
       - reference/clanModules/iwd.md
+      - reference/clanServices/localbackup.md
       - reference/clanModules/localbackup.md
       - reference/clanModules/localsend.md
       - reference/clanModules/matrix-synapse.md
@@ -2,7 +2,11 @@
 
 Ready to create your own Clan and manage a fleet of machines? Follow these simple steps to get started.
 
-By the end of this guide, you'll have a fresh NixOS configuration ready to push to one or more machines. You'll create a new Git repository and a flake, and all you need is at least one machine to push to. This is the easiest way to begin, and we recommend you to copy your existing configuration into this new setup!
+This guide walks your through setting up your own declarative infrastructure using clan, git and flakes. By the end of this, you will have one or more machines integrated and installed. You can then import your existing NixOS configuration into this setup if you wish.
 
+The following steps are meant to be executed on the machine on which to administer the infrastructure.
+
+In order to get started you should have at least one machine with either physical or ssh access available as an installation target. Your local machine can also be used as an installation target if it is already running NixOS.
+
 ## Prerequisites
 
@@ -10,7 +14,7 @@ By the end of this guide, you'll have a fresh NixOS configuration ready to push
 
 Clan requires Nix to be installed on your system. Run the following command to install Nix:
 
-```bash
+```shellSession
 curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
 ```
 
@@ -30,17 +34,17 @@ By the end of this guide, you'll have a fresh NixOS configuration ready to push
 
 Clan requires Nix to be installed on your system. Run the following command to install Nix:
 
-```bash
+```shellSession
 curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
 ```
 
 If you have previously installed Nix, make sure `experimental-features = nix-command flakes` is present in `~/.config/nix/nix.conf` or `/etc/nix/nix.conf`. If this is not the case, please add it to `~/.config/nix/nix.conf`.
 
-## Add Clan CLI to Your Shell
+## Create a new clan
 
-Create a new clan
+Initialize a new clan flake
 
-```bash
+```shellSession
 nix run https://git.clan.lol/clan/clan-core/archive/main.tar.gz#clan-cli --refresh -- flakes create
 ```
 
@@ -60,22 +64,16 @@ Enter a *name*, confirm with *enter*. A directory with that name will be created
 ## Explore the Project Structure
 
 Take a look at all project files:
 
-```bash
-cd my-clan
-tree
-```
-
 For example, you might see something like:
 
-``` { .console .no-copy }
-.
-├── flake.nix
-├── machines/
-├── modules/
-└── README.md
+```{ .console .no-copy }
+$ cd my-clan
+$ ls
+clan.nix flake.lock flake.nix modules sops
 ```
 
+
 Don’t worry if your output looks different — Clan templates evolve over time.
 
 To interact with your newly created clan the you need to load the `clan` cli-package it into your environment by running:
@@ -83,19 +81,19 @@ To interact with your newly created clan the you need to load the `clan` cli-pac
 === "Automatic (direnv, recommended)"
     - prerequisite: [install nix-direnv](https://github.com/nix-community/nix-direnv)
 
-    ```
+    ```shellSession
     direnv allow
     ```
 
 === "Manual (nix develop)"
 
-    ```
+    ```shellSession
     nix develop
     ```
 
verify that you can run `clan` commands:
 
-```bash
+```shellSession
clan show
```
flake.lock (generated, 18 changed lines)
@@ -115,10 +115,10 @@
   "nixpkgs": {
     "locked": {
       "lastModified": 315532800,
-      "narHash": "sha256-5VYevX3GccubYeccRGAXvCPA1ktrGmIX1IFC0icX07g=",
-      "rev": "a683adc19ff5228af548c6539dbc3440509bfed3",
+      "narHash": "sha256-eNwx+U1ODm+fzDSZzHo3TZ60dFjFzb1A+o6Kh09OYaM=",
+      "rev": "641d909c4a7538f1539da9240dedb1755c907e40",
       "type": "tarball",
-      "url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre840248.a683adc19ff5/nixexprs.tar.xz"
+      "url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre842025.641d909c4a75/nixexprs.tar.xz"
     },
     "original": {
       "type": "tarball",
@@ -146,11 +146,11 @@
       ]
     },
     "locked": {
-      "lastModified": 1754328224,
-      "narHash": "sha256-glPK8DF329/dXtosV7YSzRlF4n35WDjaVwdOMEoEXHA=",
+      "lastModified": 1754988908,
+      "narHash": "sha256-t+voe2961vCgrzPFtZxha0/kmFSHFobzF00sT8p9h0U=",
       "owner": "Mic92",
       "repo": "sops-nix",
-      "rev": "49021900e69812ba7ddb9e40f9170218a7eca9f4",
+      "rev": "3223c7a92724b5d804e9988c6b447a0d09017d48",
       "type": "github"
     },
     "original": {
@@ -181,11 +181,11 @@
       ]
     },
     "locked": {
-      "lastModified": 1754492133,
-      "narHash": "sha256-B+3g9+76KlGe34Yk9za8AF3RL+lnbHXkLiVHLjYVOAc=",
+      "lastModified": 1754847726,
+      "narHash": "sha256-2vX8QjO5lRsDbNYvN9hVHXLU6oMl+V/PsmIiJREG4rE=",
       "owner": "numtide",
       "repo": "treefmt-nix",
-      "rev": "1298185c05a56bff66383a20be0b41a307f52228",
+      "rev": "7d81f6fb2e19bf84f1c65135d1060d829fae2408",
       "type": "github"
     },
     "original": {
@@ -51,9 +51,12 @@
         ;
 
       privateInputs =
-        (import ./devFlake/flake-compat.nix {
-          src = ./devFlake;
-        }).outputs;
+        if builtins.pathExists (./. + ".skip-private-inputs") then
+          { }
+        else
+          (import ./devFlake/flake-compat.nix {
+            src = ./devFlake;
+          }).outputs;
     in
     flake-parts.lib.mkFlake { inherit inputs; } (
       { ... }:
@@ -11,6 +11,8 @@
       treefmt.programs.nixfmt.enable = true;
       treefmt.programs.nixfmt.package = pkgs.nixfmt-rfc-style;
       treefmt.programs.deadnix.enable = true;
+      treefmt.programs.sizelint.enable = true;
+      treefmt.programs.sizelint.failOnWarn = true;
       treefmt.programs.clang-format.enable = true;
       treefmt.settings.global.excludes = [
         "*.png"
@@ -59,6 +61,7 @@
         "pkgs/clan-cli/clan_cli/tests/data/sshd_config"
         "pkgs/clan-vm-manager/.vscode/lhebendanz.weaudit"
         "pkgs/clan-vm-manager/bin/clan-vm-manager"
+        "clanServices/hello-world/default.nix"
         "sops/secrets/test-backup-age.key/secret"
       ];
       treefmt.settings.formatter.ruff-format.includes = [
@@ -124,7 +124,7 @@ rec {
         ]
       )
     }" \
-      ${pkgs.runtimeShell} ${genInfo.finalScript}
+      ${pkgs.runtimeShell} -x "${genInfo.finalScript}"
 
     # Verify expected outputs were created
     ${lib.concatStringsSep "\n" (
@@ -1,4 +1,4 @@
-{ self, ... }:
+{ ... }:
 {
   perSystem =
     { ... }:
@@ -22,28 +22,11 @@
         roles.default.extraModules = [
           {
-
-            imports = [
-              # self.nixosModules.clanCore
-              self.clanModules.localbackup
-            ];
 
             clan.core.postgresql.enable = true;
             clan.core.postgresql.users.test = { };
             clan.core.postgresql.databases.test.create.options.OWNER = "test";
-            clan.core.postgresql.databases.test.restore.stopOnRestore = [ "sample-service" ];
-            clan.localbackup.targets.hdd.directory = "/mnt/external-disk";
             clan.core.settings.directory = ./.;
 
-            systemd.services.sample-service = {
-              wantedBy = [ "multi-user.target" ];
-              script = ''
-                while true; do
-                  echo "Hello, world!"
-                  sleep 5
-                done
-              '';
-            };
-
           }
         ];
       };
@@ -53,54 +36,15 @@
       # TODO: Broken. Use instead of importer after fixing.
       # nodes.machine = { };
 
-      testScript =
-
-        { nodes, ... }:
-        ''
-          start_all()
-          machine.wait_for_unit("postgresql")
-          machine.wait_for_unit("sample-service")
-          # Create a test table
-          machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -c 'CREATE TABLE test (id serial PRIMARY KEY);' test")
-
-          machine.succeed("/run/current-system/sw/bin/localbackup-create >&2")
-          timestamp_before = int(machine.succeed("systemctl show --property=ExecMainStartTimestampMonotonic sample-service | cut -d= -f2").strip())
-
-          # import time
-          # time.sleep(5400000)
-
-          machine.succeed("test -e /mnt/external-disk/snapshot.0/machine/var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")
-          machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'INSERT INTO test DEFAULT VALUES;'")
-          machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'DROP TABLE test;'")
-          machine.succeed("test -e /var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")
-
-          machine.succeed("rm -rf /var/backup/postgres")
-
-          machine.succeed("NAME=/mnt/external-disk/snapshot.0 FOLDERS=/var/backup/postgres/test /run/current-system/sw/bin/localbackup-restore >&2")
-          machine.succeed("test -e /var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")
-
-          machine.succeed("""
-          set -x
-          ${nodes.machine.clan.core.state.test.postRestoreCommand}
-          """)
-          machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -l >&2")
-          machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c '\dt' >&2")
-
-          timestamp_after = int(machine.succeed("systemctl show --property=ExecMainStartTimestampMonotonic sample-service | cut -d= -f2").strip())
-          assert timestamp_before < timestamp_after, f"{timestamp_before} >= {timestamp_after}: expected sample-service to be restarted after restore"
-
-          # Check that the table is still there
-          machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'SELECT * FROM test;'")
-          output = machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql --csv -c \"SELECT datdba::regrole FROM pg_database WHERE datname = 'test'\"")
-          owner = output.split("\n")[1]
-          assert owner == "test", f"Expected database owner to be 'test', got '{owner}'"
-
-          # check if restore works if the database does not exist
-          machine.succeed("runuser -u postgres -- dropdb test")
-          machine.succeed("${nodes.machine.clan.core.state.test.postRestoreCommand}")
-          machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c '\dt' >&2")
-        '';
+      testScript = ''
+        start_all()
+        machine.wait_for_unit("postgresql")
+
+        # Create a test table
+        machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -c 'CREATE TABLE test (id serial PRIMARY KEY);' test")
+        machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'INSERT INTO test DEFAULT VALUES;'")
+        machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'SELECT * FROM test;'")
+      '';
     };
   };
}
@@ -1,12 +1,14 @@
 import functools
 import json
 import logging
+import threading
 from collections.abc import Callable
 from dataclasses import dataclass, field
 from enum import IntEnum
+from time import sleep
 from typing import TYPE_CHECKING, Any
 
-from clan_lib.api import MethodRegistry
+from clan_lib.api import MethodRegistry, message_queue
 from clan_lib.api.tasks import WebThread
 from clan_lib.log_manager import LogManager
 
@@ -69,6 +71,22 @@ class Webview:
         if self.size:
             self.set_size(self.size)
 
+    def __post_init__(self) -> None:
+        self.setup_notify()  # Start the notification loop
+
+    def setup_notify(self) -> None:
+        def loop() -> None:
+            while True:
+                try:
+                    msg = message_queue.get()  # Blocks until available
+                    js_code = f"window.notifyBus({json.dumps(msg)});"
+                    self.eval(js_code)
+                except Exception as e:
+                    print("Bridge notify error:", e)
+                sleep(0.01)  # avoid busy loop
+
+        threading.Thread(target=loop, daemon=True).start()
+
     @property
     def handle(self) -> Any:
         """Get the webview handle, creating it if necessary."""
@@ -129,6 +147,7 @@ class Webview:
             webview=self, middleware_chain=tuple(self._middleware), threads={}
         )
         self._bridge = bridge
+
         return bridge
 
     # Legacy methods for compatibility
@@ -25,7 +25,6 @@ class WebviewBridge(ApiBridge):
 
     def send_api_response(self, response: BackendResponse) -> None:
         """Send response back to the webview client."""
-
        serialized = json.dumps(
            dataclass_to_dict(response), indent=4, ensure_ascii=False
        )
@@ -1,6 +1,5 @@
 {
   fetchurl,
-  fetchzip,
   runCommand,
 }:
 let
@@ -45,10 +44,9 @@ let
     };
   };
 
-  commitMono = fetchzip {
-    url = "https://github.com/eigilnikolajsen/commit-mono/releases/download/v1.143/CommitMono-1.143.zip";
-    stripRoot = false;
-    hash = "sha256-JTyPgWfbWq+lXQU/rgnyvPG6+V3f+FB5QUkd+I1oFKE=";
+  commitMono = fetchurl {
+    url = "https://github.com/eigilnikolajsen/commit-mono/raw/0b3b192f035cdc8d1ea8ffb5463cc23d73d0b89f/src/fonts/fontlab/CommitMonoV143-VF.woff2";
+    hash = "sha256-80LKbD8ll+bA/NhLPz7WTTzlvbbQrxnRkNZFpVixzyk=";
   };
 
 in
@@ -63,5 +61,5 @@ runCommand "" { } ''
   cp ${archivoSemi.medium} $out/ArchivoSemiCondensed-Medium.woff2
   cp ${archivoSemi.semiBold} $out/ArchivoSemiCondensed-SemiBold.woff2
 
-  cp ${commitMono}/CommitMono-1.143/CommitMono-400-Regular.otf $out/CommitMono-400-Regular.otf
+  cp ${commitMono} $out/CommitMonoV143-VF.woff2
 ''
pkgs/clan-app/ui/index.d.ts (vendored, new file, 9 lines)
@@ -0,0 +1,9 @@
import { ProcessMessage } from "./src/hooks/notify";

export {};

declare global {
  interface Window {
    notifyBus: (data: ProcessMessage) => void;
  }
}
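As a sketch of how the UI side might satisfy this global (not part of the commit, and the `ProcessMessage` shape is whatever `./src/hooks/notify` defines), a single dispatcher assigned to `window.notifyBus` can fan messages from the Python notify loop out to interested components:

```ts
import { ProcessMessage } from "./src/hooks/notify";

// Hypothetical glue: components register listeners, the backend calls window.notifyBus(<json>).
const listeners = new Set<(msg: ProcessMessage) => void>();

export const onNotify = (fn: (msg: ProcessMessage) => void) => {
  listeners.add(fn);
  return () => listeners.delete(fn);
};

window.notifyBus = (msg: ProcessMessage) => {
  listeners.forEach((fn) => fn(msg));
};
```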
@@ -1,8 +1,8 @@
 div.alert {
-  @apply flex gap-2.5 px-6 py-4 size-full rounded-md items-start;
+  @apply flex flex-row gap-2.5 p-4 rounded-md items-start;
 
   &.has-icon {
-    @apply pl-4;
+    @apply pl-3;
 
     svg.icon {
       @apply relative top-0.5;
@@ -10,11 +10,15 @@ div.alert {
   }
 
   &.has-dismiss {
-    @apply pr-4;
+    @apply pr-3;
+  }
+
+  & > button.dismiss-trigger {
+    @apply relative top-0.5;
   }
 
   & > div.content {
-    @apply flex flex-col gap-2 size-full;
+    @apply flex flex-col size-full gap-1;
   }
 
   &.info {
@@ -33,7 +37,7 @@ div.alert {
     @apply bg-semantic-success-1 border border-semantic-success-3 fg-semantic-success-3;
   }
 
-  & > button.dismiss-trigger {
-    @apply relative top-0.5;
+  &.transparent {
+    @apply bg-transparent border-none p-0;
   }
 }
@@ -3,16 +3,26 @@ import { Alert, AlertProps } from "@/src/components/Alert/Alert";
 import { expect, fn } from "storybook/test";
 import { StoryContext } from "@kachurun/storybook-solid-vite";
 
+const AlertExamples = (props: AlertProps) => (
+  <div class="grid w-fit grid-cols-2 gap-8">
+    <div class="w-72">
+      <Alert {...props} />
+    </div>
+    <div class="w-72">
+      <Alert {...props} size="s" />
+    </div>
+    <div class="w-72">
+      <Alert {...props} transparent />
+    </div>
+    <div class="w-72">
+      <Alert {...props} size="s" transparent />
+    </div>
+  </div>
+);
+
 const meta: Meta<AlertProps> = {
   title: "Components/Alert",
-  component: Alert,
-  decorators: [
-    (Story: StoryObj) => (
-      <div class="w-72">
-        <Story />
-      </div>
-    ),
-  ],
+  component: AlertExamples,
 };
 
 export default meta;
@@ -23,6 +33,7 @@ export const Info: Story = {
   args: {
     type: "info",
     title: "Headline",
+    onDismiss: undefined,
     description:
       "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua.",
   },
@@ -7,40 +7,63 @@ import { Alert as KAlert } from "@kobalte/core/alert";
 import { Show } from "solid-js";
 
 export interface AlertProps {
-  type: "success" | "error" | "warning" | "info";
-  title: string;
-  description?: string;
   icon?: IconVariant;
+  type: "success" | "error" | "warning" | "info";
+  size?: "default" | "s";
+  title: string;
   onDismiss?: () => void;
+  transparent?: boolean;
+  description?: string;
 }
 
-export const Alert = (props: AlertProps) => (
-  <KAlert
-    class={cx("alert", props.type, {
-      "has-icon": props.icon,
-      "has-dismiss": props.onDismiss,
-    })}
-  >
-    {props.icon && <Icon icon={props.icon} color="inherit" size="1rem" />}
-    <div class="content">
-      <Typography hierarchy="body" size="default" weight="bold" color="inherit">
-        {props.title}
-      </Typography>
-      <Show when={props.description}>
-        <Typography hierarchy="body" size="xs" color="inherit">
-          {props.description}
-        </Typography>
-      </Show>
-    </div>
-    {props.onDismiss && (
-      <Button
-        name="dismiss-alert"
-        class="dismiss-trigger"
-        onClick={props.onDismiss}
-        aria-label={`Dismiss ${props.type} alert`}
-      >
-        <Icon icon="Close" color="primary" size="0.75rem" />
-      </Button>
-    )}
-  </KAlert>
-);
+export const Alert = (props: AlertProps) => {
+  const size = () => props.size || "default";
+  const titleSize = () => (size() == "default" ? "default" : "xs");
+  const bodySize = () => (size() == "default" ? "xs" : "xxs");
+  const iconSize = () => (size() == "default" ? "1rem" : "0.75rem");
+
+  return (
+    <KAlert
+      class={cx("alert", props.type, {
+        "has-icon": props.icon,
+        "has-dismiss": props.onDismiss,
+        transparent: props.transparent,
+      })}
+    >
+      {props.icon && (
+        <Icon icon={props.icon} color="inherit" size={iconSize()} />
+      )}
+      <div class="content">
+        <Typography
+          hierarchy="body"
+          family="condensed"
+          size={titleSize()}
+          weight="bold"
+          color="inherit"
+        >
+          {props.title}
+        </Typography>
+        <Show when={props.description}>
+          <Typography
+            hierarchy="body"
+            family="condensed"
+            size={bodySize()}
+            color="inherit"
+          >
+            {props.description}
+          </Typography>
+        </Show>
+      </div>
+      {props.onDismiss && (
+        <Button
+          name="dismiss-alert"
+          class="dismiss-trigger"
+          onClick={props.onDismiss}
+          aria-label={`Dismiss ${props.type} alert`}
+        >
+          <Icon icon="Close" color="primary" size="0.75rem" />
+        </Button>
+      )}
+    </KAlert>
+  );
+};
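A usage sketch for the reworked component (not part of the diff; the title, description, and callback below are made up, but `type`, `size`, `transparent`, `title`, `description`, and `onDismiss` are all props of the new `AlertProps`):

```tsx
import { Alert } from "@/src/components/Alert/Alert";

const UpdateNotice = () => (
  <Alert
    type="info"
    size="s"
    transparent
    title="Update available"
    description="A new clan-core release can be applied to this machine."
    onDismiss={() => console.log("dismissed")}
  />
);

export default UpdateNotice;
```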
@@ -22,7 +22,7 @@ export interface ButtonProps
   startIcon?: IconVariant;
   endIcon?: IconVariant;
   class?: string;
-  onAction?: Action;
+  loading?: boolean;
 }
 
 const iconSizes: Record<Size, string> = {
@@ -40,31 +40,12 @@ export const Button = (props: ButtonProps) => {
     "startIcon",
     "endIcon",
     "class",
-    "onAction",
+    "loading",
   ]);
 
   const size = local.size || "default";
   const hierarchy = local.hierarchy || "primary";
 
-  const [loading, setLoading] = createSignal(false);
-
-  const onClick = async () => {
-    if (!local.onAction) {
-      console.error("this should not be possible");
-      return;
-    }
-
-    setLoading(true);
-
-    try {
-      await local.onAction();
-    } catch (error) {
-      console.error("Error while executing action", error);
-    }
-
-    setLoading(false);
-  };
-
   const iconSize = iconSizes[local.size || "default"];
 
   const loadingClass =
@@ -81,16 +62,19 @@ export const Button = (props: ButtonProps) => {
         hierarchy,
         {
           icon: local.icon,
-          loading: loading(),
+          loading: props.loading,
           ghost: local.ghost,
         },
       )}
-      onClick={local.onAction ? onClick : undefined}
+      onClick={props.onClick}
      {...other}
     >
      <Loader
        hierarchy={hierarchy}
-        class={cx({ [idleClass]: !loading(), [loadingClass]: loading() })}
+        class={cx({
+          [idleClass]: !props.loading,
+          [loadingClass]: props.loading,
+        })}
      />
 
      {local.startIcon && (
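With `onAction` removed, callers now own the loading state and pass it back in via the new `loading` prop. A sketch of that pattern follows; it is not from the commit, and the import path and `save` callback are assumptions for illustration:

```tsx
import { createSignal } from "solid-js";
import { Button } from "@/src/components/Button/Button"; // assumed path

export const SaveButton = (props: { save: () => Promise<void> }) => {
  const [loading, setLoading] = createSignal(false);

  const onClick = async () => {
    setLoading(true);
    try {
      await props.save();
    } finally {
      setLoading(false);
    }
  };

  // The Button only renders the spinner; the caller decides when it spins.
  return (
    <Button hierarchy="primary" loading={loading()} onClick={onClick}>
      Save
    </Button>
  );
};
```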
@@ -1,8 +1,8 @@
 hr {
-  @apply border-none outline-none bg-inv-2 self-stretch;
+  @apply border-none outline-none bg-def-3 self-stretch;
 
   &.inverted {
-    @apply bg-def-3;
+    @apply bg-inv-2;
   }
 
   &[data-orientation="horizontal"] {
@@ -1,136 +0,0 @@ (file removed)
import type { Meta, StoryContext, StoryObj } from "@kachurun/storybook-solid";
import cx from "classnames";

import { Combobox, ComboboxProps } from "./Combobox";

const ComboboxExamples = (props: ComboboxProps<string>) => (
  <div class="flex flex-col gap-8">
    <div class="flex flex-col gap-8 p-8">
      <Combobox {...props} />
      <Combobox {...props} size="s" />
    </div>
    <div class="flex flex-col gap-8 p-8 bg-inv-acc-3">
      <Combobox {...props} inverted={true} />
      <Combobox {...props} inverted={true} size="s" />
    </div>
    <div class="flex flex-col gap-8 p-8">
      <Combobox {...props} orientation="horizontal" />
      <Combobox {...props} orientation="horizontal" size="s" />
    </div>
    <div class="flex flex-col gap-8 p-8 bg-inv-acc-3">
      <Combobox {...props} inverted={true} orientation="horizontal" />
      <Combobox {...props} inverted={true} orientation="horizontal" size="s" />
    </div>
  </div>
);

const meta = {
  title: "Components/Form/Combobox",
  component: ComboboxExamples,
  decorators: [
    (Story: StoryObj, context: StoryContext<ComboboxProps<string>>) => {
      return (
        <div
          class={cx({
            "w-[600px]": (context.args.orientation || "vertical") == "vertical",
            "w-[1024px]": context.args.orientation == "horizontal",
            "bg-inv-acc-3": context.args.inverted,
          })}
        >
          <Story />
        </div>
      );
    },
  ],
} satisfies Meta<ComboboxProps<string>>;

export default meta;

export type Story = StoryObj<typeof meta>;

export const Bare: Story = {
  args: {
    options: ["foo", "bar", "baz"],
    defaultValue: "foo",
  },
};

export const Label: Story = {
  args: {
    ...Bare.args,
    label: "DOB",
  },
};

export const Description: Story = {
  args: {
    ...Label.args,
    description: "The date you were born",
  },
};

export const Required: Story = {
  args: {
    ...Description.args,
    required: true,
  },
};

export const Multiple: Story = {
  args: {
    ...Description.args,
    required: true,
    multiple: true,
    defaultValue: ["foo", "bar"],
  },
};

export const Tooltip: Story = {
  args: {
    ...Required.args,
    tooltip: "The day you came out of your momma",
  },
};

export const Ghost: Story = {
  args: {
    ...Tooltip.args,
    ghost: true,
  },
};

export const Invalid: Story = {
  args: {
    ...Tooltip.args,
    validationState: "invalid",
  },
};

export const Disabled: Story = {
  args: {
    ...Tooltip.args,
    disabled: true,
  },
};

export const MultipleDisabled: Story = {
  args: {
    ...Multiple.args,
    disabled: true,
  },
};

export const ReadOnly: Story = {
  args: {
    ...Tooltip.args,
    readOnly: true,
    defaultValue: "foo",
  },
};

export const MultipleReadonly: Story = {
  args: {
    ...Multiple.args,
    readOnly: true,
  },
};
@@ -1,181 +0,0 @@ (file removed)
import Icon from "@/src/components/Icon/Icon";
import {
  Combobox as KCombobox,
  ComboboxRootOptions as KComboboxRootOptions,
} from "@kobalte/core/combobox";
import { isFunction } from "@kobalte/utils";

import "./Combobox.css";
import { CollectionNode } from "@kobalte/core";
import { Label } from "./Label";
import cx from "classnames";
import { FieldProps } from "./Field";
import { Orienter } from "./Orienter";
import { Typography } from "@/src/components/Typography/Typography";
import { Accessor, Component, For, Show, splitProps } from "solid-js";
import { Tag } from "@/src/components/Tag/Tag";

export type ComboboxProps<Option, OptGroup = never> = FieldProps &
  KComboboxRootOptions<Option, OptGroup> & {
    inverted: boolean;
    itemControl?: Component<ComboboxControlState<Option>>;
  };

export const DefaultItemComponent = <Option,>(
  props: ComboboxItemComponentProps<Option>,
) => {
  return (
    <ComboboxItem item={props.item} class="item">
      <ComboboxItemLabel>
        <Typography hierarchy="body" size="xs" weight="bold">
          {props.item.textValue}
        </Typography>
      </ComboboxItemLabel>
      <ComboboxItemIndicator class="item-indicator">
        <Icon icon="Checkmark" />
      </ComboboxItemIndicator>
    </ComboboxItem>
  );
};

// adapted from https://github.com/kobaltedev/kobalte/blob/98a4810903c0c425d28cef4f0d1984192a225788/packages/core/src/combobox/combobox-base.tsx#L439
const getOptionTextValue = <Option,>(
  option: Option,
  optionTextValue:
    | keyof Exclude<Option, null>
    | ((option: Exclude<Option, null>) => string)
    | undefined,
) => {
  if (optionTextValue == null) {
    // If no `optionTextValue`, the option itself is the label (ex: string[] of options).
    return String(option);
  }

  // Get the label from the option object as a string.
  return String(
    isFunction(optionTextValue)
      ? optionTextValue(option as never)
      : (option as never)[optionTextValue],
  );
};

export const DefaultItemControl = <Option,>(
  props: ComboboxControlState<Option>,
) => (
  <>
    <Show when={props.multiple}>
      <div class="selected-options">
        <For each={props.selectedOptions()}>
          {(option) => (
            <Tag
              inverted={props.inverted}
              label={getOptionTextValue<Option>(option, props.optionTextValue)}
              action={
                props.disabled || props.readOnly
                  ? undefined
                  : {
                      icon: "Close",
                      onClick: () => props.remove(option),
                    }
              }
            />
          )}
        </For>
      </div>
    </Show>
    {!(props.readOnly && props.multiple) && (
      <div class="input-container">
        <KCombobox.Input />
        {!props.readOnly && (
          <KCombobox.Trigger class="trigger">
            <KCombobox.Icon class="icon">
              <Icon icon="Expand" inverted={props.inverted} size="100%" />
            </KCombobox.Icon>
          </KCombobox.Trigger>
        )}
      </div>
    )}
  </>
);

// todo aria-label on combobox.control and combobox.input
export const Combobox = <Option, OptGroup = never>(
  props: ComboboxProps<Option, OptGroup>,
) => {
  const itemControl = () => props.itemControl || DefaultItemControl;
  const itemComponent = () => props.itemComponent || DefaultItemComponent;

  const align = () => {
    if (props.readOnly) {
      return "center";
    } else {
      return props.orientation === "horizontal" ? "start" : "center";
    }
  };

  return (
    <KCombobox
      class={cx("form-field", "combobox", props.size, props.orientation, {
        inverted: props.inverted,
        ghost: props.ghost,
      })}
      {...props}
      itemComponent={itemComponent()}
    >
      <Orienter orientation={props.orientation} align={align()}>
        <Label
          labelComponent={KCombobox.Label}
          descriptionComponent={KCombobox.Description}
          {...props}
        />

        <KCombobox.Control<Option> class="control">
          {(state) => {
            const [controlProps] = splitProps(props, [
              "inverted",
              "multiple",
              "readOnly",
              "disabled",
            ]);
            return itemControl()({ ...state, ...controlProps });
          }}
        </KCombobox.Control>

        <KCombobox.Portal>
          <KCombobox.Content class="combobox-content">
            <KCombobox.Listbox class="listbox" />
          </KCombobox.Content>
        </KCombobox.Portal>
      </Orienter>
    </KCombobox>
  );
};

// todo can we replicate the . notation that Kobalte achieves with their type definitions?
export const ComboboxItem = KCombobox.Item;
export const ComboboxItemDescription = KCombobox.ItemDescription;
export const ComboboxItemIndicator = KCombobox.ItemIndicator;
export const ComboboxItemLabel = KCombobox.ItemLabel;

// these interfaces were not exported, so we re-declare them
export interface ComboboxItemComponentProps<Option> {
  /** The item to render. */
  item: CollectionNode<Option>;
}

export interface ComboboxSectionComponentProps<OptGroup> {
  /** The section to render. */
  section: CollectionNode<OptGroup>;
}

type ComboboxControlState<Option> = Pick<
  ComboboxProps<Option>,
  "optionTextValue" | "inverted" | "multiple" | "size" | "readOnly" | "disabled"
> & {
  /** The selected options. */
  selectedOptions: Accessor<Option[]>;
  /** A function to remove an option from the selection. */
  remove: (option: Option) => void;
  /** A function to clear the selection. */
  clear: () => void;
};
@@ -1,9 +1,9 @@
-div.form-field.combobox {
+div.form-field.machine-tags {
   div.control {
     @apply flex flex-col size-full gap-2;
 
     div.selected-options {
-      @apply flex flex-wrap gap-1 size-full min-h-5;
+      @apply flex flex-wrap gap-2 size-full min-h-5;
     }
 
     div.input-container {
@@ -137,14 +137,14 @@ div.form-field.combobox {
   }
 }
 
-div.combobox-content {
-  @apply rounded-sm bg-def-1 border border-def-2;
+div.machine-tags-content {
+  @apply rounded-sm bg-def-1 border border-def-2 z-10;
 
   transform-origin: var(--kb-combobox-content-transform-origin);
-  animation: comboboxContentHide 250ms ease-in forwards;
+  animation: machineTagsContentHide 250ms ease-in forwards;
 
   &[data-expanded] {
-    animation: comboboxContentShow 250ms ease-out;
+    animation: machineTagsContentShow 250ms ease-out;
   }
 
   & > ul.listbox {
@@ -186,7 +186,7 @@ div.combobox-content {
   }
 }
 
-div.combobox-control {
+div.machine-tags-control {
   @apply flex flex-col w-full gap-2;
 
   & > div.selected-options {
@@ -198,7 +198,7 @@ div.combobox-control {
   }
 }
 
-@keyframes comboboxContentShow {
+@keyframes machineTagsContentShow {
   from {
     opacity: 0;
     transform: translateY(-8px);
@@ -209,7 +209,7 @@ div.combobox-control {
   }
 }
 
-@keyframes comboboxContentHide {
+@keyframes machineTagsContentHide {
   from {
     opacity: 1;
     transform: translateY(0);
|
||||||
pkgs/clan-app/ui/src/components/Form/MachineTags.tsx (Normal file, 206 lines)
@@ -0,0 +1,206 @@
import { Combobox } from "@kobalte/core/combobox";
import { FieldProps } from "./Field";
import { ComponentProps, createSignal, For, Show, splitProps } from "solid-js";
import Icon from "../Icon/Icon";
import cx from "classnames";
import { Typography } from "@/src/components/Typography/Typography";
import { Tag } from "@/src/components/Tag/Tag";

import "./MachineTags.css";
import { Label } from "@/src/components/Form/Label";
import { Orienter } from "@/src/components/Form/Orienter";
import { CollectionNode } from "@kobalte/core";

export interface MachineTag {
  value: string;
  disabled?: boolean;
  new?: boolean;
}

export type MachineTagsProps = FieldProps & {
  name: string;
  input: ComponentProps<"select">;
  readOnly?: boolean;
  disabled?: boolean;
  required?: boolean;
  defaultValue?: string[];
  defaultOptions?: string[];
  readonlyOptions?: string[];
};

const uniqueOptions = (options: MachineTag[]) => {
  const record: Record<string, MachineTag> = {};
  options.forEach((option) => {
    // we want to preserve the first one we encounter
    // this allows us to prefix the default 'all' tag
    record[option.value] = record[option.value] || option;
  });
  return Object.values(record);
};

const sortedOptions = (options: MachineTag[]) =>
  options.sort((a, b) => a.value.localeCompare(b.value));

const sortedAndUniqueOptions = (options: MachineTag[]) =>
  sortedOptions(uniqueOptions(options));

// customises how each option is displayed in the dropdown
const ItemComponent = (props: { item: CollectionNode<MachineTag> }) => {
  return (
    <Combobox.Item item={props.item} class="item">
      <Combobox.ItemLabel>
        <Typography hierarchy="body" size="xs" weight="bold">
          {props.item.textValue}
        </Typography>
      </Combobox.ItemLabel>
      <Combobox.ItemIndicator class="item-indicator">
        <Icon icon="Checkmark" />
      </Combobox.ItemIndicator>
    </Combobox.Item>
  );
};

export const MachineTags = (props: MachineTagsProps) => {
  // convert default value string[] into MachineTag[]
  const defaultValue = sortedAndUniqueOptions(
    (props.defaultValue || []).map((value) => ({ value })),
  );

  // convert default options string[] into MachineTag[]
  const [availableOptions, setAvailableOptions] = createSignal<MachineTag[]>(
    sortedAndUniqueOptions([
      ...(props.readonlyOptions || []).map((value) => ({
        value,
        disabled: true,
      })),
      ...(props.defaultOptions || []).map((value) => ({ value })),
    ]),
  );

  const onKeyDown = (event: KeyboardEvent) => {
    // react when enter is pressed inside of the text input
    if (event.key === "Enter") {
      event.preventDefault();
      event.stopPropagation();

      // get the current input value, exiting early if it's empty
      const input = event.currentTarget as HTMLInputElement;
      if (input.value === "") return;

      setAvailableOptions((options) => {
        return options.map((option) => {
          return {
            ...option,
            new: undefined,
          };
        });
      });

      // reset the input value
      input.value = "";
    }
  };

  const align = () => {
    if (props.readOnly) {
      return "center";
    } else {
      return props.orientation === "horizontal" ? "start" : "center";
    }
  };

  return (
    <Combobox<MachineTag>
      multiple
      class={cx("form-field", "machine-tags", props.size, props.orientation, {
        inverted: props.inverted,
        ghost: props.ghost,
      })}
      {...splitProps(props, ["defaultValue"])[1]}
      defaultValue={defaultValue}
      options={availableOptions()}
      optionValue="value"
      optionTextValue="value"
      optionLabel="value"
      optionDisabled="disabled"
      itemComponent={ItemComponent}
      placeholder="Enter a tag name"
      // triggerMode="focus"
      removeOnBackspace={false}
      defaultFilter={() => true}
      onInput={(event) => {
        const input = event.target as HTMLInputElement;

        // as the user types in the input box, we maintain a "new" option
        // in the list of available options
        setAvailableOptions((options) => {
          return [
            // remove the old "new" entry
            ...options.filter((option) => !option.new),
            // add the updated "new" entry
            { value: input.value, new: true },
          ];
        });
      }}
      onBlur={() => {
        // clear the in-progress "new" option from the list of available options
        setAvailableOptions((options) => {
          return options.filter((option) => !option.new);
        });
      }}
    >
      <Orienter orientation={props.orientation} align={align()}>
        <Label
          labelComponent={Combobox.Label}
          descriptionComponent={Combobox.Description}
          {...props}
        />

        <Combobox.HiddenSelect {...props.input} multiple />

        <Combobox.Control<MachineTag> class="control">
          {(state) => (
            <div class="selected-options">
              <For each={state.selectedOptions()}>
                {(option) => (
                  <Tag
                    label={option.value}
                    inverted={props.inverted}
                    action={
                      option.disabled || props.disabled || props.readOnly
                        ? undefined
                        : {
                            icon: "Close",
                            onClick: () => state.remove(option),
                          }
                    }
                  />
                )}
              </For>
              <Show when={!props.readOnly}>
                <div class="input-container">
                  <Combobox.Input onKeyDown={onKeyDown} />
                  <Combobox.Trigger class="trigger">
                    <Combobox.Icon class="icon">
                      <Icon
                        icon="Expand"
                        inverted={!props.inverted}
                        size="100%"
                      />
                    </Combobox.Icon>
                  </Combobox.Trigger>
                </div>
              </Show>
            </div>
          )}
        </Combobox.Control>
      </Orienter>

      <Combobox.Portal>
        <Combobox.Content class="machine-tags-content">
          <Combobox.Listbox class="listbox" />
        </Combobox.Content>
      </Combobox.Portal>
    </Combobox>
  );
};
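For orientation, a hypothetical usage of the new MachineTags field. The prop values below are illustrative only, and label/orientation are assumed to come from FieldProps, which is defined elsewhere in the codebase:

// Hypothetical usage sketch; every value below is an example, not taken from this diff.
import { MachineTags } from "@/src/components/Form/MachineTags";

const MachineTagsExample = () => (
  <MachineTags
    name="tags"
    label="Tags" // assumed FieldProps field
    orientation="horizontal" // assumed FieldProps field
    input={{ name: "tags" }} // forwarded to Combobox.HiddenSelect
    defaultValue={["backup"]} // pre-selected tags
    defaultOptions={["backup", "desktop"]} // selectable suggestions
    readonlyOptions={["all"]} // rendered as non-removable tags
  />
);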
@@ -48,9 +48,6 @@ export const TextArea = (props: TextAreaProps) => {
     // Update the height
     textareaRef.style.height = `${newHeight}px`;
     textareaRef.style.maxHeight = `${maxHeight}px`;
-
-    console.log("min/max height", minHeight, maxHeight);
-    console.log("textarea ref style", textareaRef.style);
   };

   // Set up auto-resize effect
pkgs/clan-app/ui/src/components/Form/index.tsx (Normal file, 29 lines)
@@ -0,0 +1,29 @@
import { SuccessData } from "@/src/hooks/api";
import { Maybe } from "@modular-forms/solid";

export const tooltipText = (
  name: string,
  schema: SuccessData<"get_machine_fields_schema">,
  staticValue: Maybe<string> = undefined,
): Maybe<string> => {
  const entry = schema[name];

  // return the static value if there is no field schema entry, or the entry
  // indicates the field is writeable
  if (!(entry && entry.readonly)) {
    return staticValue;
  }

  const components: string[] = [];

  if (staticValue) {
    components.push(staticValue);
  }

  components.push(`This field is read-only`);
  if (entry.reason) {
    components.push(entry.reason);
  }

  return components.join(". ");
};
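A worked example of how tooltipText assembles its result. The schema literal is a hypothetical shape for the get_machine_fields_schema response, used only to show both branches:

// Hypothetical schema entries; the real shape comes from the generated API types,
// hence the loose double cast for this sketch.
const exampleSchema = {
  name: { readonly: true, reason: "Defined in the inventory" },
  description: { readonly: false },
} as unknown as SuccessData<"get_machine_fields_schema">;

// read-only entry: static text, the read-only notice and the reason are joined with ". "
tooltipText("name", exampleSchema, "The machine name");
// => "The machine name. This field is read-only. Defined in the inventory"

// writeable entry: the static value is returned unchanged
tooltipText("description", exampleSchema, "A short description");
// => "A short description"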