Compare commits

..

5 Commits

Author SHA1 Message Date
pinpox
998aaec269 wip 2025-07-22 17:39:50 +02:00
pinpox
77bb690b87 wip 2025-07-22 16:57:24 +02:00
pinpox
85e968c4f7 wip 2025-07-22 16:26:42 +02:00
pinpox
f84da1cf62 Add wait_for_file testing helper 2025-07-22 16:23:20 +02:00
pinpox
af8f4f00c2 add syncthing services 2025-07-22 16:00:01 +02:00
454 changed files with 6519 additions and 14589 deletions

View File

View File

@@ -0,0 +1,28 @@
# Scheduled CI workflow: refreshes the pinned clan-core revision used by the
# checks (see clan-core-for-checks.nix) and opens a PR with the result.
name: "Update pinned clan-core for checks"
on:
  # Allow external triggers and manual runs in addition to the schedule.
  repository_dispatch:
  workflow_dispatch:
  schedule:
    # Daily at 02:51 UTC (odd minute to avoid top-of-hour load spikes).
    - cron: "51 2 * * *"
jobs:
  update-pinned-clan-core:
    runs-on: nix
    steps:
      - uses: actions/checkout@v4
        with:
          # create-pr.sh lives in a submodule-backed path; checkout needs them.
          submodules: true
      - name: Update clan-core for checks
        # Bumps the rev/sha256 pin consumed by the checks.
        run: nix run .#update-clan-core-for-checks
      - name: Create pull request
        env:
          # Token used by create-pr.sh to open the PR as the bot account.
          CI_BOT_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
        run: |
          export GIT_AUTHOR_NAME=clan-bot GIT_AUTHOR_EMAIL=clan-bot@clan.lol GIT_COMMITTER_NAME=clan-bot GIT_COMMITTER_EMAIL=clan-bot@clan.lol
          git commit -am "Update pinned clan-core for checks"
          # Use shared PR creation script
          export PR_BRANCH="update-clan-core-for-checks"
          export PR_TITLE="Update Clan Core for Checks"
          export PR_BODY="This PR updates the pinned clan-core flake input that is used for checks."
          ./.gitea/workflows/create-pr.sh

View File

@@ -24,7 +24,7 @@ If you're new to Clan and eager to dive in, start with our quickstart guide and
In the Clan ecosystem, security is paramount. Learn how to handle secrets effectively:
- **Secrets Management**: Securely manage secrets by consulting [Vars](https://docs.clan.lol/concepts/generators/)<!-- [secrets.md](docs/site/concepts/generators.md) -->.
- **Secrets Management**: Securely manage secrets by consulting [secrets](https://docs.clan.lol/guides/getting-started/secrets/)<!-- [secrets.md](docs/site/guides/getting-started/secrets.md) -->.
### Contributing to Clan

View File

@@ -0,0 +1,210 @@
# Flake-parts module wiring the `clan backups` integration test.
# A single container machine plays both the borgbackup client and server role
# and also carries a localbackup target; the test then exercises
# create/list/restore for both backends end to end.
{ self, ... }:
{
  clan.machines.test-backup = {
    imports = [ self.nixosModules.test-backup ];
    # Dummy devices: the machine is only ever evaluated/built for the test.
    fileSystems."/".device = "/dev/null";
    boot.loader.grub.device = "/dev/null";
  };
  clan.inventory.services = {
    borgbackup.test-backup = {
      # The same machine acts as both backup client and backup server.
      roles.client.machines = [ "test-backup" ];
      roles.server.machines = [ "test-backup" ];
    };
  };

  flake.nixosModules = {
    test-backup =
      {
        pkgs,
        lib,
        ...
      }:
      let
        # Store paths the sandboxed VM needs available offline: stdenv plus
        # every flake input except self.
        dependencies = [
          pkgs.stdenv.drvPath
        ]
        ++ builtins.map (i: i.outPath) (builtins.attrValues (builtins.removeAttrs self.inputs [ "self" ]));
        closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
      in
      {
        imports = [
          # Do not import inventory modules. They should be configured via 'clan.inventory'
          #
          # TODO: Configure localbackup via inventory
          self.clanModules.localbackup
        ];

        # Borgbackup overrides
        services.borgbackup.repos.test-backups = {
          path = "/var/lib/borgbackup/test-backups";
          authorizedKeys = [ (builtins.readFile ../assets/ssh/pubkey) ];
        };
        clan.borgbackup.destinations.test-backup.repo = lib.mkForce "borg@machine:.";

        clan.core.networking.targetHost = "machine";
        networking.hostName = "machine";

        # Trust our own host key so borg can ssh to "machine" (i.e. itself).
        programs.ssh.knownHosts = {
          machine.hostNames = [ "machine" ];
          machine.publicKey = builtins.readFile ../assets/ssh/pubkey;
        };

        services.openssh = {
          enable = true;
          settings.UsePAM = false;
          settings.UseDns = false;
          hostKeys = [
            {
              path = "/root/.ssh/id_ed25519";
              type = "ed25519";
            }
          ];
        };

        users.users.root.openssh.authorizedKeys.keyFiles = [ ../assets/ssh/pubkey ];

        # This is needed to unlock the user for sshd
        # Because we use sshd without setuid binaries
        users.users.borg.initialPassword = "hello";

        # Pre-seed the secrets expected by the "vm" secret store via tmpfiles:
        # C copies the file into place, z fixes mode/ownership afterwards.
        systemd.tmpfiles.settings."vmsecrets" = {
          "/root/.ssh/id_ed25519" = {
            C.argument = "${../assets/ssh/privkey}";
            z = {
              mode = "0400";
              user = "root";
            };
          };
          "/etc/secrets/ssh.id_ed25519" = {
            C.argument = "${../assets/ssh/privkey}";
            z = {
              mode = "0400";
              user = "root";
            };
          };
          "/etc/secrets/borgbackup/borgbackup.ssh" = {
            C.argument = "${../assets/ssh/privkey}";
            z = {
              mode = "0400";
              user = "root";
            };
          };
          "/etc/secrets/borgbackup/borgbackup.repokey" = {
            C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
            z = {
              mode = "0400";
              user = "root";
            };
          };
        };
        clan.core.facts.secretStore = "vm";
        clan.core.vars.settings.secretStore = "vm";

        environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
        environment.etc.install-closure.source = "${closureInfo}/store-paths";

        # Keep the test hermetic: no substituters, short timeouts, empty
        # flake registry so nothing is fetched from the network.
        nix.settings = {
          substituters = lib.mkForce [ ];
          hashed-mirrors = null;
          connect-timeout = lib.mkForce 3;
          flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
        };
        system.extraDependencies = dependencies;

        clan.core.state.test-backups.folders = [ "/var/test-backups" ];

        # Marker files let the test script assert that each hook actually ran.
        clan.core.state.test-service = {
          preBackupScript = ''
            touch /var/test-service/pre-backup-command
          '';
          preRestoreScript = ''
            touch /var/test-service/pre-restore-command
          '';
          postRestoreScript = ''
            touch /var/test-service/post-restore-command
          '';
          folders = [ "/var/test-service" ];
        };

        fileSystems."/mnt/external-disk" = {
          device = "/dev/vdb"; # created in tests with virtualisation.emptyDisks
          autoFormat = true;
          fsType = "ext4";
          options = [
            "defaults"
            "noauto"
          ];
        };

        clan.localbackup.targets.hdd = {
          directory = "/mnt/external-disk";
          preMountHook = ''
            touch /run/mount-external-disk
          '';
          postUnmountHook = ''
            touch /run/unmount-external-disk
          '';
        };
      };
  };

  perSystem =
    { pkgs, ... }:
    let
      clanCore = self.checks.x86_64-linux.clan-core-for-checks;
    in
    {
      checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
        nixos-test-backups = self.clanLib.test.containerTest {
          name = "nixos-test-backups";
          nodes.machine = {
            imports = [
              self.nixosModules.clanCore
              # Some custom overrides for the backup tests
              self.nixosModules.test-backup
            ]
            ++
            # import the inventory generated nixosModules
            self.clan.clanInternals.inventoryClass.machines.test-backup.machineImports;
            clan.core.settings.directory = ./.;
          };
          testScript = ''
            import json

            start_all()

            # dummy data
            machine.succeed("mkdir -p /var/test-backups /var/test-service")
            machine.succeed("echo testing > /var/test-backups/somefile")

            # create
            machine.succeed("clan backups create --debug --flake ${clanCore} test-backup")
            machine.wait_until_succeeds("! systemctl is-active borgbackup-job-test-backup >&2")
            machine.succeed("test -f /run/mount-external-disk")
            machine.succeed("test -f /run/unmount-external-disk")

            # list
            backup_id = json.loads(machine.succeed("borg-job-test-backup list --json"))["archives"][0]["archive"]
            out = machine.succeed("clan backups list --debug --flake ${clanCore} test-backup").strip()
            print(out)
            assert backup_id in out, f"backup {backup_id} not found in {out}"
            localbackup_id = "hdd::/mnt/external-disk/snapshot.0"
            # Fix: message was missing the f-prefix, so "{out}" was printed
            # literally instead of the actual listing on failure.
            assert localbackup_id in out, f"localbackup not found in {out}"

            ## borgbackup restore
            machine.succeed("rm -f /var/test-backups/somefile")
            machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup borgbackup 'test-backup::borg@machine:.::{backup_id}' >&2")
            assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
            machine.succeed("test -f /var/test-service/pre-restore-command")
            machine.succeed("test -f /var/test-service/post-restore-command")
            machine.succeed("test -f /var/test-service/pre-backup-command")

            ## localbackup restore
            machine.succeed("rm -rf /var/test-backups/somefile /var/test-service/ && mkdir -p /var/test-service")
            machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup localbackup '{localbackup_id}' >&2")
            assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
            machine.succeed("test -f /var/test-service/pre-restore-command")
            machine.succeed("test -f /var/test-service/post-restore-command")
            machine.succeed("test -f /var/test-service/pre-backup-command")
          '';
        } { inherit pkgs self; };
      };
    };
}

View File

@@ -1,6 +1,6 @@
# Pinned clan-core checkout used by the CI checks; the rev/sha256 pair is
# refreshed automatically by the "Update pinned clan-core for checks" workflow.
#
# Fix: the file contained two `rev` and two `sha256` bindings (stale pin left
# next to the new one), which Nix rejects as duplicate attributes. Only the
# current pin is kept.
{ fetchgit }:
fetchgit {
  url = "https://git.clan.lol/clan/clan-core.git";
  rev = "eea93ea22c9818da67e148ba586277bab9e73cea";
  sha256 = "sha256-PV0Z+97QuxQbkYSVuNIJwUNXMbHZG/vhsA9M4cDTCOE=";
}

View File

@@ -2,7 +2,6 @@
self,
lib,
inputs,
privateInputs ? { },
...
}:
let
@@ -20,29 +19,18 @@ let
nixosLib = import (self.inputs.nixpkgs + "/nixos/lib") { };
in
{
imports =
let
clanCoreModulesDir = ../nixosModules/clanCore;
getClanCoreTestModules =
let
moduleNames = attrNames (builtins.readDir clanCoreModulesDir);
testPaths = map (
moduleName: clanCoreModulesDir + "/${moduleName}/tests/flake-module.nix"
) moduleNames;
in
filter pathExists testPaths;
in
getClanCoreTestModules
++ filter pathExists [
./devshell/flake-module.nix
./flash/flake-module.nix
./impure/flake-module.nix
./installation/flake-module.nix
./update/flake-module.nix
./morph/flake-module.nix
./nixos-documentation/flake-module.nix
./dont-depend-on-repo-root.nix
];
imports = filter pathExists [
./backups/flake-module.nix
../nixosModules/clanCore/machine-id/tests/flake-module.nix
../nixosModules/clanCore/state-version/tests/flake-module.nix
./devshell/flake-module.nix
./flash/flake-module.nix
./impure/flake-module.nix
./installation/flake-module.nix
./morph/flake-module.nix
./nixos-documentation/flake-module.nix
./dont-depend-on-repo-root.nix
];
flake.check = genAttrs [ "x86_64-linux" "aarch64-darwin" ] (
system:
let
@@ -100,6 +88,7 @@ in
nixos-test-container = self.clanLib.test.containerTest ./container nixosTestArgs;
nixos-test-zt-tcp-relay = self.clanLib.test.containerTest ./zt-tcp-relay nixosTestArgs;
nixos-test-matrix-synapse = self.clanLib.test.containerTest ./matrix-synapse nixosTestArgs;
nixos-test-postgresql = self.clanLib.test.containerTest ./postgresql nixosTestArgs;
nixos-test-user-firewall-iptables = self.clanLib.test.containerTest ./user-firewall/iptables.nix nixosTestArgs;
nixos-test-user-firewall-nftables = self.clanLib.test.containerTest ./user-firewall/nftables.nix nixosTestArgs;
@@ -157,12 +146,9 @@ in
'';
clan-core-for-checks = pkgs.runCommand "clan-core-for-checks" { } ''
cp -r ${privateInputs.clan-core-for-checks} $out
chmod -R +w $out
cp -r ${pkgs.callPackage ./clan-core-for-checks.nix { }} $out
chmod +w $out/flake.lock
cp ${../flake.lock} $out/flake.lock
# Create marker file to disable private flake loading in tests
touch $out/.skip-private-inputs
'';
};
packages = lib.optionalAttrs (pkgs.stdenv.isLinux) {

View File

@@ -2,7 +2,6 @@
config,
self,
lib,
privateInputs,
...
}:
{
@@ -51,8 +50,7 @@
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript.drvPath
]
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in
{
@@ -62,10 +60,6 @@
nodes.target = {
virtualisation.emptyDiskImages = [ 4096 ];
virtualisation.memorySize = 4096;
virtualisation.useNixStoreImage = true;
virtualisation.writableStore = true;
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
environment.etc."install-closure".source = "${closureInfo}/store-paths";
@@ -84,8 +78,8 @@
start_all()
# Some distros like to automount disks with spaces
machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdc && mount /dev/vdc "/mnt/with spaces"')
machine.succeed("clan flash write --debug --flake ${privateInputs.clan-core-for-checks} --yes --disk main /dev/vdc test-flash-machine-${pkgs.hostPlatform.system}")
machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdb && mount /dev/vdb "/mnt/with spaces"')
machine.succeed("clan flash write --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdb test-flash-machine-${pkgs.hostPlatform.system}")
'';
} { inherit pkgs self; };
};

View File

@@ -1,7 +1,6 @@
{
self,
lib,
privateInputs,
...
}:
@@ -150,17 +149,17 @@
# vm-test-run-test-installation-> target: To debug, enter the VM and run 'systemctl status backdoor.service'.
checks =
let
# Custom Python package for port management utilities
closureInfo = pkgs.closureInfo {
rootPaths = [
privateInputs.clan-core-for-checks
self.checks.x86_64-linux.clan-core-for-checks
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.initialRamdisk
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
pkgs.stdenv.drvPath
pkgs.bash.drvPath
pkgs.buildPackages.xorg.lndir
]
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
};
in
pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
@@ -208,7 +207,7 @@
# Prepare test flake and Nix store
flake_dir = prepare_test_flake(
temp_dir,
"${privateInputs.clan-core-for-checks}",
"${self.checks.x86_64-linux.clan-core-for-checks}",
"${closureInfo}"
)
@@ -226,7 +225,7 @@
"install",
"--phases", "disko,install",
"--debug",
"--flake", str(flake_dir),
"--flake", flake_dir,
"--yes", "test-install-machine-without-system",
"--target-host", f"nonrootuser@localhost:{ssh_conn.host_port}",
"-i", ssh_conn.ssh_key,
@@ -272,7 +271,7 @@
# Prepare test flake and Nix store
flake_dir = prepare_test_flake(
temp_dir,
"${privateInputs.clan-core-for-checks}",
"${self.checks.x86_64-linux.clan-core-for-checks}",
"${closureInfo}"
)
@@ -290,6 +289,9 @@
assert not os.path.exists(hw_config_file), "hardware-configuration.nix should not exist initially"
assert not os.path.exists(facter_file), "facter.json should not exist initially"
# Set CLAN_FLAKE for the commands
os.environ["CLAN_FLAKE"] = flake_dir
# Test facter backend
clan_cmd = [
"${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",

View File

@@ -159,8 +159,7 @@ let
pkgs.stdenv.drvPath
pkgs.bash.drvPath
pkgs.buildPackages.xorg.lndir
]
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
};
in

View File

@@ -1,6 +1,5 @@
{
self,
privateInputs,
...
}:
{
@@ -36,8 +35,7 @@
pkgs.stdenv.drvPath
pkgs.stdenvNoCC
self.nixosConfigurations.test-morph-machine.config.system.build.toplevel
]
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in
@@ -55,7 +53,7 @@
testScript = ''
start_all()
actual.fail("cat /etc/testfile")
actual.succeed("env CLAN_DIR=${privateInputs.clan-core-for-checks} clan machines morph test-morph-template --i-will-be-fired-for-using-this --debug --name test-morph-machine")
actual.succeed("env CLAN_DIR=${self.checks.x86_64-linux.clan-core-for-checks} clan machines morph test-morph-template --i-will-be-fired-for-using-this --debug --name test-morph-machine")
assert actual.succeed("cat /etc/testfile") == "morphed"
'';
} { inherit pkgs self; };

View File

@@ -0,0 +1,73 @@
# Container test: backup and restore of a PostgreSQL database through the
# localbackup module, including the stopOnRestore service-restart behaviour
# and database ownership preservation.
({
  name = "postgresql";
  nodes.machine =
    { self, config, ... }:
    {
      imports = [
        self.nixosModules.clanCore
        self.clanModules.postgresql
        self.clanModules.localbackup
      ];
      # Database "test" owned by role "test"; sample-service must be
      # restarted whenever this database is restored.
      clan.postgresql.users.test = { };
      clan.postgresql.databases.test.create.options.OWNER = "test";
      clan.postgresql.databases.test.restore.stopOnRestore = [ "sample-service" ];
      clan.localbackup.targets.hdd.directory = "/mnt/external-disk";
      clan.core.settings.directory = ./.;
      # Long-running dummy unit used only to observe the restart-on-restore
      # behaviour via its start timestamp.
      systemd.services.sample-service = {
        wantedBy = [ "multi-user.target" ];
        script = ''
          while true; do
            echo "Hello, world!"
            sleep 5
          done
        '';
      };
      environment.systemPackages = [ config.services.postgresql.package ];
    };
  testScript =
    { nodes, ... }:
    ''
      start_all()
      machine.wait_for_unit("postgresql")
      machine.wait_for_unit("sample-service")
      # Create a test table
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -c 'CREATE TABLE test (id serial PRIMARY KEY);' test")
      # Take a localbackup snapshot; the postgresql module's pre-backup hook
      # should have produced a pg-dump inside it.
      machine.succeed("/run/current-system/sw/bin/localbackup-create >&2")
      # Monotonic start timestamp before restore, to detect the restart later.
      timestamp_before = int(machine.succeed("systemctl show --property=ExecMainStartTimestampMonotonic sample-service | cut -d= -f2").strip())
      machine.succeed("test -e /mnt/external-disk/snapshot.0/machine/var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")
      # Mutate and then drop the table so the restore has something to undo.
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'INSERT INTO test DEFAULT VALUES;'")
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'DROP TABLE test;'")
      machine.succeed("test -e /var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")
      machine.succeed("rm -rf /var/backup/postgres")
      # Restore the dump folder from the snapshot, then run the module's
      # post-restore command to load it into postgres.
      machine.succeed("NAME=/mnt/external-disk/snapshot.0 FOLDERS=/var/backup/postgres/test /run/current-system/sw/bin/localbackup-restore >&2")
      machine.succeed("test -e /var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")
      machine.succeed("""
      set -x
      ${nodes.machine.clan.core.state.test.postRestoreCommand}
      """)
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -l >&2")
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c '\dt' >&2")
      # stopOnRestore must have restarted sample-service during the restore.
      timestamp_after = int(machine.succeed("systemctl show --property=ExecMainStartTimestampMonotonic sample-service | cut -d= -f2").strip())
      assert timestamp_before < timestamp_after, f"{timestamp_before} >= {timestamp_after}: expected sample-service to be restarted after restore"
      # Check that the table is still there
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'SELECT * FROM test;'")
      # Ownership must survive the restore (database was created with OWNER=test).
      output = machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql --csv -c \"SELECT datdba::regrole FROM pg_database WHERE datname = 'test'\"")
      owner = output.split("\n")[1]
      assert owner == "test", f"Expected database owner to be 'test', got '{owner}'"
      # check if restore works if the database does not exist
      machine.succeed("runuser -u postgres -- dropdb test")
      machine.succeed("${nodes.machine.clan.core.state.test.postRestoreCommand}")
      machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c '\dt' >&2")
    '';
})

View File

@@ -29,11 +29,19 @@ nixosLib.runTest (
testScript =
{ nodes, ... }:
''
import subprocess
from nixos_test_lib.nix_setup import setup_nix_in_nix # type: ignore[import-untyped]
setup_nix_in_nix(None) # No closure info for this test
def run_clan(cmd: list[str], **kwargs) -> str:
import subprocess
clan = "${clan-core.packages.${hostPkgs.system}.clan-cli}/bin/clan"
clan_args = ["--flake", "${config.clan.test.flakeForSandbox}"]
return subprocess.run(
["${hostPkgs.util-linux}/bin/unshare", "--user", "--map-user", "1000", "--map-group", "1000", clan, *cmd, *clan_args],
**kwargs,
check=True,
).stdout
start_all()
admin1.wait_for_unit("multi-user.target")
peer1.wait_for_unit("multi-user.target")
@@ -52,13 +60,7 @@ nixosLib.runTest (
# Check that the file is in the '0644' mode
assert "-rw-r--r--" in ls_out, f"File is not in the '0644' mode: {ls_out}"
# Run clan command
result = subprocess.run(
["${
clan-core.packages.${hostPkgs.system}.clan-cli
}/bin/clan", "machines", "list", "--flake", "${config.clan.test.flakeForSandbox}"],
check=True
)
run_clan(["machines", "list"])
'';
}
)

View File

@@ -1,298 +0,0 @@
# Integration test for `clan machines update` against a container node,
# covering three invocation styles: build+target on localhost (inside the
# container), --target-host only, and an explicit remote --build-host.
{ self, ... }:
{
  # Machine for update test
  clan.machines.test-update-machine = {
    imports = [
      self.nixosModules.test-update-machine
      # Import the configuration file that will be created/updated during the test
      ./test-update-machine/configuration.nix
    ];
  };

  flake.nixosModules.test-update-machine =
    { lib, modulesPath, ... }:
    {
      imports = [
        (modulesPath + "/testing/test-instrumentation.nix")
        (modulesPath + "/profiles/qemu-guest.nix")
        self.clanLib.test.minifyModule
        ../../lib/test/container-test-driver/nixos-module.nix
      ];

      # Apply patch to fix x-initrd.mount filesystem handling in switch-to-configuration-ng
      nixpkgs.overlays = [
        (_final: prev: {
          switch-to-configuration-ng = prev.switch-to-configuration-ng.overrideAttrs (old: {
            patches = (old.patches or [ ]) ++ [ ./switch-to-configuration-initrd-mount-fix.patch ];
          });
        })
      ];

      networking.hostName = "update-machine";
      # Marker asserted by the test before any update has run.
      environment.etc."install-successful".text = "ok";

      # Enable SSH and add authorized key for testing
      services.openssh.enable = true;
      services.openssh.settings.PasswordAuthentication = false;
      users.users.root.openssh.authorizedKeys.keys = [ (builtins.readFile ../assets/ssh/pubkey) ];
      services.openssh.knownHosts.localhost.publicKeyFile = ../assets/ssh/pubkey;
      services.openssh.hostKeys = [
        {
          path = ../assets/ssh/privkey;
          type = "ed25519";
        }
      ];
      security.sudo.wheelNeedsPassword = false;

      boot.consoleLogLevel = lib.mkForce 100;
      boot.kernelParams = [ "boot.shell_on_fail" ];
      boot.isContainer = true;
      nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";

      # Preserve the IP addresses assigned by the test framework
      # (based on virtualisation.vlans = [1] and node number 1)
      networking.interfaces.eth1 = {
        useDHCP = false;
        ipv4.addresses = [
          {
            address = "192.168.1.1";
            prefixLength = 24;
          }
        ];
        ipv6.addresses = [
          {
            address = "2001:db8:1::1";
            prefixLength = 64;
          }
        ];
      };

      # Define the mounts that exist in the container to prevent them from being stopped
      fileSystems = {
        "/" = {
          device = "/dev/disk/by-label/nixos";
          fsType = "ext4";
          options = [ "x-initrd.mount" ];
        };
        "/nix/.rw-store" = {
          device = "tmpfs";
          fsType = "tmpfs";
          options = [
            "mode=0755"
          ];
        };
        "/nix/store" = {
          device = "overlay";
          fsType = "overlay";
          options = [
            "lowerdir=/nix/.ro-store"
            "upperdir=/nix/.rw-store/upper"
            "workdir=/nix/.rw-store/work"
          ];
        };
      };
    };

  perSystem =
    {
      pkgs,
      ...
    }:
    {
      checks =
        pkgs.lib.optionalAttrs (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system == "x86_64-linux")
          {
            nixos-test-update =
              let
                # Everything the sandboxed test needs available offline.
                closureInfo = pkgs.closureInfo {
                  rootPaths = [
                    self.packages.${pkgs.system}.clan-cli
                    self.checks.${pkgs.system}.clan-core-for-checks
                    self.clanInternals.machines.${pkgs.hostPlatform.system}.test-update-machine.config.system.build.toplevel
                    pkgs.stdenv.drvPath
                    pkgs.bash.drvPath
                    pkgs.buildPackages.xorg.lndir
                  ]
                  ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
                };
              in
              self.clanLib.test.containerTest {
                name = "update";
                nodes.machine = {
                  imports = [ self.nixosModules.test-update-machine ];
                };
                extraPythonPackages = _p: [
                  self.legacyPackages.${pkgs.system}.nixosTestLib
                ];
                testScript = ''
                  import tempfile
                  import os
                  import subprocess
                  from nixos_test_lib.ssh import setup_ssh_connection # type: ignore[import-untyped]
                  from nixos_test_lib.nix_setup import prepare_test_flake # type: ignore[import-untyped]

                  start_all()
                  machine.wait_for_unit("multi-user.target")

                  # Verify initial state
                  machine.succeed("test -f /etc/install-successful")
                  machine.fail("test -f /etc/update-successful")

                  # Set up test environment
                  with tempfile.TemporaryDirectory() as temp_dir:
                      # Prepare test flake and Nix store
                      flake_dir = prepare_test_flake(
                          temp_dir,
                          "${self.checks.x86_64-linux.clan-core-for-checks}",
                          "${closureInfo}"
                      )
                      (flake_dir / ".clan-flake").write_text("") # Ensure .clan-flake exists

                      # Set up SSH connection
                      ssh_conn = setup_ssh_connection(
                          machine,
                          temp_dir,
                          "${../assets/ssh/privkey}"
                      )

                      # Update the machine configuration to add a new file
                      machine_config_path = os.path.join(flake_dir, "machines", "test-update-machine", "configuration.nix")
                      os.makedirs(os.path.dirname(machine_config_path), exist_ok=True)

                      # Note: update command doesn't accept -i flag, SSH key must be in ssh-agent
                      # Start ssh-agent and add the key
                      agent_output = subprocess.check_output(["${pkgs.openssh}/bin/ssh-agent", "-s"], text=True)
                      for line in agent_output.splitlines():
                          if line.startswith("SSH_AUTH_SOCK="):
                              os.environ["SSH_AUTH_SOCK"] = line.split("=", 1)[1].split(";")[0]
                          elif line.startswith("SSH_AGENT_PID="):
                              os.environ["SSH_AGENT_PID"] = line.split("=", 1)[1].split(";")[0]

                      # Add the SSH key to the agent
                      subprocess.run(["${pkgs.openssh}/bin/ssh-add", ssh_conn.ssh_key], check=True)

                      ##############
                      print("TEST: update with --build-host localhost --target-host localhost")
                      with open(machine_config_path, "w") as f:
                          f.write("""
                          {
                            environment.etc."update-build-local-successful".text = "ok";
                          }
                          """)

                      # rsync the flake into the container
                      os.environ["PATH"] = f"{os.environ['PATH']}:${pkgs.openssh}/bin"
                      subprocess.run(
                          [
                              "${pkgs.rsync}/bin/rsync",
                              "-a",
                              "--delete",
                              "-e",
                              "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no",
                              f"{str(flake_dir)}/",
                              f"root@192.168.1.1:/flake",
                          ],
                          check=True
                      )

                      # install the clan-cli package into the container's Nix store
                      subprocess.run(
                          [
                              "${pkgs.nix}/bin/nix",
                              "copy",
                              "--to",
                              "ssh://root@192.168.1.1",
                              "--no-check-sigs",
                              f"${self.packages.${pkgs.system}.clan-cli}",
                              "--extra-experimental-features", "nix-command flakes",
                              # Fix: os.environ["TMPDIR"] nested double quotes inside a
                              # double-quoted f-string, which is a SyntaxError before
                              # Python 3.12 (PEP 701); use single quotes instead.
                              "--from", f"{os.environ['TMPDIR']}/store"
                          ],
                          check=True,
                          env={
                              **os.environ,
                              "NIX_SSHOPTS": "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no",
                          },
                      )

                      # Run ssh on the host to run the clan update command via --build-host localhost
                      subprocess.run([
                          "ssh",
                          "-o", "UserKnownHostsFile=/dev/null",
                          "-o", "StrictHostKeyChecking=no",
                          f"root@192.168.1.1",
                          "${self.packages.${pkgs.system}.clan-cli}/bin/clan",
                          "machines",
                          "update",
                          "--debug",
                          "--flake", "/flake",
                          "--host-key-check", "none",
                          "--upload-inputs", # Use local store instead of fetching from network
                          "--build-host", "localhost",
                          "--target-host", "localhost",
                          "test-update-machine",
                      ], check=True)

                      # Verify the update was successful
                      machine.succeed("test -f /etc/update-build-local-successful")

                      ##############
                      print("TEST: update with --target-host")
                      with open(machine_config_path, "w") as f:
                          f.write("""
                          {
                            environment.etc."target-host-update-successful".text = "ok";
                          }
                          """)

                      # Run clan update command
                      subprocess.run([
                          "${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
                          "machines",
                          "update",
                          "--debug",
                          "--flake", flake_dir,
                          "--host-key-check", "none",
                          "--upload-inputs", # Use local store instead of fetching from network
                          "test-update-machine",
                          "--target-host", f"root@192.168.1.1:{ssh_conn.host_port}",
                      ], check=True)

                      # Verify the update was successful
                      machine.succeed("test -f /etc/target-host-update-successful")

                      ##############
                      print("TEST: update with --build-host")
                      # Update configuration again
                      with open(machine_config_path, "w") as f:
                          f.write("""
                          {
                            environment.etc."build-host-update-successful".text = "ok";
                          }
                          """)

                      # Run clan update command with --build-host
                      subprocess.run([
                          "${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
                          "machines",
                          "update",
                          "--debug",
                          "--flake", flake_dir,
                          "--host-key-check", "none",
                          "--upload-inputs", # Use local store instead of fetching from network
                          "--build-host", f"root@192.168.1.1:{ssh_conn.host_port}",
                          "test-update-machine",
                          "--target-host", f"root@192.168.1.1:{ssh_conn.host_port}",
                      ], check=True)

                      # Verify the second update was successful
                      machine.succeed("test -f /etc/build-host-update-successful")
                '';
              } { inherit pkgs self; };
          };
    };
}

View File

@@ -1,17 +0,0 @@
diff --git a/src/main.rs b/src/main.rs
index 8baf5924a7db..1234567890ab 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1295,6 +1295,12 @@ won't take effect until you reboot the system.
for (mountpoint, current_filesystem) in current_filesystems {
// Use current version of systemctl binary before daemon is reexeced.
+
+ // Skip filesystem comparison if x-initrd.mount is present in options
+ if current_filesystem.options.contains("x-initrd.mount") {
+ continue;
+ }
+
let unit = path_to_unit_name(&current_system_bin, &mountpoint);
if let Some(new_filesystem) = new_filesystems.get(&mountpoint) {
if current_filesystem.fs_type != new_filesystem.fs_type

View File

@@ -1,3 +0,0 @@
# Placeholder configuration for test-update-machine; the update test rewrites
# this file with real content on each test phase.
{
  # Initial empty configuration
}

View File

@@ -4,7 +4,7 @@ description = "Statically configure borgbackup with sane defaults."
!!! Danger "Deprecated"
Use [borgbackup](borgbackup.md) instead.
Don't use borgbackup-static through [inventory](../../concepts/inventory.md).
Don't use borgbackup-static through [inventory](../../guides/inventory.md).
This module implements the `borgbackup` backend and implements sane defaults
for backup management through `borgbackup` for members of the clan.

View File

@@ -112,124 +112,125 @@ in
'';
in
lib.mkIf (cfg.targets != { }) {
environment.systemPackages = [
(pkgs.writeShellScriptBin "localbackup-create" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.rsnapshot
pkgs.coreutils
pkgs.util-linux
]
}
${lib.concatMapStringsSep "\n" (target: ''
${mountHook target}
echo "Creating backup '${target.name}'"
environment.systemPackages =
[
(pkgs.writeShellScriptBin "localbackup-create" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.rsnapshot
pkgs.coreutils
pkgs.util-linux
]
}
${lib.concatMapStringsSep "\n" (target: ''
${mountHook target}
echo "Creating backup '${target.name}'"
${lib.optionalString (target.preBackupHook != null) ''
(
${target.preBackupHook}
)
''}
${lib.optionalString (target.preBackupHook != null) ''
(
${target.preBackupHook}
)
''}
declare -A preCommandErrors
${lib.concatMapStringsSep "\n" (
state:
lib.optionalString (state.preBackupCommand != null) ''
echo "Running pre-backup command for ${state.name}"
if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
preCommandErrors["${state.name}"]=1
fi
''
) (builtins.attrValues config.clan.core.state)}
declare -A preCommandErrors
${lib.concatMapStringsSep "\n" (
state:
lib.optionalString (state.preBackupCommand != null) ''
echo "Running pre-backup command for ${state.name}"
if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
preCommandErrors["${state.name}"]=1
fi
''
) (builtins.attrValues config.clan.core.state)}
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" sync
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" snapshot
'') (builtins.attrValues cfg.targets)}'')
(pkgs.writeShellScriptBin "localbackup-list" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.jq
pkgs.findutils
pkgs.coreutils
pkgs.util-linux
]
}
(${
lib.concatMapStringsSep "\n" (target: ''
(
${mountHook target}
find ${lib.escapeShellArg target.directory} -mindepth 1 -maxdepth 1 -name "snapshot.*" -print0 -type d \
| jq -Rs 'split("\u0000") | .[] | select(. != "") | { "name": ("${target.name}::" + .)}'
)
'') (builtins.attrValues cfg.targets)
}) | jq -s .
'')
(pkgs.writeShellScriptBin "localbackup-restore" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.rsync
pkgs.coreutils
pkgs.util-linux
pkgs.gawk
]
}
if [[ "''${NAME:-}" == "" ]]; then
echo "No backup name given via NAME environment variable"
exit 1
fi
if [[ "''${FOLDERS:-}" == "" ]]; then
echo "No folders given via FOLDERS environment variable"
exit 1
fi
name=$(awk -F'::' '{print $1}' <<< $NAME)
backupname=''${NAME#$name::}
if command -v localbackup-mount-$name; then
localbackup-mount-$name
fi
if command -v localbackup-unmount-$name; then
trap "localbackup-unmount-$name" EXIT
fi
if [[ ! -d $backupname ]]; then
echo "No backup found $backupname"
exit 1
fi
IFS=':' read -ra FOLDER <<< "''$FOLDERS"
for folder in "''${FOLDER[@]}"; do
mkdir -p "$folder"
rsync -a "$backupname/${config.networking.hostName}$folder/" "$folder"
done
'')
]
++ (lib.mapAttrsToList (
name: target:
pkgs.writeShellScriptBin ("localbackup-mount-" + name) ''
set -efu -o pipefail
${lib.optionalString (target.preMountHook != null) target.preMountHook}
${lib.optionalString (target.mountpoint != null) ''
if ! ${pkgs.util-linux}/bin/mountpoint -q ${lib.escapeShellArg target.mountpoint}; then
${pkgs.util-linux}/bin/mount -o X-mount.mkdir ${lib.escapeShellArg target.mountpoint}
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" sync
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" snapshot
'') (builtins.attrValues cfg.targets)}'')
(pkgs.writeShellScriptBin "localbackup-list" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.jq
pkgs.findutils
pkgs.coreutils
pkgs.util-linux
]
}
(${
lib.concatMapStringsSep "\n" (target: ''
(
${mountHook target}
find ${lib.escapeShellArg target.directory} -mindepth 1 -maxdepth 1 -name "snapshot.*" -print0 -type d \
| jq -Rs 'split("\u0000") | .[] | select(. != "") | { "name": ("${target.name}::" + .)}'
)
'') (builtins.attrValues cfg.targets)
}) | jq -s .
'')
(pkgs.writeShellScriptBin "localbackup-restore" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.rsync
pkgs.coreutils
pkgs.util-linux
pkgs.gawk
]
}
if [[ "''${NAME:-}" == "" ]]; then
echo "No backup name given via NAME environment variable"
exit 1
fi
''}
${lib.optionalString (target.postMountHook != null) target.postMountHook}
''
) cfg.targets)
++ lib.mapAttrsToList (
name: target:
pkgs.writeShellScriptBin ("localbackup-unmount-" + name) ''
set -efu -o pipefail
${lib.optionalString (target.preUnmountHook != null) target.preUnmountHook}
${lib.optionalString (
target.mountpoint != null
) "${pkgs.util-linux}/bin/umount ${lib.escapeShellArg target.mountpoint}"}
${lib.optionalString (target.postUnmountHook != null) target.postUnmountHook}
''
) cfg.targets;
if [[ "''${FOLDERS:-}" == "" ]]; then
echo "No folders given via FOLDERS environment variable"
exit 1
fi
name=$(awk -F'::' '{print $1}' <<< $NAME)
backupname=''${NAME#$name::}
if command -v localbackup-mount-$name; then
localbackup-mount-$name
fi
if command -v localbackup-unmount-$name; then
trap "localbackup-unmount-$name" EXIT
fi
if [[ ! -d $backupname ]]; then
echo "No backup found $backupname"
exit 1
fi
IFS=':' read -ra FOLDER <<< "''$FOLDERS"
for folder in "''${FOLDER[@]}"; do
mkdir -p "$folder"
rsync -a "$backupname/${config.networking.hostName}$folder/" "$folder"
done
'')
]
++ (lib.mapAttrsToList (
name: target:
pkgs.writeShellScriptBin ("localbackup-mount-" + name) ''
set -efu -o pipefail
${lib.optionalString (target.preMountHook != null) target.preMountHook}
${lib.optionalString (target.mountpoint != null) ''
if ! ${pkgs.util-linux}/bin/mountpoint -q ${lib.escapeShellArg target.mountpoint}; then
${pkgs.util-linux}/bin/mount -o X-mount.mkdir ${lib.escapeShellArg target.mountpoint}
fi
''}
${lib.optionalString (target.postMountHook != null) target.postMountHook}
''
) cfg.targets)
++ lib.mapAttrsToList (
name: target:
pkgs.writeShellScriptBin ("localbackup-unmount-" + name) ''
set -efu -o pipefail
${lib.optionalString (target.preUnmountHook != null) target.preUnmountHook}
${lib.optionalString (
target.mountpoint != null
) "${pkgs.util-linux}/bin/umount ${lib.escapeShellArg target.mountpoint}"}
${lib.optionalString (target.postUnmountHook != null) target.postUnmountHook}
''
) cfg.targets;
clan.core.backups.providers.localbackup = {
# TODO list needs to run locally or on the remote machine

View File

@@ -61,6 +61,7 @@ in
};
};
imports = [
../postgresql
(lib.mkRemovedOptionModule [
"clan"
"matrix-synapse"
@@ -105,56 +106,57 @@ in
};
};
clan.core.postgresql.enable = true;
clan.core.postgresql.users.matrix-synapse = { };
clan.core.postgresql.databases.matrix-synapse.create.options = {
clan.postgresql.users.matrix-synapse = { };
clan.postgresql.databases.matrix-synapse.create.options = {
TEMPLATE = "template0";
LC_COLLATE = "C";
LC_CTYPE = "C";
ENCODING = "UTF8";
OWNER = "matrix-synapse";
};
clan.core.postgresql.databases.matrix-synapse.restore.stopOnRestore = [ "matrix-synapse" ];
clan.postgresql.databases.matrix-synapse.restore.stopOnRestore = [ "matrix-synapse" ];
clan.core.vars.generators = {
"matrix-synapse" = {
files."synapse-registration_shared_secret" = { };
runtimeInputs = with pkgs; [
coreutils
pwgen
];
migrateFact = "matrix-synapse";
script = ''
echo -n "$(pwgen -s 32 1)" > "$out"/synapse-registration_shared_secret
'';
};
}
// lib.mapAttrs' (
name: user:
lib.nameValuePair "matrix-password-${user.name}" {
files."matrix-password-${user.name}" = { };
migrateFact = "matrix-password-${user.name}";
runtimeInputs = with pkgs; [ xkcdpass ];
script = ''
xkcdpass -n 4 -d - > "$out"/${lib.escapeShellArg "matrix-password-${user.name}"}
'';
clan.core.vars.generators =
{
"matrix-synapse" = {
files."synapse-registration_shared_secret" = { };
runtimeInputs = with pkgs; [
coreutils
pwgen
];
migrateFact = "matrix-synapse";
script = ''
echo -n "$(pwgen -s 32 1)" > "$out"/synapse-registration_shared_secret
'';
};
}
) cfg.users;
// lib.mapAttrs' (
name: user:
lib.nameValuePair "matrix-password-${user.name}" {
files."matrix-password-${user.name}" = { };
migrateFact = "matrix-password-${user.name}";
runtimeInputs = with pkgs; [ xkcdpass ];
script = ''
xkcdpass -n 4 -d - > "$out"/${lib.escapeShellArg "matrix-password-${user.name}"}
'';
}
) cfg.users;
systemd.services.matrix-synapse =
let
usersScript = ''
while ! ${pkgs.netcat}/bin/nc -z -v ::1 8008; do
if ! kill -0 "$MAINPID"; then exit 1; fi
sleep 1;
done
''
+ lib.concatMapStringsSep "\n" (user: ''
# only create user if it doesn't exist
/run/current-system/sw/bin/matrix-synapse-register_new_matrix_user --exists-ok --password-file ${
config.clan.core.vars.generators."matrix-password-${user.name}".files."matrix-password-${user.name}".path
} --user "${user.name}" ${if user.admin then "--admin" else "--no-admin"}
'') (lib.attrValues cfg.users);
usersScript =
''
while ! ${pkgs.netcat}/bin/nc -z -v ::1 8008; do
if ! kill -0 "$MAINPID"; then exit 1; fi
sleep 1;
done
''
+ lib.concatMapStringsSep "\n" (user: ''
# only create user if it doesn't exist
/run/current-system/sw/bin/matrix-synapse-register_new_matrix_user --exists-ok --password-file ${
config.clan.core.vars.generators."matrix-password-${user.name}".files."matrix-password-${user.name}".path
} --user "${user.name}" ${if user.admin then "--admin" else "--no-admin"}
'') (lib.attrValues cfg.users);
in
{
path = [ pkgs.curl ];

View File

@@ -1,9 +1,224 @@
{ lib, ... }:
{
imports = [
(lib.mkRemovedOptionModule [
"clan"
"postgresql"
] "The postgresql module has been migrated to a clan core option. Use clan.core.postgresql instead")
];
pkgs,
lib,
config,
...
}:
let
createDatabaseState =
db:
let
folder = "/var/backup/postgres/${db.name}";
current = "${folder}/pg-dump";
compression = lib.optionalString (lib.versionAtLeast config.services.postgresql.package.version "16") "--compress=zstd";
in
{
folders = [ folder ];
preBackupScript = ''
export PATH=${
lib.makeBinPath [
config.services.postgresql.package
config.systemd.package
pkgs.coreutils
pkgs.util-linux
pkgs.zstd
]
}
while [[ "$(systemctl is-active postgresql)" == activating ]]; do
sleep 1
done
mkdir -p "${folder}"
runuser -u postgres -- pg_dump ${compression} --dbname=${db.name} -Fc -c > "${current}.tmp"
mv "${current}.tmp" ${current}
'';
postRestoreScript = ''
export PATH=${
lib.makeBinPath [
config.services.postgresql.package
config.systemd.package
pkgs.coreutils
pkgs.util-linux
pkgs.zstd
pkgs.gnugrep
]
}
while [[ "$(systemctl is-active postgresql)" == activating ]]; do
sleep 1
done
echo "Waiting for postgres to be ready..."
while ! runuser -u postgres -- psql --port=${builtins.toString config.services.postgresql.settings.port} -d postgres -c "" ; do
if ! systemctl is-active postgresql; then exit 1; fi
sleep 0.1
done
if [[ -e "${current}" ]]; then
(
systemctl stop ${lib.concatStringsSep " " db.restore.stopOnRestore}
trap "systemctl start ${lib.concatStringsSep " " db.restore.stopOnRestore}" EXIT
mkdir -p "${folder}"
if runuser -u postgres -- psql -d postgres -c "SELECT 1 FROM pg_database WHERE datname = '${db.name}'" | grep -q 1; then
runuser -u postgres -- dropdb "${db.name}"
fi
runuser -u postgres -- pg_restore -C -d postgres "${current}"
)
else
echo No database backup found, skipping restore
fi
'';
};
createDatabase = db: ''
CREATE DATABASE "${db.name}" ${
lib.concatStringsSep " " (
lib.mapAttrsToList (name: value: "${name} = '${value}'") db.create.options
)
}
'';
cfg = config.clan.postgresql;
userClauses = lib.mapAttrsToList (
_: user:
''$PSQL -tAc "SELECT 1 FROM pg_roles WHERE rolname='${user.name}'" | grep -q 1 || $PSQL -tAc 'CREATE USER "${user.name}"' ''
) cfg.users;
databaseClauses = lib.mapAttrsToList (
name: db:
lib.optionalString db.create.enable ''$PSQL -d postgres -c "SELECT 1 FROM pg_database WHERE datname = '${name}'" | grep -q 1 || $PSQL -d postgres -c ${lib.escapeShellArg (createDatabase db)} ''
) cfg.databases;
in
{
options.clan.postgresql = {
# we are reimplemeting ensureDatabase and ensureUser options here to allow to create databases with options
databases = lib.mkOption {
description = "Databases to create";
default = { };
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
name = lib.mkOption {
type = lib.types.str;
default = name;
description = "Database name.";
};
service = lib.mkOption {
type = lib.types.str;
default = name;
description = "Service name that we associate with the database.";
};
# set to false, in case the upstream module uses ensureDatabase option
create.enable = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Create the database if it does not exist.";
};
create.options = lib.mkOption {
description = "Options to pass to the CREATE DATABASE command.";
type = lib.types.lazyAttrsOf lib.types.str;
default = { };
example = {
TEMPLATE = "template0";
LC_COLLATE = "C";
LC_CTYPE = "C";
ENCODING = "UTF8";
OWNER = "foo";
};
};
restore.stopOnRestore = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
description = "List of systemd services to stop before restoring the database.";
};
};
}
)
);
};
users = lib.mkOption {
description = "Users to create";
default = { };
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options.name = lib.mkOption {
description = "User name";
type = lib.types.str;
default = name;
};
}
)
);
};
};
config = {
services.postgresql.settings = {
wal_level = "replica";
max_wal_senders = 3;
};
services.postgresql.enable = true;
# We are duplicating a bit the upstream module but allow to create databases with options
systemd.services.postgresql.postStart = ''
PSQL="psql --port=${builtins.toString config.services.postgresql.settings.port}"
while ! $PSQL -d postgres -c "" 2> /dev/null; do
if ! kill -0 "$MAINPID"; then exit 1; fi
sleep 0.1
done
${lib.concatStringsSep "\n" userClauses}
${lib.concatStringsSep "\n" databaseClauses}
'';
clan.core.state = lib.mapAttrs' (
_: db: lib.nameValuePair db.service (createDatabaseState db)
) config.clan.postgresql.databases;
environment.systemPackages = builtins.map (
db:
let
folder = "/var/backup/postgres/${db.name}";
current = "${folder}/pg-dump";
in
pkgs.writeShellScriptBin "postgres-db-restore-command-${db.name}" ''
export PATH=${
lib.makeBinPath [
config.services.postgresql.package
config.systemd.package
pkgs.coreutils
pkgs.util-linux
pkgs.zstd
pkgs.gnugrep
]
}
while [[ "$(systemctl is-active postgresql)" == activating ]]; do
sleep 1
done
echo "Waiting for postgres to be ready..."
while ! runuser -u postgres -- psql --port=${builtins.toString config.services.postgresql.settings.port} -d postgres -c "" ; do
if ! systemctl is-active postgresql; then exit 1; fi
sleep 0.1
done
if [[ -e "${current}" ]]; then
(
${lib.optionalString (db.restore.stopOnRestore != [ ]) ''
systemctl stop ${builtins.toString db.restore.stopOnRestore}
trap "systemctl start ${builtins.toString db.restore.stopOnRestore}" EXIT
''}
mkdir -p "${folder}"
if runuser -u postgres -- psql -d postgres -c "SELECT 1 FROM pg_database WHERE datname = '${db.name}'" | grep -q 1; then
runuser -u postgres -- dropdb "${db.name}"
fi
runuser -u postgres -- pg_restore -C -d postgres "${current}"
)
else
echo No database backup found, skipping restore
fi
''
) (builtins.attrValues config.clan.postgresql.databases);
};
}

View File

@@ -12,7 +12,7 @@ After the system was installed/deployed the following command can be used to dis
clan vars get [machine_name] root-password/root-password
```
See also: [Vars](../../concepts/generators.md)
See also: [Vars](../../guides/vars-backend.md)
To regenerate the password run:
```

View File

@@ -18,12 +18,13 @@
config.clan.core.vars.generators.root-password.files.password-hash.path;
clan.core.vars.generators.root-password = {
files.password-hash = {
neededFor = "users";
}
// (lib.optionalAttrs (_class == "nixos") {
restartUnits = lib.optional (config.services.userborn.enable) "userborn.service";
});
files.password-hash =
{
neededFor = "users";
}
// (lib.optionalAttrs (_class == "nixos") {
restartUnits = lib.optional (config.services.userborn.enable) "userborn.service";
});
files.password = {
deploy = false;
};

View File

@@ -32,16 +32,17 @@ in
cfg.certificate.searchDomains != [ ]
) config.clan.core.vars.generators.openssh-cert.files."ssh.id_ed25519-cert.pub".path;
hostKeys = [
{
path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
type = "ed25519";
}
]
++ lib.optional cfg.hostKeys.rsa.enable {
path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
type = "rsa";
};
hostKeys =
[
{
path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
type = "ed25519";
}
]
++ lib.optional cfg.hostKeys.rsa.enable {
path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
type = "rsa";
};
};
clan.core.vars.generators.openssh = {
@@ -61,8 +62,7 @@ in
hostNames = [
"localhost"
config.networking.hostName
]
++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
] ++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
};

View File

@@ -1,27 +1,3 @@
---
description = "DEPRECATED: Statically configure syncthing peers through clan"
description = "Statically configure syncthing peers through clan"
---
# ⚠️ DEPRECATED
This module has been migrated to the new clanServices system.
Please use the new syncthing service instead:
```nix
{
services.syncthing = {
instances.default = {
roles.peer.machines = {
machine1 = { };
machine2 = { };
machine3 = {
excludeMachines = [ "machine4" ];
};
};
};
};
}
```
The new service provides the same functionality with better integration into clan's inventory system.

View File

@@ -4,8 +4,6 @@
pkgs,
...
}:
# DEPRECATED: This module has been migrated to clanServices/syncthing
# Please use the syncthing service instead: services.syncthing.instances.default.roles.peer.machines = { ... };
let
dir = config.clan.core.settings.directory;
machineVarDir = "${dir}/vars/per-machine/";
@@ -34,15 +32,14 @@ let
value = {
name = machine;
id = (lib.removeSuffix "\n" (builtins.readFile (syncthingPublicKeyPath machine)));
addresses = [
"dynamic"
]
++ (
if (lib.elem machine networkIpMachines) then
[ "tcp://[${(lib.removeSuffix "\n" (builtins.readFile (zerotierIpMachinePath machine)))}]:22000" ]
else
[ ]
);
addresses =
[ "dynamic" ]
++ (
if (lib.elem machine networkIpMachines) then
[ "tcp://[${(lib.removeSuffix "\n" (builtins.readFile (zerotierIpMachinePath machine)))}]:22000" ]
else
[ ]
);
};
}) syncthingPublicKeyMachines;
in

View File

@@ -1,40 +0,0 @@
---
description = "A secure, file synchronization app for devices over networks, offering a private alternative to cloud services."
features = [ "inventory" ]
[constraints]
roles.introducer.min = 1
roles.introducer.max = 1
---
**Warning**: This module was written with our VM integration in mind likely won't work outside of this context. They will be generalized in future.
## Usage
We recommend configuring this module as an sync-service through the provided options. Although it provides a Web GUI through which more usage scenarios are supported.
## Features
- **Private and Secure**: Syncthing uses TLS encryption to secure data transfer between devices, ensuring that only the intended devices can read your data.
- **Decentralized**: No central server is involved in the data transfer. Each device communicates directly with others.
- **Open Source**: The source code is openly available for audit and contribution, fostering trust and continuous improvement.
- **Cross-Platform**: Syncthing supports multiple platforms including Windows, macOS, Linux, BSD, and Android.
- **Real-time Synchronization**: Changes made to files are synchronized in real-time across all connected devices.
- **Web GUI**: It includes a user-friendly web interface for managing devices and configurations. (`127.0.0.1:8384`)
## Configuration
- **Share Folders**: Select folders to share with connected devices and configure permissions and synchronization parameters.
!!! info
Clan automatically discovers other devices. Automatic discovery requires one machine to be an [introducer](#clan.syncthing.introducer)
If that is not the case you can add the other device by its Device ID manually.
You can find and share Device IDs under the "Add Device" button in the Web GUI. (`127.0.0.1:8384`)
## Troubleshooting
- **Sync Conflicts**: Resolve synchronization conflicts manually by reviewing file versions and modification times in the Web GUI (`127.0.0.1:8384`).
## Support
- **Documentation**: Extensive documentation is available on the [Syncthing website](https://docs.syncthing.net/).

View File

@@ -1,6 +0,0 @@
# Dont import this file
# It is only here for backwards compatibility.
# Dont author new modules with this file.
{
imports = [ ./roles/peer.nix ];
}

View File

@@ -1,6 +0,0 @@
{ ... }:
{
imports = [
../shared.nix
];
}

View File

@@ -1,21 +0,0 @@
{ config, lib, ... }:
let
instanceNames = builtins.attrNames config.clan.inventory.services.syncthing;
instanceName = builtins.head instanceNames;
instance = config.clan.inventory.services.syncthing.${instanceName};
introducer = builtins.head instance.roles.introducer.machines;
introducerId = "${config.clan.core.settings.directory}/vars/per-machine/${introducer}/syncthing/id/value";
in
{
imports = [
../shared.nix
];
clan.syncthing.introducer = lib.strings.removeSuffix "\n" (
if builtins.pathExists introducerId then
builtins.readFile introducerId
else
throw "${introducerId} does not exists. Please run `clan vars generate ${introducer}` to generate the introducer device id"
);
}

View File

@@ -16,7 +16,7 @@ After the system was installed/deployed the following command can be used to dis
clan vars get [machine_name] root-password/root-password
```
See also: [Vars](../../concepts/generators.md)
See also: [Vars](../../guides/vars-backend.md)
To regenerate the password run:
```

View File

@@ -10,6 +10,7 @@ in
{
imports = [
../postgresql
(lib.mkRemovedOptionModule [
"clan"
"vaultwarden"
@@ -56,17 +57,15 @@ in
config = {
clan.core.postgresql.enable = true;
clan.core.postgresql.users.vaultwarden = { };
clan.core.postgresql.databases.vaultwarden.create.options = {
clan.postgresql.users.vaultwarden = { };
clan.postgresql.databases.vaultwarden.create.options = {
TEMPLATE = "template0";
LC_COLLATE = "C";
LC_CTYPE = "C";
ENCODING = "UTF8";
OWNER = "vaultwarden";
};
clan.core.postgresql.databases.vaultwarden.restore.stopOnRestore = [ "vaultwarden" ];
clan.postgresql.databases.vaultwarden.restore.stopOnRestore = [ "vaultwarden" ];
services.nginx = {
enable = true;

View File

@@ -41,13 +41,25 @@
};
};
};
};
# We don't have a good way to specify dependencies between
# clanServices for now. When it get's implemtende, we should just
# use the ssh and users modules here.
imports = [
./ssh.nix
./root-password.nix
];
perInstance =
{ settings, ... }:
{
nixosModule =
{ ... }:
{
imports = [
# We don't have a good way to specify dependencies between
# clanServices for now. When it get's implemtende, we should just
# use the ssh and users modules here.
./ssh.nix
./root-password.nix
];
_module.args = { inherit settings; };
users.users.root.openssh.authorizedKeys.keys = builtins.attrValues settings.allowedKeys;
};
};
};
}

View File

@@ -1,55 +1,39 @@
# We don't have a way of specifying dependencies between clanServices for now.
# When it get's added this file should be removed and the users module used instead.
{
roles.default.perInstance =
{ ... }:
{
nixosModule =
{
config,
pkgs,
...
}:
{
config,
pkgs,
...
}:
{
users.mutableUsers = false;
users.users.root.hashedPasswordFile =
config.clan.core.vars.generators.root-password.files.password-hash.path;
users.mutableUsers = false;
users.users.root.hashedPasswordFile =
config.clan.core.vars.generators.root-password.files.password-hash.path;
clan.core.vars.generators.root-password = {
files.password-hash.neededFor = "users";
clan.core.vars.generators.root-password = {
files.password-hash.neededFor = "users";
files.password.deploy = false;
files.password.deploy = false;
runtimeInputs = [
pkgs.coreutils
pkgs.mkpasswd
pkgs.xkcdpass
];
runtimeInputs = [
pkgs.coreutils
pkgs.mkpasswd
pkgs.xkcdpass
];
prompts.password.display = {
group = "Root User";
label = "Password";
required = false;
helperText = ''
Your password will be encrypted and stored securely using the secret store you've configured.
'';
};
prompts.password.type = "hidden";
prompts.password.persist = true;
prompts.password.description = "You can autogenerate a password, if you leave this prompt blank.";
prompts.password.type = "hidden";
prompts.password.persist = true;
prompts.password.description = "Leave empty to generate automatically";
script = ''
prompt_value="$(cat "$prompts"/password)"
if [[ -n "''${prompt_value-}" ]]; then
echo "$prompt_value" | tr -d "\n" > "$out"/password
else
xkcdpass --numwords 5 --delimiter - --count 1 | tr -d "\n" > "$out"/password
fi
mkpasswd -s -m sha-512 < "$out"/password | tr -d "\n" > "$out"/password-hash
'';
};
};
};
script = ''
prompt_value="$(cat "$prompts"/password)"
if [[ -n "''${prompt_value-}" ]]; then
echo "$prompt_value" | tr -d "\n" > "$out"/password
else
xkcdpass --numwords 5 --delimiter - --count 1 | tr -d "\n" > "$out"/password
fi
mkpasswd -s -m sha-512 < "$out"/password | tr -d "\n" > "$out"/password-hash
'';
};
}

View File

@@ -1,124 +1,115 @@
{
roles.default.perInstance =
{ settings, ... }:
{
nixosModule =
config,
pkgs,
lib,
settings,
...
}:
let
stringSet = list: builtins.attrNames (builtins.groupBy lib.id list);
domains = stringSet settings.certificateSearchDomains;
in
{
services.openssh = {
enable = true;
settings.PasswordAuthentication = false;
settings.HostCertificate = lib.mkIf (
settings.certificateSearchDomains != [ ]
) config.clan.core.vars.generators.openssh-cert.files."ssh.id_ed25519-cert.pub".path;
hostKeys =
[
{
config,
pkgs,
lib,
...
}:
let
stringSet = list: builtins.attrNames (builtins.groupBy lib.id list);
path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
type = "ed25519";
}
]
++ lib.optional settings.rsaHostKey.enable {
path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
type = "rsa";
};
};
domains = stringSet settings.certificateSearchDomains;
clan.core.vars.generators.openssh = {
files."ssh.id_ed25519" = { };
files."ssh.id_ed25519.pub".secret = false;
migrateFact = "openssh";
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/ssh.id_ed25519
'';
};
in
{
programs.ssh.knownHosts.clan-sshd-self-ed25519 = {
hostNames = [
"localhost"
config.networking.hostName
] ++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
};
users.users.root.openssh.authorizedKeys.keys = builtins.attrValues settings.allowedKeys;
clan.core.vars.generators.openssh-rsa = lib.mkIf settings.rsaHostKey.enable {
files."ssh.id_rsa" = { };
files."ssh.id_rsa.pub".secret = false;
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
];
script = ''
ssh-keygen -t rsa -b 4096 -N "" -C "" -f "$out"/ssh.id_rsa
'';
};
services.openssh = {
enable = true;
settings.PasswordAuthentication = false;
settings.HostCertificate = lib.mkIf (
settings.certificateSearchDomains != [ ]
) config.clan.core.vars.generators.openssh-cert.files."ssh.id_ed25519-cert.pub".path;
hostKeys = [
{
path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
type = "ed25519";
}
]
++ lib.optional settings.rsaHostKey.enable {
path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
type = "rsa";
};
};
clan.core.vars.generators.openssh = {
files."ssh.id_ed25519" = { };
files."ssh.id_ed25519.pub".secret = false;
migrateFact = "openssh";
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/ssh.id_ed25519
'';
};
programs.ssh.knownHosts.clan-sshd-self-ed25519 = {
hostNames = [
"localhost"
config.networking.hostName
]
++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
};
clan.core.vars.generators.openssh-rsa = lib.mkIf settings.rsaHostKey.enable {
files."ssh.id_rsa" = { };
files."ssh.id_rsa.pub".secret = false;
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
];
script = ''
ssh-keygen -t rsa -b 4096 -N "" -C "" -f "$out"/ssh.id_rsa
'';
};
clan.core.vars.generators.openssh-cert = lib.mkIf (settings.certificateSearchDomains != [ ]) {
files."ssh.id_ed25519-cert.pub".secret = false;
dependencies = [
"openssh"
"openssh-ca"
];
validation = {
name = config.clan.core.settings.machine.name;
domains = lib.genAttrs settings.certificateSearchDomains lib.id;
};
runtimeInputs = [
pkgs.openssh
pkgs.jq
];
script = ''
ssh-keygen \
-s $in/openssh-ca/id_ed25519 \
-I ${config.clan.core.settings.machine.name} \
-h \
-n ${lib.concatMapStringsSep "," (d: "${config.clan.core.settings.machine.name}.${d}") domains} \
$in/openssh/ssh.id_ed25519.pub
mv $in/openssh/ssh.id_ed25519-cert.pub "$out"/ssh.id_ed25519-cert.pub
'';
};
clan.core.vars.generators.openssh-ca = lib.mkIf (settings.certificateSearchDomains != [ ]) {
share = true;
files.id_ed25519.deploy = false;
files."id_ed25519.pub" = {
deploy = false;
secret = false;
};
runtimeInputs = [
pkgs.openssh
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/id_ed25519
'';
};
programs.ssh.knownHosts.ssh-ca = lib.mkIf (settings.certificateSearchDomains != [ ]) {
certAuthority = true;
extraHostNames = builtins.map (domain: "*.${domain}") settings.certificateSearchDomains;
publicKey = config.clan.core.vars.generators.openssh-ca.files."id_ed25519.pub".value;
};
};
clan.core.vars.generators.openssh-cert = lib.mkIf (settings.certificateSearchDomains != [ ]) {
files."ssh.id_ed25519-cert.pub".secret = false;
dependencies = [
"openssh"
"openssh-ca"
];
validation = {
name = config.clan.core.settings.machine.name;
domains = lib.genAttrs settings.certificateSearchDomains lib.id;
};
runtimeInputs = [
pkgs.openssh
pkgs.jq
];
script = ''
ssh-keygen \
-s $in/openssh-ca/id_ed25519 \
-I ${config.clan.core.settings.machine.name} \
-h \
-n ${lib.concatMapStringsSep "," (d: "${config.clan.core.settings.machine.name}.${d}") domains} \
$in/openssh/ssh.id_ed25519.pub
mv $in/openssh/ssh.id_ed25519-cert.pub "$out"/ssh.id_ed25519-cert.pub
'';
};
clan.core.vars.generators.openssh-ca = lib.mkIf (settings.certificateSearchDomains != [ ]) {
share = true;
files.id_ed25519.deploy = false;
files."id_ed25519.pub" = {
deploy = false;
secret = false;
};
runtimeInputs = [
pkgs.openssh
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/id_ed25519
'';
};
programs.ssh.knownHosts.ssh-ca = lib.mkIf (settings.certificateSearchDomains != [ ]) {
certAuthority = true;
extraHostNames = builtins.map (domain: "*.${domain}") settings.certificateSearchDomains;
publicKey = config.clan.core.vars.generators.openssh-ca.files."id_ed25519.pub".value;
};
}

View File

@@ -1,59 +1,9 @@
## Usage
BorgBackup (short: Borg) gives you:
```nix
inventory.instances = {
borgbackup = {
module = {
name = "borgbackup";
input = "clan";
};
roles.client.machines."jon".settings = {
destinations."storagebox" = {
repo = "username@$hostname:/./borgbackup";
rsh = ''ssh -oPort=23 -i /run/secrets/vars/borgbackup/borgbackup.ssh'';
};
};
roles.server.machines = { };
};
};
```
The input should be named according to your flake input. Jon is configured as a
client machine with a destination pointing to a Hetzner Storage Box.
## Overview
This guide explains how to set up and manage
[BorgBackup](https://borgbackup.readthedocs.io/) for secure, efficient backups
in a clan network. BorgBackup provides:
- Space efficient storage of backups with deduplication
- Secure, authenticated encryption
- Compression: lz4, zstd, zlib, lzma or none
- Mountable backups with FUSE
- Space efficient storage of backups.
- Secure, authenticated encryption.
- Compression: lz4, zstd, zlib, lzma or none.
- Mountable backups with FUSE.
- Easy installation on multiple platforms: Linux, macOS, BSD, …
- Free software (BSD license).
- Backed by a large and active open-source community.
## Roles
### 1. Client
Clients are machines that create and send backups to various destinations. Each
client can have multiple backup destinations configured.
### 2. Server
Servers act as backup repositories, receiving and storing backups from client
machines. They can be dedicated backup servers within your clan network.
## Backup destinations
This service allows you to perform backups to multiple `destinations`.
Destinations can be:
- **Local**: Local disk storage
- **Server**: Your own borgbackup server (using the `server` role)
- **Third-party services**: Such as Hetzner's Storage Box
For a more comprehensive guide on backups look into the guide section.

View File

@@ -0,0 +1,29 @@
{
lib,
config,
settings,
...
}:
{
services.data-mesher.initNetwork =
let
# for a given machine, read it's public key and remove any new lines
readHostKey =
machine:
let
path = "${config.clan.core.settings.directory}/vars/per-machine/${machine}/data-mesher-host-key/public_key/value";
in
builtins.elemAt (lib.splitString "\n" (builtins.readFile path)) 1;
in
{
enable = true;
keyPath = config.clan.core.vars.generators.data-mesher-network-key.files.private_key.path;
tld = settings.network.tld;
hostTTL = settings.network.hostTTL;
# admin and signer host public keys
signingKeys = builtins.map readHostKey (builtins.attrNames settings.bootstrapNodes);
};
}

View File

@@ -5,15 +5,31 @@ let
{
options = {
bootstrapNodes = lib.mkOption {
type = lib.types.nullOr (lib.types.listOf lib.types.str);
type = lib.types.nullOr (lib.types.attrsOf lib.types.str);
# the default bootstrap nodes are any machines with the admin or signers role
# we iterate through those machines, determining an IP address for them based on their VPN
# currently only supports zerotier
# default = builtins.foldl' (
# urls: name:
# let
# ipPath = "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value";
# in
# if builtins.pathExists ipPath then
# let
# ip = builtins.readFile ipPath;
# in
# urls ++ [ "[${ip}]:${builtins.toString settings.network.port}" ]
# else
# urls
# ) [ ] (dmLib.machines config).bootstrap;
description = ''
A list of bootstrap nodes that act as an initial gateway when joining
the cluster.
'';
example = [
"192.168.1.1:7946"
"192.168.1.2:7946"
];
example = {
"node1" = "192.168.1.1:7946";
"node2" = "192.168.1.2:7946";
};
};
network = {
@@ -39,59 +55,6 @@ let
};
};
};
mkBootstrapNodes =
{
config,
lib,
roles,
settings,
}:
lib.mkDefault (
builtins.foldl' (
urls: name:
let
ipPath = "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value";
in
if builtins.pathExists ipPath then
let
ip = builtins.readFile ipPath;
in
urls ++ [ "[${ip}]:${builtins.toString settings.network.port}" ]
else
urls
) [ ] (builtins.attrNames ((roles.admin.machines or { }) // (roles.signer.machines or { })))
);
mkDmService = dmSettings: config: {
enable = true;
openFirewall = true;
settings = {
log_level = "warn";
state_dir = "/var/lib/data-mesher";
# read network id from vars
network.id = config.clan.core.vars.generators.data-mesher-network-key.files.public_key.value;
host = {
names = [ config.networking.hostName ];
key_path = config.clan.core.vars.generators.data-mesher-host-key.files.private_key.path;
};
cluster = {
port = dmSettings.network.port;
join_interval = "30s";
push_pull_interval = "30s";
interface = dmSettings.network.interface;
bootstrap_nodes = dmSettings.bootstrapNodes;
};
http.port = 7331;
http.interface = "lo";
};
};
in
{
_class = "clan.service";
@@ -104,9 +67,11 @@ in
interface =
{ lib, ... }:
{
imports = [ sharedInterface ];
options = {
network = {
tld = lib.mkOption {
type = lib.types.str;
@@ -124,117 +89,54 @@ in
};
};
perInstance =
{ settings, roles, ... }:
{
extendSettings,
roles,
lib,
...
}:
{
nixosModule =
{ config, ... }:
let
settings = extendSettings {
bootstrapNodes = mkBootstrapNodes {
inherit
config
lib
roles
settings
;
};
};
in
{
imports = [ ./shared.nix ];
services.data-mesher = (mkDmService settings config) // {
initNetwork =
let
# for a given machine, read its public key and remove any new lines
readHostKey =
machine:
let
path = "${config.clan.core.settings.directory}/vars/per-machine/${machine}/data-mesher-host-key/public_key/value";
in
builtins.elemAt (lib.splitString "\n" (builtins.readFile path)) 1;
in
{
enable = true;
keyPath = config.clan.core.vars.generators.data-mesher-network-key.files.private_key.path;
tld = settings.network.tld;
hostTTL = settings.network.hostTTL;
# admin and signer host public keys
signingKeys = builtins.map readHostKey (
builtins.attrNames ((roles.admin.machines or { }) // (roles.signer.machines or { }))
);
};
};
};
nixosModule = {
imports = [
./admin.nix
./shared.nix
];
_module.args = { inherit settings roles; };
};
};
};
roles.signer = {
interface = sharedInterface;
interface =
{ ... }:
{
imports = [ sharedInterface ];
};
perInstance =
{ settings, roles, ... }:
{
extendSettings,
lib,
roles,
...
}:
{
nixosModule =
{ config, ... }:
let
settings = extendSettings {
bootstrapNodes = mkBootstrapNodes {
inherit
config
lib
roles
settings
;
};
};
in
{
imports = [ ./shared.nix ];
services.data-mesher = (mkDmService settings config);
};
nixosModule = {
imports = [
./signer.nix
./shared.nix
];
_module.args = { inherit settings roles; };
};
};
};
roles.peer = {
interface = sharedInterface;
interface =
{ ... }:
{
imports = [ sharedInterface ];
};
perInstance =
{ settings, roles, ... }:
{
extendSettings,
lib,
roles,
...
}:
{
nixosModule =
{ config, ... }:
let
settings = extendSettings {
bootstrapNodes = mkBootstrapNodes {
inherit
config
lib
roles
settings
;
};
};
in
{
imports = [ ./shared.nix ];
services.data-mesher = (mkDmService settings config);
};
nixosModule = {
imports = [
./peer.nix
./shared.nix
];
_module.args = { inherit settings roles; };
};
};
};
}

View File

@@ -1,9 +1,39 @@
{
config,
settings,
...
}:
{
services.data-mesher = {
enable = true;
openFirewall = true;
settings = {
log_level = "warn";
state_dir = "/var/lib/data-mesher";
# read network id from vars
network.id = config.clan.core.vars.generators.data-mesher-network-key.files.public_key.value;
host = {
names = [ config.networking.hostName ];
key_path = config.clan.core.vars.generators.data-mesher-host-key.files.private_key.path;
};
cluster = {
port = settings.network.port;
join_interval = "30s";
push_pull_interval = "30s";
interface = settings.network.interface;
bootstrap_nodes = (builtins.attrValues settings.bootstrapNodes);
};
http.port = 7331;
http.interface = "lo";
};
};
# Generate host key.
clan.core.vars.generators.data-mesher-host-key = {
files =

View File

@@ -16,11 +16,11 @@
instances = {
data-mesher =
let
bootstrapNodes = [
"[2001:db8:1::1]:7946" # admin
"[2001:db8:1::2]:7946" # peer
# "2001:db8:1::3:7946" #signer
];
bootstrapNodes = {
admin = "[2001:db8:1::1]:7946";
peer = "[2001:db8:1::2]:7946";
# signer = "2001:db8:1::3:7946";
};
in
{
roles.peer.machines.peer.settings = {

View File

@@ -1,86 +0,0 @@
A Dynamic-DNS (DDNS) service continuously keeps one or more DNS records in sync with the current public IP address of your machine.
In *clan* this service is backed by [qdm12/ddns-updater](https://github.com/qdm12/ddns-updater).
> Info
> ddns-updater itself is **heavily opinionated and version-specific**. Whenever you need the exhaustive list of flags or
> provider-specific fields, refer to its *versioned* documentation, **not** the GitHub README.
---
# 1. Configuration model
Internally ddns-updater consumes a single file named `config.json`.
A minimal configuration for the registrar *Namecheap* looks like:
```json
{
"settings": [
{
"provider": "namecheap",
"domain": "sub.example.com",
"password": "e5322165c1d74692bfa6d807100c0310"
}
]
}
```
Another example for *Porkbun*:
```json
{
"settings": [
{
"provider": "porkbun",
"domain": "domain.com",
"api_key": "sk1_…",
"secret_api_key": "pk1_…",
"ip_version": "ipv4",
"ipv6_suffix": ""
}
]
}
```
When you write a `clan.nix` the **common** fields (`provider`, `domain`, `period`, …) are already exposed as typed
*Nix options*.
Registrar-specific or very new keys can be passed through an open attribute set called **extraSettings**.
---
# 2. Full Porkbun example
Manage three records `@`, `home` and `test` of the domain
`jon.blog` and refresh them every 15 minutes:
```nix title="clan.nix" hl_lines="10-11"
inventory.instances = {
dyndns = {
roles.default.machines."jon" = { };
roles.default.settings = {
period = 15; # minutes
settings = {
"all-jon-blog" = {
provider = "porkbun";
domain = "jon.blog";
# (1) tell the secret-manager which key we are going to store
secret_field_name = "secret_api_key";
# everything below is copied verbatim into config.json
extraSettings = {
host = "@,home,test"; # (2) comma-separated list of sub-domains
ip_version = "ipv4";
ipv6_suffix = "";
api_key = "pk1_4bb2b231275a02fdc23b7e6f3552s01S213S"; # (3) public safe to commit
};
};
};
};
};
};
```
1. `secret_field_name` tells the *vars-generator* to store the entered secret under the specified JSON field name in the configuration.
2. ddns-updater allows multiple hosts by separating them with a comma.
3. The `api_key` above is *public*; the corresponding **private key** is retrieved through `secret_field_name`.

View File

@@ -1,277 +0,0 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "clan-core/dyndns";
manifest.description = "A dynamic DNS service to update domain IPs";
manifest.categories = [ "Network" ];
manifest.readme = builtins.readFile ./README.md;
roles.default = {
interface =
{ lib, ... }:
{
options = {
server = {
enable = lib.mkEnableOption "dyndns webserver";
domain = lib.mkOption {
type = lib.types.str;
description = "Domain to serve the webservice on";
};
port = lib.mkOption {
type = lib.types.int;
default = 54805;
description = "Port to listen on";
};
acmeEmail = lib.mkOption {
type = lib.types.str;
description = ''
Email address for account creation and correspondence from the CA.
It is recommended to use the same email for all certs to avoid account
creation limits.
'';
};
};
period = lib.mkOption {
type = lib.types.int;
default = 5;
description = "Domain update period in minutes";
};
settings = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule (
{ ... }:
{
options = {
provider = lib.mkOption {
example = "namecheap";
type = lib.types.str;
description = "The dyndns provider to use";
};
domain = lib.mkOption {
type = lib.types.str;
example = "example.com";
description = "The top level domain to update.";
};
secret_field_name = lib.mkOption {
example = "api_key";
type = lib.types.enum [
"password"
"token"
"api_key"
"secret_api_key"
];
default = "password";
description = "The field name for the secret";
};
extraSettings = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = { };
description = ''
Extra settings for the provider.
Provider specific settings: https://github.com/qdm12/ddns-updater#configuration
'';
};
};
}
)
);
default = { };
description = "Configuration for which domains to update";
};
};
};
perInstance =
{ settings, ... }:
{
nixosModule =
{
config,
lib,
pkgs,
...
}:
let
name = "dyndns";
cfg = settings;
# We dedup secrets if they have the same provider + base domain
secret_id = opt: "${name}-${opt.provider}-${opt.domain}";
secret_path =
opt: config.clan.core.vars.generators."${secret_id opt}".files."${secret_id opt}".path;
# We check that a secret has not been set in extraSettings.
extraSettingsSafe =
opt:
if (builtins.hasAttr opt.secret_field_name opt.extraSettings) then
throw "Please do not set ${opt.secret_field_name} in extraSettings, it is automatically set by the dyndns module."
else
opt.extraSettings;
service_config = {
settings = builtins.catAttrs "value" (
builtins.attrValues (
lib.mapAttrs (_: opt: {
value =
(extraSettingsSafe opt)
// {
domain = opt.domain;
provider = opt.provider;
}
// {
"${opt.secret_field_name}" = secret_id opt;
};
}) cfg.settings
)
);
};
secret_generator = _: opt: {
name = secret_id opt;
value = {
share = true;
migrateFact = "${secret_id opt}";
prompts.${secret_id opt} = {
type = "hidden";
persist = true;
};
};
};
in
{
imports = lib.optional cfg.server.enable (
lib.modules.importApply ./nginx.nix {
inherit config;
inherit settings;
inherit lib;
}
);
clan.core.vars.generators = lib.mkIf (cfg.settings != { }) (
lib.mapAttrs' secret_generator cfg.settings
);
users.groups.${name} = lib.mkIf (cfg.settings != { }) { };
users.users.${name} = lib.mkIf (cfg.settings != { }) {
group = name;
isSystemUser = true;
description = "User for ${name} service";
home = "/var/lib/${name}";
createHome = true;
};
services.nginx = lib.mkIf cfg.server.enable {
virtualHosts = {
"${cfg.server.domain}" = {
forceSSL = true;
enableACME = true;
locations."/" = {
proxyPass = "http://localhost:${toString cfg.server.port}";
};
};
};
};
systemd.services.${name} = lib.mkIf (cfg.settings != { }) {
path = [ ];
description = "Dynamic DNS updater";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
environment = {
MYCONFIG = "${builtins.toJSON service_config}";
SERVER_ENABLED = if cfg.server.enable then "yes" else "no";
PERIOD = "${toString cfg.period}m";
LISTENING_ADDRESS = ":${toString cfg.server.port}";
GODEBUG = "netdns=go"; # We need to set this untill this has been merged. https://github.com/NixOS/nixpkgs/pull/432758
};
serviceConfig =
let
pyscript =
pkgs.writers.writePython3Bin "generate_secret_config.py"
{
libraries = [ ];
doCheck = false;
}
''
import json
from pathlib import Path
import os
cred_dir = Path(os.getenv("CREDENTIALS_DIRECTORY"))
config_str = os.getenv("MYCONFIG")
def get_credential(name):
secret_p = cred_dir / name
with open(secret_p, 'r') as f:
return f.read().strip()
config = json.loads(config_str)
print(f"Config: {config}")
for attrset in config["settings"]:
if "password" in attrset:
attrset['password'] = get_credential(attrset['password'])
elif "token" in attrset:
attrset['token'] = get_credential(attrset['token'])
elif "secret_api_key" in attrset:
attrset['secret_api_key'] = get_credential(attrset['secret_api_key'])
elif "api_key" in attrset:
attrset['api_key'] = get_credential(attrset['api_key'])
else:
raise ValueError(f"Missing secret field in {attrset}")
# create directory data if it does not exist
data_dir = Path('data')
data_dir.mkdir(mode=0o770, exist_ok=True)
# Create a temporary config file
# with appropriate permissions
tmp_config_path = data_dir / '.config.json'
tmp_config_path.touch(mode=0o660, exist_ok=False)
# Write the config with secrets back
with open(tmp_config_path, 'w') as f:
f.write(json.dumps(config, indent=4))
# Move config into place
config_path = data_dir / 'config.json'
tmp_config_path.rename(config_path)
# Set file permissions to read
# and write only by the user and group
for file in data_dir.iterdir():
file.chmod(0o660)
'';
in
{
ExecStartPre = lib.getExe pyscript;
ExecStart = lib.getExe pkgs.ddns-updater;
LoadCredential = lib.mapAttrsToList (_: opt: "${secret_id opt}:${secret_path opt}") cfg.settings;
User = name;
Group = name;
NoNewPrivileges = true;
PrivateTmp = true;
ProtectSystem = "strict";
ReadOnlyPaths = "/";
PrivateDevices = "yes";
ProtectKernelModules = "yes";
ProtectKernelTunables = "yes";
WorkingDirectory = "/var/lib/${name}";
ReadWritePaths = [
"/proc/self"
"/var/lib/${name}"
];
Restart = "always";
RestartSec = 60;
};
};
};
};
};
}

View File

@@ -1,19 +0,0 @@
# Flake-parts glue: registers the dyndns clan service and wires up its VM test.
{ lib, ... }:
let
  # The service definition lives in ./default.nix; importApply evaluates the
  # function there with an empty argument set and returns the module.
  dyndnsModule = lib.modules.importApply ./default.nix { };
in
{
  # Expose the service under `clan.modules` so it can be referenced by name.
  clan.modules.dyndns = dyndnsModule;
  perSystem = _: {
    # Register the VM test and make the module available to it under the
    # "@clan/dyndns" key.
    clan.nixosTests.dyndns = {
      imports = [ ./tests/vm/default.nix ];
      clan.modules."@clan/dyndns" = dyndnsModule;
    };
  };
}

View File

@@ -1,50 +0,0 @@
# Reverse-proxy / TLS configuration for the dyndns web server role.
# Imported by the service's perInstance module when `settings.server.enable`
# is set; `settings` is passed in via importApply.
{
  config,
  lib,
  settings,
  ...
}:
{
  # ACME (Let's Encrypt) account setup used for certificate issuance.
  security.acme.acceptTerms = true;
  security.acme.defaults.email = settings.server.acmeEmail;
  # Open HTTP/HTTPS so ACME challenges and proxied traffic can reach nginx.
  networking.firewall.allowedTCPPorts = [
    443
    80
  ];
  services.nginx = {
    enable = true;
    # mkDefault keeps each of these overridable by other modules.
    statusPage = lib.mkDefault true;
    recommendedBrotliSettings = lib.mkDefault true;
    recommendedGzipSettings = lib.mkDefault true;
    recommendedOptimisation = lib.mkDefault true;
    recommendedProxySettings = lib.mkDefault true;
    recommendedTlsSettings = lib.mkDefault true;
    # Nginx sends all the access logs to /var/log/nginx/access.log by default.
    # instead of going to the journal!
    commonHttpConfig = "access_log syslog:server=unix:/dev/log;";
    resolver.addresses =
      let
        # Heuristic IPv6 detection: an address containing at least two colons.
        isIPv6 = addr: builtins.match ".*:.*:.*" addr != null;
        # nginx requires IPv6 resolver addresses to be wrapped in brackets.
        escapeIPv6 = addr: if isIPv6 addr then "[${addr}]" else addr;
        cloudflare = [
          "1.1.1.1"
          "2606:4700:4700::1111"
        ];
        # Fall back to Cloudflare resolvers when the host has none configured.
        resolvers =
          if config.networking.nameservers == [ ] then cloudflare else config.networking.nameservers;
      in
      map escapeIPv6 resolvers;
    sslDhparam = config.security.dhparams.params.nginx.path;
  };
  # Generate the Diffie-Hellman parameters referenced by sslDhparam above.
  security.dhparams = {
    enable = true;
    params.nginx = { };
  };
}

View File

@@ -1,77 +0,0 @@
# NixOS VM test for the dyndns service: a single "server" machine with the
# web-server role enabled and one namecheap provider entry configured.
{
  pkgs,
  ...
}:
{
  name = "service-dyndns";
  clan = {
    directory = ./.;
    inventory = {
      machines.server = { };
      instances = {
        dyndns-test = {
          module.name = "@clan/dyndns";
          module.input = "self";
          roles.default.machines."server".settings = {
            # Enable the nginx-fronted web server for the test domain.
            server = {
              enable = true;
              domain = "test.example.com";
              port = 54805;
              acmeEmail = "test@example.com";
            };
            # Update period in minutes.
            period = 1;
            settings = {
              "test.example.com" = {
                provider = "namecheap";
                domain = "example.com";
                # The generated secret is stored under the "password" field.
                secret_field_name = "password";
                extraSettings = {
                  host = "test";
                  server = "dynamicdns.park-your-domain.com";
                };
              };
            };
          };
        };
      };
    };
  };
  nodes = {
    server = {
      # Disable firewall for testing
      networking.firewall.enable = false;
      # Mock ACME for testing (avoid real certificate requests)
      security.acme.defaults.server = "https://localhost:14000/dir";
    };
  };
  testScript = ''
    start_all()
    # Test that dyndns service starts (will fail without secrets, but that's expected)
    server.wait_for_unit("multi-user.target")
    # Test that nginx service is running
    server.wait_for_unit("nginx.service")
    # Test that nginx is listening on expected ports
    server.wait_for_open_port(80)
    server.wait_for_open_port(443)
    # Test that the dyndns user was created
    # server.succeed("getent passwd dyndns")
    # server.succeed("getent group dyndns")
    #
    # Test that the home directory was created
    server.succeed("test -d /var/lib/dyndns")
    # Test that nginx configuration includes our domain
    server.succeed("${pkgs.nginx}/bin/nginx -t")
    print("All tests passed!")
  '';
}

View File

@@ -1,9 +1,3 @@
# Example clan service. See https://docs.clan.lol/guides/services/community/
# for more details
# The test for this module in ./tests/vm/default.nix shows an example of how
# the service is used.
{ packages }:
{ ... }:
{
@@ -11,94 +5,30 @@
manifest.name = "clan-core/hello-word";
manifest.description = "This is a test";
# This service provides two roles: "morning" and "evening". Roles can be
# defined in this file directly (e.g. the "morning" role) or split up into a
# separate file (e.g. the "evening" role)
roles.morning = {
roles.peer = {
interface =
{ lib, ... }:
{
# Here we define the settings for this role. They will be accessible
# via `roles.morning.settings` in the role
options.greeting = lib.mkOption {
options.foo = lib.mkOption {
type = lib.types.str;
default = "Good morning";
description = "The greeting to use";
};
};
# Maps over all instances and produces one result per instance.
perInstance =
{
# Role settings for this machine/instance
settings,
# The name of this instance of the service
instanceName,
# The current machine
machine,
# All roles of this service, with their assigned machines
roles,
...
}:
{
# Analog to 'perSystem' of flake-parts.
# For every instance of this service we will add a nixosModule to a morning-machine
nixosModule =
{ config, ... }:
{
# Interaction examples what you could do here:
# - Get some settings of this machine
# settings.ipRanges
#
# - Get all evening names:
# allEveningNames = lib.attrNames roles.evening.machines
#
# - Get all roles of the machine:
# machine.roles
#
# - Get the settings that where applied to a specific evening machine:
# roles.evening.machines.peer1.settings
imports = [ ];
environment.etc.hello.text = "${settings.greeting} World!";
};
};
};
# The implementation of the evening role is in a separate file. We have kept
# the interface here, so we can see all settings of the service in one place,
# but you can also move it to the respective file.
roles.evening = {
interface =
{ lib, ... }:
{
options.greeting = lib.mkOption {
type = lib.types.str;
default = "Good evening";
description = "The greeting to use";
# default = "";
description = "Some option";
};
};
};
imports = [ ./evening.nix ];
# This part gets applied to all machines, regardless of their role.
perMachine =
{ machine, ... }:
{
nixosModule =
{ pkgs, ... }:
{
environment.systemPackages = [
(pkgs.writeShellScriptBin "greet-world" ''
#!${pkgs.bash}/bin/bash
set -euo pipefail
cat /etc/hello
echo " I'm ${machine.name}"
'')
];
nixosModule = {
clan.core.vars.generators.hello = {
files.hello = {
secret = false;
};
script = ''
echo "Hello world from ${machine.name}" > $out/hello
'';
};
};
};
}

View File

@@ -1,12 +0,0 @@
# Implementation of the hello-service "evening" role. The role's interface
# (the `greeting` option) is declared alongside the service definition.
{
  roles.evening.perInstance =
    # `settings` carries the per-machine role settings for this instance.
    { settings, ... }:
    let
      # Write the configured greeting to /etc/hello on the target machine.
      greetingModule = {
        environment.etc.hello.text = "${settings.greeting} World!";
      };
    in
    {
      nixosModule = _: greetingModule;
    };
}

View File

@@ -27,10 +27,20 @@ let
module.name = "hello-world";
module.input = "self";
roles.evening.machines.jon = { };
roles.peer.machines.jon = { };
};
};
};
# NOTE:
# If you wonder why 'self-zerotier-redux':
# A local module has prefix 'self', otherwise it is the name of the 'input'
# The rest is the name of the service as in the instance 'module.name';
#
# -> ${module.input}-${module.name}
# In this case it is 'self-zerotier-redux'
# This is usually only used internally, but we can use it to test the evaluation of service module in isolation
# evaluatedService =
# testFlake.clanInternals.inventoryClass.distributedServices.importedModulesEvaluated.self-zerotier-redux.config;
in
{
test_simple = {

View File

@@ -5,35 +5,22 @@
directory = ./.;
inventory = {
machines.peer1 = { };
machines.peer2 = { };
instances."test" = {
module.name = "hello-service";
module.input = "self";
# Assign the roles to the two machines
roles.morning.machines.peer1 = { };
roles.evening.machines.peer2 = {
# Set roles settings for the peers, where we want to differ from
# the role defaults
settings = {
greeting = "Good night";
};
};
roles.peer.machines.peer1 = { };
};
};
};
testScript =
{ ... }:
{ nodes, ... }:
''
start_all()
value = peer1.succeed("greet-world")
assert value.strip() == "Good morning World! I'm peer1", value
value = peer2.succeed("greet-world")
assert value.strip() == "Good night World! I'm peer2", value
# peer1 should have the 'hello' file
value = peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.hello.files.hello.path}")
assert value.strip() == "Hello world from peer1", value
'';
}

View File

@@ -1,35 +0,0 @@
## Features
- Creates incremental snapshots using rsnapshot
- Supports multiple backup targets
- Mount/unmount hooks for external storage
- Pre/post backup hooks for custom scripts
- Configurable snapshot retention
- Automatic state folder detection
## Usage
Enable the localbackup service and configure backup targets:
```nix
instances = {
localbackup = {
module.name = "@clan/localbackup";
module.input = "self";
roles.default.machines."machine".settings = {
targets.external= {
directory = "/mnt/backup";
mountpoint = "/mnt/backup";
};
};
};
};
```
## Commands
The service provides these commands:
- `localbackup-create`: Create a new backup
- `localbackup-list`: List available backups
- `localbackup-restore`: Restore from backup (requires NAME and FOLDERS environment variables)

View File

@@ -1,267 +0,0 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "localbackup";
manifest.description = "Automatically backups current machine to local directory.";
manifest.categories = [ "System" ];
manifest.readme = builtins.readFile ./README.md;
roles.default = {
interface =
{ lib, ... }:
{
options = {
targets = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
name = lib.mkOption {
type = lib.types.strMatching "^[a-zA-Z0-9._-]+$";
default = name;
description = "the name of the backup job";
};
directory = lib.mkOption {
type = lib.types.str;
description = "the directory to backup";
};
mountpoint = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "mountpoint of the directory to backup. If set, the directory will be mounted before the backup and unmounted afterwards";
};
preMountHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run before the directory is mounted";
};
postMountHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run after the directory is mounted";
};
preUnmountHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run before the directory is unmounted";
};
postUnmountHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run after the directory is unmounted";
};
preBackupHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run before the backup";
};
postBackupHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run after the backup";
};
};
}
)
);
# default = { };
description = "List of directories where backups are stored";
};
snapshots = lib.mkOption {
type = lib.types.int;
default = 20;
description = "Number of snapshots to keep";
};
};
};
perInstance =
{
settings,
...
}:
{
nixosModule =
{
config,
lib,
pkgs,
...
}:
let
mountHook = target: ''
if [[ -x /run/current-system/sw/bin/localbackup-mount-${target.name} ]]; then
/run/current-system/sw/bin/localbackup-mount-${target.name}
fi
if [[ -x /run/current-system/sw/bin/localbackup-unmount-${target.name} ]]; then
trap "/run/current-system/sw/bin/localbackup-unmount-${target.name}" EXIT
fi
'';
uniqueFolders = lib.unique (
lib.flatten (lib.mapAttrsToList (_name: state: state.folders) config.clan.core.state)
);
rsnapshotConfig = target: ''
config_version 1.2
snapshot_root ${target.directory}
sync_first 1
cmd_cp ${pkgs.coreutils}/bin/cp
cmd_rm ${pkgs.coreutils}/bin/rm
cmd_rsync ${pkgs.rsync}/bin/rsync
cmd_ssh ${pkgs.openssh}/bin/ssh
cmd_logger ${pkgs.inetutils}/bin/logger
cmd_du ${pkgs.coreutils}/bin/du
cmd_rsnapshot_diff ${pkgs.rsnapshot}/bin/rsnapshot-diff
${lib.optionalString (target.postBackupHook != null) ''
cmd_postexec ${pkgs.writeShellScript "postexec.sh" ''
set -efu -o pipefail
${target.postBackupHook}
''}
''}
retain snapshot ${builtins.toString settings.snapshots}
${lib.concatMapStringsSep "\n" (folder: ''
backup ${folder} ${config.networking.hostName}/
'') uniqueFolders}
'';
in
{
environment.systemPackages = [
(pkgs.writeShellScriptBin "localbackup-create" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.rsnapshot
pkgs.coreutils
pkgs.util-linux
]
}
${lib.concatMapStringsSep "\n" (target: ''
${mountHook target}
echo "Creating backup '${target.name}'"
${lib.optionalString (target.preBackupHook != null) ''
(
${target.preBackupHook}
)
''}
declare -A preCommandErrors
${lib.concatMapStringsSep "\n" (
state:
lib.optionalString (state.preBackupCommand != null) ''
echo "Running pre-backup command for ${state.name}"
if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
preCommandErrors["${state.name}"]=1
fi
''
) (builtins.attrValues config.clan.core.state)}
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" sync
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" snapshot
'') (builtins.attrValues settings.targets)}'')
(pkgs.writeShellScriptBin "localbackup-list" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.jq
pkgs.findutils
pkgs.coreutils
pkgs.util-linux
]
}
(${
lib.concatMapStringsSep "\n" (target: ''
(
${mountHook target}
find ${lib.escapeShellArg target.directory} -mindepth 1 -maxdepth 1 -name "snapshot.*" -print0 -type d \
| jq -Rs 'split("\u0000") | .[] | select(. != "") | { "name": ("${target.name}::" + .)}'
)
'') (builtins.attrValues settings.targets)
}) | jq -s .
'')
(pkgs.writeShellScriptBin "localbackup-restore" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.rsync
pkgs.coreutils
pkgs.util-linux
pkgs.gawk
]
}
if [[ "''${NAME:-}" == "" ]]; then
echo "No backup name given via NAME environment variable"
exit 1
fi
if [[ "''${FOLDERS:-}" == "" ]]; then
echo "No folders given via FOLDERS environment variable"
exit 1
fi
name=$(awk -F'::' '{print $1}' <<< $NAME)
backupname=''${NAME#$name::}
if command -v localbackup-mount-$name; then
localbackup-mount-$name
fi
if command -v localbackup-unmount-$name; then
trap "localbackup-unmount-$name" EXIT
fi
if [[ ! -d $backupname ]]; then
echo "No backup found $backupname"
exit 1
fi
IFS=':' read -ra FOLDER <<< "''$FOLDERS"
for folder in "''${FOLDER[@]}"; do
mkdir -p "$folder"
rsync -a "$backupname/${config.networking.hostName}$folder/" "$folder"
done
'')
]
++ (lib.mapAttrsToList (
name: target:
pkgs.writeShellScriptBin ("localbackup-mount-" + name) ''
set -efu -o pipefail
${lib.optionalString (target.preMountHook != null) target.preMountHook}
${lib.optionalString (target.mountpoint != null) ''
if ! ${pkgs.util-linux}/bin/mountpoint -q ${lib.escapeShellArg target.mountpoint}; then
${pkgs.util-linux}/bin/mount -o X-mount.mkdir ${lib.escapeShellArg target.mountpoint}
fi
''}
${lib.optionalString (target.postMountHook != null) target.postMountHook}
''
) settings.targets)
++ lib.mapAttrsToList (
name: target:
pkgs.writeShellScriptBin ("localbackup-unmount-" + name) ''
set -efu -o pipefail
${lib.optionalString (target.preUnmountHook != null) target.preUnmountHook}
${lib.optionalString (
target.mountpoint != null
) "${pkgs.util-linux}/bin/umount ${lib.escapeShellArg target.mountpoint}"}
${lib.optionalString (target.postUnmountHook != null) target.postUnmountHook}
''
) settings.targets;
clan.core.backups.providers.localbackup = {
# TODO list needs to run locally or on the remote machine
list = "localbackup-list";
create = "localbackup-create";
restore = "localbackup-restore";
};
};
};
};
}

View File

@@ -1,16 +0,0 @@
# Flake-parts glue: registers the localbackup clan service and its VM test.
{ lib, ... }:
let
  # Evaluate the service definition in ./default.nix with an empty argument set.
  localbackupModule = lib.modules.importApply ./default.nix { };
in
{
  # Expose the service under `clan.modules` so it can be referenced by name.
  clan.modules = {
    localbackup = localbackupModule;
  };
  perSystem = _: {
    # Register the VM test and hand it the module under "@clan/localbackup".
    clan.nixosTests.localbackup = {
      imports = [ ./tests/vm/default.nix ];
      clan.modules."@clan/localbackup" = localbackupModule;
    };
  };
}

View File

@@ -1,62 +0,0 @@
# NixOS VM test for the localbackup service: one machine backs up the
# declared state folder /var/test-backups to a local target directory,
# then restores it and verifies the contents.
{ ... }:
{
  name = "service-localbackup";
  clan = {
    directory = ./.;
    # Run as a container-based test rather than a full VM.
    test.useContainers = true;
    inventory = {
      machines.machine = { };
      instances = {
        localbackup = {
          module.name = "@clan/localbackup";
          module.input = "self";
          roles.default.machines."machine".settings = {
            targets.hdd = {
              directory = "/mnt/external-disk";
              # Hooks leave marker files in /run so their execution is observable.
              preMountHook = ''
                touch /run/mount-external-disk
              '';
              postUnmountHook = ''
                touch /run/unmount-external-disk
              '';
            };
          };
        };
      };
    };
  };
  nodes.machine = {
    # Declare a state folder so localbackup includes it in the backup set.
    clan.core.state.test-backups.folders = [ "/var/test-backups" ];
  };
  testScript = ''
    import json
    start_all()
    machine.systemctl("start network-online.target")
    machine.wait_for_unit("network-online.target")
    # dummy data
    machine.succeed("mkdir -p /var/test-backups")
    machine.succeed("echo testing > /var/test-backups/somefile")
    # create
    machine.succeed("localbackup-create >&2")
    machine.wait_until_succeeds("! systemctl is-active localbackup-job-serverone >&2")
    # list
    snapshot_list = machine.succeed("localbackup-list").strip()
    assert json.loads(snapshot_list)[0]["name"].strip() == "hdd::/mnt/external-disk/snapshot.0"
    # borgbackup restore
    machine.succeed("rm -f /var/test-backups/somefile")
    machine.succeed("NAME=/mnt/external-disk/snapshot.0 FOLDERS=/var/test-backups /run/current-system/sw/bin/localbackup-restore >&2")
    assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
  '';
}

View File

@@ -184,24 +184,24 @@
settings.certificate.searchDomains != [ ]
) config.clan.core.vars.generators.openssh-cert.files."ssh.id_ed25519-cert.pub".path;
hostKeys = [
{
path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
type = "ed25519";
}
]
++ lib.optional settings.hostKeys.rsa.enable {
path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
type = "rsa";
};
hostKeys =
[
{
path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
type = "ed25519";
}
]
++ lib.optional settings.hostKeys.rsa.enable {
path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
type = "rsa";
};
};
programs.ssh.knownHosts.clan-sshd-self-ed25519 = {
hostNames = [
"localhost"
config.networking.hostName
]
++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
] ++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
};
};

View File

@@ -1,20 +1,36 @@
---
description = "A secure, file synchronization app for devices over networks, offering a private alternative to cloud services."
---
**Warning**: This module was written with our VM integration in mind and likely won't work outside of that context. It will be generalized in a future release.
## Usage
```nix
{
instances.syncthing = {
roles.peer.tags.all = { };
roles.peer.settings.folders = {
documents = {
path = "~/syncthing/documents";
};
};
};
}
```
We recommend configuring this module as a sync service through the provided options. It also exposes a Web GUI through which additional usage scenarios are supported.
Now the folder `~/syncthing/documents` will be shared with all your machines.
## Features
- **Private and Secure**: Syncthing uses TLS encryption to secure data transfer between devices, ensuring that only the intended devices can read your data.
- **Decentralized**: No central server is involved in the data transfer. Each device communicates directly with others.
- **Open Source**: The source code is openly available for audit and contribution, fostering trust and continuous improvement.
- **Cross-Platform**: Syncthing supports multiple platforms including Windows, macOS, Linux, BSD, and Android.
- **Real-time Synchronization**: Changes made to files are synchronized in real-time across all connected devices.
- **Web GUI**: It includes a user-friendly web interface for managing devices and configurations. (`127.0.0.1:8384`)
## Documentation
Extensive documentation is available on the [Syncthing](https://docs.syncthing.net/) website.
## Configuration
- **Share Folders**: Select folders to share with connected devices and configure permissions and synchronization parameters.
!!! info
Clan automatically discovers other devices. Automatic discovery requires one machine to be an [introducer](#roles.introducer)
If that is not the case you can add the other device by its Device ID manually.
You can find and share Device IDs under the "Add Device" button in the Web GUI. (`127.0.0.1:8384`)
## Troubleshooting
- **Sync Conflicts**: Resolve synchronization conflicts manually by reviewing file versions and modification times in the Web GUI (`127.0.0.1:8384`).
## Support
- **Documentation**: Extensive documentation is available on the [Syncthing website](https://docs.syncthing.net/).

View File

@@ -1,243 +1,174 @@
{ ... }:
{
# lib,
# config,
# pkgs,
...
}:
{
_class = "clan.service";
manifest.name = "clan-core/syncthing";
manifest.description = "Syncthing is a continuous file synchronization program with automatic peer discovery";
manifest.categories = [
"Utility"
"System"
"Network"
];
manifest.readme = builtins.readFile ./README.md;
manifest.description = "A secure, file synchronization app for devices over networks";
roles.peer = {
roles.introducer = {
interface =
{ lib, ... }:
{
options.openDefaultPorts = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Whether to open the default syncthing ports in the firewall.
'';
};
options = {
options.folders = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule {
options = {
path = lib.mkOption {
type = lib.types.str;
description = "Path to the folder to sync";
};
devices = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
description = "List of device names to share this folder with. Empty list means all peers and extraDevices.";
};
ignorePerms = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Ignore permission changes";
};
rescanIntervalS = lib.mkOption {
type = lib.types.int;
default = 3600;
description = "Rescan interval in seconds";
};
type = lib.mkOption {
type = lib.types.enum [
"sendreceive"
"sendonly"
"receiveonly"
];
default = "sendreceive";
description = "Folder type";
};
versioning = lib.mkOption {
type = lib.types.nullOr (
lib.types.submodule {
options = {
type = lib.mkOption {
type = lib.types.enum [
"external"
"simple"
"staggered"
"trashcan"
];
description = "Versioning type";
};
params = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = { };
description = "Versioning parameters";
};
};
}
);
default = null;
description = "Versioning configuration";
};
};
}
);
default = { };
description = "Folders to synchronize between all peers";
};
options.extraDevices = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule {
options = {
id = lib.mkOption {
type = lib.types.str;
description = "Device ID of the external syncthing device";
example = "P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2";
};
addresses = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ "dynamic" ];
description = "List of addresses for the device";
example = [
"dynamic"
"tcp://192.168.1.100:22000"
];
};
name = lib.mkOption {
type = lib.types.str;
default = "";
description = "Human readable name for the device";
};
};
}
);
default = { };
description = "External syncthing devices not managed by clan (e.g., mobile phones)";
example = lib.literalExpression ''
{
phone = {
id = "P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2";
name = "My Phone";
addresses = [ "dynamic" ];
};
}
'';
# TODO is this option even needed or used anywhere?
id = lib.mkOption {
description = ''
The ID of the machine.
It is generated automatically by default.
'';
type = lib.types.nullOr lib.types.str;
example = "BABNJY4-G2ICDLF-QQEG7DD-N3OBNGF-BCCOFK6-MV3K7QJ-2WUZHXS-7DTW4AS";
# default = config.clan.core.vars.generators.syncthing.files."id".value;
defaultText = "config.clan.core.vars.generators.syncthing.files.\"id\".value";
};
introducer = lib.mkOption {
description = ''
The introducer for the machine.
'';
type = lib.types.nullOr lib.types.str;
default = null;
};
autoAcceptDevices = lib.mkOption {
description = ''
Auto accept incoming device requests.
Should only be used on the introducer.
'';
type = lib.types.bool;
default = false;
};
autoShares = lib.mkOption {
description = ''
Auto share the following Folders by their ID's with introduced devices.
Should only be used on the introducer.
'';
type = lib.types.listOf lib.types.str;
default = [ ];
example = [
"folder1"
"folder2"
];
};
};
};
perInstance =
{
settings,
# instanceName,
roles,
settings,
...
}:
{
nixosModule =
{
config,
lib,
...
}:
let
allPeerMachines = lib.attrNames roles.peer.machines;
readMachineVar =
machine: varPath: default:
let
fullPath = "${config.clan.core.settings.directory}/vars/per-machine/${machine}/${varPath}";
in
if builtins.pathExists fullPath then
lib.removeSuffix "\n" (builtins.readFile fullPath)
else
default;
peerDevices = lib.listToAttrs (
lib.forEach allPeerMachines (machine: {
name = machine;
value = {
name = machine;
id = readMachineVar machine "syncthing/id/value" "";
addresses = [
"dynamic"
]
++
lib.optional (readMachineVar machine "zerotier/zerotier-ip/value" null != null)
"tcp://[${readMachineVar machine "zerotier/zerotier-ip/value" ""}]:22000";
};
})
);
extraDevicesConfig = lib.mapAttrs (deviceName: deviceConfig: {
inherit (deviceConfig) id addresses;
name = if deviceConfig.name != "" then deviceConfig.name else deviceName;
}) settings.extraDevices;
allDevices = peerDevices // extraDevicesConfig;
validDevices = lib.filterAttrs (_: device: device.id != "") allDevices;
syncthingFolders = lib.mapAttrs (
_folderName: folderConfig:
let
targetDevices =
if folderConfig.devices == [ ] then lib.attrNames validDevices else folderConfig.devices;
in
folderConfig
// {
devices = targetDevices;
}
) settings.folders;
in
{
_module.args = {
inherit settings roles;
introducerID = null;
};
imports = [
./vars.nix
];
config = lib.mkMerge [
{
services.syncthing = {
enable = true;
configDir = "/var/lib/syncthing";
group = "syncthing";
key = lib.mkDefault config.clan.core.vars.generators.syncthing.files.key.path or null;
cert = lib.mkDefault config.clan.core.vars.generators.syncthing.files.cert.path or null;
settings = {
devices = validDevices;
folders = syncthingFolders;
};
};
}
# Conditionally open firewall ports
(lib.mkIf settings.openDefaultPorts {
services.syncthing.openDefaultPorts = true;
# Syncthing ports: 8384 for remote access to GUI
# 22000 TCP and/or UDP for sync traffic
# 21027/UDP for discovery
# source: https://docs.syncthing.net/users/firewall.html
networking.firewall.interfaces."zt+".allowedTCPPorts = [
8384
22000
];
networking.firewall.interfaces."zt+".allowedUDPPorts = [
22000
21027
];
})
./shared.nix
];
};
};
};
perMachine = _: {
nixosModule =
roles.peer = {
interface =
{ lib, ... }:
{
# Activates inotify compatibility on syncthing
# use mkOverride 900 here as it otherwise would collide with the default of the
# upstream nixos xserver.nix
boot.kernel.sysctl."fs.inotify.max_user_watches" = lib.mkOverride 900 524288;
options = {
# TODO is this option even needed or used anywhere?
id = lib.mkOption {
description = ''
The ID of the machine.
It is generated automatically by default.
'';
type = lib.types.nullOr lib.types.str;
example = "BABNJY4-G2ICDLF-QQEG7DD-N3OBNGF-BCCOFK6-MV3K7QJ-2WUZHXS-7DTW4AS";
# default = config.clan.core.vars.generators.syncthing.files."id".value;
defaultText = "config.clan.core.vars.generators.syncthing.files.\"id\".value";
};
introducer = lib.mkOption {
description = ''
The introducer for the machine.
'';
type = lib.types.nullOr lib.types.str;
default = null;
};
autoAcceptDevices = lib.mkOption {
description = ''
Auto accept incoming device requests.
Should only be used on the introducer.
'';
type = lib.types.bool;
default = false;
};
autoShares = lib.mkOption {
description = ''
Auto share the following Folders by their ID's with introduced devices.
Should only be used on the introducer.
'';
type = lib.types.listOf lib.types.str;
default = [ ];
example = [
"folder1"
"folder2"
];
};
};
};
perInstance =
{
# instanceName,
roles,
settings,
...
}:
{
nixosModule =
{
lib,
config,
...
}:
{
_module.args =
let
introducer = builtins.head (lib.attrNames roles.introducer.machines);
introducerIDPath =
if settings.introducer == null then
"${config.clan.core.settings.directory}/vars/per-machine/${introducer}/syncthing/id/value"
else
"${config.clan.core.settings.directory}/vars/per-machine/${settings.introducer}/syncthing/id/value";
introducerID = lib.strings.removeSuffix "\n" (
if builtins.pathExists introducerIDPath then
builtins.readFile introducerIDPath
else
throw "${introducerIDPath} does not exists. Please run `clan vars generate ${introducer}` to generate the introducer device id"
);
in
{
inherit settings roles introducerID;
};
imports = [
./shared.nix
];
};
};
};
}

View File

@@ -1,7 +1,6 @@
{
self,
lib,
inputs,
...
}:
let
@@ -10,45 +9,15 @@ let
};
in
{
clan.modules = {
syncthing = module;
};
clan.modules.syncthing = module;
perSystem =
let
unit-test-module = (
self.clanLib.test.flakeModules.makeEvalChecks {
inherit module;
inherit inputs;
fileset = lib.fileset.unions [
# The zerotier service being tested
../../clanServices/syncthing
# Required modules
../../nixosModules/clanCore
# Dependencies like clan-cli
../../pkgs/clan-cli
];
testName = "syncthing";
tests = ./tests/eval-tests.nix;
testArgs = { };
}
);
in
{ ... }:
{
imports = [
unit-test-module
];
/**
1. Prepare the test vars
nix run .#generate-test-vars -- clanServices/syncthing/tests/vm syncthing-service
2. To run the test
nix build .#checks.x86_64-linux.syncthing-service
*/
clan.nixosTests.syncthing-service = {
clan.nixosTests.syncthing = {
imports = [ ./tests/vm/default.nix ];
clan.modules.syncthing-service = module;
clan.modules."@clan/syncthing" = module;
};
};
}

View File

@@ -2,49 +2,11 @@
config,
pkgs,
lib,
settings,
introducerID,
...
}:
{
options.clan.syncthing = {
id = lib.mkOption {
description = ''
The ID of the machine.
It is generated automatically by default.
'';
type = lib.types.nullOr lib.types.str;
example = "BABNJY4-G2ICDLF-QQEG7DD-N3OBNGF-BCCOFK6-MV3K7QJ-2WUZHXS-7DTW4AS";
default = config.clan.core.vars.generators.syncthing.files."id".value;
defaultText = "config.clan.core.vars.generators.syncthing.files.\"id\".value";
};
introducer = lib.mkOption {
description = ''
The introducer for the machine.
'';
type = lib.types.nullOr lib.types.str;
default = null;
};
autoAcceptDevices = lib.mkOption {
description = ''
Auto accept incoming device requests.
Should only be used on the introducer.
'';
type = lib.types.bool;
default = false;
};
autoShares = lib.mkOption {
description = ''
Auto share the following Folders by their ID's with introduced devices.
Should only be used on the introducer.
'';
type = lib.types.listOf lib.types.str;
default = [ ];
example = [
"folder1"
"folder2"
];
};
};
imports = [
{
# Syncthing ports: 8384 for remote access to GUI
@@ -65,7 +27,7 @@
{
assertion = lib.all (
attr: builtins.hasAttr attr config.services.syncthing.settings.folders
) config.clan.syncthing.autoShares;
) settings.autoShares;
message = ''
Syncthing: If you want to AutoShare a folder, you need to have it configured on the sharing device.
'';
@@ -80,12 +42,8 @@
services.syncthing = {
enable = true;
overrideFolders = lib.mkDefault (
if (config.clan.syncthing.introducer == null) then true else false
);
overrideDevices = lib.mkDefault (
if (config.clan.syncthing.introducer == null) then true else false
);
overrideFolders = lib.mkDefault (if (introducerID == null) then true else false);
overrideDevices = lib.mkDefault (if (introducerID == null) then true else false);
key = lib.mkDefault config.clan.core.vars.generators.syncthing.files."key".path or null;
cert = lib.mkDefault config.clan.core.vars.generators.syncthing.files."cert".path or null;
@@ -98,13 +56,13 @@
devices =
{ }
// (
if (config.clan.syncthing.introducer == null) then
if (introducerID == null) then
{ }
else
{
"${config.clan.syncthing.introducer}" = {
"${introducerID}" = {
name = "introducer";
id = config.clan.syncthing.introducer;
id = introducerID;
introducer = true;
autoAcceptFolders = true;
};
@@ -112,6 +70,7 @@
);
};
};
systemd.services.syncthing-auto-accept =
let
baseAddress = "127.0.0.1:8384";
@@ -120,7 +79,7 @@
SharedFolderById = "/rest/config/folders/";
apiKey = config.clan.core.vars.generators.syncthing.files."apikey".path;
in
lib.mkIf config.clan.syncthing.autoAcceptDevices {
lib.mkIf settings.autoAcceptDevices {
description = "Syncthing auto accept devices";
requisite = [ "syncthing.service" ];
after = [ "syncthing.service" ];
@@ -138,7 +97,7 @@
${lib.getExe pkgs.curl} -X POST -d "{\"deviceId\": $ID}" -H "Content-Type: application/json" -H "X-API-Key: $APIKEY" ${baseAddress}${postNewDevice}
# get all shared folders by their ID
for folder in ${builtins.toString config.clan.syncthing.autoShares}; do
for folder in ${builtins.toString settings.autoShares}; do
SHARED_IDS=$(${lib.getExe pkgs.curl} -X GET -H "X-API-Key: $APIKEY" ${baseAddress}${SharedFolderById}"$folder" | ${lib.getExe pkgs.jq} ."devices")
PATCHED_IDS=$(echo $SHARED_IDS | ${lib.getExe pkgs.jq} ".+= [{\"deviceID\": $ID, \"introducedBy\": \"\", \"encryptionPassword\": \"\"}]")
${lib.getExe pkgs.curl} -X PATCH -d "{\"devices\": $PATCHED_IDS}" -H "X-API-Key: $APIKEY" ${baseAddress}${SharedFolderById}"$folder"
@@ -147,7 +106,7 @@
'';
};
systemd.timers.syncthing-auto-accept = lib.mkIf config.clan.syncthing.autoAcceptDevices {
systemd.timers.syncthing-auto-accept = lib.mkIf settings.autoAcceptDevices {
description = "Syncthing Auto Accept";
wantedBy = [ "syncthing-auto-accept.service" ];
@@ -162,7 +121,7 @@
let
apiKey = config.clan.core.vars.generators.syncthing.files."apikey".path;
in
lib.mkIf config.clan.syncthing.autoAcceptDevices {
lib.mkIf settings.autoAcceptDevices {
description = "Set the api key";
after = [ "syncthing-init.service" ];
wantedBy = [ "multi-user.target" ];
@@ -182,7 +141,6 @@
};
clan.core.vars.generators.syncthing = {
migrateFact = "syncthing";
files."key".group = config.services.syncthing.group;
files."key".owner = config.services.syncthing.user;

View File

@@ -1,62 +0,0 @@
{
module,
clanLib,
lib,
...
}:
let
testFlake =
(clanLib.clan {
self = { };
directory = ./vm;
machines.machine1 = {
nixpkgs.hostPlatform = "x86_64-linux";
};
machines.machine2 = {
nixpkgs.hostPlatform = "x86_64-linux";
};
machines.machine3 = {
nixpkgs.hostPlatform = "x86_64-linux";
};
machines.machine4 = {
nixpkgs.hostPlatform = "x86_64-linux";
};
modules.syncthing = module;
inventory.instances = {
default = {
module.name = "syncthing";
module.input = "self";
roles.peer.tags.all = { };
roles.peer.settings.extraDevices.phone = {
id = "P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2";
};
};
};
}).config;
in
{
test_machine1_peers = {
expr = {
devices = lib.attrNames testFlake.nixosConfigurations.machine1.config.services.syncthing.settings.devices;
machine4_ID =
testFlake.nixosConfigurations.machine1.config.services.syncthing.settings.devices.machine1.id;
externalPhoneId =
testFlake.nixosConfigurations.machine1.config.services.syncthing.settings.devices.phone.id;
};
expected = {
devices = [
"machine1"
"machine2"
"machine3"
"machine4"
"phone"
];
machine4_ID = "LJOGYGS-RQPWIHV-HD4B3GK-JZPVPK6-VI3IAY5-CWQWIXK-NJSQMFH-KXHOHA4";
externalPhoneId = "P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2";
};
};
}

View File

@@ -1,95 +1,99 @@
{
name = "service-syncthing-service";
name = "syncthing";
clan = {
test.useContainers = false;
directory = ./.;
test.useContainers = true;
inventory = {
machines.machine1 = { };
machines.machine2 = { };
machines.machine3 = { };
machines.machine4 = { };
instances.default = {
module.name = "syncthing-service";
machines.introducer = { };
machines.peer1 = { };
machines.peer2 = { };
instances."test" = {
module.name = "@clan/syncthing";
module.input = "self";
roles.peer.tags.all = { };
roles.peer.settings.folders = {
documents = {
path = "/var/lib/syncthing/documents";
type = "sendreceive";
};
partly_shared = {
devices = [
"machine1"
"machine4"
];
path = "~/music";
type = "sendreceive";
};
};
roles.introducer.machines.introducer.settings = {
autoAcceptDevices = true;
autoShares = [ "Shared" ];
};
roles.peer.machines.peer1 = { };
roles.peer.machines.peer2 = { };
};
instances.small = {
module.name = "syncthing-service";
module.input = "self";
roles.peer.machines = {
machine3 = { };
machine4 = { };
};
roles.peer.settings.folders = {
pictures = {
path = "~/pictures";
type = "sendreceive";
};
};
nodes = {
# peer2.console.keyMap = "colemak";
peer1.services.syncthing.openDefaultPorts = true;
peer2.services.syncthing.openDefaultPorts = true;
introducer = {
services.syncthing.openDefaultPorts = true;
# For faster Tests
systemd.timers.syncthing-auto-accept.timerConfig = {
OnActiveSec = 1;
OnUnitActiveSec = 1;
};
services.syncthing.settings.folders = {
"Shared" = {
enable = true;
path = "~/Shared";
versioning = {
type = "trashcan";
params = {
cleanoutDays = "30";
};
};
};
};
};
};
testScript =
{ ... }:
''
start_all()
testScript = ''
start_all()
machine1.wait_for_unit("syncthing.service")
machine2.wait_for_unit("syncthing.service")
machine3.wait_for_unit("syncthing.service")
machine4.wait_for_unit("syncthing.service")
# import time
# time.sleep(500000)
machine1.wait_for_open_port(8384)
machine2.wait_for_open_port(8384)
machine3.wait_for_open_port(8384)
machine4.wait_for_open_port(8384)
introducer.wait_for_unit("syncthing.service")
peer1.wait_for_unit("syncthing.service")
peer2.wait_for_unit("syncthing.service")
machine1.wait_for_open_port(22000)
machine2.wait_for_open_port(22000)
machine3.wait_for_open_port(22000)
machine4.wait_for_open_port(22000)
# Check that syncthing web interface is accessible
introducer.wait_for_open_port(8384)
peer1.wait_for_open_port(8384)
peer2.wait_for_open_port(8384)
# Check that the correct folders are synchronized
# documents - all
machine1.wait_for_file("/var/lib/syncthing/documents")
machine2.wait_for_file("/var/lib/syncthing/documents")
machine3.wait_for_file("/var/lib/syncthing/documents")
machine4.wait_for_file("/var/lib/syncthing/documents")
# music - machine 1 & 4
machine1.wait_for_file("/var/lib/syncthing/music")
machine4.wait_for_file("/var/lib/syncthing/music")
# pictures - machine 3 & 4
machine3.wait_for_file("/var/lib/syncthing/pictures")
machine4.wait_for_file("/var/lib/syncthing/pictures")
# Basic connectivity test
introducer.succeed("curl -s http://127.0.0.1:8384")
peer1.succeed("curl -s http://127.0.0.1:8384")
peer2.succeed("curl -s http://127.0.0.1:8384")
machine1.succeed("echo document > /var/lib/syncthing/documents/document")
machine1.succeed("echo music > /var/lib/syncthing/music/music")
machine3.succeed("echo picture > /var/lib/syncthing/pictures/picture")
# Check that folders are created correctly
peer1.execute("ls -la /var/lib/syncthing")
peer2.execute("ls -la /var/lib/syncthing")
machine2.wait_for_file("/var/lib/syncthing/documents/document", 20)
machine3.wait_for_file("/var/lib/syncthing/documents/document", 20)
machine4.wait_for_file("/var/lib/syncthing/documents/document", 20)
peer1.wait_for_file("/var/lib/syncthing/Shared")
peer2.wait_for_file("/var/lib/syncthing/Shared")
machine4.wait_for_file("/var/lib/syncthing/music/music", 20)
# Check file synchronisation from peer1 to peer2
introducer.shutdown()
machine4.wait_for_file("/var/lib/syncthing/pictures/picture", 20)
'';
peer1.execute("echo hello > /var/lib/syncthing/Shared/hello")
# peer2.wait_until_succeeds("timeout 5 cat /var/lib/syncthing/Shared")
peer2.wait_for_file("/var/lib/syncthing/Shared/hello")
out = peer2.succeed("cat /var/lib/syncthing/Shared/hello")
assert "hello" in out
'';
}

View File

@@ -0,0 +1,6 @@
[
{
"publickey": "age1t6s4d0sn3jlzlu7zxuux5fx8xw9xjfr0gqfaur0wessfrxsg35hsqttv2t",
"type": "age"
}
]

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1numxr6m52fxrm9a7sdw4vdpkp463mm8qtuf5d0p0jde04wydfgtscwdx78",
"type": "age"
}
]

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1aqng9vmlgth5aucu5ty2wa0kk9tvk7erj4s07hq03s6emu72fgxsqkrqql",
"type": "age"
}
]

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1kul02mg50nxccsl38nvma0enrgx454wq0qdefllj4l0adqkllvls5wuhfr",
"type": "age"
}
]

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1kqgx4elusxx4u8409gml5z6tvrsayqsphewsl93mtqn7pl2p5dwq9lujpj",
"type": "age"
}
]

View File

@@ -0,0 +1,6 @@
[
{
"publickey": "age1mq8u3x9z8v3zdm59qslxyn33zm0rpjzrd9sr9fjzqwp8d66t9czq626xsd",
"type": "age"
}
]

View File

@@ -0,0 +1,6 @@
[
{
"publickey": "age18mp7v3ck9eg0vqy3y83z5hdhum9rc77ntwal42p30udejkzvky2qjluy4x",
"type": "age"
}
]

View File

@@ -0,0 +1,15 @@
{
"data": "ENC[AES256_GCM,data:JbpTTfHD92NlaUR7xAyJFoqD+4mYDlpE1gdWuCsrMyar8rUzS6vX7i7ymd69K0tPAT/UUZAmNacPFwvjTkZmdv+/719FNBkowrc=,iv:ZHTcm+V1dNZ07kRQEDNFYh8NMMwZ5g5cq0Tg281Aaec=,tag:tjAJRuQrRC0JYhS0tA+VUw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBJZWV1ZUZlODZ2OXlXbzVX\nSTkzV0VFL1NJVXpzKy9hTy9NN3gzeWxJbzE4CnJZc0JYd2s3ci85aDFuQ3pJbmtT\nSWZGRmREM25nWjhkZ1hGNE0rcVpMbFEKLS0tIDk3aHZ6ZVlaTmhOV3B4b3g5MzV4\nUUNWcXdHcTlVVEw3UGlxQWtBSDdpMk0K6nCih/rHq4vLS/oDz8cbjY8TVVsQmzaW\nivSd3WhpUaRdigyw/u3/5Lmaii1awy2qJdyREbzzUVgJPfoZ87pabw==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-22T13:55:15Z",
"mac": "ENC[AES256_GCM,data:KoxJwNfRO1SDlgCc5p9+ZDP6rXOAUXG48ousVXKgNfR+qyS9i0FIYjgJxsSxzsYyn0Md7fbbJdX8MEnJZkgkTn0pJ46HfHsD4oiE66AF4pcgdIssTo4BX6RvoqbCdtS6hi6dpyrW7j1PPhwO3DRhaFIO58Nk1fxcVpyATzm8Gyg=,iv:SYotgqC8fA80mmjYZxUM0p+MUGxRYKHCd1pscS3HVt0=,tag:3XrQabwt+KEtk8JLZ4HTPA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:XqOUvbQOkOUrxQAMrUSRGfOl2eYjGZGI5st1iZl9Re8ymQa0LtokGzknfSiSucwfP50YTo7NoQsNCRsq/nS0RWbby0TQceqSVSw=,iv:GT3UrKdanFy4aMRPTwuOIyvP7pT6MXGRIstgKZmvG/4=,tag:Bg3ZzCPpw3KHXwy2Hazqrg==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBmWWJuTjBCaUhUeXhSRmZS\nVW9uQ09yOXVJVFFybzJsdmVmaEpOZVdEVVJNCmxxcUU3eG44cTlrY1lnVUFjdlZK\nOXNvR2pDMVJtZXFCbjJlTW1xVGoyTlUKLS0tIDlkMS9FNVJuZGZyODJzZzVRNzJQ\nNWpuaUxpMnRaUWt5bmw1TjBJV0dUOHcKmuHPHwzTUtkl7eZjZ4422C8cxGUnnzg1\nBPwAewDXEgq+clTXpU3UWjSvUNpqIkh9GDRE+qYfYWa7m36TrzACig==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:20:56Z",
"mac": "ENC[AES256_GCM,data:INCJWY2hsAwF049zFpYUTccJvczfgrPQwu+YUt8s/vfdrAk+tmLqCVFh9idG97l/OIEVPLv1gFE3SlcC+kKCNQV4SAyZA62CTeeNdgSSqDDipjrFn4fr1L8ZenCn56/giW0GIB2bBCa5WUYaHtGDoG8f6HSqNIhjnY9/qmDLUWQ=,iv:Utda22592sXOEEKFRSgfP+yLgi6FQGeEFiT61ZiKcws=,tag:UWyEnSNt0ztxTh0FGRzTtA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:bDwReDwUe9t0lXtTRU5r6WY3OvV1qOvkkByG2PbjXtArF4w6y3X/i6SYCnoys3wMvAY32+uH13ETXDS7LR/6W4/+pCAGAb3ATqQ=,iv:84dC7UP6QkHVkLwSvmb/NjXCA9OJ+of49UfitU0Cuoo=,tag:oO15dzpH7BKjawPGI1+F0Q==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBBVXpNTkhTU3A1OU5HZm9p\nb2t5YTIxZWU4Z3hZR0ljTUNRT2hvQXEwNjJRCjh4SXY1NWhPVFFTMWswaWo2cFJB\ndUx4V1RiRFQ1UkNQU24zaGlSV3ZYeE0KLS0tIGJ5ak9RS2JnNk0vU3BaMEZLTFI0\nZit5RDlZbWE1QzY0RHQzcU5pZy84TUkK3/HHFBGfA9EpU+WDrlM9w5rbmgOb7ZAi\nVQiO08PVGTOvsEDed5A2oIXImRopcuBATzKoi4DNXpYlpSI/scYuAA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:21:08Z",
"mac": "ENC[AES256_GCM,data:qdNz02bNRHaWhTLxHqgvMRokpXZHvijyoP2c+KXMQxWP/7884SWQ4xoPSiUeLOPatLXpjXWQ/OBiDqQAzLkbC0/ujJH+BJQtV75xl6ytOjLbYw+hwatAR9C5GxiuFgKjLSsdaqzRlXJsULbf56edXeT+U/u5G8542WjF4AUat00=,iv:FYQRJQaRR0aPFyHyl0QCVPpjTXePXiZ2LjccPaybWh4=,tag:W/xkIgTUoHiTX9azluinAQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:2B9BeSPThkGX4sBAtm9c5Uc8vz5q+hSs8y0ibQ520s2EOYNWEEwP0e+9CBUmU1n0TKR4FP3HMHA0N7LKHBEXEb513I5lNHWgREc=,iv:qkmur96UjZ1BoWgvvihWk/7VRDg4snMeDA0s1MWdLb4=,tag:3djouRryDjcOFPv9Z8nLvg==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBxd2wxQWQwSHl5R0ZlRkxX\nWXBuck04TStoTHRYMDJaQkNRcDQvZFV6U0U0ClROOVpiVXhCVTdzeXpVQlRTYWRP\naWE2R0VtaGwvUk9UcGRWbEZtQlh5ckkKLS0tIDNjdm40UCthbDBwSE9ZYXBwVC9h\nRENrMFV2a21QRjBBYWJ4bFZ3K2NxWDAKr9hgDOuG4lR6dChQCvw/VOQdRNF0Mj1k\nzQRfQaC8nfZGGOJtU09zv5+ZPYJdheRuz91M18qY5uA5qrch5Yrvww==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:21:21Z",
"mac": "ENC[AES256_GCM,data:F6Ombz312QLl0BAkDbhN37KQ8gxMoukRqz8XkqANcdm8HyqfWL/V5Hxdt5Ve3HW4KzLO1lxb9M0/rQbtCj1hKMSnfM4XjP1R4ClLkU0knxeuLwbOtc4nN1qkaIs5fPcR0G9+gv+FnhZqDzosrhogDSWAayqiArwRWscMY4l716w=,iv:KYlcVQQGcWPIsL81VmTRmEBmC6PPT71i3ywcEbSfc5k=,tag:ogDnTv1KtXVtBDvhkjV1pw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:chE6ezkSGLHwvfqrUPiTLEoB63NhzBJgHQnGLe/0mwuR9iST3SpthUlKv7K8cezOvq7VVqJUoOGQSbqtuA04NUvwShVCBbqPjeA=,iv:50IV6ggQVnQpUpU+jfBB2z5/BZYs3lktGMfsWsCtJUs=,tag:BjtV31lmGOT5FposJA91WA==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBZSVAzVXcrMWdUQmNCNHo4\neWw5NnJjYkdHbkE1dmJPcDlNb1dpT3kxbFN3Cm1HWVFxblgrNkpaZmtOVHBpdEVF\nWXRMamdSUGxkSk5ic3p3cEZrNG5QZEkKLS0tIDdYOGZRclREL0o1bWZmVTZoMUlu\nQzJPZFlUdFE4NkE0di94QVRzRXgwb1EKkqzL7yOdALLd8I2GnKPG3d61XTbNMs4+\n52M+o+wA8h+mnImbyVOtU9D6AkxvbKywZLmNRukkPqnkLqu5IXoSMQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:21:34Z",
"mac": "ENC[AES256_GCM,data:DI61cHhur+0q+gGtXHA6uFyZyKY/hCTvo2MLhhWPCSUWzYJ+j37fHfH1KUZcol+5prhnUIRrC4AoVt+0MeoXrNj+3SyptTVo1MgqNBayQkBMxKYp++SbqlXnlkLD+XOohpw6+f67rGNecjNc/OwCcfXu7PZmFhAFkwC39hUm7l0=,iv:lyinQFzoXo37Zs1QtbM1Jla+KRSMSUcph7bIR6/X1nw=,tag:nFfq7KtaTwZkQ9joCUOE2w==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../users/admin

View File

@@ -0,0 +1,15 @@
{
"data": "ENC[AES256_GCM,data:Lr3bPR9MjNvwBYPIQg0D4qIDhRbD++ZOpuGvz093d+DWva1b4h1jhcsnmziOvINZQ3vVpizklkASRWo757FOJLLV1LiXNqiZAbY=,iv:HbsNN9Who9BFTHEUrRVAA5MAkadXVqTGEsq5kTPZdQo=,tag:pGW8oINEGRjDgg1JoHdUEQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBBM1Q5UEpvaDQ4K25oTWN3\nbmdPaTdZUTFFR0xUVEttajNXOSt1T3BnTjJnCkE2eUVleXJKckovdkw3ak1xUGZj\nK3hET0E0OU5KYlZLL2daZFNaZ3cyS00KLS0tIHFwMW9LTHNLR1JwaHdiMnc2bWhH\nMWtwMElUYW50ZXlGZG1US1hiWlNoRjAKTg62lhjMCG1uPtxAmq5L7QGwmlwvGnxG\n+qTZHAPAoTUaWtnJfJpueGB1OJbr4HbUH0gN0cBqq7Y0DGIyvGqidA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-22T13:55:27Z",
"mac": "ENC[AES256_GCM,data:1fh6Jp+y6jGI3NgZMgGcuVHX3GLXYH9LKbPG4cVwOk1otX94zr0UcrVOSgH3m9J7QpGlFl48HwCfoNZzkRVmX633Px1UZQopOSZLao8Ao7ZcAwP3EmIowwJBC8//pYIhE6JwPlIlRbHOQRDd8HhIM1VwNjc8dUBBX0VyTAgyT4w=,iv:0Z722NhqETyVY+mkerERVw9TmKx0aASdSdYYdNucmCg=,tag:PT56bX50YajhlHQttz+Ffw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -0,0 +1,15 @@
{
"data": "ENC[AES256_GCM,data:XgQnGe/dmXyir8gDOgcsdYok1d1blDBx+AFDLw+tXBzv5FY1pSbonSuOKmEVWDEmRCR3o1D8qiuUrDsa68D3als69A/bRhYHR+A=,iv:JzNB2VjE+HHAOQXkN3t95wQPiBBj/c93X5JHP8fosHg=,tag:fcAG53Tp+38NAfqSObIa4A==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6amIrbk9uV1IyQ20zMExo\nL0RrMmxOa3dEOGNmZFhIWXF1Sjc1K3VtdkdFCklLdEVNYnpsclRUdUtyTmVteWhu\nMHpPQmYzSEVldytGazJ1azBnQWJsaEUKLS0tIEZFY0hMNnV1TE9jODdpdy80eXRx\nMDZWSGt1MnhYQWJoMmNoaW1KWGVsVk0Ki0VQLz79+QQeiOri0aBqHEsVessIyjX8\nv3OZAjwMglPNv3j4CIqY/F4sfrAYxKUNB7g0Ui56BZlrG/i68EupAg==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-22T13:55:39Z",
"mac": "ENC[AES256_GCM,data:081iGB26aJv0067lLJVetcKOyzzaHys6W70hcxB9010kpyU6AkxNt+HCa2lvJNAv+lls4WXgM6WsD/KODkDvbZsP4U3P9sJqY4RbTqJvypN4yjmzogneB7GVOenMQ8ywbm+ILM54nx8Enn6GPm4YX6yTat4WVTFFd+dJYmfBBmM=,iv:rXOqKtBSZvwozT49Zhp6aBpLXWlim7KLRGg6yIb2vkQ=,tag:vmjlvW0PNlHYs7syshJETg==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -0,0 +1 @@
../../../../../../sops/machines/introducer

View File

@@ -0,0 +1,19 @@
{
"data": "ENC[AES256_GCM,data:/dsV5uaNTBmB82lbzlQHGyRbFeXM9l32igrLcfGG13td,iv:6PB0LqjRtvrSYxeOPN+261VqacGg0jczCyyF4FZQa/I=,tag:4GlcYRhYhHVdSkwDSzcT6Q==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBKYmJIUVhVQ3c3ZnhweUhU\nZnR6THB1Y2RjWExPckIwNEl1SlRGeVJORUVVCkk1ZzZwbGNwc1JQOFRpN2MwT0xG\nZTErNzVmSnhPazJJVHBKUnl6d2lYaVkKLS0tIHJRclM2WEYzQTRwVXRHcUUxSTlt\na1pHamdpdm9hUU9PSndybzdlMWY3NkkKcTnYp/5fUTdiNr0ajJeAPuLwhgWAdlAE\ngZ4soLdZoBFUHSsh0LhEm6jO2DXEKUZY7oLZi1gSZZbPA7PI/5k7OA==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1t6s4d0sn3jlzlu7zxuux5fx8xw9xjfr0gqfaur0wessfrxsg35hsqttv2t",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB3THZibDFMaTZrKzJVWjd3\nd3RScjZVZmNlbitmc2FkRXl0UlVmZlRoM2dJCnlkUW1pa3dHZlNqTklxc3dZTkha\nQUpWck5BcEk4NjRjSXI3TlFzQ0pzVVUKLS0tIFpJdHJzKzVkeGFBcnRvR3pDeFN2\nNFNFaW9kY3JXdzk0Q3hPS29iOFRkK1UKoQSqFLIAeU6aRL+rDQ+oJ9PS6aAtAPeo\n5Kpwoi3KQHVrDDIRBaxvZ2BXObOyU0tBqjzBdrOgtRn+96HmnZ6JDw==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-22T13:55:15Z",
"mac": "ENC[AES256_GCM,data:4T1+t0hZwN0ThdXoCcNeUiQNj2RosRP6HL6SKmsnECn3pmDZqJEt/0xEQWDeDcvfHyZkJAFbU0RfhYsyz2wtYZwuMDL8WEbHURa1GQ4uKNWfUPPeu88eTwnYsbvtUS6TdRZ26eDoQKDNEed1cy9TfI4Bugdt0rrl6O+su2Ikvyo=,iv:aMLJ4r55UY5cIB1rQNns3jN2U/ZjfFN1vl6ZsXpcBbo=,tag:P2Dv7LeGB1fT4TYHwJgLGw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -0,0 +1 @@
../../../../../../sops/machines/introducer

View File

@@ -0,0 +1,19 @@
{
"data": "ENC[AES256_GCM,data:6bI+k3BUKfpOnYSZx6ucCEKLvPCC2+LVrHzxQyN7MvLKk43LnKsfSSI8lpp5l8CE8Bpcpq9BqWVU3ZhnwmM4RbiHg/+C7R374KqoV0TLXMwq4lkeVM5oh4On2qBbnxsXtBz6NbJmIiJPPTT5YARdDAdIVUZ21jOj7G5jvm1BbAWojVkrKpb5IpOTmuxSA0CPEQRsvP9NHH+5Btm3J8Py4bHcRpgNeKaFfTrJjtXN2te8hTjJb8sGmFvuOr92Tdh/+O5qeA4SjMXqRJEiJiE+Fc9X9ukrxlZkDwsSn2gXQu7ahGhjOZCvQ/Th9j55/ZPiu5zrypfBIltSfGaMzHTqSECSGozfNSqbAAuu/ybCMmp87HDg9RQ3M/2yYqmSs2cRSB1/jN7ha3LTEnPhPKWnpOmnNvQT/rSX0wyfGtdKVk0bIgEc28wIKANyJC+HPPtENdcmdUhx6q2CRi1h+nFz92zkRGzWySgCcl51WawFDD6KRKLvqpq9LWv/bNl4NOQd307hPZguLLB6/Q/yV29j+XgGrZcmlOnn5NfAlVfF+F6ckebPxDKMg0Z3DD/83DPM18iO5SeDcaNVS3c3KoeSpURWMHr5+2n+qtu3wVvKsPPhAsBV8diwGxp5z00Rr84mHXPfnz56Tr+m/rYLfflryrybxTujcuWFCGgzwMdeaYx4tz7lQAY1XtdKSNTYjhWdDfFbqThP3HVGJsRQshCd+8Q4miEfJzDmUgbrUuoF3VjkMApZ4wULV5BCcLrDYIXkwu1lyDt5fk4G0TyZCP8JvBV3zcVxjAPW86tSHNcX0V56Yd090s51u0iJyM/eqNxoNXa/DWBuLd4jpWJRz2c48pUO1XRgqvZHFmUcO5q7SPL/bCvdpjqaopnOqt3501zGB2q+TGfyZUZ3sqtylTPz4C7tv/hrCzmRCTzEFlfsmUOi08DL/6K65ihe6uXjoH2fB7ypjUy4u5L1XcBIvxSitcSacosJHSUtWcFZ6mWIgVyChTjWRfCAF4eB3aVctW+OmVTeiTcdAfbDiPR5FNyDWPTMnnorjBVZkpU=,iv:RGNo6ipQplIsHL7avQpTgEKMDifKnB5W96vYf6X96cQ=,tag:k8oObQ1m3aVnG94MR7dhHw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA5NFVRb3cvZEZXdW1xWlBP\nWWNCRjlxYmpPQ1k2aFBmOUR4M0F2ai9Iam5ZCkxzU2dUamZXRmRqV3ZyWTRvdUda\ncTlHa1BuSDQ0em9lMXRwUlc5V2E1WUUKLS0tIDRkamJVZ3NIVlJGRlNrK0EyT09x\nbHVsMzlKVzJGNS8vVVlHRlFJRmUvUlUK1OSBTQi6R3XYEZEQmpGCrMr8m4jCBRiV\nj/G9sCahhTUc0D7E7DTXD3fwfXnZk1bD7buA99f908DT2Bjv4TfeKQ==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1t6s4d0sn3jlzlu7zxuux5fx8xw9xjfr0gqfaur0wessfrxsg35hsqttv2t",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBwZkJFbWhINkxmK3g4RHFN\neko4VTZHVGZOK1E2dFVqbWdVZktnTEdqT1JJCnBraG1acHVrNzlBWmJhNEMya1Fi\ndUw0NDdKakdLcGE1Z0tsSHVvQ0l1NFkKLS0tIDZvek42eXVEbEdWeG1vSENnakQ4\nQzlGaUNKMVhEM2QwY3ZPTW5nYXNLUkUK2laHy33U+hcQMT4jlUOqtVRCy+hNHyaS\nyuSk1i7Am3VeProaXccREjHjYRHn/l/B1oLRQQQT4cLcComxOArz0w==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-22T13:55:16Z",
"mac": "ENC[AES256_GCM,data:CVyuCoiArDYnoz/GQ0OZ2K0rJ1+Y1xoznp+v4rxAfL22fv3mY29EDw7ByYXxye0ARCD8gBFCpKeUvWbfOPJGfjsfAFT0AxLYFbUONgejYZpVZbnlElfLUSi39ZhaSwcImqe4RLJ2TND/HuJ6jwz0Lb1h8BWOq5/NeF8JJH2tHFo=,iv:n2liiufvBjCjASBIAmOu2Q5IONsX99GFmZw7YDeHJ+4=,tag:0bYMs8k362w7jApCird4yA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -0,0 +1 @@
UHILKRC-BZSMZT7-CA5ANWX-DJTR55R-NBGUBK4-SOZJG5Z-MONIHKP-ZHFGOAL

View File

@@ -0,0 +1 @@
../../../../../../sops/machines/introducer

View File

@@ -0,0 +1,19 @@
{
"data": "ENC[AES256_GCM,data:YOzFYRjbmtSqzz+gaimswru/Xl3h2bZ0zkkM+QokycDIGhk5wrdlrGNJiaiQk3xJ1JUEbJewmsBFPz0Xcl8Nh2aAj58s0fMg/H0lb6WfF0n7B4u3TZ6urWir4ViUPdVxcS7oxKhJCU2KoqFNpXS/NjRCmi4G12j42VGBStLNxCFZ8EH4oK/nV0tYpJ++meKA9IlQbuEKmAv6Xt7Ry1isW4THl7jzYwE6y7I1ThqudnDtvqguY+Nl27vMmFed+WmpE4jsXjeW7T/CTynafen81uL7gP6xiy/n+3nZNYgutHCTqa1AonITDIJrzRjAPMmYxwv0k1ebYImBCT0wH7rnmbLF7y4EMNFuA3O5nAvpZ8tkkLzooVf0PCYrtWbwsxOm,iv:T6aRMEHnOezxswMnJXBXoHVkVDFtDqvtCRhqvQHLOWU=,tag:2zTGHZWCt6SQrESyC3f9Rg==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBSK2pOU0Zoam52V00vdnFi\nR2hpenpHQ1BDaHlyODZ0cmgvdzlLMTZ6em5VCi9sdG9zdzY0Z1pDQVF3dnNSREdt\nQVVUQ29uaE03SWM3bnA3blRHZWJmUmMKLS0tIGwyNUhVMTJ5SkxudlZaVUJaM0xJ\nSEpoaFZaUlczMkYxWlZhb1RBbUZ3SFUKGmjG+r998QLDJylznGhuqa6magj9x9PM\nly7PlqaoZ1diLuFklqFVExK3cXwvA6xdOScZqd8/P/sEdzAodDuKQw==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1t6s4d0sn3jlzlu7zxuux5fx8xw9xjfr0gqfaur0wessfrxsg35hsqttv2t",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBnMlpRK043Rm0zbTJxUGlv\nbE5mOUhhZDQ3a2UrNVhHZithUTJMRlo2Q2hZCjQ2aUt6VXpuOVE4dVVyVFc0WjBm\nMWI0SHVqYUNjdkQvSXY5QWdFTDRjdGcKLS0tIDBleURzYVBlZVcwQk85YzhOWE1z\nd2ZwYVRMM0dxM3U0RWI0Z2kxSjlETWsK+/m7xmcoXlnfYkRL3RK1VATGY6RtkmHA\nfg+YeAENLT1Mr9SnJCWcodxFicz8hiN5PinjynjqWgfv/xLMuh2G9Q==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-22T13:55:16Z",
"mac": "ENC[AES256_GCM,data:dKjYJ4ogUztSwuP6gkyXj/gYd3TDztuwivhtRzLYwoJLJ7c4anEeZDA51tslnhdDDXQ101JDEt0tD05wAo1YdLTNg9YR0eAh4wZ/dkJ+U92U7DYEW40YXxy8co/WWW50eYqbD6SnZbZAMjf4SbaH6kqtS2MTqeba2B1G/y8sb94=,iv:Z8tzsvSTRjondcN0Vnc3bcyVlVnpqLYRoaOCe6dLQGQ=,tag:leifNnUovWvO5cqzyN2PNw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../../../../sops/machines/machine1

View File

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:H+qlkDBtl4xYPz7X56gamUTQfuDE5+m/BkDPK9963A9F,iv:paPRFOnxW8aSt/NYg4afp+M+8svt0j4G6mZaJms/c7o=,tag:9uxvemHUr6Jm0MzvSf9cxQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1numxr6m52fxrm9a7sdw4vdpkp463mm8qtuf5d0p0jde04wydfgtscwdx78",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBBM1ZUaEwwOGVFSVF0dlRX\nQzdMRjJFZU5kSTNDTzVrZ1JzNHdROXpGNlI0CkZXQ0JkalloT2xNNEh5ZG5NT2Vn\nVlFKZTV6aTYwNmF2bVpHM01YdUtLeGMKLS0tIDRCVDZrNk5UdkVOODdnZFptbDlZ\nVnNNMGc4U0FvOHlsU2dwTE03TUtyL1kKoikATM2w95ca39e7K9mOezI9z4hSgUAA\nUiq4GH9Sn4jtuew2FXK2ZqedzgSQuSOob8iaYBUMGdWL7bfn2gZsHQ==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBNMUQxZmxrZDR0OU0vd2tl\ndjlCNEFSeTVFRFRxa01xNXhRTDdFRWJtQkhFCitPV2hDNG9uWXE0a0gyK3NuSnc3\nblBKVjNkMzkyWHVKMzBKV1NzdFVsbXMKLS0tIDdkMTVrSk9pcjhlWkVISWNoQVU4\nTm1sQVlDbUVObFIrRkNObG5nMUdWVE0KWwIpPYL8HwcbUDUS6TG4DyJgAz+xz9lA\nLLRYARWF0zNJlOTHAIEksT14YSU3dplMxunwas2mLBjfYH0DIg2Org==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:20:56Z",
"mac": "ENC[AES256_GCM,data:vN/zqTJO+szaDV1aJc/ZYAbe0ISc7dJvQg8prjVkYIZA+aAh5Hk2Kec6iz+gH23CqapYcsEYJ/qd24eRrSsxPSJuvYWJAgyrPasNXNuGNrsVnkvTyClTUGiBbhoac6yHu66BHVpH15BJUgeSmO7iVD7HD+KPy0p66agJwWQEcn0=,iv:GLRGWh131n5otLJjBGElmMGxGOL1bljGZ8zzTE/fLn4=,tag:a65wuRUzsBtO2rO+hJN0RA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../../../../sops/machines/machine1

View File

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:F5FWqPeOBJvuwzuomHd4IkWgGeBBmYmgSVeH/TSJKKG4BHAwgam8dT1oTWiKEd/SfLj6LY33N+s4fKqe3bdCCa5/CPC3P7ZP1RW0/fyAdftlXFpbmXjsFf1rsTcTPMPBxOdwbu6+w6ji16sEkKXsN/GQsk6bhT7gxEgy4ZXY1kiiE08TtIG8p6aIcES/sTXhNqvZvWkJeESh1pkVhfP0OSWp0x05RPvxg2FoU1IMcqmySZwL/Ltqp4I3pKNHmLzuTFfFmK10fC9OptBbPmPtp75k0pJcQ0xbzJAyMM1riGzhOOMO1Qj/DXcSPDVugrMYDZRpEqNGg+LPBuCF1l2e7UitPLY4/1g669lCAPR3stiSW3S2NZIsDsW/ubN2qYeMBSBM8NWpQPgcsrKELCgUExD3MFMO9G0Rl3sU1BsYw+jf+kZNZWDL/w9W6vYt73q3Nmu2DfPpGzwKLrnbBpqjgMJKev/9JG/gMejbMe5AK1leyr5TDi97ku3zTvQfur4YMWABiBswmIbgbjBZAJOIKzy10AJTFQbqNoGGaOz22i2RBJLVHU7CKoKyJL99Iun0MjSlIIv5hTZMXDnHPZxvLq4w0htA5U2VJNKg04YKaPpc5ceDLVt9kgEF6OKG7/5nwDoQ/cgjrwbzl85sWpWQgXxwpSDPN8A0rkyZkgxNch/tUASgUoOExJRd6ht0iJiFM3tW25z8NU1ZWijs287hCkXNac63ZnXCa/2gB2zj+iHHLzgGvsxfhFiM2smjC0CSNbteTYr3sgZszrJSDdyI3Aerx92UwN336Iu0pXAvAGh8SBLBdz1CcmAlldK08Z8tFxplqOaqCQ8AtuklfQQeh8/Lx0VW09HIGkjQp88xotW4bZ803rPb7pqJNTzQmSF/4JWtIJXEG8eRbyHXpTM9p9RYvlY7/yK75EH/vwq6eAu6Cm2AwUxLjqMv9uoAad0gij66DflQ93D3hOfJaF+yNLa+HTce/p0Ymqeo/YM3/UUQ/ZHVbPonULXkylYs8JFFVkf3DBJTGYB85UCZ/ObNyXFtk/GuzQ==,iv:zV2Wm02uMdGmdKR0qiYnTdTlyRec85wXwUJOE3mAeWo=,tag:8fw+x1O1/53ff0qOuoph/A==,type:str]",
"sops": {
"age": [
{
"recipient": "age1numxr6m52fxrm9a7sdw4vdpkp463mm8qtuf5d0p0jde04wydfgtscwdx78",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvbTg5alA5SzJoUTZRSFM0\ndk1XaTlkOXo3NVNxYUJCc1FWWG16Q3IwMERnCkU1S1hlT3ZxcTRhaWs4c1BaVkNi\ndEJ0dTNTV1MxN0QrV2dJUzUvRTlsV3MKLS0tIExSb20zTDhuVExUMG9JZGd2bG00\nL0NTanIwZE92N1IxNWR5NW5PTktpK0EKsBMTH1ln0H/dK2AgUBWiGtdcY2ujvu1H\nTR+X3MZfFHQdOIvW4dkhUJt7BnZ5cEOCUizE5oI1+DifjDqk1dd+VQ==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAzNHJsaElGRWc3ZTQrQlBs\nb0NLMmJUNzhyOHFielFZa1VnRDcwQ2JaREU0CnhrR3hGVXF3SnpRZWE5dlc5VFVV\nT1A4SGNoT2s0MU1jdHJ5U0ZvK3l0emcKLS0tIDZULzBXZ294NXBSRVJVRHlwUkpr\nWi90bUpNQlAremhpaWZkVXpRcE5yL1UKp5KUA5g/trDODHcXI7SWjbWR/ozXpxRp\ngHbONszf4y4hjkHOCVvtQ2NmR/cF64nhCswZYlfssUb4aJBVrMlVEw==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:20:56Z",
"mac": "ENC[AES256_GCM,data:3l51gIfAbCnYvWjp6meJGsWQJPI7x4yswFe3WPLGtDJMPrRdOWW5UtU2Qb/rcoKGoSee5OJblOGvkloWTp++HebS2TxKEQ26qU9ycOq27vFt6mfmu4IVcmTqoVM3BPRh9md590lqdLyIsy/BULFBhe0jGBUQbXfDkfewronDOdU=,iv:JkVkjT9G5Z2+u/ZYDaoM8sRk5cBVWYR8fm48Gwfbr/Y=,tag:L9xZSAP3zQZfy6malNIFDw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
LJOGYGS-RQPWIHV-HD4B3GK-JZPVPK6-VI3IAY5-CWQWIXK-NJSQMFH-KXHOHA4

View File

@@ -1 +0,0 @@
../../../../../../sops/machines/machine1

View File

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:oYhwmtmsMpBKoUnC2sze5M7Qx3y9+ukpt7apmicJJOaDNliI8+NrrYntl56X70U4i+DN3Iz7qwPGGBxjveV0z7kIUEvlDiu3GgQmC/34omktyogb8dH/POoO+Z/E1CxKJEIn+rEU6Oqhg4aPKt/JbveW5wlevlQsMwW1oAtS701JJeY6KU6AfDerU0cS4K5+xw+dFg28pIvIIe02uDzTnFEXZYzvPk7UxfOE0IFxs6/zzPFoBbeFo2WIZZBMQYzP4AQMEiFxxaK7qthBwGuCEc7yMpW1uwVe9CZBfkVkg+wIX4DSm+0j8TiyikobsBxkvPUgqYHNyQaINJp82aGPOzFLsJ6ixf4RqgOzPikzdsyU8phi7waiDPDLf2rhZmnR,iv:9zhjW5wBlNtCxV1kBqzUlZaVRjAbr9LpxypJaIk4RPw=,tag:u1qxkOdHIqkelp/bchb4bg==,type:str]",
"sops": {
"age": [
{
"recipient": "age1numxr6m52fxrm9a7sdw4vdpkp463mm8qtuf5d0p0jde04wydfgtscwdx78",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBFZGxnQXhHUWhONitsWnRk\nbSsvSHRYbHJLc1BjOGgyb3Fyem5VeXk0MENRCkpkdGVmb01rK2FCV25Oc1JGY3U4\nK0xIb0UvTCsrV3lDRkIwZmhscDN4TUkKLS0tIG1GOTFPZTlvcXVOMElHYWN5ZFBU\nbjB4Y3FmY2ZMMmZ6cUFlZ0NUL2c3Z2MK+Fn2tf9eX6Iy6GMsa5//fzKDAUUCFa4x\neeCStPrxTbmKWb4T2NxXrZYK73kvOAmEQenUasddZ24/SDhnnrlXbw==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB0N0V1Q1l1NS9JK1R1RG8z\nVE9rRnVqQW9mRGNvWHViZWlKS21qSkF4RENFCjl0OU1rMUFHUHVDR3NjSXc1Tnpr\ndE1HUGxQYXMzWW11Q2tlNUdjdXpqdk0KLS0tIGtDZGJpQ3FrbnFZVDJuYjN6bFFN\nZGZqUFNKMmpFSUd5QXFtSFVSWFlpelUKFzm5m/+ReOVDHpvgqAKs5Vwq4XVCPH0K\nzmxtSIAwey9lUxnWNkuKOUG07o89ACsVe6pVPLLqHpGLotvne68GRw==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:20:56Z",
"mac": "ENC[AES256_GCM,data:k92YniWJ8U75G0YJ8yt/Xgi5NtyeoJrSNCCMd+Znj9/JL8mWwevJ9aeR387TK7OjVyKp8TOXTSky3bOqZbr5BTHUQDRr0LlQDO6ozmN3CVOdKpK31hfIotTHNRCUcpq+CoEB4VyjeAAdnzCkvyhckZ/L/CtQpJkSGI02M+mHcwE=,iv:cG5Q75Y0zeGj6EezNY1rRD0ZLtzIxkcKKsSHV8Og1aM=,tag:dsSFoW/XzoeOHmH5sqJzzg==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../../../../sops/machines/machine2

View File

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:ZMxZz2bWSw4lKwXp+kdeblLJxpLxdWyZ9ZPBMkZeZGmf,iv:Dnpmvt8YomnwKeJ9ULWy8iAw9OspZ99glCYDtr2Ymkw=,tag:aBHwYdO8lZmtb8PbjCEeKA==,type:str]",
"sops": {
"age": [
{
"recipient": "age1aqng9vmlgth5aucu5ty2wa0kk9tvk7erj4s07hq03s6emu72fgxsqkrqql",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBHSWh4aDdUVnowNDBGRllw\nWitjU1pKQkhSNTdmMjZOUkJ6a1UwY2FkWEhzCjlIeGJvV2JpeTBXcVFCQzY2MXJC\nVlFsY3Uzd3R3cEJuVlZYYXd3dXkxSUEKLS0tIHpnT2Jjb1EzNUpXVlhseTBSdDd6\nYmorNlo3bXY4OGl3WXJQZ2dHbHlUVFUKdVPZd/2QtR/ELpf+vy5sXdGC0tS1N4uE\n5zsRpMBWQptheOUF0tNZAbw264gbYX/fece/myTJLISAPM9wZQd7ug==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA5YzdiaDBxYURVcTVLQkF6\nMEVDd04wSXhYV1hKWXlhYVJVaUJtYUxmRGwwClU1SXNpUzJQNXVTTGQ2d2x0QmRV\ndWgyL2hMR2pTZWE5bGsvWmVYWFY5akkKLS0tICtLRHFuVjBlUldZQ2FCWTAza0Yy\nMWRSQTd4WWNjcGtYT1RqcS9ybWp1ZWMKidHF/OqLKVYWo02zHnSLm8BEN+qyJ2+i\nBqg62OqGLt8pbE/r1bxnxia4ZCgfc+xKtsWuARKq/BUBrKlMrBZGEA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:21:08Z",
"mac": "ENC[AES256_GCM,data:JInxA9c2UQBDFzetaHU8KYNnp1JS3eBPyAMjsP6sNwOsRJBY8enckQANvhXF4Mo6eksx9RUhdcqd5zqO/uupnqV4AuARxwI8Sq3ak1r1fnH4uzOC/wV5hWIHdOfKrePuE1+ayo/mR0xqM3ipxuUfJ/cM0ktYrTvJypR1brHkJyg=,iv:Qs1M3UMUOUuPVG564xJAJAhRsj7GGrs+jqNbFjupGDg=,tag:WjTJT3N7FkVmdv2giNVECQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../../../../sops/machines/machine2

View File

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:AiUO8delOdyU9pl1q9DuSegdy/MMh8q30M1hXuinpKVC9Wdtw/NFUVfKSM6wX5d3hkBnZ+BmG75bMIkZXXJ6Cl3Q7l3M60HcBrDQaUYJ3tfuO5dJGSi9Ab5RtnWKmaA/5wtb71sM08PbmmkPXnVI2KgXwX23VnNPdlMXFRYdWuzbTD7U+S7BPKiINjMCyLlMlNfw7YidGufwCkYb6YbpJ+Z6EkfArkH6pGKlB38vaz0rSiVSBa4fxzD4rgx2lh98yJ8VC80O/PdpvYhWub1aHpK1j6gq6InptvLNIyUh0i/9YXHbjpXc/D7hhphF/tA0JUOFMyN0Ses8cvBPHLOMU3uM82aYk9lBfnn74vKsNk5IWeiPskzN6dwmHr6yJHQmDiFp9ll7Ujf9GU5DzJhQsVUFf1xb2wq05YMomEpDdDo0xz35L/w8hFQg5kKLDvzKCiDQuvYz4KdaDZqobZbJ+ICNx0dZHlBg6NGvYK3Q1TpNkR6qHDnOGYC3dbZzeskuLDnD6VScPrLvf0duOrNwE2QP5Eqtk/5r+yP4SJzNK9UMZhYcXIF2koel6dxhlEmJMBLryR/56+GFIWOHIebKUGJhtumj0cl21QAcZgeQ42oy6mza+K17u+SpRwb+Vl+d10zSauxV6jqHE/qec4ZxKyFJH1TDR4R3FcqY3Xb2323r0Lx3Z6h3QSuXaoSz3lZj1FnXNh5x4Xctb87cNd6u/woxL3X105OwMOxaI4SIt81WWSA7do9WFzfNjOUahyullq5RWNOapLPAmMjuSdJBeImFEbKPUst25vgkzs3x4PZkS02s6R+o3h3PoeTZPDIpNIv6Ye6sRy0txhF4nBmHKFNjoClxaDG6esdDlsyu+eyZKRy6/Hvfe/iimlyb+iQMLlmTLoES9B7J0F1JAwJhetdnfoa3pbgZsehb2b0BmqgHbytgA/nOlNWcbae+aX8HCj4Ju0RnYwObbjaVqHQoST/LjnnVJCo1y0UZ4CnOg7MNEKrtxRb3BV8s9Xp3BR3UPFN/gKWBGh/xyHoR3ZEJ0mxt3vjdymqMtBQ=,iv:4slGBU5rnFPaiFXr6Nzuk54ku76XL9+cFOF3IPWbZn4=,tag:+XOCNKwG2PqzY/PTzruouA==,type:str]",
"sops": {
"age": [
{
"recipient": "age1aqng9vmlgth5aucu5ty2wa0kk9tvk7erj4s07hq03s6emu72fgxsqkrqql",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBYbVZ0c2tqSEZJNkxTbGpC\nd1U3YVQwQnhmY21pY0R5SnJHL0lVZC81SFFzCjEyV2VwQng5ejc2L0F0ZUFrWDB6\nY1k0blJQbUhRQ0kweGM5Ty9uQXVGS0EKLS0tIG96S1NOQWlidXZORDlrYWZWcFUy\nb2RLa2pKNEhkSFlwdVFMRXlEOStkNVkKYahEQPOfncD2TGeT1JNPWCOG0ScUCJ0K\n2cYRCBxayd/ssfyEL3jV+TVSxFnKrAhtGJzP2xEAZFYJsHoLG+56sQ==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBQcWdtYmpwYmJQV3BNL01x\nY0FURkczRjBKYWlzOFplcDZ4cktGMTNMN0VVCm5FQmorTzRMUzVSd2Y4VS94R050\nR0V5MXUvODh2Q0IxeU9RelJLTUZoWmsKLS0tIFFjT1pyS3NwWi9OY24xTytLN3NZ\nUDdqUEp4ek5tTGxvdFU4SGduR1VBeUkKX013r6b+KL1i3glEcpwgv/KzBEE9N9sj\nqgyZ3S5dhCY1LOquP/xnS7kqZzN+znv61F7577wV1pBy53KPOhl47Q==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:21:08Z",
"mac": "ENC[AES256_GCM,data:Oiq0mU3K+zgaHhdlniCGgx22Sa1tEOmO+ddfDdD2IsMm1c9vAI4O+/5PsF/DE4h0WoORJsn88qfzPDva90YUJKQAJsflMHNk8jjExGhtVLVjxkpZk2EtpBzfg311riYzKl5vSJxLqp67Ks2dHyl8qHEH71dSgkXxVMIhPDUmJCg=,iv:B+Hi7qoRbBT53wH3cBSLoQocdXCcfKenkwnErTweMwY=,tag:88c4zp8a6WNfnJkRZvNFqw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
K2FAKIB-HUUM7M7-CUPDN5Y-NORZCJP-F43EFQY-UPV4AUO-GAI3VZW-PH5TRQX

View File

@@ -1 +0,0 @@
../../../../../../sops/machines/machine2

View File

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:9U+BF9rhGbNCokJoZEl347m1jJm9n+JOJFXSVrqYM+r2A7NPTDGBKIrnVpGdOM+8VmOIzAIROGb1h+wgQoHbr98YtVRx88D0n9bA7ZtSCNiACXzgTl/swaqYCPgJ5FtPvylNagK92SDwjpVSV7jfLA0kdE49HESg9dWIUlPS2CoORhW+FY4wynXdMEsgZmeRgtYFMubqshX/Ep07qnF9HjQmmDy5XKQ4uEm3HydlZOZvW6USwyRYIcqH8x5p1xzif5zYgUsu1iHAmbhtqzULJoe2rzuM3+7Av0LeGcQpXe3gkX/3+aA/SgGN5mJOtLalcwClow5HH0atlOopgy4523Z+jANvc4XwExpJbSYeJuqUTV20q1X16Kyph1O+XYOW,iv:dS7CcP6oPfdByqIW3sz4AUm2BfVInFmDDkxxbGhySmc=,tag:T5ZbP2hDaKNR/c2J/mlS6g==,type:str]",
"sops": {
"age": [
{
"recipient": "age1aqng9vmlgth5aucu5ty2wa0kk9tvk7erj4s07hq03s6emu72fgxsqkrqql",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvcDZLcXpaODVma1RUNHE0\nMElMRkpRSWJwa2Q3dnFEQ0J2a1JpblZuWVFJCk1tUTV1U3VpYUJqajZKd3l3OHNE\nWDk4TzhQSlluOGdnNlcrUTBxaXJrRVEKLS0tIHdiOVhRTHI1emE5TnMxbEZJdm1W\nR2NxMVRhQ0xDT2o0cE1POXRQT2ZwMmMK+3x5+5pVrGPILDpCpqPKuVX5emmfDvFt\n2HjLIwy/2mWcsktXh2gk8KAD4WWL/JjatTuP+EP5zoOFcL5/U3S5Pg==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSArS3B3SjQ5ejRLU0xUT245\nWGNkaUErdTQ4ODNqSzY2aVFPeUlabHl2MkNnCnlzcUh0ZjlUcFBXbEJtZ2pDUmNr\nYWRvRG1EbkwrZVE3NlU2cWc4VSswQ3MKLS0tIGYvZFZWOXV3U2hMVU1XOHVCSXdp\nSm1vaC83c01TZms3Z0NqMHhqbzRNUGMKKiRAy58NZDz5rjNA3xYv8/dU4H5L0fo/\nQc3WsEXIIwnVHO0X6WN+qUpF8uTPAZW26wecspdsf/5NOJtM7o8+EA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:21:08Z",
"mac": "ENC[AES256_GCM,data:QGID6lY6g1qvYXhtXrlNA3E+IjJZRKiB8+CDDLYGwx5vtX32M/WNVfzEs8bUdUDEJTUnfFI6tBHjFbKUVNFmbH2utZ3aiUGVJadVUzuWQf3HO/McyPwRMm47gt29U0iagUTSdVbzSROwdwCfXfZeyJUPXXNZgyA5LqL8puPS+jo=,iv:kZzMQQ+1shjScxxmMVSqK5ckXckgCtxbqa8uBZ9RTZI=,tag:do0RhBzfHmFZc8L90WXTqQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

Some files were not shown because too many files have changed in this diff Show More