Compare commits

3 Commits

Author · SHA1 · Message · Date

Guilhem Saurel · 45720e29a8 · clanServices/wifi: fix for multiple instances · 2025-07-16 14:02:03 +02:00

Without this, `nix build .#checks.x86_64-linux.wifi` fails with:

```
error: The option `nodes.first.systemd.services.NetworkManager-setup-secrets.serviceConfig.ExecStart' has conflicting definition values:
- In `/nix/store/x0…45-source/clanServices/wifi/default.nix, via option mappedServices."self-@clan/wifi".roles.default.perInstance, via option nixosModule': <derivation wifi-secrets>
- In `/nix/store/x0…45-source/clanServices/wifi/default.nix, via option mappedServices."self-@clan/wifi".roles.default.perInstance, via option nixosModule': <derivation wifi-secrets>
Use `lib.mkForce value` or `lib.mkDefault value` to change the priority on any of these definitions.
```

Guilhem Saurel · dfce4e6cd5 · clanServices/wifi: update test with a second instance · 2025-07-16 14:02:03 +02:00

Jörg Thalheim · 5d070511a9 · waypipe: disable gpu for now · 2025-07-16 14:02:03 +02:00
446 changed files with 6057 additions and 17292 deletions
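The conflict in the first commit message is the classic multi-instance failure mode: each instance of a clan service contributes its own `nixosModule`, so two instances that define the same fixed-name systemd unit end up setting `serviceConfig.ExecStart` twice. A minimal sketch of the fix pattern, assuming `perInstance` receives an `instanceName` argument (not shown in this diff); names are illustrative, not the actual patch:

```nix
{
  roles.default.perInstance =
    { instanceName, ... }:
    {
      nixosModule =
        { pkgs, ... }:
        {
          # Keying the unit name by instanceName means a second instance creates
          # a second unit instead of redefining the first one's ExecStart.
          systemd.services."wifi-secrets-${instanceName}" = {
            wantedBy = [ "multi-user.target" ];
            serviceConfig.ExecStart = "${pkgs.coreutils}/bin/true"; # placeholder command
          };
        };
    };
}
```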

View File

@@ -1,20 +0,0 @@
name: Build Clan App (Darwin)
on:
schedule:
# Run every 4 hours
- cron: "0 */4 * * *"
workflow_dispatch:
push:
branches:
- main
jobs:
build-clan-app-darwin:
runs-on: nix
steps:
- uses: actions/checkout@v4
- name: Build clan-app for x86_64-darwin
run: |
nix build .#packages.x86_64-darwin.clan-app --system x86_64-darwin --log-format bar-with-logs

View File

@@ -1,7 +1,6 @@
-#!/bin/sh
+#!/usr/bin/env bash
 # Shared script for creating pull requests in Gitea workflows
-set -eu
+set -euo pipefail
 # Required environment variables:
 # - CI_BOT_TOKEN: Gitea bot token for authentication
@@ -9,22 +8,22 @@ set -eu
 # - PR_TITLE: Title of the pull request
 # - PR_BODY: Body/description of the pull request
-if [ -z "${CI_BOT_TOKEN:-}" ]; then
+if [[ -z "${CI_BOT_TOKEN:-}" ]]; then
   echo "Error: CI_BOT_TOKEN is not set" >&2
   exit 1
 fi
-if [ -z "${PR_BRANCH:-}" ]; then
+if [[ -z "${PR_BRANCH:-}" ]]; then
   echo "Error: PR_BRANCH is not set" >&2
   exit 1
 fi
-if [ -z "${PR_TITLE:-}" ]; then
+if [[ -z "${PR_TITLE:-}" ]]; then
   echo "Error: PR_TITLE is not set" >&2
   exit 1
 fi
-if [ -z "${PR_BODY:-}" ]; then
+if [[ -z "${PR_BODY:-}" ]]; then
   echo "Error: PR_BODY is not set" >&2
   exit 1
 fi
@@ -44,12 +43,9 @@ resp=$(nix run --inputs-from . nixpkgs#curl -- -X POST \
   }" \
   "https://git.clan.lol/api/v1/repos/clan/clan-core/pulls")
-if ! pr_number=$(echo "$resp" | jq -r '.number'); then
-  echo "Error parsing response from pull request creation" >&2
-  exit 1
-fi
-if [ "$pr_number" = "null" ]; then
+pr_number=$(echo "$resp" | jq -r '.number')
+if [[ "$pr_number" == "null" ]]; then
   echo "Error creating pull request:" >&2
   echo "$resp" | jq . >&2
   exit 1
@@ -68,15 +64,12 @@ while true; do
     "delete_branch_after_merge": true
   }' \
   "https://git.clan.lol/api/v1/repos/clan/clan-core/pulls/$pr_number/merge")
-  if ! msg=$(echo "$resp" | jq -r '.message'); then
-    echo "Error parsing merge response" >&2
-    exit 1
-  fi
-  if [ "$msg" != "Please try again later" ]; then
+  msg=$(echo "$resp" | jq -r '.message')
+  if [[ "$msg" != "Please try again later" ]]; then
     break
   fi
   echo "Retrying in 2 seconds..."
   sleep 2
 done
-echo "Pull request #$pr_number merge initiated"
+echo "Pull request #$pr_number merge initiated"

View File

@@ -0,0 +1,28 @@
name: "Update pinned clan-core for checks"
on:
repository_dispatch:
workflow_dispatch:
schedule:
- cron: "51 2 * * *"
jobs:
update-pinned-clan-core:
runs-on: nix
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Update clan-core for checks
run: nix run .#update-clan-core-for-checks
- name: Create pull request
env:
CI_BOT_TOKEN: ${{ secrets.CI_BOT_TOKEN }}
run: |
export GIT_AUTHOR_NAME=clan-bot GIT_AUTHOR_EMAIL=clan-bot@clan.lol GIT_COMMITTER_NAME=clan-bot GIT_COMMITTER_EMAIL=clan-bot@clan.lol
git commit -am "Update pinned clan-core for checks"
# Use shared PR creation script
export PR_BRANCH="update-clan-core-for-checks"
export PR_TITLE="Update Clan Core for Checks"
export PR_BODY="This PR updates the pinned clan-core flake input that is used for checks."
./.gitea/workflows/create-pr.sh

View File

@@ -24,7 +24,7 @@ If you're new to Clan and eager to dive in, start with our quickstart guide and
 In the Clan ecosystem, security is paramount. Learn how to handle secrets effectively:
-- **Secrets Management**: Securely manage secrets by consulting [Vars](https://docs.clan.lol/concepts/generators/)<!-- [secrets.md](docs/site/concepts/generators.md) -->.
+- **Secrets Management**: Securely manage secrets by consulting [secrets](https://docs.clan.lol/guides/getting-started/secrets/)<!-- [secrets.md](docs/site/guides/getting-started/secrets.md) -->.
 ### Contributing to Clan

View File

@@ -0,0 +1,210 @@
{ self, ... }:
{
clan.machines.test-backup = {
imports = [ self.nixosModules.test-backup ];
fileSystems."/".device = "/dev/null";
boot.loader.grub.device = "/dev/null";
};
clan.inventory.services = {
borgbackup.test-backup = {
roles.client.machines = [ "test-backup" ];
roles.server.machines = [ "test-backup" ];
};
};
flake.nixosModules = {
test-backup =
{
pkgs,
lib,
...
}:
let
dependencies =
[
pkgs.stdenv.drvPath
]
++ builtins.map (i: i.outPath) (builtins.attrValues (builtins.removeAttrs self.inputs [ "self" ]));
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in
{
imports = [
# Do not import inventory modules. They should be configured via 'clan.inventory'
#
# TODO: Configure localbackup via inventory
self.clanModules.localbackup
];
# Borgbackup overrides
services.borgbackup.repos.test-backups = {
path = "/var/lib/borgbackup/test-backups";
authorizedKeys = [ (builtins.readFile ../assets/ssh/pubkey) ];
};
clan.borgbackup.destinations.test-backup.repo = lib.mkForce "borg@machine:.";
clan.core.networking.targetHost = "machine";
networking.hostName = "machine";
programs.ssh.knownHosts = {
machine.hostNames = [ "machine" ];
machine.publicKey = builtins.readFile ../assets/ssh/pubkey;
};
services.openssh = {
enable = true;
settings.UsePAM = false;
settings.UseDns = false;
hostKeys = [
{
path = "/root/.ssh/id_ed25519";
type = "ed25519";
}
];
};
users.users.root.openssh.authorizedKeys.keyFiles = [ ../assets/ssh/pubkey ];
# This is needed to unlock the user for sshd
# Because we use sshd without setuid binaries
users.users.borg.initialPassword = "hello";
systemd.tmpfiles.settings."vmsecrets" = {
"/root/.ssh/id_ed25519" = {
C.argument = "${../assets/ssh/privkey}";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/ssh.id_ed25519" = {
C.argument = "${../assets/ssh/privkey}";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/borgbackup/borgbackup.ssh" = {
C.argument = "${../assets/ssh/privkey}";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/borgbackup/borgbackup.repokey" = {
C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
z = {
mode = "0400";
user = "root";
};
};
};
clan.core.facts.secretStore = "vm";
clan.core.vars.settings.secretStore = "vm";
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
environment.etc.install-closure.source = "${closureInfo}/store-paths";
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = lib.mkForce 3;
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
};
system.extraDependencies = dependencies;
clan.core.state.test-backups.folders = [ "/var/test-backups" ];
clan.core.state.test-service = {
preBackupScript = ''
touch /var/test-service/pre-backup-command
'';
preRestoreScript = ''
touch /var/test-service/pre-restore-command
'';
postRestoreScript = ''
touch /var/test-service/post-restore-command
'';
folders = [ "/var/test-service" ];
};
fileSystems."/mnt/external-disk" = {
device = "/dev/vdb"; # created in tests with virtualisation.emptyDisks
autoFormat = true;
fsType = "ext4";
options = [
"defaults"
"noauto"
];
};
clan.localbackup.targets.hdd = {
directory = "/mnt/external-disk";
preMountHook = ''
touch /run/mount-external-disk
'';
postUnmountHook = ''
touch /run/unmount-external-disk
'';
};
};
};
perSystem =
{ pkgs, ... }:
let
clanCore = self.checks.x86_64-linux.clan-core-for-checks;
in
{
checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
nixos-test-backups = self.clanLib.test.containerTest {
name = "nixos-test-backups";
nodes.machine = {
imports =
[
self.nixosModules.clanCore
# Some custom overrides for the backup tests
self.nixosModules.test-backup
]
++
# import the inventory generated nixosModules
self.clan.clanInternals.inventoryClass.machines.test-backup.machineImports;
clan.core.settings.directory = ./.;
};
testScript = ''
import json
start_all()
# dummy data
machine.succeed("mkdir -p /var/test-backups /var/test-service")
machine.succeed("echo testing > /var/test-backups/somefile")
# create
machine.succeed("clan backups create --debug --flake ${clanCore} test-backup")
machine.wait_until_succeeds("! systemctl is-active borgbackup-job-test-backup >&2")
machine.succeed("test -f /run/mount-external-disk")
machine.succeed("test -f /run/unmount-external-disk")
# list
backup_id = json.loads(machine.succeed("borg-job-test-backup list --json"))["archives"][0]["archive"]
out = machine.succeed("clan backups list --debug --flake ${clanCore} test-backup").strip()
print(out)
assert backup_id in out, f"backup {backup_id} not found in {out}"
localbackup_id = "hdd::/mnt/external-disk/snapshot.0"
assert localbackup_id in out, f"localbackup not found in {out}"
## borgbackup restore
machine.succeed("rm -f /var/test-backups/somefile")
machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup borgbackup 'test-backup::borg@machine:.::{backup_id}' >&2")
assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
machine.succeed("test -f /var/test-service/pre-restore-command")
machine.succeed("test -f /var/test-service/post-restore-command")
machine.succeed("test -f /var/test-service/pre-backup-command")
## localbackup restore
machine.succeed("rm -rf /var/test-backups/somefile /var/test-service/ && mkdir -p /var/test-service")
machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup localbackup '{localbackup_id}' >&2")
assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
machine.succeed("test -f /var/test-service/pre-restore-command")
machine.succeed("test -f /var/test-service/post-restore-command")
machine.succeed("test -f /var/test-service/pre-backup-command")
'';
} { inherit pkgs self; };
};
};
}

View File

@@ -1,6 +1,6 @@
 { fetchgit }:
 fetchgit {
   url = "https://git.clan.lol/clan/clan-core.git";
-  rev = "5d884cecc2585a29b6a3596681839d081b4de192";
-  sha256 = "09is1afmncamavb2q88qac37vmsijxzsy1iz1vr6gsyjq2rixaxc";
+  rev = "eea93ea22c9818da67e148ba586277bab9e73cea";
+  sha256 = "sha256-PV0Z+97QuxQbkYSVuNIJwUNXMbHZG/vhsA9M4cDTCOE=";
 }

View File

@@ -2,7 +2,6 @@
   self,
   lib,
   inputs,
-  privateInputs ? { },
   ...
 }:
 let
@@ -20,29 +19,18 @@ let
   nixosLib = import (self.inputs.nixpkgs + "/nixos/lib") { };
 in
 {
-  imports =
-    let
-      clanCoreModulesDir = ../nixosModules/clanCore;
-      getClanCoreTestModules =
-        let
-          moduleNames = attrNames (builtins.readDir clanCoreModulesDir);
-          testPaths = map (
-            moduleName: clanCoreModulesDir + "/${moduleName}/tests/flake-module.nix"
-          ) moduleNames;
-        in
-        filter pathExists testPaths;
-    in
-    getClanCoreTestModules
-    ++ filter pathExists [
-      ./devshell/flake-module.nix
-      ./flash/flake-module.nix
-      ./impure/flake-module.nix
-      ./installation/flake-module.nix
-      ./update/flake-module.nix
-      ./morph/flake-module.nix
-      ./nixos-documentation/flake-module.nix
-      ./dont-depend-on-repo-root.nix
-    ];
+  imports = filter pathExists [
+    ./backups/flake-module.nix
+    ../nixosModules/clanCore/machine-id/tests/flake-module.nix
+    ../nixosModules/clanCore/state-version/tests/flake-module.nix
+    ./devshell/flake-module.nix
+    ./flash/flake-module.nix
+    ./impure/flake-module.nix
+    ./installation/flake-module.nix
+    ./morph/flake-module.nix
+    ./nixos-documentation/flake-module.nix
+    ./dont-depend-on-repo-root.nix
+  ];
 flake.check = genAttrs [ "x86_64-linux" "aarch64-darwin" ] (
   system:
   let
@@ -100,6 +88,7 @@ in
   nixos-test-container = self.clanLib.test.containerTest ./container nixosTestArgs;
   nixos-test-zt-tcp-relay = self.clanLib.test.containerTest ./zt-tcp-relay nixosTestArgs;
   nixos-test-matrix-synapse = self.clanLib.test.containerTest ./matrix-synapse nixosTestArgs;
+  nixos-test-postgresql = self.clanLib.test.containerTest ./postgresql nixosTestArgs;
   nixos-test-user-firewall-iptables = self.clanLib.test.containerTest ./user-firewall/iptables.nix nixosTestArgs;
   nixos-test-user-firewall-nftables = self.clanLib.test.containerTest ./user-firewall/nftables.nix nixosTestArgs;
@@ -157,12 +146,9 @@ in
   '';
   clan-core-for-checks = pkgs.runCommand "clan-core-for-checks" { } ''
-    cp -r ${privateInputs.clan-core-for-checks} $out
-    chmod -R +w $out
+    cp -r ${pkgs.callPackage ./clan-core-for-checks.nix { }} $out
+    chmod +w $out/flake.lock
     cp ${../flake.lock} $out/flake.lock
-    # Create marker file to disable private flake loading in tests
-    touch $out/.skip-private-inputs
   '';
 };
 packages = lib.optionalAttrs (pkgs.stdenv.isLinux) {

View File

@@ -2,7 +2,6 @@
   config,
   self,
   lib,
-  privateInputs,
   ...
 }:
 {
@@ -51,8 +50,7 @@
   self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.toplevel
   self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript
   self.nixosConfigurations."test-flash-machine-${pkgs.hostPlatform.system}".config.system.build.diskoScript.drvPath
-]
-++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
 closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
 in
 {
@@ -62,10 +60,6 @@
 nodes.target = {
   virtualisation.emptyDiskImages = [ 4096 ];
   virtualisation.memorySize = 4096;
-  virtualisation.useNixStoreImage = true;
-  virtualisation.writableStore = true;
   environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
   environment.etc."install-closure".source = "${closureInfo}/store-paths";
@@ -84,8 +78,8 @@
   start_all()
   # Some distros like to automount disks with spaces
-  machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdc && mount /dev/vdc "/mnt/with spaces"')
-  machine.succeed("clan flash write --debug --flake ${privateInputs.clan-core-for-checks} --yes --disk main /dev/vdc test-flash-machine-${pkgs.hostPlatform.system}")
+  machine.succeed('mkdir -p "/mnt/with spaces" && mkfs.ext4 /dev/vdb && mount /dev/vdb "/mnt/with spaces"')
+  machine.succeed("clan flash write --debug --flake ${self.checks.x86_64-linux.clan-core-for-checks} --yes --disk main /dev/vdb test-flash-machine-${pkgs.hostPlatform.system}")
 '';
 } { inherit pkgs self; };
 };

View File

@@ -1,7 +1,6 @@
 {
   self,
   lib,
-  privateInputs,
   ...
 }:
@@ -150,17 +149,17 @@
 # vm-test-run-test-installation-> target: To debug, enter the VM and run 'systemctl status backdoor.service'.
 checks =
   let
+    # Custom Python package for port management utilities
     closureInfo = pkgs.closureInfo {
       rootPaths = [
-        privateInputs.clan-core-for-checks
+        self.checks.x86_64-linux.clan-core-for-checks
         self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.toplevel
         self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.initialRamdisk
         self.clanInternals.machines.${pkgs.hostPlatform.system}.test-install-machine-with-system.config.system.build.diskoScript
         pkgs.stdenv.drvPath
         pkgs.bash.drvPath
         pkgs.buildPackages.xorg.lndir
-      ]
-      ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+      ] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
     };
   in
   pkgs.lib.mkIf (pkgs.stdenv.isLinux && !pkgs.stdenv.isAarch64) {
@@ -208,7 +207,7 @@
 # Prepare test flake and Nix store
 flake_dir = prepare_test_flake(
     temp_dir,
-    "${privateInputs.clan-core-for-checks}",
+    "${self.checks.x86_64-linux.clan-core-for-checks}",
     "${closureInfo}"
 )
@@ -226,7 +225,7 @@
 "install",
 "--phases", "disko,install",
 "--debug",
-"--flake", str(flake_dir),
+"--flake", flake_dir,
 "--yes", "test-install-machine-without-system",
 "--target-host", f"nonrootuser@localhost:{ssh_conn.host_port}",
 "-i", ssh_conn.ssh_key,
@@ -272,7 +271,7 @@
 # Prepare test flake and Nix store
 flake_dir = prepare_test_flake(
     temp_dir,
-    "${privateInputs.clan-core-for-checks}",
+    "${self.checks.x86_64-linux.clan-core-for-checks}",
     "${closureInfo}"
 )
@@ -290,6 +289,9 @@
 assert not os.path.exists(hw_config_file), "hardware-configuration.nix should not exist initially"
 assert not os.path.exists(facter_file), "facter.json should not exist initially"
+# Set CLAN_FLAKE for the commands
+os.environ["CLAN_FLAKE"] = flake_dir
 # Test facter backend
 clan_cmd = [
 "${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",

View File

@@ -159,8 +159,7 @@ let
   pkgs.stdenv.drvPath
   pkgs.bash.drvPath
   pkgs.buildPackages.xorg.lndir
-]
-++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
 };
 in

View File

@@ -1,6 +1,5 @@
 {
   self,
-  privateInputs,
   ...
 }:
 {
@@ -36,8 +35,7 @@
   pkgs.stdenv.drvPath
   pkgs.stdenvNoCC
   self.nixosConfigurations.test-morph-machine.config.system.build.toplevel
-]
-++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
+] ++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
 closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
 in
@@ -55,7 +53,7 @@
 testScript = ''
   start_all()
   actual.fail("cat /etc/testfile")
-  actual.succeed("env CLAN_DIR=${privateInputs.clan-core-for-checks} clan machines morph test-morph-template --i-will-be-fired-for-using-this --debug --name test-morph-machine")
+  actual.succeed("env CLAN_DIR=${self.checks.x86_64-linux.clan-core-for-checks} clan machines morph test-morph-template --i-will-be-fired-for-using-this --debug --name test-morph-machine")
   assert actual.succeed("cat /etc/testfile") == "morphed"
 '';
 } { inherit pkgs self; };

View File

@@ -0,0 +1,73 @@
({
name = "postgresql";
nodes.machine =
{ self, config, ... }:
{
imports = [
self.nixosModules.clanCore
self.clanModules.postgresql
self.clanModules.localbackup
];
clan.postgresql.users.test = { };
clan.postgresql.databases.test.create.options.OWNER = "test";
clan.postgresql.databases.test.restore.stopOnRestore = [ "sample-service" ];
clan.localbackup.targets.hdd.directory = "/mnt/external-disk";
clan.core.settings.directory = ./.;
systemd.services.sample-service = {
wantedBy = [ "multi-user.target" ];
script = ''
while true; do
echo "Hello, world!"
sleep 5
done
'';
};
environment.systemPackages = [ config.services.postgresql.package ];
};
testScript =
{ nodes, ... }:
''
start_all()
machine.wait_for_unit("postgresql")
machine.wait_for_unit("sample-service")
# Create a test table
machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -c 'CREATE TABLE test (id serial PRIMARY KEY);' test")
machine.succeed("/run/current-system/sw/bin/localbackup-create >&2")
timestamp_before = int(machine.succeed("systemctl show --property=ExecMainStartTimestampMonotonic sample-service | cut -d= -f2").strip())
machine.succeed("test -e /mnt/external-disk/snapshot.0/machine/var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")
machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'INSERT INTO test DEFAULT VALUES;'")
machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'DROP TABLE test;'")
machine.succeed("test -e /var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")
machine.succeed("rm -rf /var/backup/postgres")
machine.succeed("NAME=/mnt/external-disk/snapshot.0 FOLDERS=/var/backup/postgres/test /run/current-system/sw/bin/localbackup-restore >&2")
machine.succeed("test -e /var/backup/postgres/test/pg-dump || { echo 'pg-dump not found'; exit 1; }")
machine.succeed("""
set -x
${nodes.machine.clan.core.state.test.postRestoreCommand}
""")
machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -l >&2")
machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c '\dt' >&2")
timestamp_after = int(machine.succeed("systemctl show --property=ExecMainStartTimestampMonotonic sample-service | cut -d= -f2").strip())
assert timestamp_before < timestamp_after, f"{timestamp_before} >= {timestamp_after}: expected sample-service to be restarted after restore"
# Check that the table is still there
machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c 'SELECT * FROM test;'")
output = machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql --csv -c \"SELECT datdba::regrole FROM pg_database WHERE datname = 'test'\"")
owner = output.split("\n")[1]
assert owner == "test", f"Expected database owner to be 'test', got '{owner}'"
# check if restore works if the database does not exist
machine.succeed("runuser -u postgres -- dropdb test")
machine.succeed("${nodes.machine.clan.core.state.test.postRestoreCommand}")
machine.succeed("runuser -u postgres -- /run/current-system/sw/bin/psql -d test -c '\dt' >&2")
'';
})

View File

@@ -29,11 +29,19 @@ nixosLib.runTest (
 testScript =
   { nodes, ... }:
   ''
-    import subprocess
     from nixos_test_lib.nix_setup import setup_nix_in_nix # type: ignore[import-untyped]
     setup_nix_in_nix(None) # No closure info for this test
+    def run_clan(cmd: list[str], **kwargs) -> str:
+        import subprocess
+        clan = "${clan-core.packages.${hostPkgs.system}.clan-cli}/bin/clan"
+        clan_args = ["--flake", "${config.clan.test.flakeForSandbox}"]
+        return subprocess.run(
+            ["${hostPkgs.util-linux}/bin/unshare", "--user", "--map-user", "1000", "--map-group", "1000", clan, *cmd, *clan_args],
+            **kwargs,
+            check=True,
+        ).stdout
     start_all()
     admin1.wait_for_unit("multi-user.target")
     peer1.wait_for_unit("multi-user.target")
@@ -52,13 +60,7 @@ nixosLib.runTest (
     # Check that the file is in the '0644' mode
     assert "-rw-r--r--" in ls_out, f"File is not in the '0644' mode: {ls_out}"
     # Run clan command
-    result = subprocess.run(
-        ["${
-          clan-core.packages.${hostPkgs.system}.clan-cli
-        }/bin/clan", "machines", "list", "--flake", "${config.clan.test.flakeForSandbox}"],
-        check=True
-    )
+    run_clan(["machines", "list"])
   '';
 }
 )

View File

@@ -1,298 +0,0 @@
{ self, ... }:
{
# Machine for update test
clan.machines.test-update-machine = {
imports = [
self.nixosModules.test-update-machine
# Import the configuration file that will be created/updated during the test
./test-update-machine/configuration.nix
];
};
flake.nixosModules.test-update-machine =
{ lib, modulesPath, ... }:
{
imports = [
(modulesPath + "/testing/test-instrumentation.nix")
(modulesPath + "/profiles/qemu-guest.nix")
self.clanLib.test.minifyModule
../../lib/test/container-test-driver/nixos-module.nix
];
# Apply patch to fix x-initrd.mount filesystem handling in switch-to-configuration-ng
nixpkgs.overlays = [
(_final: prev: {
switch-to-configuration-ng = prev.switch-to-configuration-ng.overrideAttrs (old: {
patches = (old.patches or [ ]) ++ [ ./switch-to-configuration-initrd-mount-fix.patch ];
});
})
];
networking.hostName = "update-machine";
environment.etc."install-successful".text = "ok";
# Enable SSH and add authorized key for testing
services.openssh.enable = true;
services.openssh.settings.PasswordAuthentication = false;
users.users.root.openssh.authorizedKeys.keys = [ (builtins.readFile ../assets/ssh/pubkey) ];
services.openssh.knownHosts.localhost.publicKeyFile = ../assets/ssh/pubkey;
services.openssh.hostKeys = [
{
path = ../assets/ssh/privkey;
type = "ed25519";
}
];
security.sudo.wheelNeedsPassword = false;
boot.consoleLogLevel = lib.mkForce 100;
boot.kernelParams = [ "boot.shell_on_fail" ];
boot.isContainer = true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
# Preserve the IP addresses assigned by the test framework
# (based on virtualisation.vlans = [1] and node number 1)
networking.interfaces.eth1 = {
useDHCP = false;
ipv4.addresses = [
{
address = "192.168.1.1";
prefixLength = 24;
}
];
ipv6.addresses = [
{
address = "2001:db8:1::1";
prefixLength = 64;
}
];
};
# Define the mounts that exist in the container to prevent them from being stopped
fileSystems = {
"/" = {
device = "/dev/disk/by-label/nixos";
fsType = "ext4";
options = [ "x-initrd.mount" ];
};
"/nix/.rw-store" = {
device = "tmpfs";
fsType = "tmpfs";
options = [
"mode=0755"
];
};
"/nix/store" = {
device = "overlay";
fsType = "overlay";
options = [
"lowerdir=/nix/.ro-store"
"upperdir=/nix/.rw-store/upper"
"workdir=/nix/.rw-store/work"
];
};
};
};
perSystem =
{
pkgs,
...
}:
{
checks =
pkgs.lib.optionalAttrs (pkgs.stdenv.isLinux && pkgs.stdenv.hostPlatform.system == "x86_64-linux")
{
nixos-test-update =
let
closureInfo = pkgs.closureInfo {
rootPaths = [
self.packages.${pkgs.system}.clan-cli
self.checks.${pkgs.system}.clan-core-for-checks
self.clanInternals.machines.${pkgs.hostPlatform.system}.test-update-machine.config.system.build.toplevel
pkgs.stdenv.drvPath
pkgs.bash.drvPath
pkgs.buildPackages.xorg.lndir
]
++ builtins.map (i: i.outPath) (builtins.attrValues self.inputs);
};
in
self.clanLib.test.containerTest {
name = "update";
nodes.machine = {
imports = [ self.nixosModules.test-update-machine ];
};
extraPythonPackages = _p: [
self.legacyPackages.${pkgs.system}.nixosTestLib
];
testScript = ''
import tempfile
import os
import subprocess
from nixos_test_lib.ssh import setup_ssh_connection # type: ignore[import-untyped]
from nixos_test_lib.nix_setup import prepare_test_flake # type: ignore[import-untyped]
start_all()
machine.wait_for_unit("multi-user.target")
# Verify initial state
machine.succeed("test -f /etc/install-successful")
machine.fail("test -f /etc/update-successful")
# Set up test environment
with tempfile.TemporaryDirectory() as temp_dir:
# Prepare test flake and Nix store
flake_dir = prepare_test_flake(
temp_dir,
"${self.checks.x86_64-linux.clan-core-for-checks}",
"${closureInfo}"
)
(flake_dir / ".clan-flake").write_text("") # Ensure .clan-flake exists
# Set up SSH connection
ssh_conn = setup_ssh_connection(
machine,
temp_dir,
"${../assets/ssh/privkey}"
)
# Update the machine configuration to add a new file
machine_config_path = os.path.join(flake_dir, "machines", "test-update-machine", "configuration.nix")
os.makedirs(os.path.dirname(machine_config_path), exist_ok=True)
# Note: update command doesn't accept -i flag, SSH key must be in ssh-agent
# Start ssh-agent and add the key
agent_output = subprocess.check_output(["${pkgs.openssh}/bin/ssh-agent", "-s"], text=True)
for line in agent_output.splitlines():
if line.startswith("SSH_AUTH_SOCK="):
os.environ["SSH_AUTH_SOCK"] = line.split("=", 1)[1].split(";")[0]
elif line.startswith("SSH_AGENT_PID="):
os.environ["SSH_AGENT_PID"] = line.split("=", 1)[1].split(";")[0]
# Add the SSH key to the agent
subprocess.run(["${pkgs.openssh}/bin/ssh-add", ssh_conn.ssh_key], check=True)
##############
print("TEST: update with --build-host localhost --target-host localhost")
with open(machine_config_path, "w") as f:
f.write("""
{
environment.etc."update-build-local-successful".text = "ok";
}
""")
# rsync the flake into the container
os.environ["PATH"] = f"{os.environ['PATH']}:${pkgs.openssh}/bin"
subprocess.run(
[
"${pkgs.rsync}/bin/rsync",
"-a",
"--delete",
"-e",
"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no",
f"{str(flake_dir)}/",
f"root@192.168.1.1:/flake",
],
check=True
)
# install the clan-cli package into the container's Nix store
subprocess.run(
[
"${pkgs.nix}/bin/nix",
"copy",
"--to",
"ssh://root@192.168.1.1",
"--no-check-sigs",
f"${self.packages.${pkgs.system}.clan-cli}",
"--extra-experimental-features", "nix-command flakes",
"--from", f"{os.environ["TMPDIR"]}/store"
],
check=True,
env={
**os.environ,
"NIX_SSHOPTS": "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no",
},
)
# Run ssh on the host to run the clan update command via --build-host localhost
subprocess.run([
"ssh",
"-o", "UserKnownHostsFile=/dev/null",
"-o", "StrictHostKeyChecking=no",
f"root@192.168.1.1",
"${self.packages.${pkgs.system}.clan-cli}/bin/clan",
"machines",
"update",
"--debug",
"--flake", "/flake",
"--host-key-check", "none",
"--upload-inputs", # Use local store instead of fetching from network
"--build-host", "localhost",
"--target-host", "localhost",
"test-update-machine",
], check=True)
# Verify the update was successful
machine.succeed("test -f /etc/update-build-local-successful")
##############
print("TEST: update with --target-host")
with open(machine_config_path, "w") as f:
f.write("""
{
environment.etc."target-host-update-successful".text = "ok";
}
""")
# Run clan update command
subprocess.run([
"${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
"machines",
"update",
"--debug",
"--flake", flake_dir,
"--host-key-check", "none",
"--upload-inputs", # Use local store instead of fetching from network
"test-update-machine",
"--target-host", f"root@192.168.1.1:{ssh_conn.host_port}",
], check=True)
# Verify the update was successful
machine.succeed("test -f /etc/target-host-update-successful")
##############
print("TEST: update with --build-host")
# Update configuration again
with open(machine_config_path, "w") as f:
f.write("""
{
environment.etc."build-host-update-successful".text = "ok";
}
""")
# Run clan update command with --build-host
subprocess.run([
"${self.packages.${pkgs.system}.clan-cli-full}/bin/clan",
"machines",
"update",
"--debug",
"--flake", flake_dir,
"--host-key-check", "none",
"--upload-inputs", # Use local store instead of fetching from network
"--build-host", f"root@192.168.1.1:{ssh_conn.host_port}",
"test-update-machine",
"--target-host", f"root@192.168.1.1:{ssh_conn.host_port}",
], check=True)
# Verify the second update was successful
machine.succeed("test -f /etc/build-host-update-successful")
'';
} { inherit pkgs self; };
};
};
}

View File

@@ -1,17 +0,0 @@
diff --git a/src/main.rs b/src/main.rs
index 8baf5924a7db..1234567890ab 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1295,6 +1295,12 @@ won't take effect until you reboot the system.
for (mountpoint, current_filesystem) in current_filesystems {
// Use current version of systemctl binary before daemon is reexeced.
+
+ // Skip filesystem comparison if x-initrd.mount is present in options
+ if current_filesystem.options.contains("x-initrd.mount") {
+ continue;
+ }
+
let unit = path_to_unit_name(&current_system_bin, &mountpoint);
if let Some(new_filesystem) = new_filesystems.get(&mountpoint) {
if current_filesystem.fs_type != new_filesystem.fs_type

View File

@@ -1,3 +0,0 @@
{
# Initial empty configuration
}

View File

@@ -4,7 +4,7 @@ description = "Statically configure borgbackup with sane defaults."
 !!! Danger "Deprecated"
     Use [borgbackup](borgbackup.md) instead.
-    Don't use borgbackup-static through [inventory](../../concepts/inventory.md).
+    Don't use borgbackup-static through [inventory](../../guides/inventory.md).
 This module implements the `borgbackup` backend and implements sane defaults
 for backup management through `borgbackup` for members of the clan.

View File

@@ -112,124 +112,125 @@ in
'';
in
lib.mkIf (cfg.targets != { }) {
environment.systemPackages = [
(pkgs.writeShellScriptBin "localbackup-create" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.rsnapshot
pkgs.coreutils
pkgs.util-linux
]
}
${lib.concatMapStringsSep "\n" (target: ''
${mountHook target}
echo "Creating backup '${target.name}'"
environment.systemPackages =
[
(pkgs.writeShellScriptBin "localbackup-create" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.rsnapshot
pkgs.coreutils
pkgs.util-linux
]
}
${lib.concatMapStringsSep "\n" (target: ''
${mountHook target}
echo "Creating backup '${target.name}'"
${lib.optionalString (target.preBackupHook != null) ''
(
${target.preBackupHook}
)
''}
${lib.optionalString (target.preBackupHook != null) ''
(
${target.preBackupHook}
)
''}
declare -A preCommandErrors
${lib.concatMapStringsSep "\n" (
state:
lib.optionalString (state.preBackupCommand != null) ''
echo "Running pre-backup command for ${state.name}"
if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
preCommandErrors["${state.name}"]=1
fi
''
) (builtins.attrValues config.clan.core.state)}
declare -A preCommandErrors
${lib.concatMapStringsSep "\n" (
state:
lib.optionalString (state.preBackupCommand != null) ''
echo "Running pre-backup command for ${state.name}"
if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
preCommandErrors["${state.name}"]=1
fi
''
) (builtins.attrValues config.clan.core.state)}
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" sync
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" snapshot
'') (builtins.attrValues cfg.targets)}'')
(pkgs.writeShellScriptBin "localbackup-list" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.jq
pkgs.findutils
pkgs.coreutils
pkgs.util-linux
]
}
(${
lib.concatMapStringsSep "\n" (target: ''
(
${mountHook target}
find ${lib.escapeShellArg target.directory} -mindepth 1 -maxdepth 1 -name "snapshot.*" -print0 -type d \
| jq -Rs 'split("\u0000") | .[] | select(. != "") | { "name": ("${target.name}::" + .)}'
)
'') (builtins.attrValues cfg.targets)
}) | jq -s .
'')
(pkgs.writeShellScriptBin "localbackup-restore" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.rsync
pkgs.coreutils
pkgs.util-linux
pkgs.gawk
]
}
if [[ "''${NAME:-}" == "" ]]; then
echo "No backup name given via NAME environment variable"
exit 1
fi
if [[ "''${FOLDERS:-}" == "" ]]; then
echo "No folders given via FOLDERS environment variable"
exit 1
fi
name=$(awk -F'::' '{print $1}' <<< $NAME)
backupname=''${NAME#$name::}
if command -v localbackup-mount-$name; then
localbackup-mount-$name
fi
if command -v localbackup-unmount-$name; then
trap "localbackup-unmount-$name" EXIT
fi
if [[ ! -d $backupname ]]; then
echo "No backup found $backupname"
exit 1
fi
IFS=':' read -ra FOLDER <<< "''$FOLDERS"
for folder in "''${FOLDER[@]}"; do
mkdir -p "$folder"
rsync -a "$backupname/${config.networking.hostName}$folder/" "$folder"
done
'')
]
++ (lib.mapAttrsToList (
name: target:
pkgs.writeShellScriptBin ("localbackup-mount-" + name) ''
set -efu -o pipefail
${lib.optionalString (target.preMountHook != null) target.preMountHook}
${lib.optionalString (target.mountpoint != null) ''
if ! ${pkgs.util-linux}/bin/mountpoint -q ${lib.escapeShellArg target.mountpoint}; then
${pkgs.util-linux}/bin/mount -o X-mount.mkdir ${lib.escapeShellArg target.mountpoint}
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" sync
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" snapshot
'') (builtins.attrValues cfg.targets)}'')
(pkgs.writeShellScriptBin "localbackup-list" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.jq
pkgs.findutils
pkgs.coreutils
pkgs.util-linux
]
}
(${
lib.concatMapStringsSep "\n" (target: ''
(
${mountHook target}
find ${lib.escapeShellArg target.directory} -mindepth 1 -maxdepth 1 -name "snapshot.*" -print0 -type d \
| jq -Rs 'split("\u0000") | .[] | select(. != "") | { "name": ("${target.name}::" + .)}'
)
'') (builtins.attrValues cfg.targets)
}) | jq -s .
'')
(pkgs.writeShellScriptBin "localbackup-restore" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.rsync
pkgs.coreutils
pkgs.util-linux
pkgs.gawk
]
}
if [[ "''${NAME:-}" == "" ]]; then
echo "No backup name given via NAME environment variable"
exit 1
fi
''}
${lib.optionalString (target.postMountHook != null) target.postMountHook}
''
) cfg.targets)
++ lib.mapAttrsToList (
name: target:
pkgs.writeShellScriptBin ("localbackup-unmount-" + name) ''
set -efu -o pipefail
${lib.optionalString (target.preUnmountHook != null) target.preUnmountHook}
${lib.optionalString (
target.mountpoint != null
) "${pkgs.util-linux}/bin/umount ${lib.escapeShellArg target.mountpoint}"}
${lib.optionalString (target.postUnmountHook != null) target.postUnmountHook}
''
) cfg.targets;
if [[ "''${FOLDERS:-}" == "" ]]; then
echo "No folders given via FOLDERS environment variable"
exit 1
fi
name=$(awk -F'::' '{print $1}' <<< $NAME)
backupname=''${NAME#$name::}
if command -v localbackup-mount-$name; then
localbackup-mount-$name
fi
if command -v localbackup-unmount-$name; then
trap "localbackup-unmount-$name" EXIT
fi
if [[ ! -d $backupname ]]; then
echo "No backup found $backupname"
exit 1
fi
IFS=':' read -ra FOLDER <<< "''$FOLDERS"
for folder in "''${FOLDER[@]}"; do
mkdir -p "$folder"
rsync -a "$backupname/${config.networking.hostName}$folder/" "$folder"
done
'')
]
++ (lib.mapAttrsToList (
name: target:
pkgs.writeShellScriptBin ("localbackup-mount-" + name) ''
set -efu -o pipefail
${lib.optionalString (target.preMountHook != null) target.preMountHook}
${lib.optionalString (target.mountpoint != null) ''
if ! ${pkgs.util-linux}/bin/mountpoint -q ${lib.escapeShellArg target.mountpoint}; then
${pkgs.util-linux}/bin/mount -o X-mount.mkdir ${lib.escapeShellArg target.mountpoint}
fi
''}
${lib.optionalString (target.postMountHook != null) target.postMountHook}
''
) cfg.targets)
++ lib.mapAttrsToList (
name: target:
pkgs.writeShellScriptBin ("localbackup-unmount-" + name) ''
set -efu -o pipefail
${lib.optionalString (target.preUnmountHook != null) target.preUnmountHook}
${lib.optionalString (
target.mountpoint != null
) "${pkgs.util-linux}/bin/umount ${lib.escapeShellArg target.mountpoint}"}
${lib.optionalString (target.postUnmountHook != null) target.postUnmountHook}
''
) cfg.targets;
clan.core.backups.providers.localbackup = {
# TODO list needs to run locally or on the remote machine

View File

@@ -61,6 +61,7 @@ in
   };
 };
 imports = [
+  ../postgresql
   (lib.mkRemovedOptionModule [
     "clan"
     "matrix-synapse"
@@ -105,56 +106,57 @@
   };
 };
-clan.core.postgresql.enable = true;
-clan.core.postgresql.users.matrix-synapse = { };
-clan.core.postgresql.databases.matrix-synapse.create.options = {
+clan.postgresql.users.matrix-synapse = { };
+clan.postgresql.databases.matrix-synapse.create.options = {
   TEMPLATE = "template0";
   LC_COLLATE = "C";
   LC_CTYPE = "C";
   ENCODING = "UTF8";
   OWNER = "matrix-synapse";
 };
-clan.core.postgresql.databases.matrix-synapse.restore.stopOnRestore = [ "matrix-synapse" ];
+clan.postgresql.databases.matrix-synapse.restore.stopOnRestore = [ "matrix-synapse" ];
clan.core.vars.generators = {
"matrix-synapse" = {
files."synapse-registration_shared_secret" = { };
runtimeInputs = with pkgs; [
coreutils
pwgen
];
migrateFact = "matrix-synapse";
script = ''
echo -n "$(pwgen -s 32 1)" > "$out"/synapse-registration_shared_secret
'';
};
}
// lib.mapAttrs' (
name: user:
lib.nameValuePair "matrix-password-${user.name}" {
files."matrix-password-${user.name}" = { };
migrateFact = "matrix-password-${user.name}";
runtimeInputs = with pkgs; [ xkcdpass ];
script = ''
xkcdpass -n 4 -d - > "$out"/${lib.escapeShellArg "matrix-password-${user.name}"}
'';
clan.core.vars.generators =
{
"matrix-synapse" = {
files."synapse-registration_shared_secret" = { };
runtimeInputs = with pkgs; [
coreutils
pwgen
];
migrateFact = "matrix-synapse";
script = ''
echo -n "$(pwgen -s 32 1)" > "$out"/synapse-registration_shared_secret
'';
};
}
) cfg.users;
// lib.mapAttrs' (
name: user:
lib.nameValuePair "matrix-password-${user.name}" {
files."matrix-password-${user.name}" = { };
migrateFact = "matrix-password-${user.name}";
runtimeInputs = with pkgs; [ xkcdpass ];
script = ''
xkcdpass -n 4 -d - > "$out"/${lib.escapeShellArg "matrix-password-${user.name}"}
'';
}
) cfg.users;
systemd.services.matrix-synapse =
let
usersScript = ''
while ! ${pkgs.netcat}/bin/nc -z -v ::1 8008; do
if ! kill -0 "$MAINPID"; then exit 1; fi
sleep 1;
done
''
+ lib.concatMapStringsSep "\n" (user: ''
# only create user if it doesn't exist
/run/current-system/sw/bin/matrix-synapse-register_new_matrix_user --exists-ok --password-file ${
config.clan.core.vars.generators."matrix-password-${user.name}".files."matrix-password-${user.name}".path
} --user "${user.name}" ${if user.admin then "--admin" else "--no-admin"}
'') (lib.attrValues cfg.users);
usersScript =
''
while ! ${pkgs.netcat}/bin/nc -z -v ::1 8008; do
if ! kill -0 "$MAINPID"; then exit 1; fi
sleep 1;
done
''
+ lib.concatMapStringsSep "\n" (user: ''
# only create user if it doesn't exist
/run/current-system/sw/bin/matrix-synapse-register_new_matrix_user --exists-ok --password-file ${
config.clan.core.vars.generators."matrix-password-${user.name}".files."matrix-password-${user.name}".path
} --user "${user.name}" ${if user.admin then "--admin" else "--no-admin"}
'') (lib.attrValues cfg.users);
in
{
path = [ pkgs.curl ];

View File

@@ -38,6 +38,7 @@
 recommendedOptimisation = lib.mkDefault true;
 recommendedProxySettings = lib.mkDefault true;
 recommendedTlsSettings = lib.mkDefault true;
+recommendedZstdSettings = lib.mkDefault true;
 # Nginx sends all the access logs to /var/log/nginx/access.log by default.
 # instead of going to the journal!

View File

@@ -1,9 +1,224 @@
-{ lib, ... }:
-{
-  imports = [
-    (lib.mkRemovedOptionModule [
-      "clan"
-      "postgresql"
-    ] "The postgresql module has been migrated to a clan core option. Use clan.core.postgresql instead")
-  ];
-}
{
  pkgs,
  lib,
  config,
  ...
}:
let
createDatabaseState =
db:
let
folder = "/var/backup/postgres/${db.name}";
current = "${folder}/pg-dump";
compression = lib.optionalString (lib.versionAtLeast config.services.postgresql.package.version "16") "--compress=zstd";
in
{
folders = [ folder ];
preBackupScript = ''
export PATH=${
lib.makeBinPath [
config.services.postgresql.package
config.systemd.package
pkgs.coreutils
pkgs.util-linux
pkgs.zstd
]
}
while [[ "$(systemctl is-active postgresql)" == activating ]]; do
sleep 1
done
mkdir -p "${folder}"
runuser -u postgres -- pg_dump ${compression} --dbname=${db.name} -Fc -c > "${current}.tmp"
mv "${current}.tmp" ${current}
'';
postRestoreScript = ''
export PATH=${
lib.makeBinPath [
config.services.postgresql.package
config.systemd.package
pkgs.coreutils
pkgs.util-linux
pkgs.zstd
pkgs.gnugrep
]
}
while [[ "$(systemctl is-active postgresql)" == activating ]]; do
sleep 1
done
echo "Waiting for postgres to be ready..."
while ! runuser -u postgres -- psql --port=${builtins.toString config.services.postgresql.settings.port} -d postgres -c "" ; do
if ! systemctl is-active postgresql; then exit 1; fi
sleep 0.1
done
if [[ -e "${current}" ]]; then
(
systemctl stop ${lib.concatStringsSep " " db.restore.stopOnRestore}
trap "systemctl start ${lib.concatStringsSep " " db.restore.stopOnRestore}" EXIT
mkdir -p "${folder}"
if runuser -u postgres -- psql -d postgres -c "SELECT 1 FROM pg_database WHERE datname = '${db.name}'" | grep -q 1; then
runuser -u postgres -- dropdb "${db.name}"
fi
runuser -u postgres -- pg_restore -C -d postgres "${current}"
)
else
echo No database backup found, skipping restore
fi
'';
};
createDatabase = db: ''
CREATE DATABASE "${db.name}" ${
lib.concatStringsSep " " (
lib.mapAttrsToList (name: value: "${name} = '${value}'") db.create.options
)
}
'';
cfg = config.clan.postgresql;
userClauses = lib.mapAttrsToList (
_: user:
''$PSQL -tAc "SELECT 1 FROM pg_roles WHERE rolname='${user.name}'" | grep -q 1 || $PSQL -tAc 'CREATE USER "${user.name}"' ''
) cfg.users;
databaseClauses = lib.mapAttrsToList (
name: db:
lib.optionalString db.create.enable ''$PSQL -d postgres -c "SELECT 1 FROM pg_database WHERE datname = '${name}'" | grep -q 1 || $PSQL -d postgres -c ${lib.escapeShellArg (createDatabase db)} ''
) cfg.databases;
in
{
options.clan.postgresql = {
# we are reimplementing the ensureDatabase and ensureUser options here to allow creating databases with options
databases = lib.mkOption {
description = "Databases to create";
default = { };
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
name = lib.mkOption {
type = lib.types.str;
default = name;
description = "Database name.";
};
service = lib.mkOption {
type = lib.types.str;
default = name;
description = "Service name that we associate with the database.";
};
# set to false, in case the upstream module uses ensureDatabase option
create.enable = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Create the database if it does not exist.";
};
create.options = lib.mkOption {
description = "Options to pass to the CREATE DATABASE command.";
type = lib.types.lazyAttrsOf lib.types.str;
default = { };
example = {
TEMPLATE = "template0";
LC_COLLATE = "C";
LC_CTYPE = "C";
ENCODING = "UTF8";
OWNER = "foo";
};
};
restore.stopOnRestore = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
description = "List of systemd services to stop before restoring the database.";
};
};
}
)
);
};
users = lib.mkOption {
description = "Users to create";
default = { };
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options.name = lib.mkOption {
description = "User name";
type = lib.types.str;
default = name;
};
}
)
);
};
};
config = {
services.postgresql.settings = {
wal_level = "replica";
max_wal_senders = 3;
};
services.postgresql.enable = true;
# We duplicate a bit of the upstream module, but this allows creating databases with options
systemd.services.postgresql.postStart = ''
PSQL="psql --port=${builtins.toString config.services.postgresql.settings.port}"
while ! $PSQL -d postgres -c "" 2> /dev/null; do
if ! kill -0 "$MAINPID"; then exit 1; fi
sleep 0.1
done
${lib.concatStringsSep "\n" userClauses}
${lib.concatStringsSep "\n" databaseClauses}
'';
clan.core.state = lib.mapAttrs' (
_: db: lib.nameValuePair db.service (createDatabaseState db)
) config.clan.postgresql.databases;
environment.systemPackages = builtins.map (
db:
let
folder = "/var/backup/postgres/${db.name}";
current = "${folder}/pg-dump";
in
pkgs.writeShellScriptBin "postgres-db-restore-command-${db.name}" ''
export PATH=${
lib.makeBinPath [
config.services.postgresql.package
config.systemd.package
pkgs.coreutils
pkgs.util-linux
pkgs.zstd
pkgs.gnugrep
]
}
while [[ "$(systemctl is-active postgresql)" == activating ]]; do
sleep 1
done
echo "Waiting for postgres to be ready..."
while ! runuser -u postgres -- psql --port=${builtins.toString config.services.postgresql.settings.port} -d postgres -c "" ; do
if ! systemctl is-active postgresql; then exit 1; fi
sleep 0.1
done
if [[ -e "${current}" ]]; then
(
${lib.optionalString (db.restore.stopOnRestore != [ ]) ''
systemctl stop ${builtins.toString db.restore.stopOnRestore}
trap "systemctl start ${builtins.toString db.restore.stopOnRestore}" EXIT
''}
mkdir -p "${folder}"
if runuser -u postgres -- psql -d postgres -c "SELECT 1 FROM pg_database WHERE datname = '${db.name}'" | grep -q 1; then
runuser -u postgres -- dropdb "${db.name}"
fi
runuser -u postgres -- pg_restore -C -d postgres "${current}"
)
else
echo No database backup found, skipping restore
fi
''
) (builtins.attrValues config.clan.postgresql.databases);
};
}
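For reference, a minimal usage sketch of the `clan.postgresql` options this module defines, in the same shape the matrix-synapse hunk above and the vaultwarden hunk further down consume them; the `myapp` names are illustrative, not from the diff:

```nix
{
  imports = [ ./postgresql ]; # relative path, as in the service modules shown here
  clan.postgresql.users.myapp = { };
  clan.postgresql.databases.myapp = {
    create.options = {
      ENCODING = "UTF8";
      OWNER = "myapp";
    };
    # Units listed here are stopped before a restore and restarted afterwards.
    restore.stopOnRestore = [ "myapp" ];
  };
}
```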

View File

@@ -12,7 +12,7 @@ After the system was installed/deployed the following command can be used to dis
 clan vars get [machine_name] root-password/root-password
 ```
-See also: [Vars](../../concepts/generators.md)
+See also: [Vars](../../guides/vars-backend.md)
 To regenerate the password run:
 ```

View File

@@ -18,12 +18,13 @@
     config.clan.core.vars.generators.root-password.files.password-hash.path;
   clan.core.vars.generators.root-password = {
-    files.password-hash = {
-      neededFor = "users";
-    }
-    // (lib.optionalAttrs (_class == "nixos") {
-      restartUnits = lib.optional (config.services.userborn.enable) "userborn.service";
-    });
+    files.password-hash =
+      {
+        neededFor = "users";
+      }
+      // (lib.optionalAttrs (_class == "nixos") {
+        restartUnits = lib.optional (config.services.userborn.enable) "userborn.service";
+      });
     files.password = {
       deploy = false;
     };

View File

@@ -32,16 +32,17 @@ in
   cfg.certificate.searchDomains != [ ]
 ) config.clan.core.vars.generators.openssh-cert.files."ssh.id_ed25519-cert.pub".path;
-hostKeys = [
-  {
-    path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
-    type = "ed25519";
-  }
-]
-++ lib.optional cfg.hostKeys.rsa.enable {
-  path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
-  type = "rsa";
-};
+hostKeys =
+  [
+    {
+      path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
+      type = "ed25519";
+    }
+  ]
+  ++ lib.optional cfg.hostKeys.rsa.enable {
+    path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
+    type = "rsa";
+  };
 };
 clan.core.vars.generators.openssh = {
@@ -61,8 +62,7 @@ in
 hostNames = [
   "localhost"
   config.networking.hostName
-]
-++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
+] ++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
 publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
 };

View File

@@ -1,27 +1,3 @@
 ---
-description = "DEPRECATED: Statically configure syncthing peers through clan"
+description = "Statically configure syncthing peers through clan"
 ---
-
-# ⚠️ DEPRECATED
-
-This module has been migrated to the new clanServices system.
-Please use the new syncthing service instead:
-
-```nix
-{
-  services.syncthing = {
-    instances.default = {
-      roles.peer.machines = {
-        machine1 = { };
-        machine2 = { };
-        machine3 = {
-          excludeMachines = [ "machine4" ];
-        };
-      };
-    };
-  };
-}
-```
-
-The new service provides the same functionality with better integration into clan's inventory system.

View File

@@ -4,8 +4,6 @@
   pkgs,
   ...
 }:
-# DEPRECATED: This module has been migrated to clanServices/syncthing
-# Please use the syncthing service instead: services.syncthing.instances.default.roles.peer.machines = { ... };
 let
   dir = config.clan.core.settings.directory;
   machineVarDir = "${dir}/vars/per-machine/";
@@ -34,15 +32,14 @@ let
 value = {
   name = machine;
   id = (lib.removeSuffix "\n" (builtins.readFile (syncthingPublicKeyPath machine)));
-  addresses = [
-    "dynamic"
-  ]
-  ++ (
-    if (lib.elem machine networkIpMachines) then
-      [ "tcp://[${(lib.removeSuffix "\n" (builtins.readFile (zerotierIpMachinePath machine)))}]:22000" ]
-    else
-      [ ]
-  );
+  addresses =
+    [ "dynamic" ]
+    ++ (
+      if (lib.elem machine networkIpMachines) then
+        [ "tcp://[${(lib.removeSuffix "\n" (builtins.readFile (zerotierIpMachinePath machine)))}]:22000" ]
+      else
+        [ ]
+    );
 };
 }) syncthingPublicKeyMachines;
 in

View File

@@ -16,7 +16,7 @@ After the system was installed/deployed the following command can be used to dis
 clan vars get [machine_name] root-password/root-password
 ```
-See also: [Vars](../../concepts/generators.md)
+See also: [Vars](../../guides/vars-backend.md)
 To regenerate the password run:
 ```

View File

@@ -10,6 +10,7 @@ in
 {
   imports = [
+    ../postgresql
     (lib.mkRemovedOptionModule [
       "clan"
       "vaultwarden"
@@ -56,17 +57,15 @@
 config = {
-  clan.core.postgresql.enable = true;
-  clan.core.postgresql.users.vaultwarden = { };
-  clan.core.postgresql.databases.vaultwarden.create.options = {
+  clan.postgresql.users.vaultwarden = { };
+  clan.postgresql.databases.vaultwarden.create.options = {
     TEMPLATE = "template0";
     LC_COLLATE = "C";
     LC_CTYPE = "C";
     ENCODING = "UTF8";
     OWNER = "vaultwarden";
   };
-  clan.core.postgresql.databases.vaultwarden.restore.stopOnRestore = [ "vaultwarden" ];
+  clan.postgresql.databases.vaultwarden.restore.stopOnRestore = [ "vaultwarden" ];
   services.nginx = {
     enable = true;

View File

@@ -41,13 +41,25 @@
         };
       };
     };
   };
-  # We don't have a good way to specify dependencies between
-  # clanServices for now. When it gets implemented, we should just
-  # use the ssh and users modules here.
-  imports = [
-    ./ssh.nix
-    ./root-password.nix
-  ];
+  perInstance =
+    { settings, ... }:
+    {
+      nixosModule =
+        { ... }:
+        {
+          imports = [
+            # We don't have a good way to specify dependencies between
+            # clanServices for now. When it gets implemented, we should just
+            # use the ssh and users modules here.
+            ./ssh.nix
+            ./root-password.nix
+          ];
+          _module.args = { inherit settings; };
+          users.users.root.openssh.authorizedKeys.keys = builtins.attrValues settings.allowedKeys;
+        };
+    };
 };
 }

View File

@@ -1,55 +1,39 @@
 # We don't have a way of specifying dependencies between clanServices for now.
 # When it gets added this file should be removed and the users module used instead.
-{
-  roles.default.perInstance =
-    { ... }:
-    {
-      nixosModule =
-        {
-          config,
-          pkgs,
-          ...
-        }:
-        {
-          users.mutableUsers = false;
-          users.users.root.hashedPasswordFile =
-            config.clan.core.vars.generators.root-password.files.password-hash.path;
-          clan.core.vars.generators.root-password = {
-            files.password-hash.neededFor = "users";
-            files.password.deploy = false;
-            runtimeInputs = [
-              pkgs.coreutils
-              pkgs.mkpasswd
-              pkgs.xkcdpass
-            ];
-            prompts.password.display = {
-              group = "Root User";
-              label = "Password";
-              required = false;
-              helperText = ''
-                Your password will be encrypted and stored securely using the secret store you've configured.
-              '';
-            };
-            prompts.password.type = "hidden";
-            prompts.password.persist = true;
-            prompts.password.description = "You can autogenerate a password, if you leave this prompt blank.";
-            script = ''
-              prompt_value="$(cat "$prompts"/password)"
-              if [[ -n "''${prompt_value-}" ]]; then
-                echo "$prompt_value" | tr -d "\n" > "$out"/password
-              else
-                xkcdpass --numwords 5 --delimiter - --count 1 | tr -d "\n" > "$out"/password
-              fi
-              mkpasswd -s -m sha-512 < "$out"/password | tr -d "\n" > "$out"/password-hash
-            '';
-          };
-        };
-    };
+{
+  config,
+  pkgs,
+  ...
+}:
+{
+  users.mutableUsers = false;
+  users.users.root.hashedPasswordFile =
+    config.clan.core.vars.generators.root-password.files.password-hash.path;
+  clan.core.vars.generators.root-password = {
+    files.password-hash.neededFor = "users";
+    files.password.deploy = false;
+    runtimeInputs = [
+      pkgs.coreutils
+      pkgs.mkpasswd
+      pkgs.xkcdpass
+    ];
+    prompts.password.type = "hidden";
+    prompts.password.persist = true;
+    prompts.password.description = "Leave empty to generate automatically";
+    script = ''
+      prompt_value="$(cat "$prompts"/password)"
+      if [[ -n "''${prompt_value-}" ]]; then
+        echo "$prompt_value" | tr -d "\n" > "$out"/password
+      else
+        xkcdpass --numwords 5 --delimiter - --count 1 | tr -d "\n" > "$out"/password
+      fi
+      mkpasswd -s -m sha-512 < "$out"/password | tr -d "\n" > "$out"/password-hash
+    '';
+  };
 }

View File

@@ -1,124 +1,115 @@
{
roles.default.perInstance =
{ settings, ... }:
{
nixosModule =
config,
pkgs,
lib,
settings,
...
}:
let
stringSet = list: builtins.attrNames (builtins.groupBy lib.id list);
domains = stringSet settings.certificateSearchDomains;
in
{
services.openssh = {
enable = true;
settings.PasswordAuthentication = false;
settings.HostCertificate = lib.mkIf (
settings.certificateSearchDomains != [ ]
) config.clan.core.vars.generators.openssh-cert.files."ssh.id_ed25519-cert.pub".path;
hostKeys =
[
{
config,
pkgs,
lib,
...
}:
let
stringSet = list: builtins.attrNames (builtins.groupBy lib.id list);
path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
type = "ed25519";
}
]
++ lib.optional settings.rsaHostKey.enable {
path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
type = "rsa";
};
};
domains = stringSet settings.certificateSearchDomains;
clan.core.vars.generators.openssh = {
files."ssh.id_ed25519" = { };
files."ssh.id_ed25519.pub".secret = false;
migrateFact = "openssh";
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/ssh.id_ed25519
'';
};
in
{
programs.ssh.knownHosts.clan-sshd-self-ed25519 = {
hostNames = [
"localhost"
config.networking.hostName
] ++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
};
users.users.root.openssh.authorizedKeys.keys = builtins.attrValues settings.allowedKeys;
clan.core.vars.generators.openssh-rsa = lib.mkIf settings.rsaHostKey.enable {
files."ssh.id_rsa" = { };
files."ssh.id_rsa.pub".secret = false;
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
];
script = ''
ssh-keygen -t rsa -b 4096 -N "" -C "" -f "$out"/ssh.id_rsa
'';
};
services.openssh = {
enable = true;
settings.PasswordAuthentication = false;
settings.HostCertificate = lib.mkIf (
settings.certificateSearchDomains != [ ]
) config.clan.core.vars.generators.openssh-cert.files."ssh.id_ed25519-cert.pub".path;
hostKeys = [
{
path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
type = "ed25519";
}
]
++ lib.optional settings.rsaHostKey.enable {
path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
type = "rsa";
};
};
clan.core.vars.generators.openssh = {
files."ssh.id_ed25519" = { };
files."ssh.id_ed25519.pub".secret = false;
migrateFact = "openssh";
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/ssh.id_ed25519
'';
};
programs.ssh.knownHosts.clan-sshd-self-ed25519 = {
hostNames = [
"localhost"
config.networking.hostName
]
++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
};
clan.core.vars.generators.openssh-rsa = lib.mkIf settings.rsaHostKey.enable {
files."ssh.id_rsa" = { };
files."ssh.id_rsa.pub".secret = false;
runtimeInputs = [
pkgs.coreutils
pkgs.openssh
];
script = ''
ssh-keygen -t rsa -b 4096 -N "" -C "" -f "$out"/ssh.id_rsa
'';
};
clan.core.vars.generators.openssh-cert = lib.mkIf (settings.certificateSearchDomains != [ ]) {
files."ssh.id_ed25519-cert.pub".secret = false;
dependencies = [
"openssh"
"openssh-ca"
];
validation = {
name = config.clan.core.settings.machine.name;
domains = lib.genAttrs settings.certificateSearchDomains lib.id;
};
runtimeInputs = [
pkgs.openssh
pkgs.jq
];
script = ''
ssh-keygen \
-s $in/openssh-ca/id_ed25519 \
-I ${config.clan.core.settings.machine.name} \
-h \
-n ${lib.concatMapStringsSep "," (d: "${config.clan.core.settings.machine.name}.${d}") domains} \
$in/openssh/ssh.id_ed25519.pub
mv $in/openssh/ssh.id_ed25519-cert.pub "$out"/ssh.id_ed25519-cert.pub
'';
};
clan.core.vars.generators.openssh-ca = lib.mkIf (settings.certificateSearchDomains != [ ]) {
share = true;
files.id_ed25519.deploy = false;
files."id_ed25519.pub" = {
deploy = false;
secret = false;
};
runtimeInputs = [
pkgs.openssh
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/id_ed25519
'';
};
programs.ssh.knownHosts.ssh-ca = lib.mkIf (settings.certificateSearchDomains != [ ]) {
certAuthority = true;
extraHostNames = builtins.map (domain: "*.${domain}") settings.certificateSearchDomains;
publicKey = config.clan.core.vars.generators.openssh-ca.files."id_ed25519.pub".value;
};
};
clan.core.vars.generators.openssh-cert = lib.mkIf (settings.certificateSearchDomains != [ ]) {
files."ssh.id_ed25519-cert.pub".secret = false;
dependencies = [
"openssh"
"openssh-ca"
];
validation = {
name = config.clan.core.settings.machine.name;
domains = lib.genAttrs settings.certificateSearchDomains lib.id;
};
runtimeInputs = [
pkgs.openssh
pkgs.jq
];
script = ''
ssh-keygen \
-s $in/openssh-ca/id_ed25519 \
-I ${config.clan.core.settings.machine.name} \
-h \
-n ${lib.concatMapStringsSep "," (d: "${config.clan.core.settings.machine.name}.${d}") domains} \
$in/openssh/ssh.id_ed25519.pub
mv $in/openssh/ssh.id_ed25519-cert.pub "$out"/ssh.id_ed25519-cert.pub
'';
};
clan.core.vars.generators.openssh-ca = lib.mkIf (settings.certificateSearchDomains != [ ]) {
share = true;
files.id_ed25519.deploy = false;
files."id_ed25519.pub" = {
deploy = false;
secret = false;
};
runtimeInputs = [
pkgs.openssh
];
script = ''
ssh-keygen -t ed25519 -N "" -C "" -f "$out"/id_ed25519
'';
};
programs.ssh.knownHosts.ssh-ca = lib.mkIf (settings.certificateSearchDomains != [ ]) {
certAuthority = true;
extraHostNames = builtins.map (domain: "*.${domain}") settings.certificateSearchDomains;
publicKey = config.clan.core.vars.generators.openssh-ca.files."id_ed25519.pub".value;
};
}

View File

@@ -1,59 +1,9 @@
## Usage
BorgBackup (short: Borg) gives you:
```nix
inventory.instances = {
borgbackup = {
module = {
name = "borgbackup";
input = "clan";
};
roles.client.machines."jon".settings = {
destinations."storagebox" = {
repo = "username@$hostname:/./borgbackup";
rsh = ''ssh -oPort=23 -i /run/secrets/vars/borgbackup/borgbackup.ssh'';
};
};
roles.server.machines = { };
};
};
```
The input should be named according to your flake input. Jon is configured as a
client machine with a destination pointing to a Hetzner Storage Box.
## Overview
This guide explains how to set up and manage
[BorgBackup](https://borgbackup.readthedocs.io/) for secure, efficient backups
in a clan network. BorgBackup provides:
- Space efficient storage of backups with deduplication
- Secure, authenticated encryption
- Compression: lz4, zstd, zlib, lzma or none
- Mountable backups with FUSE
- Space efficient storage of backups.
- Secure, authenticated encryption.
- Compression: lz4, zstd, zlib, lzma or none.
- Mountable backups with FUSE.
- Easy installation on multiple platforms: Linux, macOS, BSD, …
- Free software (BSD license).
- Backed by a large and active open-source community.
## Roles
### 1. Client
Clients are machines that create and send backups to various destinations. Each
client can have multiple backup destinations configured.
### 2. Server
Servers act as backup repositories, receiving and storing backups from client
machines. They can be dedicated backup servers within your clan network.
## Backup destinations
This service allows you to perform backups to multiple `destinations`.
Destinations can be:
- **Local**: Local disk storage
- **Server**: Your own borgbackup server (using the `server` role)
- **Third-party services**: Such as Hetzner's Storage Box
For a more comprehensive guide on backups, look into the guide section.
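As a sketch of how multiple destinations combine, here is a client with both a local disk and a clan-internal server (machine names, paths and the repo URL are illustrative, following the settings schema from the usage example above):
```nix
roles.client.machines."jon".settings = {
  destinations = {
    # Local disk attached to the machine (path is illustrative)
    "external-disk".repo = "/mnt/backup/borg";
    # A clan machine assigned the `server` role (URL is illustrative)
    "backup-host".repo = "borg@backup-host.example.com:/var/lib/borgbackup/jon";
  };
};
```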

View File

@@ -0,0 +1,29 @@
{
lib,
config,
settings,
...
}:
{
services.data-mesher.initNetwork =
let
# for a given machine, read its public key and remove any new lines
readHostKey =
machine:
let
path = "${config.clan.core.settings.directory}/vars/per-machine/${machine}/data-mesher-host-key/public_key/value";
in
builtins.elemAt (lib.splitString "\n" (builtins.readFile path)) 1;
in
{
enable = true;
keyPath = config.clan.core.vars.generators.data-mesher-network-key.files.private_key.path;
tld = settings.network.tld;
hostTTL = settings.network.hostTTL;
# admin and signer host public keys
signingKeys = builtins.map readHostKey (builtins.attrNames settings.bootstrapNodes);
};
}

View File

@@ -5,15 +5,31 @@ let
{
options = {
bootstrapNodes = lib.mkOption {
type = lib.types.nullOr (lib.types.listOf lib.types.str);
type = lib.types.nullOr (lib.types.attrsOf lib.types.str);
# the default bootstrap nodes are any machines with the admin or signer roles
# we iterate through those machines, determining an IP address for them based on their VPN
# currently only supports zerotier
# default = builtins.foldl' (
# urls: name:
# let
# ipPath = "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value";
# in
# if builtins.pathExists ipPath then
# let
# ip = builtins.readFile ipPath;
# in
# urls ++ [ "[${ip}]:${builtins.toString settings.network.port}" ]
# else
# urls
# ) [ ] (dmLib.machines config).bootstrap;
description = ''
A list of bootstrap nodes that act as an initial gateway when joining
the cluster.
'';
example = [
"192.168.1.1:7946"
"192.168.1.2:7946"
];
example = {
"node1" = "192.168.1.1:7946";
"node2" = "192.168.1.2:7946";
};
};
network = {
@@ -39,59 +55,6 @@ let
};
};
};
mkBootstrapNodes =
{
config,
lib,
roles,
settings,
}:
lib.mkDefault (
builtins.foldl' (
urls: name:
let
ipPath = "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value";
in
if builtins.pathExists ipPath then
let
ip = builtins.readFile ipPath;
in
urls ++ [ "[${ip}]:${builtins.toString settings.network.port}" ]
else
urls
) [ ] (builtins.attrNames ((roles.admin.machines or { }) // (roles.signer.machines or { })))
);
mkDmService = dmSettings: config: {
enable = true;
openFirewall = true;
settings = {
log_level = "warn";
state_dir = "/var/lib/data-mesher";
# read network id from vars
network.id = config.clan.core.vars.generators.data-mesher-network-key.files.public_key.value;
host = {
names = [ config.networking.hostName ];
key_path = config.clan.core.vars.generators.data-mesher-host-key.files.private_key.path;
};
cluster = {
port = dmSettings.network.port;
join_interval = "30s";
push_pull_interval = "30s";
interface = dmSettings.network.interface;
bootstrap_nodes = dmSettings.bootstrapNodes;
};
http.port = 7331;
http.interface = "lo";
};
};
in
{
_class = "clan.service";
@@ -104,9 +67,11 @@ in
interface =
{ lib, ... }:
{
imports = [ sharedInterface ];
options = {
network = {
tld = lib.mkOption {
type = lib.types.str;
@@ -124,117 +89,54 @@ in
};
};
perInstance =
{ settings, roles, ... }:
{
extendSettings,
roles,
lib,
...
}:
{
nixosModule =
{ config, ... }:
let
settings = extendSettings {
bootstrapNodes = mkBootstrapNodes {
inherit
config
lib
roles
settings
;
};
};
in
{
imports = [ ./shared.nix ];
services.data-mesher = (mkDmService settings config) // {
initNetwork =
let
# for a given machine, read its public key and remove any new lines
readHostKey =
machine:
let
path = "${config.clan.core.settings.directory}/vars/per-machine/${machine}/data-mesher-host-key/public_key/value";
in
builtins.elemAt (lib.splitString "\n" (builtins.readFile path)) 1;
in
{
enable = true;
keyPath = config.clan.core.vars.generators.data-mesher-network-key.files.private_key.path;
tld = settings.network.tld;
hostTTL = settings.network.hostTTL;
# admin and signer host public keys
signingKeys = builtins.map readHostKey (
builtins.attrNames ((roles.admin.machines or { }) // (roles.signer.machines or { }))
);
};
};
};
nixosModule = {
imports = [
./admin.nix
./shared.nix
];
_module.args = { inherit settings roles; };
};
};
};
roles.signer = {
interface = sharedInterface;
interface =
{ ... }:
{
imports = [ sharedInterface ];
};
perInstance =
{ settings, roles, ... }:
{
extendSettings,
lib,
roles,
...
}:
{
nixosModule =
{ config, ... }:
let
settings = extendSettings {
bootstrapNodes = mkBootstrapNodes {
inherit
config
lib
roles
settings
;
};
};
in
{
imports = [ ./shared.nix ];
services.data-mesher = (mkDmService settings config);
};
nixosModule = {
imports = [
./signer.nix
./shared.nix
];
_module.args = { inherit settings roles; };
};
};
};
roles.peer = {
interface = sharedInterface;
interface =
{ ... }:
{
imports = [ sharedInterface ];
};
perInstance =
{ settings, roles, ... }:
{
extendSettings,
lib,
roles,
...
}:
{
nixosModule =
{ config, ... }:
let
settings = extendSettings {
bootstrapNodes = mkBootstrapNodes {
inherit
config
lib
roles
settings
;
};
};
in
{
imports = [ ./shared.nix ];
services.data-mesher = (mkDmService settings config);
};
nixosModule = {
imports = [
./peer.nix
./shared.nix
];
_module.args = { inherit settings roles; };
};
};
};
}

View File

@@ -1,9 +1,39 @@
{
config,
settings,
...
}:
{
services.data-mesher = {
enable = true;
openFirewall = true;
settings = {
log_level = "warn";
state_dir = "/var/lib/data-mesher";
# read network id from vars
network.id = config.clan.core.vars.generators.data-mesher-network-key.files.public_key.value;
host = {
names = [ config.networking.hostName ];
key_path = config.clan.core.vars.generators.data-mesher-host-key.files.private_key.path;
};
cluster = {
port = settings.network.port;
join_interval = "30s";
push_pull_interval = "30s";
interface = settings.network.interface;
bootstrap_nodes = (builtins.attrValues settings.bootstrapNodes);
};
http.port = 7331;
http.interface = "lo";
};
};
# Generate host key.
clan.core.vars.generators.data-mesher-host-key = {
files =

View File

@@ -16,11 +16,11 @@
instances = {
data-mesher =
let
bootstrapNodes = [
"[2001:db8:1::1]:7946" # admin
"[2001:db8:1::2]:7946" # peer
# "2001:db8:1::3:7946" #signer
];
bootstrapNodes = {
admin = "[2001:db8:1::1]:7946";
peer = "[2001:db8:1::2]:7946";
# signer = "2001:db8:1::3:7946";
};
in
{
roles.peer.machines.peer.settings = {

View File

@@ -1,86 +0,0 @@
A Dynamic-DNS (DDNS) service continuously keeps one or more DNS records in sync with the current public IP address of your machine.
In *clan* this service is backed by [qdm12/ddns-updater](https://github.com/qdm12/ddns-updater).
> Info
> ddns-updater itself is **heavily opinionated and version-specific**. Whenever you need the exhaustive list of flags or
> provider-specific fields, refer to its *versioned* documentation, **not** the GitHub README.
---
# 1. Configuration model
Internally ddns-updater consumes a single file named `config.json`.
A minimal configuration for the registrar *Namecheap* looks like:
```json
{
"settings": [
{
"provider": "namecheap",
"domain": "sub.example.com",
"password": "e5322165c1d74692bfa6d807100c0310"
}
]
}
```
Another example for *Porkbun*:
```json
{
"settings": [
{
"provider": "porkbun",
"domain": "domain.com",
"api_key": "sk1_…",
"secret_api_key": "pk1_…",
"ip_version": "ipv4",
"ipv6_suffix": ""
}
]
}
```
When you write a `clan.nix`, the **common** fields (`provider`, `domain`, `period`, …) are already exposed as typed
*Nix options*.
Registrar-specific or very new keys can be passed through an open attribute set called **extraSettings**.
---
# 2. Full Porkbun example
Manage three records `@`, `home` and `test` of the domain
`jon.blog` and refresh them every 15 minutes:
```nix title="clan.nix" hl_lines="10-11"
inventory.instances = {
dyndns = {
roles.default.machines."jon" = { };
roles.default.settings = {
period = 15; # minutes
settings = {
"all-jon-blog" = {
provider = "porkbun";
domain = "jon.blog";
# (1) tell the secret-manager which key we are going to store
secret_field_name = "secret_api_key";
# everything below is copied verbatim into config.json
extraSettings = {
host = "@,home,test"; # (2) comma-separated list of sub-domains
ip_version = "ipv4";
ipv6_suffix = "";
api_key = "pk1_4bb2b231275a02fdc23b7e6f3552s01S213S"; # (3) public, safe to commit
};
};
};
};
};
};
```
1. `secret_field_name` tells the *vars-generator* to store the entered secret under the specified JSON field name in the configuration.
2. ddns-updater allows multiple hosts by separating them with a comma.
3. The `api_key` above is *public*; the corresponding **private key** is retrieved through `secret_field_name`.
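For illustration, a sketch (not verbatim output) of the `config.json` the service assembles from the example above; at startup, `secret_api_key` is filled in from the secret store under the name chosen via `secret_field_name`:
```json
{
  "settings": [
    {
      "provider": "porkbun",
      "domain": "jon.blog",
      "host": "@,home,test",
      "ip_version": "ipv4",
      "ipv6_suffix": "",
      "api_key": "pk1_4bb2b231275a02fdc23b7e6f3552s01S213S",
      "secret_api_key": "sk1_…"
    }
  ]
}
```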

View File

@@ -1,277 +0,0 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "clan-core/dyndns";
manifest.description = "A dynamic DNS service to update domain IPs";
manifest.categories = [ "Network" ];
manifest.readme = builtins.readFile ./README.md;
roles.default = {
interface =
{ lib, ... }:
{
options = {
server = {
enable = lib.mkEnableOption "dyndns webserver";
domain = lib.mkOption {
type = lib.types.str;
description = "Domain to serve the webservice on";
};
port = lib.mkOption {
type = lib.types.int;
default = 54805;
description = "Port to listen on";
};
acmeEmail = lib.mkOption {
type = lib.types.str;
description = ''
Email address for account creation and correspondence from the CA.
It is recommended to use the same email for all certs to avoid account
creation limits.
'';
};
};
period = lib.mkOption {
type = lib.types.int;
default = 5;
description = "Domain update period in minutes";
};
settings = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule (
{ ... }:
{
options = {
provider = lib.mkOption {
example = "namecheap";
type = lib.types.str;
description = "The dyndns provider to use";
};
domain = lib.mkOption {
type = lib.types.str;
example = "example.com";
description = "The top level domain to update.";
};
secret_field_name = lib.mkOption {
example = "api_key";
type = lib.types.enum [
"password"
"token"
"api_key"
"secret_api_key"
];
default = "password";
description = "The field name for the secret";
};
extraSettings = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = { };
description = ''
Extra settings for the provider.
Provider specific settings: https://github.com/qdm12/ddns-updater#configuration
'';
};
};
}
)
);
default = { };
description = "Configuration for which domains to update";
};
};
};
perInstance =
{ settings, ... }:
{
nixosModule =
{
config,
lib,
pkgs,
...
}:
let
name = "dyndns";
cfg = settings;
# We dedup secrets if they have the same provider + base domain
secret_id = opt: "${name}-${opt.provider}-${opt.domain}";
secret_path =
opt: config.clan.core.vars.generators."${secret_id opt}".files."${secret_id opt}".path;
# We check that a secret has not been set in extraSettings.
extraSettingsSafe =
opt:
if (builtins.hasAttr opt.secret_field_name opt.extraSettings) then
throw "Please do not set ${opt.secret_field_name} in extraSettings, it is automatically set by the dyndns module."
else
opt.extraSettings;
service_config = {
settings = builtins.catAttrs "value" (
builtins.attrValues (
lib.mapAttrs (_: opt: {
value =
(extraSettingsSafe opt)
// {
domain = opt.domain;
provider = opt.provider;
}
// {
"${opt.secret_field_name}" = secret_id opt;
};
}) cfg.settings
)
);
};
secret_generator = _: opt: {
name = secret_id opt;
value = {
share = true;
migrateFact = "${secret_id opt}";
prompts.${secret_id opt} = {
type = "hidden";
persist = true;
};
};
};
in
{
imports = lib.optional cfg.server.enable (
lib.modules.importApply ./nginx.nix {
inherit config;
inherit settings;
inherit lib;
}
);
clan.core.vars.generators = lib.mkIf (cfg.settings != { }) (
lib.mapAttrs' secret_generator cfg.settings
);
users.groups.${name} = lib.mkIf (cfg.settings != { }) { };
users.users.${name} = lib.mkIf (cfg.settings != { }) {
group = name;
isSystemUser = true;
description = "User for ${name} service";
home = "/var/lib/${name}";
createHome = true;
};
services.nginx = lib.mkIf cfg.server.enable {
virtualHosts = {
"${cfg.server.domain}" = {
forceSSL = true;
enableACME = true;
locations."/" = {
proxyPass = "http://localhost:${toString cfg.server.port}";
};
};
};
};
systemd.services.${name} = lib.mkIf (cfg.settings != { }) {
path = [ ];
description = "Dynamic DNS updater";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
environment = {
MYCONFIG = "${builtins.toJSON service_config}";
SERVER_ENABLED = if cfg.server.enable then "yes" else "no";
PERIOD = "${toString cfg.period}m";
LISTENING_ADDRESS = ":${toString cfg.server.port}";
GODEBUG = "netdns=go"; # We need to set this until https://github.com/NixOS/nixpkgs/pull/432758 has been merged.
};
serviceConfig =
let
pyscript =
pkgs.writers.writePython3Bin "generate_secret_config.py"
{
libraries = [ ];
doCheck = false;
}
''
import json
from pathlib import Path
import os
cred_dir = Path(os.getenv("CREDENTIALS_DIRECTORY"))
config_str = os.getenv("MYCONFIG")
def get_credential(name):
secret_p = cred_dir / name
with open(secret_p, 'r') as f:
return f.read().strip()
config = json.loads(config_str)
print(f"Config: {config}")
for attrset in config["settings"]:
if "password" in attrset:
attrset['password'] = get_credential(attrset['password'])
elif "token" in attrset:
attrset['token'] = get_credential(attrset['token'])
elif "secret_api_key" in attrset:
attrset['secret_api_key'] = get_credential(attrset['secret_api_key'])
elif "api_key" in attrset:
attrset['api_key'] = get_credential(attrset['api_key'])
else:
raise ValueError(f"Missing secret field in {attrset}")
# create directory data if it does not exist
data_dir = Path('data')
data_dir.mkdir(mode=0o770, exist_ok=True)
# Create a temporary config file
# with appropriate permissions
tmp_config_path = data_dir / '.config.json'
tmp_config_path.touch(mode=0o660, exist_ok=False)
# Write the config with secrets back
with open(tmp_config_path, 'w') as f:
f.write(json.dumps(config, indent=4))
# Move config into place
config_path = data_dir / 'config.json'
tmp_config_path.rename(config_path)
# Set file permissions to read
# and write only by the user and group
for file in data_dir.iterdir():
file.chmod(0o660)
'';
in
{
ExecStartPre = lib.getExe pyscript;
ExecStart = lib.getExe pkgs.ddns-updater;
LoadCredential = lib.mapAttrsToList (_: opt: "${secret_id opt}:${secret_path opt}") cfg.settings;
User = name;
Group = name;
NoNewPrivileges = true;
PrivateTmp = true;
ProtectSystem = "strict";
ReadOnlyPaths = "/";
PrivateDevices = "yes";
ProtectKernelModules = "yes";
ProtectKernelTunables = "yes";
WorkingDirectory = "/var/lib/${name}";
ReadWritePaths = [
"/proc/self"
"/var/lib/${name}"
];
Restart = "always";
RestartSec = 60;
};
};
};
};
};
}

View File

@@ -1,19 +0,0 @@
{ lib, ... }:
let
module = lib.modules.importApply ./default.nix { };
in
{
clan.modules = {
dyndns = module;
};
perSystem =
{ ... }:
{
clan.nixosTests.dyndns = {
imports = [ ./tests/vm/default.nix ];
clan.modules."@clan/dyndns" = module;
};
};
}

View File

@@ -1,50 +0,0 @@
{
config,
lib,
settings,
...
}:
{
security.acme.acceptTerms = true;
security.acme.defaults.email = settings.server.acmeEmail;
networking.firewall.allowedTCPPorts = [
443
80
];
services.nginx = {
enable = true;
statusPage = lib.mkDefault true;
recommendedBrotliSettings = lib.mkDefault true;
recommendedGzipSettings = lib.mkDefault true;
recommendedOptimisation = lib.mkDefault true;
recommendedProxySettings = lib.mkDefault true;
recommendedTlsSettings = lib.mkDefault true;
# Nginx sends all access logs to /var/log/nginx/access.log by default,
# instead of to the journal!
commonHttpConfig = "access_log syslog:server=unix:/dev/log;";
resolver.addresses =
let
isIPv6 = addr: builtins.match ".*:.*:.*" addr != null;
escapeIPv6 = addr: if isIPv6 addr then "[${addr}]" else addr;
cloudflare = [
"1.1.1.1"
"2606:4700:4700::1111"
];
resolvers =
if config.networking.nameservers == [ ] then cloudflare else config.networking.nameservers;
in
map escapeIPv6 resolvers;
sslDhparam = config.security.dhparams.params.nginx.path;
};
security.dhparams = {
enable = true;
params.nginx = { };
};
}

View File

@@ -1,77 +0,0 @@
{
pkgs,
...
}:
{
name = "service-dyndns";
clan = {
directory = ./.;
inventory = {
machines.server = { };
instances = {
dyndns-test = {
module.name = "@clan/dyndns";
module.input = "self";
roles.default.machines."server".settings = {
server = {
enable = true;
domain = "test.example.com";
port = 54805;
acmeEmail = "test@example.com";
};
period = 1;
settings = {
"test.example.com" = {
provider = "namecheap";
domain = "example.com";
secret_field_name = "password";
extraSettings = {
host = "test";
server = "dynamicdns.park-your-domain.com";
};
};
};
};
};
};
};
};
nodes = {
server = {
# Disable firewall for testing
networking.firewall.enable = false;
# Mock ACME for testing (avoid real certificate requests)
security.acme.defaults.server = "https://localhost:14000/dir";
};
};
testScript = ''
start_all()
# Test that dyndns service starts (will fail without secrets, but that's expected)
server.wait_for_unit("multi-user.target")
# Test that nginx service is running
server.wait_for_unit("nginx.service")
# Test that nginx is listening on expected ports
server.wait_for_open_port(80)
server.wait_for_open_port(443)
# Test that the dyndns user was created
# server.succeed("getent passwd dyndns")
# server.succeed("getent group dyndns")
#
# Test that the home directory was created
server.succeed("test -d /var/lib/dyndns")
# Test that nginx configuration includes our domain
server.succeed("${pkgs.nginx}/bin/nginx -t")
print("All tests passed!")
'';
}

View File

@@ -1,9 +1,3 @@
# Example clan service. See https://docs.clan.lol/guides/services/community/
# for more details
# The test for this module in ./tests/vm/default.nix shows an example of how
# the service is used.
{ packages }:
{ ... }:
{
@@ -11,94 +5,30 @@
manifest.name = "clan-core/hello-world";
manifest.description = "This is a test";
# This service provides two roles: "morning" and "evening". Roles can be
# defined in this file directly (e.g. the "morning" role) or split up into a
# separate file (e.g. the "evening" role)
roles.morning = {
roles.peer = {
interface =
{ lib, ... }:
{
# Here we define the settings for this role. They will be accessible
# via `roles.morning.settings` in the role
options.greeting = lib.mkOption {
options.foo = lib.mkOption {
type = lib.types.str;
default = "Good morning";
description = "The greeting to use";
};
};
# Maps over all instances and produces one result per instance.
perInstance =
{
# Role settings for this machine/instance
settings,
# The name of this instance of the service
instanceName,
# The current machine
machine,
# All roles of this service, with their assigned machines
roles,
...
}:
{
# Analog to 'perSystem' of flake-parts.
# For every instance of this service we will add a nixosModule to a morning-machine
nixosModule =
{ config, ... }:
{
# Interaction examples what you could do here:
# - Get some settings of this machine
# settings.ipRanges
#
# - Get all evening names:
# allEveningNames = lib.attrNames roles.evening.machines
#
# - Get all roles of the machine:
# machine.roles
#
# - Get the settings that were applied to a specific evening machine:
# roles.evening.machines.peer1.settings
imports = [ ];
environment.etc.hello.text = "${settings.greeting} World!";
};
};
};
# The implementation of the evening role is in a separate file. We have kept
# the interface here, so we can see all settings of the service in one place,
# but you can also move it to the respective file
roles.evening = {
interface =
{ lib, ... }:
{
options.greeting = lib.mkOption {
type = lib.types.str;
default = "Good evening";
description = "The greeting to use";
# default = "";
description = "Some option";
};
};
};
imports = [ ./evening.nix ];
# This part gets applied to all machines, regardless of their role.
perMachine =
{ machine, ... }:
{
nixosModule =
{ pkgs, ... }:
{
environment.systemPackages = [
(pkgs.writeShellScriptBin "greet-world" ''
#!${pkgs.bash}/bin/bash
set -euo pipefail
cat /etc/hello
echo " I'm ${machine.name}"
'')
];
nixosModule = {
clan.core.vars.generators.hello = {
files.hello = {
secret = false;
};
script = ''
echo "Hello world from ${machine.name}" > $out/hello
'';
};
};
};
}

View File

@@ -1,12 +0,0 @@
{
roles.evening.perInstance =
{ settings, ... }:
{
nixosModule =
{ ... }:
{
imports = [ ];
environment.etc.hello.text = "${settings.greeting} World!";
};
};
}

View File

@@ -27,10 +27,20 @@ let
module.name = "hello-world";
module.input = "self";
roles.evening.machines.jon = { };
roles.peer.machines.jon = { };
};
};
};
# NOTE:
# If you wonder why 'self-zerotier-redux':
# A local module gets the prefix 'self'; otherwise the prefix is the name of the 'input'.
# The rest is the name of the service, as set in the instance via 'module.name':
#
# -> ${module.input}-${module.name}
# In this case it is 'self-zerotier-redux'
# This is usually only used internally, but we can use it to test the evaluation of a service module in isolation.
# evaluatedService =
# testFlake.clanInternals.inventoryClass.distributedServices.importedModulesEvaluated.self-zerotier-redux.config;
in
{
test_simple = {

View File

@@ -5,35 +5,22 @@
directory = ./.;
inventory = {
machines.peer1 = { };
machines.peer2 = { };
instances."test" = {
module.name = "hello-service";
module.input = "self";
# Assign the roles to the two machines
roles.morning.machines.peer1 = { };
roles.evening.machines.peer2 = {
# Set roles settings for the peers, where we want to differ from
# the role defaults
settings = {
greeting = "Good night";
};
};
roles.peer.machines.peer1 = { };
};
};
};
testScript =
{ ... }:
{ nodes, ... }:
''
start_all()
value = peer1.succeed("greet-world")
assert value.strip() == "Good morning World! I'm peer1", value
value = peer2.succeed("greet-world")
assert value.strip() == "Good night World! I'm peer2", value
# peer1 should have the 'hello' file
value = peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.hello.files.hello.path}")
assert value.strip() == "Hello world from peer1", value
'';
}

View File

@@ -1,47 +0,0 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "clan-core/internet";
manifest.description = "direct access (or via ssh jumphost) to machines";
manifest.categories = [
"System"
"Network"
];
roles.default = {
interface =
{ lib, ... }:
{
options = {
host = lib.mkOption {
type = lib.types.str;
description = ''
ip address or hostname (domain) of the machine
'';
};
jumphosts = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
description = ''
optional list of jumphosts to use to connect to the machine
'';
};
};
};
perInstance =
{
roles,
lib,
settings,
...
}:
{
exports.networking = {
# TODO add user space network support to clan-cli
peers = lib.mapAttrs (_name: machine: {
host.plain = machine.settings.host;
SSHOptions = map (jumphost: "-J ${jumphost}") machine.settings.jumphosts;
}) roles.default.machines;
};
};
};
}

View File

@@ -1,9 +0,0 @@
{ lib, ... }:
let
module = lib.modules.importApply ./default.nix { };
in
{
clan.modules = {
internet = module;
};
}

View File

@@ -1,35 +0,0 @@
## Features
- Creates incremental snapshots using rsnapshot
- Supports multiple backup targets
- Mount/unmount hooks for external storage
- Pre/post backup hooks for custom scripts
- Configurable snapshot retention
- Automatic state folder detection
## Usage
Enable the localbackup service and configure backup targets:
```nix
instances = {
localbackup = {
module.name = "@clan/localbackup";
module.input = "self";
roles.default.machines."machine".settings = {
targets.external= {
directory = "/mnt/backup";
mountpoint = "/mnt/backup";
};
};
};
};
```
## Commands
The service provides these commands:
- `localbackup-create`: Create a new backup
- `localbackup-list`: List available backups
- `localbackup-restore`: Restore from backup (requires `NAME` and `FOLDERS` environment variables; see the example below)
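A sketch of a typical round-trip, assuming the `external` target from the usage example above (the snapshot name and restored folder are illustrative; take real names from `localbackup-list`):
```bash
# Create a snapshot on every configured target
localbackup-create

# List snapshots as JSON, e.g. [{"name": "external::/mnt/backup/snapshot.0"}]
localbackup-list

# Restore one folder from a snapshot of the "external" target
NAME="external::/mnt/backup/snapshot.0" FOLDERS="/var/test-data" localbackup-restore
```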

View File

@@ -1,267 +0,0 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "localbackup";
manifest.description = "Automatically backs up the current machine to a local directory.";
manifest.categories = [ "System" ];
manifest.readme = builtins.readFile ./README.md;
roles.default = {
interface =
{ lib, ... }:
{
options = {
targets = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
name = lib.mkOption {
type = lib.types.strMatching "^[a-zA-Z0-9._-]+$";
default = name;
description = "the name of the backup job";
};
directory = lib.mkOption {
type = lib.types.str;
description = "the directory to backup";
};
mountpoint = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "mountpoint of the directory to backup. If set, the directory will be mounted before the backup and unmounted afterwards";
};
preMountHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run before the directory is mounted";
};
postMountHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run after the directory is mounted";
};
preUnmountHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run before the directory is unmounted";
};
postUnmountHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run after the directory is unmounted";
};
preBackupHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run before the backup";
};
postBackupHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run after the backup";
};
};
}
)
);
# default = { };
description = "List of directories where backups are stored";
};
snapshots = lib.mkOption {
type = lib.types.int;
default = 20;
description = "Number of snapshots to keep";
};
};
};
perInstance =
{
settings,
...
}:
{
nixosModule =
{
config,
lib,
pkgs,
...
}:
let
mountHook = target: ''
if [[ -x /run/current-system/sw/bin/localbackup-mount-${target.name} ]]; then
/run/current-system/sw/bin/localbackup-mount-${target.name}
fi
if [[ -x /run/current-system/sw/bin/localbackup-unmount-${target.name} ]]; then
trap "/run/current-system/sw/bin/localbackup-unmount-${target.name}" EXIT
fi
'';
uniqueFolders = lib.unique (
lib.flatten (lib.mapAttrsToList (_name: state: state.folders) config.clan.core.state)
);
rsnapshotConfig = target: ''
config_version 1.2
snapshot_root ${target.directory}
sync_first 1
cmd_cp ${pkgs.coreutils}/bin/cp
cmd_rm ${pkgs.coreutils}/bin/rm
cmd_rsync ${pkgs.rsync}/bin/rsync
cmd_ssh ${pkgs.openssh}/bin/ssh
cmd_logger ${pkgs.inetutils}/bin/logger
cmd_du ${pkgs.coreutils}/bin/du
cmd_rsnapshot_diff ${pkgs.rsnapshot}/bin/rsnapshot-diff
${lib.optionalString (target.postBackupHook != null) ''
cmd_postexec ${pkgs.writeShellScript "postexec.sh" ''
set -efu -o pipefail
${target.postBackupHook}
''}
''}
retain snapshot ${builtins.toString settings.snapshots}
${lib.concatMapStringsSep "\n" (folder: ''
backup ${folder} ${config.networking.hostName}/
'') uniqueFolders}
'';
in
{
environment.systemPackages = [
(pkgs.writeShellScriptBin "localbackup-create" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.rsnapshot
pkgs.coreutils
pkgs.util-linux
]
}
${lib.concatMapStringsSep "\n" (target: ''
${mountHook target}
echo "Creating backup '${target.name}'"
${lib.optionalString (target.preBackupHook != null) ''
(
${target.preBackupHook}
)
''}
declare -A preCommandErrors
${lib.concatMapStringsSep "\n" (
state:
lib.optionalString (state.preBackupCommand != null) ''
echo "Running pre-backup command for ${state.name}"
if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
preCommandErrors["${state.name}"]=1
fi
''
) (builtins.attrValues config.clan.core.state)}
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" sync
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" snapshot
'') (builtins.attrValues settings.targets)}'')
(pkgs.writeShellScriptBin "localbackup-list" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.jq
pkgs.findutils
pkgs.coreutils
pkgs.util-linux
]
}
(${
lib.concatMapStringsSep "\n" (target: ''
(
${mountHook target}
find ${lib.escapeShellArg target.directory} -mindepth 1 -maxdepth 1 -name "snapshot.*" -print0 -type d \
| jq -Rs 'split("\u0000") | .[] | select(. != "") | { "name": ("${target.name}::" + .)}'
)
'') (builtins.attrValues settings.targets)
}) | jq -s .
'')
(pkgs.writeShellScriptBin "localbackup-restore" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.rsync
pkgs.coreutils
pkgs.util-linux
pkgs.gawk
]
}
if [[ "''${NAME:-}" == "" ]]; then
echo "No backup name given via NAME environment variable"
exit 1
fi
if [[ "''${FOLDERS:-}" == "" ]]; then
echo "No folders given via FOLDERS environment variable"
exit 1
fi
name=$(awk -F'::' '{print $1}' <<< "$NAME")
backupname=''${NAME#$name::}
if command -v localbackup-mount-$name; then
localbackup-mount-$name
fi
if command -v localbackup-unmount-$name; then
trap "localbackup-unmount-$name" EXIT
fi
if [[ ! -d $backupname ]]; then
echo "No backup found $backupname"
exit 1
fi
IFS=':' read -ra FOLDER <<< "''$FOLDERS"
for folder in "''${FOLDER[@]}"; do
mkdir -p "$folder"
rsync -a "$backupname/${config.networking.hostName}$folder/" "$folder"
done
'')
]
++ (lib.mapAttrsToList (
name: target:
pkgs.writeShellScriptBin ("localbackup-mount-" + name) ''
set -efu -o pipefail
${lib.optionalString (target.preMountHook != null) target.preMountHook}
${lib.optionalString (target.mountpoint != null) ''
if ! ${pkgs.util-linux}/bin/mountpoint -q ${lib.escapeShellArg target.mountpoint}; then
${pkgs.util-linux}/bin/mount -o X-mount.mkdir ${lib.escapeShellArg target.mountpoint}
fi
''}
${lib.optionalString (target.postMountHook != null) target.postMountHook}
''
) settings.targets)
++ lib.mapAttrsToList (
name: target:
pkgs.writeShellScriptBin ("localbackup-unmount-" + name) ''
set -efu -o pipefail
${lib.optionalString (target.preUnmountHook != null) target.preUnmountHook}
${lib.optionalString (
target.mountpoint != null
) "${pkgs.util-linux}/bin/umount ${lib.escapeShellArg target.mountpoint}"}
${lib.optionalString (target.postUnmountHook != null) target.postUnmountHook}
''
) settings.targets;
clan.core.backups.providers.localbackup = {
# TODO list needs to run locally or on the remote machine
list = "localbackup-list";
create = "localbackup-create";
restore = "localbackup-restore";
};
};
};
};
}

View File

@@ -1,16 +0,0 @@
{ lib, ... }:
let
module = lib.modules.importApply ./default.nix { };
in
{
clan.modules.localbackup = module;
perSystem =
{ ... }:
{
clan.nixosTests.localbackup = {
imports = [ ./tests/vm/default.nix ];
clan.modules."@clan/localbackup" = module;
};
};
}

View File

@@ -1,62 +0,0 @@
{ ... }:
{
name = "service-localbackup";
clan = {
directory = ./.;
test.useContainers = true;
inventory = {
machines.machine = { };
instances = {
localbackup = {
module.name = "@clan/localbackup";
module.input = "self";
roles.default.machines."machine".settings = {
targets.hdd = {
directory = "/mnt/external-disk";
preMountHook = ''
touch /run/mount-external-disk
'';
postUnmountHook = ''
touch /run/unmount-external-disk
'';
};
};
};
};
};
};
nodes.machine = {
clan.core.state.test-backups.folders = [ "/var/test-backups" ];
};
testScript = ''
import json
start_all()
machine.systemctl("start network-online.target")
machine.wait_for_unit("network-online.target")
# dummy data
machine.succeed("mkdir -p /var/test-backups")
machine.succeed("echo testing > /var/test-backups/somefile")
# create
machine.succeed("localbackup-create >&2")
machine.wait_until_succeeds("! systemctl is-active localbackup-job-serverone >&2")
# list
snapshot_list = machine.succeed("localbackup-list").strip()
assert json.loads(snapshot_list)[0]["name"].strip() == "hdd::/mnt/external-disk/snapshot.0"
# borgbackup restore
machine.succeed("rm -f /var/test-backups/somefile")
machine.succeed("NAME=/mnt/external-disk/snapshot.0 FOLDERS=/var/test-backups /run/current-system/sw/bin/localbackup-restore >&2")
assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
'';
}

View File

@@ -184,24 +184,24 @@
settings.certificate.searchDomains != [ ]
) config.clan.core.vars.generators.openssh-cert.files."ssh.id_ed25519-cert.pub".path;
hostKeys = [
{
path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
type = "ed25519";
}
]
++ lib.optional settings.hostKeys.rsa.enable {
path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
type = "rsa";
};
hostKeys =
[
{
path = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519".path;
type = "ed25519";
}
]
++ lib.optional settings.hostKeys.rsa.enable {
path = config.clan.core.vars.generators.openssh-rsa.files."ssh.id_rsa".path;
type = "rsa";
};
};
programs.ssh.knownHosts.clan-sshd-self-ed25519 = {
hostNames = [
"localhost"
config.networking.hostName
]
++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
] ++ (lib.optional (config.networking.domain != null) config.networking.fqdn);
publicKey = config.clan.core.vars.generators.openssh.files."ssh.id_ed25519.pub".value;
};
};

View File

@@ -1,20 +0,0 @@
## Usage
```nix
{
instances.syncthing = {
roles.peer.tags.all = { };
roles.peer.settings.folders = {
documents = {
path = "~/syncthing/documents";
};
};
};
}
```
Now the folder `~/syncthing/documents` will be shared with all your machines.
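Devices outside the clan, such as a phone, can be added through the peer role's `extraDevices` option. A minimal sketch (the device ID is the placeholder from the Syncthing documentation):
```nix
{
  instances.syncthing = {
    roles.peer.tags.all = { };
    roles.peer.settings.extraDevices.phone = {
      id = "P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2";
      name = "My Phone";
    };
  };
}
```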
## Documentation
Extensive documentation is available on the [Syncthing](https://docs.syncthing.net/) website.

View File

@@ -1,243 +0,0 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "clan-core/syncthing";
manifest.description = "Syncthing is a continuous file synchronization program with automatic peer discovery";
manifest.categories = [
"Utility"
"System"
"Network"
];
manifest.readme = builtins.readFile ./README.md;
roles.peer = {
interface =
{ lib, ... }:
{
options.openDefaultPorts = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Whether to open the default syncthing ports in the firewall.
'';
};
options.folders = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule {
options = {
path = lib.mkOption {
type = lib.types.str;
description = "Path to the folder to sync";
};
devices = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
description = "List of device names to share this folder with. Empty list means all peers and extraDevices.";
};
ignorePerms = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Ignore permission changes";
};
rescanIntervalS = lib.mkOption {
type = lib.types.int;
default = 3600;
description = "Rescan interval in seconds";
};
type = lib.mkOption {
type = lib.types.enum [
"sendreceive"
"sendonly"
"receiveonly"
];
default = "sendreceive";
description = "Folder type";
};
versioning = lib.mkOption {
type = lib.types.nullOr (
lib.types.submodule {
options = {
type = lib.mkOption {
type = lib.types.enum [
"external"
"simple"
"staggered"
"trashcan"
];
description = "Versioning type";
};
params = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = { };
description = "Versioning parameters";
};
};
}
);
default = null;
description = "Versioning configuration";
};
};
}
);
default = { };
description = "Folders to synchronize between all peers";
};
options.extraDevices = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule {
options = {
id = lib.mkOption {
type = lib.types.str;
description = "Device ID of the external syncthing device";
example = "P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2";
};
addresses = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ "dynamic" ];
description = "List of addresses for the device";
example = [
"dynamic"
"tcp://192.168.1.100:22000"
];
};
name = lib.mkOption {
type = lib.types.str;
default = "";
description = "Human readable name for the device";
};
};
}
);
default = { };
description = "External syncthing devices not managed by clan (e.g., mobile phones)";
example = lib.literalExpression ''
{
phone = {
id = "P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2";
name = "My Phone";
addresses = [ "dynamic" ];
};
}
'';
};
};
perInstance =
{
settings,
roles,
...
}:
{
nixosModule =
{
config,
lib,
...
}:
let
allPeerMachines = lib.attrNames roles.peer.machines;
readMachineVar =
machine: varPath: default:
let
fullPath = "${config.clan.core.settings.directory}/vars/per-machine/${machine}/${varPath}";
in
if builtins.pathExists fullPath then
lib.removeSuffix "\n" (builtins.readFile fullPath)
else
default;
peerDevices = lib.listToAttrs (
lib.forEach allPeerMachines (machine: {
name = machine;
value = {
name = machine;
id = readMachineVar machine "syncthing/id/value" "";
addresses = [
"dynamic"
]
++
lib.optional (readMachineVar machine "zerotier/zerotier-ip/value" null != null)
"tcp://[${readMachineVar machine "zerotier/zerotier-ip/value" ""}]:22000";
};
})
);
extraDevicesConfig = lib.mapAttrs (deviceName: deviceConfig: {
inherit (deviceConfig) id addresses;
name = if deviceConfig.name != "" then deviceConfig.name else deviceName;
}) settings.extraDevices;
allDevices = peerDevices // extraDevicesConfig;
validDevices = lib.filterAttrs (_: device: device.id != "") allDevices;
syncthingFolders = lib.mapAttrs (
_folderName: folderConfig:
let
targetDevices =
if folderConfig.devices == [ ] then lib.attrNames validDevices else folderConfig.devices;
in
folderConfig
// {
devices = targetDevices;
}
) settings.folders;
in
{
imports = [
./vars.nix
];
config = lib.mkMerge [
{
services.syncthing = {
enable = true;
configDir = "/var/lib/syncthing";
group = "syncthing";
key = lib.mkDefault config.clan.core.vars.generators.syncthing.files.key.path or null;
cert = lib.mkDefault config.clan.core.vars.generators.syncthing.files.cert.path or null;
settings = {
devices = validDevices;
folders = syncthingFolders;
};
};
}
# Conditionally open firewall ports
(lib.mkIf settings.openDefaultPorts {
services.syncthing.openDefaultPorts = true;
# Syncthing ports: 8384 for remote access to GUI
# 22000 TCP and/or UDP for sync traffic
# 21027/UDP for discovery
# source: https://docs.syncthing.net/users/firewall.html
networking.firewall.interfaces."zt+".allowedTCPPorts = [
8384
22000
];
networking.firewall.interfaces."zt+".allowedUDPPorts = [
22000
21027
];
})
];
};
};
};
perMachine = _: {
nixosModule =
{ lib, ... }:
{
# Raise the inotify watch limit so Syncthing can watch large folder trees.
# Use mkOverride 900 here, as it would otherwise collide with the default set by
# the upstream NixOS xserver.nix.
boot.kernel.sysctl."fs.inotify.max_user_watches" = lib.mkOverride 900 524288;
};
};
}

View File

@@ -1,54 +0,0 @@
{
self,
lib,
inputs,
...
}:
let
module = lib.modules.importApply ./default.nix {
inherit (self) packages;
};
in
{
clan.modules = {
syncthing = module;
};
perSystem =
let
unit-test-module = (
self.clanLib.test.flakeModules.makeEvalChecks {
inherit module;
inherit inputs;
fileset = lib.fileset.unions [
# The zerotier service being tested
../../clanServices/syncthing
# Required modules
../../nixosModules/clanCore
# Dependencies like clan-cli
../../pkgs/clan-cli
];
testName = "syncthing";
tests = ./tests/eval-tests.nix;
testArgs = { };
}
);
in
{ ... }:
{
imports = [
unit-test-module
];
/**
1. Prepare the test vars
nix run .#generate-test-vars -- clanServices/syncthing/tests/vm syncthing-service
2. To run the test
nix build .#checks.x86_64-linux.syncthing-service
*/
clan.nixosTests.syncthing-service = {
imports = [ ./tests/vm/default.nix ];
clan.modules.syncthing-service = module;
};
};
}

View File

@@ -1,62 +0,0 @@
{
module,
clanLib,
lib,
...
}:
let
testFlake =
(clanLib.clan {
self = { };
directory = ./vm;
machines.machine1 = {
nixpkgs.hostPlatform = "x86_64-linux";
};
machines.machine2 = {
nixpkgs.hostPlatform = "x86_64-linux";
};
machines.machine3 = {
nixpkgs.hostPlatform = "x86_64-linux";
};
machines.machine4 = {
nixpkgs.hostPlatform = "x86_64-linux";
};
modules.syncthing = module;
inventory.instances = {
default = {
module.name = "syncthing";
module.input = "self";
roles.peer.tags.all = { };
roles.peer.settings.extraDevices.phone = {
id = "P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2";
};
};
};
}).config;
in
{
test_machine1_peers = {
expr = {
devices = lib.attrNames testFlake.nixosConfigurations.machine1.config.services.syncthing.settings.devices;
machine4_ID =
testFlake.nixosConfigurations.machine1.config.services.syncthing.settings.devices.machine1.id;
externalPhoneId =
testFlake.nixosConfigurations.machine1.config.services.syncthing.settings.devices.phone.id;
};
expected = {
devices = [
"machine1"
"machine2"
"machine3"
"machine4"
"phone"
];
machine4_ID = "LJOGYGS-RQPWIHV-HD4B3GK-JZPVPK6-VI3IAY5-CWQWIXK-NJSQMFH-KXHOHA4";
externalPhoneId = "P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2";
};
};
}

View File

@@ -1,95 +0,0 @@
{
name = "service-syncthing-service";
clan = {
directory = ./.;
test.useContainers = true;
inventory = {
machines.machine1 = { };
machines.machine2 = { };
machines.machine3 = { };
machines.machine4 = { };
instances.default = {
module.name = "syncthing-service";
module.input = "self";
roles.peer.tags.all = { };
roles.peer.settings.folders = {
documents = {
path = "/var/lib/syncthing/documents";
type = "sendreceive";
};
partly_shared = {
devices = [
"machine1"
"machine4"
];
path = "~/music";
type = "sendreceive";
};
};
};
instances.small = {
module.name = "syncthing-service";
module.input = "self";
roles.peer.machines = {
machine3 = { };
machine4 = { };
};
roles.peer.settings.folders = {
pictures = {
path = "~/pictures";
type = "sendreceive";
};
};
};
};
};
testScript =
{ ... }:
''
start_all()
machine1.wait_for_unit("syncthing.service")
machine2.wait_for_unit("syncthing.service")
machine3.wait_for_unit("syncthing.service")
machine4.wait_for_unit("syncthing.service")
machine1.wait_for_open_port(8384)
machine2.wait_for_open_port(8384)
machine3.wait_for_open_port(8384)
machine4.wait_for_open_port(8384)
machine1.wait_for_open_port(22000)
machine2.wait_for_open_port(22000)
machine3.wait_for_open_port(22000)
machine4.wait_for_open_port(22000)
# Check that the correct folders are synchronized
# documents - all
machine1.wait_for_file("/var/lib/syncthing/documents")
machine2.wait_for_file("/var/lib/syncthing/documents")
machine3.wait_for_file("/var/lib/syncthing/documents")
machine4.wait_for_file("/var/lib/syncthing/documents")
# music - machine 1 & 4
machine1.wait_for_file("/var/lib/syncthing/music")
machine4.wait_for_file("/var/lib/syncthing/music")
# pictures - machine 3 & 4
machine3.wait_for_file("/var/lib/syncthing/pictures")
machine4.wait_for_file("/var/lib/syncthing/pictures")
machine1.succeed("echo document > /var/lib/syncthing/documents/document")
machine1.succeed("echo music > /var/lib/syncthing/music/music")
machine3.succeed("echo picture > /var/lib/syncthing/pictures/picture")
machine2.wait_for_file("/var/lib/syncthing/documents/document", 20)
machine3.wait_for_file("/var/lib/syncthing/documents/document", 20)
machine4.wait_for_file("/var/lib/syncthing/documents/document", 20)
machine4.wait_for_file("/var/lib/syncthing/music/music", 20)
machine4.wait_for_file("/var/lib/syncthing/pictures/picture", 20)
'';
}

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1numxr6m52fxrm9a7sdw4vdpkp463mm8qtuf5d0p0jde04wydfgtscwdx78",
"type": "age"
}
]

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1aqng9vmlgth5aucu5ty2wa0kk9tvk7erj4s07hq03s6emu72fgxsqkrqql",
"type": "age"
}
]

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1kul02mg50nxccsl38nvma0enrgx454wq0qdefllj4l0adqkllvls5wuhfr",
"type": "age"
}
]

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age1kqgx4elusxx4u8409gml5z6tvrsayqsphewsl93mtqn7pl2p5dwq9lujpj",
"type": "age"
}
]

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:XqOUvbQOkOUrxQAMrUSRGfOl2eYjGZGI5st1iZl9Re8ymQa0LtokGzknfSiSucwfP50YTo7NoQsNCRsq/nS0RWbby0TQceqSVSw=,iv:GT3UrKdanFy4aMRPTwuOIyvP7pT6MXGRIstgKZmvG/4=,tag:Bg3ZzCPpw3KHXwy2Hazqrg==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBmWWJuTjBCaUhUeXhSRmZS\nVW9uQ09yOXVJVFFybzJsdmVmaEpOZVdEVVJNCmxxcUU3eG44cTlrY1lnVUFjdlZK\nOXNvR2pDMVJtZXFCbjJlTW1xVGoyTlUKLS0tIDlkMS9FNVJuZGZyODJzZzVRNzJQ\nNWpuaUxpMnRaUWt5bmw1TjBJV0dUOHcKmuHPHwzTUtkl7eZjZ4422C8cxGUnnzg1\nBPwAewDXEgq+clTXpU3UWjSvUNpqIkh9GDRE+qYfYWa7m36TrzACig==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:20:56Z",
"mac": "ENC[AES256_GCM,data:INCJWY2hsAwF049zFpYUTccJvczfgrPQwu+YUt8s/vfdrAk+tmLqCVFh9idG97l/OIEVPLv1gFE3SlcC+kKCNQV4SAyZA62CTeeNdgSSqDDipjrFn4fr1L8ZenCn56/giW0GIB2bBCa5WUYaHtGDoG8f6HSqNIhjnY9/qmDLUWQ=,iv:Utda22592sXOEEKFRSgfP+yLgi6FQGeEFiT61ZiKcws=,tag:UWyEnSNt0ztxTh0FGRzTtA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../users/admin

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:bDwReDwUe9t0lXtTRU5r6WY3OvV1qOvkkByG2PbjXtArF4w6y3X/i6SYCnoys3wMvAY32+uH13ETXDS7LR/6W4/+pCAGAb3ATqQ=,iv:84dC7UP6QkHVkLwSvmb/NjXCA9OJ+of49UfitU0Cuoo=,tag:oO15dzpH7BKjawPGI1+F0Q==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBBVXpNTkhTU3A1OU5HZm9p\nb2t5YTIxZWU4Z3hZR0ljTUNRT2hvQXEwNjJRCjh4SXY1NWhPVFFTMWswaWo2cFJB\ndUx4V1RiRFQ1UkNQU24zaGlSV3ZYeE0KLS0tIGJ5ak9RS2JnNk0vU3BaMEZLTFI0\nZit5RDlZbWE1QzY0RHQzcU5pZy84TUkK3/HHFBGfA9EpU+WDrlM9w5rbmgOb7ZAi\nVQiO08PVGTOvsEDed5A2oIXImRopcuBATzKoi4DNXpYlpSI/scYuAA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:21:08Z",
"mac": "ENC[AES256_GCM,data:qdNz02bNRHaWhTLxHqgvMRokpXZHvijyoP2c+KXMQxWP/7884SWQ4xoPSiUeLOPatLXpjXWQ/OBiDqQAzLkbC0/ujJH+BJQtV75xl6ytOjLbYw+hwatAR9C5GxiuFgKjLSsdaqzRlXJsULbf56edXeT+U/u5G8542WjF4AUat00=,iv:FYQRJQaRR0aPFyHyl0QCVPpjTXePXiZ2LjccPaybWh4=,tag:W/xkIgTUoHiTX9azluinAQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../users/admin

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:2B9BeSPThkGX4sBAtm9c5Uc8vz5q+hSs8y0ibQ520s2EOYNWEEwP0e+9CBUmU1n0TKR4FP3HMHA0N7LKHBEXEb513I5lNHWgREc=,iv:qkmur96UjZ1BoWgvvihWk/7VRDg4snMeDA0s1MWdLb4=,tag:3djouRryDjcOFPv9Z8nLvg==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBxd2wxQWQwSHl5R0ZlRkxX\nWXBuck04TStoTHRYMDJaQkNRcDQvZFV6U0U0ClROOVpiVXhCVTdzeXpVQlRTYWRP\naWE2R0VtaGwvUk9UcGRWbEZtQlh5ckkKLS0tIDNjdm40UCthbDBwSE9ZYXBwVC9h\nRENrMFV2a21QRjBBYWJ4bFZ3K2NxWDAKr9hgDOuG4lR6dChQCvw/VOQdRNF0Mj1k\nzQRfQaC8nfZGGOJtU09zv5+ZPYJdheRuz91M18qY5uA5qrch5Yrvww==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:21:21Z",
"mac": "ENC[AES256_GCM,data:F6Ombz312QLl0BAkDbhN37KQ8gxMoukRqz8XkqANcdm8HyqfWL/V5Hxdt5Ve3HW4KzLO1lxb9M0/rQbtCj1hKMSnfM4XjP1R4ClLkU0knxeuLwbOtc4nN1qkaIs5fPcR0G9+gv+FnhZqDzosrhogDSWAayqiArwRWscMY4l716w=,iv:KYlcVQQGcWPIsL81VmTRmEBmC6PPT71i3ywcEbSfc5k=,tag:ogDnTv1KtXVtBDvhkjV1pw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../users/admin

View File

@@ -1,15 +0,0 @@
{
"data": "ENC[AES256_GCM,data:chE6ezkSGLHwvfqrUPiTLEoB63NhzBJgHQnGLe/0mwuR9iST3SpthUlKv7K8cezOvq7VVqJUoOGQSbqtuA04NUvwShVCBbqPjeA=,iv:50IV6ggQVnQpUpU+jfBB2z5/BZYs3lktGMfsWsCtJUs=,tag:BjtV31lmGOT5FposJA91WA==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBZSVAzVXcrMWdUQmNCNHo4\neWw5NnJjYkdHbkE1dmJPcDlNb1dpT3kxbFN3Cm1HWVFxblgrNkpaZmtOVHBpdEVF\nWXRMamdSUGxkSk5ic3p3cEZrNG5QZEkKLS0tIDdYOGZRclREL0o1bWZmVTZoMUlu\nQzJPZFlUdFE4NkE0di94QVRzRXgwb1EKkqzL7yOdALLd8I2GnKPG3d61XTbNMs4+\n52M+o+wA8h+mnImbyVOtU9D6AkxvbKywZLmNRukkPqnkLqu5IXoSMQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:21:34Z",
"mac": "ENC[AES256_GCM,data:DI61cHhur+0q+gGtXHA6uFyZyKY/hCTvo2MLhhWPCSUWzYJ+j37fHfH1KUZcol+5prhnUIRrC4AoVt+0MeoXrNj+3SyptTVo1MgqNBayQkBMxKYp++SbqlXnlkLD+XOohpw6+f67rGNecjNc/OwCcfXu7PZmFhAFkwC39hUm7l0=,iv:lyinQFzoXo37Zs1QtbM1Jla+KRSMSUcph7bIR6/X1nw=,tag:nFfq7KtaTwZkQ9joCUOE2w==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../users/admin

View File

@@ -1,4 +0,0 @@
{
"publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"type": "age"
}

View File

@@ -1 +0,0 @@
../../../../../../sops/machines/machine1

View File

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:H+qlkDBtl4xYPz7X56gamUTQfuDE5+m/BkDPK9963A9F,iv:paPRFOnxW8aSt/NYg4afp+M+8svt0j4G6mZaJms/c7o=,tag:9uxvemHUr6Jm0MzvSf9cxQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1numxr6m52fxrm9a7sdw4vdpkp463mm8qtuf5d0p0jde04wydfgtscwdx78",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBBM1ZUaEwwOGVFSVF0dlRX\nQzdMRjJFZU5kSTNDTzVrZ1JzNHdROXpGNlI0CkZXQ0JkalloT2xNNEh5ZG5NT2Vn\nVlFKZTV6aTYwNmF2bVpHM01YdUtLeGMKLS0tIDRCVDZrNk5UdkVOODdnZFptbDlZ\nVnNNMGc4U0FvOHlsU2dwTE03TUtyL1kKoikATM2w95ca39e7K9mOezI9z4hSgUAA\nUiq4GH9Sn4jtuew2FXK2ZqedzgSQuSOob8iaYBUMGdWL7bfn2gZsHQ==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBNMUQxZmxrZDR0OU0vd2tl\ndjlCNEFSeTVFRFRxa01xNXhRTDdFRWJtQkhFCitPV2hDNG9uWXE0a0gyK3NuSnc3\nblBKVjNkMzkyWHVKMzBKV1NzdFVsbXMKLS0tIDdkMTVrSk9pcjhlWkVISWNoQVU4\nTm1sQVlDbUVObFIrRkNObG5nMUdWVE0KWwIpPYL8HwcbUDUS6TG4DyJgAz+xz9lA\nLLRYARWF0zNJlOTHAIEksT14YSU3dplMxunwas2mLBjfYH0DIg2Org==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:20:56Z",
"mac": "ENC[AES256_GCM,data:vN/zqTJO+szaDV1aJc/ZYAbe0ISc7dJvQg8prjVkYIZA+aAh5Hk2Kec6iz+gH23CqapYcsEYJ/qd24eRrSsxPSJuvYWJAgyrPasNXNuGNrsVnkvTyClTUGiBbhoac6yHu66BHVpH15BJUgeSmO7iVD7HD+KPy0p66agJwWQEcn0=,iv:GLRGWh131n5otLJjBGElmMGxGOL1bljGZ8zzTE/fLn4=,tag:a65wuRUzsBtO2rO+hJN0RA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

View File

@@ -1 +0,0 @@
../../../../../../sops/users/admin

View File

@@ -1 +0,0 @@
../../../../../../sops/machines/machine1

View File

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:F5FWqPeOBJvuwzuomHd4IkWgGeBBmYmgSVeH/TSJKKG4BHAwgam8dT1oTWiKEd/SfLj6LY33N+s4fKqe3bdCCa5/CPC3P7ZP1RW0/fyAdftlXFpbmXjsFf1rsTcTPMPBxOdwbu6+w6ji16sEkKXsN/GQsk6bhT7gxEgy4ZXY1kiiE08TtIG8p6aIcES/sTXhNqvZvWkJeESh1pkVhfP0OSWp0x05RPvxg2FoU1IMcqmySZwL/Ltqp4I3pKNHmLzuTFfFmK10fC9OptBbPmPtp75k0pJcQ0xbzJAyMM1riGzhOOMO1Qj/DXcSPDVugrMYDZRpEqNGg+LPBuCF1l2e7UitPLY4/1g669lCAPR3stiSW3S2NZIsDsW/ubN2qYeMBSBM8NWpQPgcsrKELCgUExD3MFMO9G0Rl3sU1BsYw+jf+kZNZWDL/w9W6vYt73q3Nmu2DfPpGzwKLrnbBpqjgMJKev/9JG/gMejbMe5AK1leyr5TDi97ku3zTvQfur4YMWABiBswmIbgbjBZAJOIKzy10AJTFQbqNoGGaOz22i2RBJLVHU7CKoKyJL99Iun0MjSlIIv5hTZMXDnHPZxvLq4w0htA5U2VJNKg04YKaPpc5ceDLVt9kgEF6OKG7/5nwDoQ/cgjrwbzl85sWpWQgXxwpSDPN8A0rkyZkgxNch/tUASgUoOExJRd6ht0iJiFM3tW25z8NU1ZWijs287hCkXNac63ZnXCa/2gB2zj+iHHLzgGvsxfhFiM2smjC0CSNbteTYr3sgZszrJSDdyI3Aerx92UwN336Iu0pXAvAGh8SBLBdz1CcmAlldK08Z8tFxplqOaqCQ8AtuklfQQeh8/Lx0VW09HIGkjQp88xotW4bZ803rPb7pqJNTzQmSF/4JWtIJXEG8eRbyHXpTM9p9RYvlY7/yK75EH/vwq6eAu6Cm2AwUxLjqMv9uoAad0gij66DflQ93D3hOfJaF+yNLa+HTce/p0Ymqeo/YM3/UUQ/ZHVbPonULXkylYs8JFFVkf3DBJTGYB85UCZ/ObNyXFtk/GuzQ==,iv:zV2Wm02uMdGmdKR0qiYnTdTlyRec85wXwUJOE3mAeWo=,tag:8fw+x1O1/53ff0qOuoph/A==,type:str]",
"sops": {
"age": [
{
"recipient": "age1numxr6m52fxrm9a7sdw4vdpkp463mm8qtuf5d0p0jde04wydfgtscwdx78",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvbTg5alA5SzJoUTZRSFM0\ndk1XaTlkOXo3NVNxYUJCc1FWWG16Q3IwMERnCkU1S1hlT3ZxcTRhaWs4c1BaVkNi\ndEJ0dTNTV1MxN0QrV2dJUzUvRTlsV3MKLS0tIExSb20zTDhuVExUMG9JZGd2bG00\nL0NTanIwZE92N1IxNWR5NW5PTktpK0EKsBMTH1ln0H/dK2AgUBWiGtdcY2ujvu1H\nTR+X3MZfFHQdOIvW4dkhUJt7BnZ5cEOCUizE5oI1+DifjDqk1dd+VQ==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAzNHJsaElGRWc3ZTQrQlBs\nb0NLMmJUNzhyOHFielFZa1VnRDcwQ2JaREU0CnhrR3hGVXF3SnpRZWE5dlc5VFVV\nT1A4SGNoT2s0MU1jdHJ5U0ZvK3l0emcKLS0tIDZULzBXZ294NXBSRVJVRHlwUkpr\nWi90bUpNQlAremhpaWZkVXpRcE5yL1UKp5KUA5g/trDODHcXI7SWjbWR/ozXpxRp\ngHbONszf4y4hjkHOCVvtQ2NmR/cF64nhCswZYlfssUb4aJBVrMlVEw==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:20:56Z",
"mac": "ENC[AES256_GCM,data:3l51gIfAbCnYvWjp6meJGsWQJPI7x4yswFe3WPLGtDJMPrRdOWW5UtU2Qb/rcoKGoSee5OJblOGvkloWTp++HebS2TxKEQ26qU9ycOq27vFt6mfmu4IVcmTqoVM3BPRh9md590lqdLyIsy/BULFBhe0jGBUQbXfDkfewronDOdU=,iv:JkVkjT9G5Z2+u/ZYDaoM8sRk5cBVWYR8fm48Gwfbr/Y=,tag:L9xZSAP3zQZfy6malNIFDw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

@@ -1 +0,0 @@
../../../../../../sops/users/admin

@@ -1 +0,0 @@
LJOGYGS-RQPWIHV-HD4B3GK-JZPVPK6-VI3IAY5-CWQWIXK-NJSQMFH-KXHOHA4

@@ -1 +0,0 @@
../../../../../../sops/machines/machine1

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:oYhwmtmsMpBKoUnC2sze5M7Qx3y9+ukpt7apmicJJOaDNliI8+NrrYntl56X70U4i+DN3Iz7qwPGGBxjveV0z7kIUEvlDiu3GgQmC/34omktyogb8dH/POoO+Z/E1CxKJEIn+rEU6Oqhg4aPKt/JbveW5wlevlQsMwW1oAtS701JJeY6KU6AfDerU0cS4K5+xw+dFg28pIvIIe02uDzTnFEXZYzvPk7UxfOE0IFxs6/zzPFoBbeFo2WIZZBMQYzP4AQMEiFxxaK7qthBwGuCEc7yMpW1uwVe9CZBfkVkg+wIX4DSm+0j8TiyikobsBxkvPUgqYHNyQaINJp82aGPOzFLsJ6ixf4RqgOzPikzdsyU8phi7waiDPDLf2rhZmnR,iv:9zhjW5wBlNtCxV1kBqzUlZaVRjAbr9LpxypJaIk4RPw=,tag:u1qxkOdHIqkelp/bchb4bg==,type:str]",
"sops": {
"age": [
{
"recipient": "age1numxr6m52fxrm9a7sdw4vdpkp463mm8qtuf5d0p0jde04wydfgtscwdx78",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBFZGxnQXhHUWhONitsWnRk\nbSsvSHRYbHJLc1BjOGgyb3Fyem5VeXk0MENRCkpkdGVmb01rK2FCV25Oc1JGY3U4\nK0xIb0UvTCsrV3lDRkIwZmhscDN4TUkKLS0tIG1GOTFPZTlvcXVOMElHYWN5ZFBU\nbjB4Y3FmY2ZMMmZ6cUFlZ0NUL2c3Z2MK+Fn2tf9eX6Iy6GMsa5//fzKDAUUCFa4x\neeCStPrxTbmKWb4T2NxXrZYK73kvOAmEQenUasddZ24/SDhnnrlXbw==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB0N0V1Q1l1NS9JK1R1RG8z\nVE9rRnVqQW9mRGNvWHViZWlKS21qSkF4RENFCjl0OU1rMUFHUHVDR3NjSXc1Tnpr\ndE1HUGxQYXMzWW11Q2tlNUdjdXpqdk0KLS0tIGtDZGJpQ3FrbnFZVDJuYjN6bFFN\nZGZqUFNKMmpFSUd5QXFtSFVSWFlpelUKFzm5m/+ReOVDHpvgqAKs5Vwq4XVCPH0K\nzmxtSIAwey9lUxnWNkuKOUG07o89ACsVe6pVPLLqHpGLotvne68GRw==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:20:56Z",
"mac": "ENC[AES256_GCM,data:k92YniWJ8U75G0YJ8yt/Xgi5NtyeoJrSNCCMd+Znj9/JL8mWwevJ9aeR387TK7OjVyKp8TOXTSky3bOqZbr5BTHUQDRr0LlQDO6ozmN3CVOdKpK31hfIotTHNRCUcpq+CoEB4VyjeAAdnzCkvyhckZ/L/CtQpJkSGI02M+mHcwE=,iv:cG5Q75Y0zeGj6EezNY1rRD0ZLtzIxkcKKsSHV8Og1aM=,tag:dsSFoW/XzoeOHmH5sqJzzg==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

@@ -1 +0,0 @@
../../../../../../sops/users/admin

@@ -1 +0,0 @@
../../../../../../sops/machines/machine2

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:ZMxZz2bWSw4lKwXp+kdeblLJxpLxdWyZ9ZPBMkZeZGmf,iv:Dnpmvt8YomnwKeJ9ULWy8iAw9OspZ99glCYDtr2Ymkw=,tag:aBHwYdO8lZmtb8PbjCEeKA==,type:str]",
"sops": {
"age": [
{
"recipient": "age1aqng9vmlgth5aucu5ty2wa0kk9tvk7erj4s07hq03s6emu72fgxsqkrqql",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBHSWh4aDdUVnowNDBGRllw\nWitjU1pKQkhSNTdmMjZOUkJ6a1UwY2FkWEhzCjlIeGJvV2JpeTBXcVFCQzY2MXJC\nVlFsY3Uzd3R3cEJuVlZYYXd3dXkxSUEKLS0tIHpnT2Jjb1EzNUpXVlhseTBSdDd6\nYmorNlo3bXY4OGl3WXJQZ2dHbHlUVFUKdVPZd/2QtR/ELpf+vy5sXdGC0tS1N4uE\n5zsRpMBWQptheOUF0tNZAbw264gbYX/fece/myTJLISAPM9wZQd7ug==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA5YzdiaDBxYURVcTVLQkF6\nMEVDd04wSXhYV1hKWXlhYVJVaUJtYUxmRGwwClU1SXNpUzJQNXVTTGQ2d2x0QmRV\ndWgyL2hMR2pTZWE5bGsvWmVYWFY5akkKLS0tICtLRHFuVjBlUldZQ2FCWTAza0Yy\nMWRSQTd4WWNjcGtYT1RqcS9ybWp1ZWMKidHF/OqLKVYWo02zHnSLm8BEN+qyJ2+i\nBqg62OqGLt8pbE/r1bxnxia4ZCgfc+xKtsWuARKq/BUBrKlMrBZGEA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:21:08Z",
"mac": "ENC[AES256_GCM,data:JInxA9c2UQBDFzetaHU8KYNnp1JS3eBPyAMjsP6sNwOsRJBY8enckQANvhXF4Mo6eksx9RUhdcqd5zqO/uupnqV4AuARxwI8Sq3ak1r1fnH4uzOC/wV5hWIHdOfKrePuE1+ayo/mR0xqM3ipxuUfJ/cM0ktYrTvJypR1brHkJyg=,iv:Qs1M3UMUOUuPVG564xJAJAhRsj7GGrs+jqNbFjupGDg=,tag:WjTJT3N7FkVmdv2giNVECQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

@@ -1 +0,0 @@
../../../../../../sops/users/admin

@@ -1 +0,0 @@
../../../../../../sops/machines/machine2

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:AiUO8delOdyU9pl1q9DuSegdy/MMh8q30M1hXuinpKVC9Wdtw/NFUVfKSM6wX5d3hkBnZ+BmG75bMIkZXXJ6Cl3Q7l3M60HcBrDQaUYJ3tfuO5dJGSi9Ab5RtnWKmaA/5wtb71sM08PbmmkPXnVI2KgXwX23VnNPdlMXFRYdWuzbTD7U+S7BPKiINjMCyLlMlNfw7YidGufwCkYb6YbpJ+Z6EkfArkH6pGKlB38vaz0rSiVSBa4fxzD4rgx2lh98yJ8VC80O/PdpvYhWub1aHpK1j6gq6InptvLNIyUh0i/9YXHbjpXc/D7hhphF/tA0JUOFMyN0Ses8cvBPHLOMU3uM82aYk9lBfnn74vKsNk5IWeiPskzN6dwmHr6yJHQmDiFp9ll7Ujf9GU5DzJhQsVUFf1xb2wq05YMomEpDdDo0xz35L/w8hFQg5kKLDvzKCiDQuvYz4KdaDZqobZbJ+ICNx0dZHlBg6NGvYK3Q1TpNkR6qHDnOGYC3dbZzeskuLDnD6VScPrLvf0duOrNwE2QP5Eqtk/5r+yP4SJzNK9UMZhYcXIF2koel6dxhlEmJMBLryR/56+GFIWOHIebKUGJhtumj0cl21QAcZgeQ42oy6mza+K17u+SpRwb+Vl+d10zSauxV6jqHE/qec4ZxKyFJH1TDR4R3FcqY3Xb2323r0Lx3Z6h3QSuXaoSz3lZj1FnXNh5x4Xctb87cNd6u/woxL3X105OwMOxaI4SIt81WWSA7do9WFzfNjOUahyullq5RWNOapLPAmMjuSdJBeImFEbKPUst25vgkzs3x4PZkS02s6R+o3h3PoeTZPDIpNIv6Ye6sRy0txhF4nBmHKFNjoClxaDG6esdDlsyu+eyZKRy6/Hvfe/iimlyb+iQMLlmTLoES9B7J0F1JAwJhetdnfoa3pbgZsehb2b0BmqgHbytgA/nOlNWcbae+aX8HCj4Ju0RnYwObbjaVqHQoST/LjnnVJCo1y0UZ4CnOg7MNEKrtxRb3BV8s9Xp3BR3UPFN/gKWBGh/xyHoR3ZEJ0mxt3vjdymqMtBQ=,iv:4slGBU5rnFPaiFXr6Nzuk54ku76XL9+cFOF3IPWbZn4=,tag:+XOCNKwG2PqzY/PTzruouA==,type:str]",
"sops": {
"age": [
{
"recipient": "age1aqng9vmlgth5aucu5ty2wa0kk9tvk7erj4s07hq03s6emu72fgxsqkrqql",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBYbVZ0c2tqSEZJNkxTbGpC\nd1U3YVQwQnhmY21pY0R5SnJHL0lVZC81SFFzCjEyV2VwQng5ejc2L0F0ZUFrWDB6\nY1k0blJQbUhRQ0kweGM5Ty9uQXVGS0EKLS0tIG96S1NOQWlidXZORDlrYWZWcFUy\nb2RLa2pKNEhkSFlwdVFMRXlEOStkNVkKYahEQPOfncD2TGeT1JNPWCOG0ScUCJ0K\n2cYRCBxayd/ssfyEL3jV+TVSxFnKrAhtGJzP2xEAZFYJsHoLG+56sQ==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBQcWdtYmpwYmJQV3BNL01x\nY0FURkczRjBKYWlzOFplcDZ4cktGMTNMN0VVCm5FQmorTzRMUzVSd2Y4VS94R050\nR0V5MXUvODh2Q0IxeU9RelJLTUZoWmsKLS0tIFFjT1pyS3NwWi9OY24xTytLN3NZ\nUDdqUEp4ek5tTGxvdFU4SGduR1VBeUkKX013r6b+KL1i3glEcpwgv/KzBEE9N9sj\nqgyZ3S5dhCY1LOquP/xnS7kqZzN+znv61F7577wV1pBy53KPOhl47Q==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:21:08Z",
"mac": "ENC[AES256_GCM,data:Oiq0mU3K+zgaHhdlniCGgx22Sa1tEOmO+ddfDdD2IsMm1c9vAI4O+/5PsF/DE4h0WoORJsn88qfzPDva90YUJKQAJsflMHNk8jjExGhtVLVjxkpZk2EtpBzfg311riYzKl5vSJxLqp67Ks2dHyl8qHEH71dSgkXxVMIhPDUmJCg=,iv:B+Hi7qoRbBT53wH3cBSLoQocdXCcfKenkwnErTweMwY=,tag:88c4zp8a6WNfnJkRZvNFqw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

@@ -1 +0,0 @@
../../../../../../sops/users/admin

@@ -1 +0,0 @@
K2FAKIB-HUUM7M7-CUPDN5Y-NORZCJP-F43EFQY-UPV4AUO-GAI3VZW-PH5TRQX

@@ -1 +0,0 @@
../../../../../../sops/machines/machine2

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:9U+BF9rhGbNCokJoZEl347m1jJm9n+JOJFXSVrqYM+r2A7NPTDGBKIrnVpGdOM+8VmOIzAIROGb1h+wgQoHbr98YtVRx88D0n9bA7ZtSCNiACXzgTl/swaqYCPgJ5FtPvylNagK92SDwjpVSV7jfLA0kdE49HESg9dWIUlPS2CoORhW+FY4wynXdMEsgZmeRgtYFMubqshX/Ep07qnF9HjQmmDy5XKQ4uEm3HydlZOZvW6USwyRYIcqH8x5p1xzif5zYgUsu1iHAmbhtqzULJoe2rzuM3+7Av0LeGcQpXe3gkX/3+aA/SgGN5mJOtLalcwClow5HH0atlOopgy4523Z+jANvc4XwExpJbSYeJuqUTV20q1X16Kyph1O+XYOW,iv:dS7CcP6oPfdByqIW3sz4AUm2BfVInFmDDkxxbGhySmc=,tag:T5ZbP2hDaKNR/c2J/mlS6g==,type:str]",
"sops": {
"age": [
{
"recipient": "age1aqng9vmlgth5aucu5ty2wa0kk9tvk7erj4s07hq03s6emu72fgxsqkrqql",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvcDZLcXpaODVma1RUNHE0\nMElMRkpRSWJwa2Q3dnFEQ0J2a1JpblZuWVFJCk1tUTV1U3VpYUJqajZKd3l3OHNE\nWDk4TzhQSlluOGdnNlcrUTBxaXJrRVEKLS0tIHdiOVhRTHI1emE5TnMxbEZJdm1W\nR2NxMVRhQ0xDT2o0cE1POXRQT2ZwMmMK+3x5+5pVrGPILDpCpqPKuVX5emmfDvFt\n2HjLIwy/2mWcsktXh2gk8KAD4WWL/JjatTuP+EP5zoOFcL5/U3S5Pg==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSArS3B3SjQ5ejRLU0xUT245\nWGNkaUErdTQ4ODNqSzY2aVFPeUlabHl2MkNnCnlzcUh0ZjlUcFBXbEJtZ2pDUmNr\nYWRvRG1EbkwrZVE3NlU2cWc4VSswQ3MKLS0tIGYvZFZWOXV3U2hMVU1XOHVCSXdp\nSm1vaC83c01TZms3Z0NqMHhqbzRNUGMKKiRAy58NZDz5rjNA3xYv8/dU4H5L0fo/\nQc3WsEXIIwnVHO0X6WN+qUpF8uTPAZW26wecspdsf/5NOJtM7o8+EA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:21:08Z",
"mac": "ENC[AES256_GCM,data:QGID6lY6g1qvYXhtXrlNA3E+IjJZRKiB8+CDDLYGwx5vtX32M/WNVfzEs8bUdUDEJTUnfFI6tBHjFbKUVNFmbH2utZ3aiUGVJadVUzuWQf3HO/McyPwRMm47gt29U0iagUTSdVbzSROwdwCfXfZeyJUPXXNZgyA5LqL8puPS+jo=,iv:kZzMQQ+1shjScxxmMVSqK5ckXckgCtxbqa8uBZ9RTZI=,tag:do0RhBzfHmFZc8L90WXTqQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

@@ -1 +0,0 @@
../../../../../../sops/users/admin

@@ -1 +0,0 @@
../../../../../../sops/machines/machine3

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:1yqn7/zQsDblJjKK5nK9wLQjmvBTmsr4WAkWvjRJ2FLX,iv:F9+nsOQSgbyr/q498oI1wU/KiOsEvSUec/2VC9bzv8E=,tag:tklcKqwd9MmuJany0pg79g==,type:str]",
"sops": {
"age": [
{
"recipient": "age1kul02mg50nxccsl38nvma0enrgx454wq0qdefllj4l0adqkllvls5wuhfr",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBMa054akc5ekFzYVBYZk1Z\ndFoxRGxUcGZaNzF6RjlPRTRXZjh6b2xxTzBjCktuV2Q1WnhnM2NMdGRncFhXWHpG\nRHhlU01PVVNUMEF4K2x0NEhreHRKKzAKLS0tIG5YQ0YzaTlxNGtPTURNY3VtNGhO\ndUxPa1BwdXZ0SVJnZmNqYngwaW1CYjQKngYlXRD28yZ0j7WMmACTRCbi7aJ6xNlN\nor7I530+HUv1s067PRwMiBWbNesiAHHhhZjOsnSiyQ/IYr0R77huyg==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBBdnhOU2MwY2ZXM0p5Rlg2\nL2FmaDZYT3pIL3llenV1MVZDT0xiVklXWFdFCko2S1BxMFpPc2QxRjExVnhXTVNp\nZStpdis4RDlHcmNobmpNK1FtbFhaSTAKLS0tIHRJblljNTJxS0ZKdWY0dFF1cURP\nZGFDRGZ6ZnozU3NJT3EvSGNEaTlralEKywGQQwjS3bJ2yI8839ycWeknES+r4oMH\ncBG/zSbYTznyMQq2uEhAe74a8zf3pe48EnwPksiCcFLIG5IP1SE6Iw==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:21:21Z",
"mac": "ENC[AES256_GCM,data:06hwqU0gL2x8brDOB3y69igVBHwIkgUiuxYin6FPsjAQO84qsT8suQVGHmqPxHXrPM9fMvu9/Xqio0w/ar8NElQLmUlTkOads/pb/ykuC5mU/645q1spvV65i4C/spGrIA4XtSaeY9APuQGDr8wiQKdnvuAXSsuVSQvEhTpbBqs=,iv:UguEbHUFqZoV2g7DLeUpI8dd3tdoWhfIDLnj0h5i1Wg=,tag:CJ5/uOYKhleP2N1oTlvdxQ==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

@@ -1 +0,0 @@
../../../../../../sops/users/admin

@@ -1 +0,0 @@
../../../../../../sops/machines/machine3

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:Az/IU5qbln8kN2rkY9MK4dMtCgaIQDjXw3aqbFs/Kjh3wLy4jYM4XkvQBeZPqgmonbGtJ50eDLS14g6ABYCmpQtDPHUAVllMPlKJMiGL1/+YPXGfsQyPgOXmgWA6hFI24ZPEsQYCRwbXoDZLJMkTxUMlMQqZTWsXkICbrIYzyJUjkbK0uaSjqbu7vLt7HVgb6WBoFtdXdP3rD2BVo6LEZUTd4mQW9PB0FPms7WKsVnoWQpKmowentae/2cdx1KCvEmoJfvnAGB09woHDGJWzs3VSrlenPVOu1dN9NUaGI61WRLJT4/h47PBEt1Pw0ec5YK2h1Po0pZa8LaYNQ9ewx9cIXsvWpKgvi73zSe70Q4uWT7xHglZHhncR6wECXIpClpQW4Z0a7CgMujfMexK6Dh8fJZ72hA5ZN5RwoxuAlhN0HujFBMk77w5t1QrulyJs8+UQIqfNhrK0LGQKRn4AnW1R0Hc7bOzeE3C50oKmyU1kDyoDChce9A6fGXQ758kUxOYTZy0095LIEL5Q0OdCujdz30a1NiORuuuiXSCCIAfH1rUMeO49Q1T3QF8IRJScvD2Q81ggqeewjzt83d8KbCHWOtPiaAAWzNOHPS7N/x9efTqZmf8YxajnSeQDbqqyRayjFPs5AaTyk1pqFiIRKYH9cKJeEXr/CY6j+ykgDXDksAvg3fVXikdt4E5O/HaWxU55CJZmBfshdC8GWEetRtl6pfA5poTT4L79B6edF6TxtX4R9Js322UTTKxOTJ8fkx/0grXUa+KxUreb+jbN826AyL7kWr5Zt2clXbCFetjASoJn+tMQKeUmE8xe2V4qibTmPlkzuhUIVdVG7G8xg8w31lBSErF9/M4AaaWba93s6GCgqdJ5T7Duf9DnfaiQr011eCmKpfJxsUTGm1Gs55+YIDQZEraOgLr9BWjBGy1gCwQIhZtY/udKzFXvqB/KBr2/mzXNzPNmnrcElKkjxpcHF0RkJ5JGo630k2AOab3+KJzHoRFPI8/VzR2dh3rHQ7SjN2cdiJiGO18sD9Xc+9Me6jgVLrMf8NY=,iv:TIaxmvvQUzzM1hLdb2caQJbTTfgeea1LijeGa/5Jid8=,tag:e77+V1Ok2qNkI3MdUntqIw==,type:str]",
"sops": {
"age": [
{
"recipient": "age1kul02mg50nxccsl38nvma0enrgx454wq0qdefllj4l0adqkllvls5wuhfr",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAwaGRjSEo3K2xKZE9HSDVX\nU3lRWWlnbm5QcEx6VG5nS1RyUmUxcnBPL0Y0ClE3VWZjclJybEJCZi9NdVJqeTBG\nM0hhV1FPdzVBT0NLRGZvdFdDMFNYN3MKLS0tIForRURHM1NrU2M3ZnhJb0FFWmwy\nUWo4QmJBS293aWtyRHQ3bTR5RWFpZWMKoGjITft9CoH3cVRXRGx02PTFFQMN8bPU\ngxNLcbWyLVSUvUZE1xYiVMh5aGMLlMoGwgwJBUAb/Cm744Yh6gz5Hg==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA5RkZxY0E1N0FOd2x4ZFlv\nZW5PbmIwZk9uS200N3N2SS9pU0ZzaU1ibUVVClRYRWlQbDRZNXVjZlp5cXpJRmh6\nUllUT3llVjRHSEpTNC94WjRFeTlIWUEKLS0tIEMvZ2M2VDByeGtpaUp6UFdtSWZC\naGFId2w1OXg4bnd0bFlCNUtSZDJlVTAKFLc3mW7y7M+HOO5TIEPPxznqXkWvUWEl\nVy8DA+hZAAsoRfGseFTD7ny9qJOLwnmpmW2m5dZTuPcRAFk/29DrGw==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:21:21Z",
"mac": "ENC[AES256_GCM,data:Y+XjgRgXHWrLZlmes1j2C7OLXxttm24aNTUUf7voSCUK/z3iIrqhsRs4OZfpLs1JyHNPYMhRu5kT1cjIQfZPwSNWPfw0QVvMGpgoNV8MGQW+G9mUxIvEy+F5yRGXiMd5bybyDgitADHG/6OxBd43IoEfkCpMs06yaEAievk+P9s=,iv:iZ9jP0cNmwHD/EViXzzmM8/6xIY+T2q+aMnQRH/JC1E=,tag:vT9/XUbuq7pltl5d1zLaew==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

@@ -1 +0,0 @@
../../../../../../sops/users/admin

@@ -1 +0,0 @@
UWOQP5L-DE3OCK6-BIQLRGV-JFZ7JTJ-VUYUFDN-F3PJNH4-VS2AJKR-PQJKQQD

@@ -1 +0,0 @@
../../../../../../sops/machines/machine3

@@ -1,19 +0,0 @@
{
"data": "ENC[AES256_GCM,data:pzKV11EWtow3USkwNwSO0qLAijfG8UbeVvDDkQJqd/S/lWF5gBTt+33uPTsBj2A0tyVglwHrGIuPl/WSxZqt4jIKogjeWrODzN6YCQC8I9/3JgCaCYKlDM2b4GTZxzC35YXD0CBmCcpGUgAvP3Awr5ga4SmAQLVHSR4MHODODRa2cj4TsReLLIf2JgFd1m4Jd6RS9YrWYBSTOp3btFzdJSISqKAHcIcEzMbb2uS8tBqmoWpG56snrzmchrBifB5jGe2RgN/HLN6+gp8p1MStEVWSBbmGszDtUH4k2hMKYpY+HEqHL2onbc+LIOo4ntpXagwD034Iw/mEfmGkgrf+AmnX8Gm6j/6CW8ILlypRYAvDucKvKRNOnWWrkR6XmiPq,iv:Zp/B5it4q49i3B1XT1F5II8Ajc9xjGpJ35SJQtLsr60=,tag:tqc3YsaNIPrvTR/6AVH8ww==,type:str]",
"sops": {
"age": [
{
"recipient": "age1kul02mg50nxccsl38nvma0enrgx454wq0qdefllj4l0adqkllvls5wuhfr",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA4SVJ4S1gvQjJYVTJjTVk4\nUkRrTXBhekhDUWlZQmtJWHJyQkk0Q1Nxcmh3CjdnazFxdmZGaEoxRCtkVnRwOGFW\nMnMzSHZOWUF3aWtmTGk3cTNlWjlpNkEKLS0tIE95VVFDQjRTRTdFNUNud2g5c21z\neHBHeHVMUTYvOHdFVW9kVWtSeFpodzQKXndWkTpSrJbzUpHStyWb4Q5p4p9ASjTc\nGQ4wcd3RLjsox/LfFw7NPXuRLQh7kRFgWxx3akjzeaVCbNOg7N2fKg==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBuQjBmeFAwQmNiVGhUTmtk\neDhaNTBaRmMyNHdaRVVJanorWE1Mc3kwVDFzCkxEMTFMRjdxYytLRk9HaWlaaTJv\nYTR5ZXRvMllYWHRDTlNXWFZlaXBwRmsKLS0tIDkxRVI3bXdKaXNMUGNWNTBLcVpw\nSXc4WHZDOUg0dFlDZnE3Mi9uMGFMcWMKsdxeoutdG3nwU24n2/qy9oTkhOueXKJ1\nbEuSY7OsQWp2tc6bVwx05G/R9bI9sYtA00FjJLLIR0Xk9M3ngor5/w==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-07-31T13:21:21Z",
"mac": "ENC[AES256_GCM,data:GKWyUqU/FeOQ3um7h+82ZYh4aylxII+li8F55iRvFXRLK4A8J6z37AiDI7TOxqQ2XZIh6b8HZA0BpFN6HlkL+q4DpiuV8eTmke+hLq4H74M/m3Kgv4rQtrBupk2OUdXdsUu6ERJiIxW7XKvAhX6Hvm6GEDSSttbba5L5esi/xQg=,iv:qIy/MQXUpTzcDF6VvY7ERUDLs2XJUK/dNxTIFgfu9Wc=,tag:D2ocRXrxvEtTHX/t85EtpA==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.10.2"
}
}

Some files were not shown because too many files have changed in this diff.