Compare commits


1 Commit

Author: pinpox
SHA1: 61f238210f
Message: Remove clanModules
Date: 2025-07-17 12:15:22 +02:00
272 changed files with 1960 additions and 10746 deletions

View File

@@ -1,20 +0,0 @@
name: Build Clan App (Darwin)
on:
schedule:
# Run every 4 hours
- cron: "0 */4 * * *"
workflow_dispatch:
push:
branches:
- main
jobs:
build-clan-app-darwin:
runs-on: nix
steps:
- uses: actions/checkout@v4
- name: Build clan-app for x86_64-darwin
run: |
nix build .#packages.x86_64-darwin.clan-app --system x86_64-darwin --log-format bar-with-logs

View File

@@ -1,7 +1,6 @@
#!/bin/sh
#!/usr/bin/env bash
# Shared script for creating pull requests in Gitea workflows
set -eu
set -euo pipefail
# Required environment variables:
# - CI_BOT_TOKEN: Gitea bot token for authentication
@@ -9,22 +8,22 @@ set -eu
# - PR_TITLE: Title of the pull request
# - PR_BODY: Body/description of the pull request
if [ -z "${CI_BOT_TOKEN:-}" ]; then
if [[ -z "${CI_BOT_TOKEN:-}" ]]; then
echo "Error: CI_BOT_TOKEN is not set" >&2
exit 1
fi
if [ -z "${PR_BRANCH:-}" ]; then
if [[ -z "${PR_BRANCH:-}" ]]; then
echo "Error: PR_BRANCH is not set" >&2
exit 1
fi
if [ -z "${PR_TITLE:-}" ]; then
if [[ -z "${PR_TITLE:-}" ]]; then
echo "Error: PR_TITLE is not set" >&2
exit 1
fi
if [ -z "${PR_BODY:-}" ]; then
if [[ -z "${PR_BODY:-}" ]]; then
echo "Error: PR_BODY is not set" >&2
exit 1
fi
@@ -44,12 +43,9 @@ resp=$(nix run --inputs-from . nixpkgs#curl -- -X POST \
}" \
"https://git.clan.lol/api/v1/repos/clan/clan-core/pulls")
if ! pr_number=$(echo "$resp" | jq -r '.number'); then
echo "Error parsing response from pull request creation" >&2
exit 1
fi
pr_number=$(echo "$resp" | jq -r '.number')
if [ "$pr_number" = "null" ]; then
if [[ "$pr_number" == "null" ]]; then
echo "Error creating pull request:" >&2
echo "$resp" | jq . >&2
exit 1
@@ -68,11 +64,8 @@ while true; do
"delete_branch_after_merge": true
}' \
"https://git.clan.lol/api/v1/repos/clan/clan-core/pulls/$pr_number/merge")
if ! msg=$(echo "$resp" | jq -r '.message'); then
echo "Error parsing merge response" >&2
exit 1
fi
if [ "$msg" != "Please try again later" ]; then
msg=$(echo "$resp" | jq -r '.message')
if [[ "$msg" != "Please try again later" ]]; then
break
fi
echo "Retrying in 2 seconds..."

View File

@@ -24,7 +24,7 @@ If you're new to Clan and eager to dive in, start with our quickstart guide and
In the Clan ecosystem, security is paramount. Learn how to handle secrets effectively:
- **Secrets Management**: Securely manage secrets by consulting [Vars](https://docs.clan.lol/guides/vars-backend/)<!-- [secrets.md](docs/site/guides/vars-backend.md) -->.
- **Secrets Management**: Securely manage secrets by consulting [secrets](https://docs.clan.lol/guides/getting-started/secrets/)<!-- [secrets.md](docs/site/guides/getting-started/secrets.md) -->.
### Contributing to Clan

View File

@@ -1,210 +0,0 @@
{ self, ... }:
{
clan.machines.test-backup = {
imports = [ self.nixosModules.test-backup ];
fileSystems."/".device = "/dev/null";
boot.loader.grub.device = "/dev/null";
};
clan.inventory.services = {
borgbackup.test-backup = {
roles.client.machines = [ "test-backup" ];
roles.server.machines = [ "test-backup" ];
};
};
flake.nixosModules = {
test-backup =
{
pkgs,
lib,
...
}:
let
dependencies =
[
pkgs.stdenv.drvPath
]
++ builtins.map (i: i.outPath) (builtins.attrValues (builtins.removeAttrs self.inputs [ "self" ]));
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in
{
imports = [
# Do not import inventory modules. They should be configured via 'clan.inventory'
#
# TODO: Configure localbackup via inventory
self.clanModules.localbackup
];
# Borgbackup overrides
services.borgbackup.repos.test-backups = {
path = "/var/lib/borgbackup/test-backups";
authorizedKeys = [ (builtins.readFile ../assets/ssh/pubkey) ];
};
clan.borgbackup.destinations.test-backup.repo = lib.mkForce "borg@machine:.";
clan.core.networking.targetHost = "machine";
networking.hostName = "machine";
programs.ssh.knownHosts = {
machine.hostNames = [ "machine" ];
machine.publicKey = builtins.readFile ../assets/ssh/pubkey;
};
services.openssh = {
enable = true;
settings.UsePAM = false;
settings.UseDns = false;
hostKeys = [
{
path = "/root/.ssh/id_ed25519";
type = "ed25519";
}
];
};
users.users.root.openssh.authorizedKeys.keyFiles = [ ../assets/ssh/pubkey ];
# This is needed to unlock the user for sshd
# Because we use sshd without setuid binaries
users.users.borg.initialPassword = "hello";
systemd.tmpfiles.settings."vmsecrets" = {
"/root/.ssh/id_ed25519" = {
C.argument = "${../assets/ssh/privkey}";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/ssh.id_ed25519" = {
C.argument = "${../assets/ssh/privkey}";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/borgbackup/borgbackup.ssh" = {
C.argument = "${../assets/ssh/privkey}";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/borgbackup/borgbackup.repokey" = {
C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
z = {
mode = "0400";
user = "root";
};
};
};
clan.core.facts.secretStore = "vm";
clan.core.vars.settings.secretStore = "vm";
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
environment.etc.install-closure.source = "${closureInfo}/store-paths";
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = lib.mkForce 3;
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
};
system.extraDependencies = dependencies;
clan.core.state.test-backups.folders = [ "/var/test-backups" ];
clan.core.state.test-service = {
preBackupScript = ''
touch /var/test-service/pre-backup-command
'';
preRestoreScript = ''
touch /var/test-service/pre-restore-command
'';
postRestoreScript = ''
touch /var/test-service/post-restore-command
'';
folders = [ "/var/test-service" ];
};
fileSystems."/mnt/external-disk" = {
device = "/dev/vdb"; # created in tests with virtualisation.emptyDisks
autoFormat = true;
fsType = "ext4";
options = [
"defaults"
"noauto"
];
};
clan.localbackup.targets.hdd = {
directory = "/mnt/external-disk";
preMountHook = ''
touch /run/mount-external-disk
'';
postUnmountHook = ''
touch /run/unmount-external-disk
'';
};
};
};
perSystem =
{ pkgs, ... }:
let
clanCore = self.checks.x86_64-linux.clan-core-for-checks;
in
{
checks = pkgs.lib.mkIf pkgs.stdenv.isLinux {
nixos-test-backups = self.clanLib.test.containerTest {
name = "nixos-test-backups";
nodes.machine = {
imports =
[
self.nixosModules.clanCore
# Some custom overrides for the backup tests
self.nixosModules.test-backup
]
++
# import the inventory generated nixosModules
self.clan.clanInternals.inventoryClass.machines.test-backup.machineImports;
clan.core.settings.directory = ./.;
};
testScript = ''
import json
start_all()
# dummy data
machine.succeed("mkdir -p /var/test-backups /var/test-service")
machine.succeed("echo testing > /var/test-backups/somefile")
# create
machine.succeed("clan backups create --debug --flake ${clanCore} test-backup")
machine.wait_until_succeeds("! systemctl is-active borgbackup-job-test-backup >&2")
machine.succeed("test -f /run/mount-external-disk")
machine.succeed("test -f /run/unmount-external-disk")
# list
backup_id = json.loads(machine.succeed("borg-job-test-backup list --json"))["archives"][0]["archive"]
out = machine.succeed("clan backups list --debug --flake ${clanCore} test-backup").strip()
print(out)
assert backup_id in out, f"backup {backup_id} not found in {out}"
localbackup_id = "hdd::/mnt/external-disk/snapshot.0"
assert localbackup_id in out, f"localbackup not found in {out}"
## borgbackup restore
machine.succeed("rm -f /var/test-backups/somefile")
machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup borgbackup 'test-backup::borg@machine:.::{backup_id}' >&2")
assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
machine.succeed("test -f /var/test-service/pre-restore-command")
machine.succeed("test -f /var/test-service/post-restore-command")
machine.succeed("test -f /var/test-service/pre-backup-command")
## localbackup restore
machine.succeed("rm -rf /var/test-backups/somefile /var/test-service/ && mkdir -p /var/test-service")
machine.succeed(f"clan backups restore --debug --flake ${clanCore} test-backup localbackup '{localbackup_id}' >&2")
assert machine.succeed("cat /var/test-backups/somefile").strip() == "testing", "restore failed"
machine.succeed("test -f /var/test-service/pre-restore-command")
machine.succeed("test -f /var/test-service/post-restore-command")
machine.succeed("test -f /var/test-service/pre-backup-command")
'';
} { inherit pkgs self; };
};
};
}

View File

@@ -81,14 +81,13 @@ in
# Base Tests
nixos-test-secrets = self.clanLib.test.baseTest ./secrets nixosTestArgs;
nixos-test-borgbackup-legacy = self.clanLib.test.baseTest ./borgbackup-legacy nixosTestArgs;
nixos-test-wayland-proxy-virtwl = self.clanLib.test.baseTest ./wayland-proxy-virtwl nixosTestArgs;
# Container Tests
nixos-test-container = self.clanLib.test.containerTest ./container nixosTestArgs;
nixos-test-zt-tcp-relay = self.clanLib.test.containerTest ./zt-tcp-relay nixosTestArgs;
nixos-test-matrix-synapse = self.clanLib.test.containerTest ./matrix-synapse nixosTestArgs;
nixos-test-postgresql = self.clanLib.test.containerTest ./postgresql nixosTestArgs;
# nixos-test-zt-tcp-relay = self.clanLib.test.containerTest ./zt-tcp-relay nixosTestArgs;
# nixos-test-matrix-synapse = self.clanLib.test.containerTest ./matrix-synapse nixosTestArgs;
# nixos-test-postgresql = self.clanLib.test.containerTest ./postgresql nixosTestArgs;
nixos-test-user-firewall-iptables = self.clanLib.test.containerTest ./user-firewall/iptables.nix nixosTestArgs;
nixos-test-user-firewall-nftables = self.clanLib.test.containerTest ./user-firewall/nftables.nix nixosTestArgs;

View File

@@ -16,7 +16,6 @@ nixosLib.runTest (
# This tests the compatibility of the inventory
# With the test framework
# - legacy-modules
# - clan.service modules
name = "service-dummy-test-from-flake";
@@ -45,9 +44,6 @@ nixosLib.runTest (
start_all()
admin1.wait_for_unit("multi-user.target")
peer1.wait_for_unit("multi-user.target")
# Provided by the legacy module
print(admin1.succeed("systemctl status dummy-service"))
print(peer1.succeed("systemctl status dummy-service"))
# peer1 should have the 'hello' file
peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}")

View File

@@ -15,12 +15,6 @@
meta.name = "foo";
machines.peer1 = { };
machines.admin1 = { };
services = {
legacy-module.default = {
roles.peer.machines = [ "peer1" ];
roles.admin.machines = [ "admin1" ];
};
};
instances."test" = {
module.name = "new-service";
@@ -28,9 +22,6 @@
roles.peer.machines.peer1 = { };
};
modules = {
legacy-module = ./legacy-module;
};
};
modules.new-service = {

View File

@@ -1,10 +0,0 @@
---
description = "Set up dummy-module"
categories = ["System"]
features = [ "inventory" ]
[constraints]
roles.admin.min = 1
roles.admin.max = 1
---

View File

@@ -1,5 +0,0 @@
{
imports = [
../shared.nix
];
}

View File

@@ -1,5 +0,0 @@
{
imports = [
../shared.nix
];
}

View File

@@ -1,34 +0,0 @@
{ config, ... }:
{
systemd.services.dummy-service = {
enable = true;
description = "Dummy service";
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
};
script = ''
generated_password_path="${config.clan.core.vars.generators.dummy-generator.files.generated-password.path}"
if [ ! -f "$generated_password_path" ]; then
echo "Generated password file not found: $generated_password_path"
exit 1
fi
host_id_path="${config.clan.core.vars.generators.dummy-generator.files.host-id.path}"
if [ ! -e "$host_id_path" ]; then
echo "Host ID file not found: $host_id_path"
exit 1
fi
'';
};
# TODO: add and prompt and make it work in the test framework
clan.core.vars.generators.dummy-generator = {
files.host-id.secret = false;
files.generated-password.secret = true;
script = ''
echo $RANDOM > "$out"/host-id
echo $RANDOM > "$out"/generated-password
'';
};
}

View File

@@ -15,7 +15,6 @@ nixosLib.runTest (
# This tests the compatibility of the inventory
# With the test framework
# - legacy-modules
# - clan.service modules
name = "service-dummy-test";
@@ -24,12 +23,6 @@ nixosLib.runTest (
inventory = {
machines.peer1 = { };
machines.admin1 = { };
services = {
legacy-module.default = {
roles.peer.machines = [ "peer1" ];
roles.admin.machines = [ "admin1" ];
};
};
instances."test" = {
module.name = "new-service";
@@ -37,9 +30,6 @@ nixosLib.runTest (
roles.peer.machines.peer1 = { };
};
modules = {
legacy-module = ./legacy-module;
};
};
modules.new-service = {
_class = "clan.service";
@@ -78,9 +68,6 @@ nixosLib.runTest (
start_all()
admin1.wait_for_unit("multi-user.target")
peer1.wait_for_unit("multi-user.target")
# Provided by the legacy module
print(admin1.succeed("systemctl status dummy-service"))
print(peer1.succeed("systemctl status dummy-service"))
# peer1 should have the 'hello' file
peer1.succeed("cat ${nodes.peer1.clan.core.vars.generators.new-service.files.not-a-secret.path}")

View File

@@ -1,5 +0,0 @@
---
description = "Convenient Administration for the Clan App"
categories = ["Utility"]
features = [ "inventory", "deprecated" ]
---

View File

@@ -1,3 +0,0 @@
{
imports = [ ./roles/default.nix ];
}

View File

@@ -1,30 +0,0 @@
{ lib, config, ... }:
{
options.clan.admin = {
allowedKeys = lib.mkOption {
default = { };
type = lib.types.attrsOf lib.types.str;
description = "The allowed public keys for ssh access to the admin user";
example = {
"key_1" = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD...";
};
};
};
# Bad practice.
# Should we add 'clanModules' to specialArgs?
imports = [
../../sshd
../../root-password
];
config = {
warnings = [
"The clan.admin module is deprecated and will be removed on 2025-07-15.
Please migrate to user-maintained configuration or the new equivalent clan services
(https://docs.clan.lol/reference/clanServices)."
];
users.users.root.openssh.authorizedKeys.keys = builtins.attrValues config.clan.admin.allowedKeys;
};
}

View File

@@ -1,8 +0,0 @@
---
description = "Set up automatic upgrades"
categories = ["System"]
features = [ "inventory", "deprecated" ]
---
Whether to periodically upgrade NixOS to the latest version. If enabled, a
systemd timer will run `nixos-rebuild switch --upgrade` once a day.
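For reference, a minimal usage sketch of the option described above; it assumes the module is reachable as `self.clanModules.auto-upgrade` (as other clanModules are referenced elsewhere in this diff) and the flake reference is a placeholder:
{ self, ... }:
{
  imports = [ self.clanModules.auto-upgrade ];
  # Flake to pull upgrades from (placeholder value)
  clan.auto-upgrade.flake = "github:example-org/example-config";
}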

View File

@@ -1,32 +0,0 @@
{
config,
lib,
...
}:
let
cfg = config.clan.auto-upgrade;
in
{
options.clan.auto-upgrade = {
flake = lib.mkOption {
type = lib.types.str;
description = "Flake reference";
};
};
config = {
warnings = [
"The clan.auto-upgrade module is deprecated and will be removed on 2025-07-15.
Please migrate to user-maintained configuration or the new equivalent clan services
(https://docs.clan.lol/reference/clanServices)."
];
system.autoUpgrade = {
inherit (cfg) flake;
enable = true;
dates = "02:00";
randomizedDelaySec = "45min";
};
};
}

View File

@@ -1,16 +0,0 @@
---
description = "Statically configure borgbackup with sane defaults."
---
!!! Danger "Deprecated"
Use [borgbackup](borgbackup.md) instead.
Don't use borgbackup-static through [inventory](../../guides/inventory.md).
This module implements the `borgbackup` backend and provides sane defaults
for backup management through `borgbackup` for members of the clan.
Configure the target machines that backups should be sent to through `targets`.
Configure the machines that should be backed up either through `includeMachines`,
which exclusively adds the included machines to the backup, or through
`excludeMachines`, which adds every machine except the excluded ones to the backup.
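A minimal configuration sketch of the options described above; machine names are placeholders and the module reference mirrors how clanModules are imported elsewhere in this diff:
{ self, ... }:
{
  imports = [ self.clanModules.borgbackup-static ];
  clan.borgbackup-static = {
    # Machines that receive the backups
    targets = [ "backup-host" ];
    # Back up only these machines (mutually exclusive with excludeMachines)
    includeMachines = [ "laptop" "server1" ];
  };
}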

View File

@@ -1,104 +0,0 @@
{ lib, config, ... }:
let
dir = config.clan.core.settings.directory;
machineDir = dir + "/machines/";
in
{
imports = [ ../borgbackup ];
options.clan.borgbackup-static = {
excludeMachines = lib.mkOption {
type = lib.types.listOf lib.types.str;
example = lib.literalExpression "[ config.clan.core.settings.machine.name ]";
default = [ ];
description = ''
Machines that should not be backed up.
Mutually exclusive with includeMachines.
If this is not empty, every machine in the clan except the ones listed here will be backed up by this module.
If includeMachines is set, only the included machines will be backed up.
'';
};
includeMachines = lib.mkOption {
type = lib.types.listOf lib.types.str;
example = lib.literalExpression "[ config.clan.core.settings.machine.name ]";
default = [ ];
description = ''
Machines that should be backed up.
Mutually exclusive with excludeMachines.
'';
};
targets = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
description = ''
Machines that should act as target machines for backups.
'';
};
};
config.services.borgbackup.repos =
let
machines = builtins.readDir machineDir;
borgbackupIpMachinePath = machines: machineDir + machines + "/facts/borgbackup.ssh.pub";
filteredMachines =
if ((builtins.length config.clan.borgbackup-static.includeMachines) != 0) then
lib.filterAttrs (name: _: (lib.elem name config.clan.borgbackup-static.includeMachines)) machines
else
lib.filterAttrs (name: _: !(lib.elem name config.clan.borgbackup-static.excludeMachines)) machines;
machinesMaybeKey = lib.mapAttrsToList (
machine: _:
let
fullPath = borgbackupIpMachinePath machine;
in
if builtins.pathExists fullPath then machine else null
) filteredMachines;
machinesWithKey = lib.filter (x: x != null) machinesMaybeKey;
hosts = builtins.map (machine: {
name = machine;
value = {
path = "/var/lib/borgbackup/${machine}";
authorizedKeys = [ (builtins.readFile (borgbackupIpMachinePath machine)) ];
};
}) machinesWithKey;
in
lib.mkIf
(builtins.any (
target: target == config.clan.core.settings.machine.name
) config.clan.borgbackup-static.targets)
(if (builtins.listToAttrs hosts) != null then builtins.listToAttrs hosts else { });
config.clan.borgbackup.destinations =
let
destinations = builtins.map (d: {
name = d;
value = {
repo = "borg@${d}:/var/lib/borgbackup/${config.clan.core.settings.machine.name}";
};
}) config.clan.borgbackup-static.targets;
in
lib.mkIf (builtins.any (
target: target == config.clan.core.settings.machine.name
) config.clan.borgbackup-static.includeMachines) (builtins.listToAttrs destinations);
config.assertions = [
{
assertion =
!(
((builtins.length config.clan.borgbackup-static.excludeMachines) != 0)
&& ((builtins.length config.clan.borgbackup-static.includeMachines) != 0)
);
message = ''
The options:
config.clan.borgbackup-static.excludeMachines = [${builtins.toString config.clan.borgbackup-static.excludeMachines}]
and
config.clan.borgbackup-static.includeMachines = [${builtins.toString config.clan.borgbackup-static.includeMachines}]
are mutually exclusive.
Use excludeMachines to exclude certain machines and backup the other clan machines.
Use include machines to only backup certain machines.
'';
}
];
config.warnings = lib.optional (
builtins.length config.clan.borgbackup-static.targets > 0
) "The borgbackup-static module is deprecated use the service via the inventory interface instead.";
}

View File

@@ -1,14 +0,0 @@
---
description = "Efficient, deduplicating backup program with optional compression and secure encryption."
categories = ["System"]
features = [ "inventory", "deprecated" ]
---
BorgBackup (short: Borg) gives you:
- Space efficient storage of backups.
- Secure, authenticated encryption.
- Compression: lz4, zstd, zlib, lzma or none.
- Mountable backups with FUSE.
- Easy installation on multiple platforms: Linux, macOS, BSD, …
- Free software (BSD license).
- Backed by a large and active open-source community.

View File

@@ -1,6 +0,0 @@
# Don't import this file
# It is only here for backwards compatibility.
# Don't author new modules with this file.
{
imports = [ ./roles/client.nix ];
}

View File

@@ -1,63 +0,0 @@
{ config, lib, ... }:
let
dir = config.clan.core.settings.directory;
machineDir = dir + "/vars/per-machine/";
machineName = config.clan.core.settings.machine.name;
# Instances might be empty, if the module is not used via the inventory
#
# Type: { ${instanceName} :: { roles :: Roles } }
# Roles :: { ${role_name} :: { machines :: [string] } }
instances = config.clan.inventory.services.borgbackup or { };
allClients = lib.foldlAttrs (
acc: _instanceName: instanceConfig:
acc
++ (
if (builtins.elem machineName instanceConfig.roles.server.machines) then
instanceConfig.roles.client.machines
else
[ ]
)
) [ ] instances;
in
{
options = {
clan.borgbackup.directory = lib.mkOption {
type = lib.types.str;
default = "/var/lib/borgbackup";
description = ''
The directory where the borgbackup repositories are stored.
'';
};
};
config.services.borgbackup.repos =
let
borgbackupIpMachinePath = machine: machineDir + machine + "/borgbackup/borgbackup.ssh.pub/value";
machinesMaybeKey = builtins.map (
machine:
let
fullPath = borgbackupIpMachinePath machine;
in
if builtins.pathExists fullPath then
machine
else
lib.warn ''
Machine ${machine} does not have a borgbackup key at ${fullPath},
run `clan vars generate ${machine}` to generate it.
'' null
) allClients;
machinesWithKey = lib.filter (x: x != null) machinesMaybeKey;
hosts = builtins.map (machine: {
name = machine;
value = {
path = "${config.clan.borgbackup.directory}/${machine}";
authorizedKeys = [ (builtins.readFile (borgbackupIpMachinePath machine)) ];
};
}) machinesWithKey;
in
if (builtins.listToAttrs hosts) != [ ] then builtins.listToAttrs hosts else { };
}

View File

@@ -1,10 +0,0 @@
---
description = "Set up data-mesher"
categories = ["System"]
features = [ "inventory" ]
[constraints]
roles.admin.min = 1
roles.admin.max = 1
---

View File

@@ -1,19 +0,0 @@
lib: {
machines =
config:
let
instanceNames = builtins.attrNames config.clan.inventory.services.data-mesher;
instanceName = builtins.head instanceNames;
dataMesherInstances = config.clan.inventory.services.data-mesher.${instanceName};
uniqueStrings = list: builtins.attrNames (builtins.groupBy lib.id list);
in
rec {
admins = dataMesherInstances.roles.admin.machines or [ ];
signers = dataMesherInstances.roles.signer.machines or [ ];
peers = dataMesherInstances.roles.peer.machines or [ ];
bootstrap = uniqueStrings (admins ++ signers);
};
}

View File

@@ -1,58 +0,0 @@
{ lib, config, ... }:
let
cfg = config.clan.data-mesher;
dmLib = import ../lib.nix lib;
in
{
imports = [
../shared.nix
];
options.clan.data-mesher = {
network = {
tld = lib.mkOption {
type = lib.types.str;
default = (config.networking.domain or "clan");
description = "Top level domain to use for the network";
};
hostTTL = lib.mkOption {
type = lib.types.str;
default = "672h"; # 28 days
example = "24h";
description = "The TTL for hosts in the network, in the form of a Go time.Duration";
};
};
};
config = {
warnings = [
"The clan.admin module is deprecated and will be removed on 2025-07-15.
Please migrate to user-maintained configuration or the new equivalent clan services
(https://docs.clan.lol/reference/clanServices)."
];
services.data-mesher.initNetwork =
let
# for a given machine, read its public key and remove any new lines
readHostKey =
machine:
let
path = "${config.clan.core.settings.directory}/vars/per-machine/${machine}/data-mesher-host-key/public_key/value";
in
builtins.elemAt (lib.splitString "\n" (builtins.readFile path)) 1;
in
{
enable = true;
keyPath = config.clan.core.vars.generators.data-mesher-network-key.files.private_key.path;
tld = cfg.network.tld;
hostTTL = cfg.network.hostTTL;
# admin and signer host public keys
signingKeys = builtins.map readHostKey (dmLib.machines config).bootstrap;
};
};
}

View File

@@ -1,5 +0,0 @@
{
imports = [
../shared.nix
];
}

View File

@@ -1,5 +0,0 @@
{
imports = [
../shared.nix
];
}

View File

@@ -1,152 +0,0 @@
{
config,
lib,
...
}:
let
cfg = config.clan.data-mesher;
dmLib = import ./lib.nix lib;
# the default bootstrap nodes are any machines with the admin or signers role
# we iterate through those machines, determining an IP address for them based on their VPN
# currently only supports zerotier
defaultBootstrapNodes = builtins.foldl' (
urls: name:
let
ipPath = "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value";
in
if builtins.pathExists ipPath then
let
ip = builtins.readFile ipPath;
in
urls ++ [ "[${ip}]:${builtins.toString cfg.network.port}" ]
else
urls
) [ ] (dmLib.machines config).bootstrap;
in
{
options.clan.data-mesher = {
bootstrapNodes = lib.mkOption {
type = lib.types.nullOr (lib.types.listOf lib.types.str);
default = null;
description = ''
A list of bootstrap nodes that act as an initial gateway when joining
the cluster.
'';
example = [
"192.168.1.1:7946"
"192.168.1.2:7946"
];
};
network = {
interface = lib.mkOption {
type = lib.types.str;
description = ''
The interface over which cluster communication should be performed.
All the IP addresses associated with this interface will be part of
our host claim, including both IPv4 and IPv6.
This should be set to an internal/VPN interface.
'';
example = "tailscale0";
};
port = lib.mkOption {
type = lib.types.port;
default = 7946;
description = ''
Port to listen on for cluster communication.
'';
};
};
};
config = {
services.data-mesher = {
enable = true;
openFirewall = true;
settings = {
log_level = "warn";
state_dir = "/var/lib/data-mesher";
# read network id from vars
network.id = config.clan.core.vars.generators.data-mesher-network-key.files.public_key.value;
host = {
names = [ config.networking.hostName ];
key_path = config.clan.core.vars.generators.data-mesher-host-key.files.private_key.path;
};
cluster = {
port = cfg.network.port;
join_interval = "30s";
push_pull_interval = "30s";
interface = cfg.network.interface;
bootstrap_nodes = if cfg.bootstrapNodes == null then defaultBootstrapNodes else cfg.bootstrapNodes;
};
http.port = 7331;
http.interface = "lo";
};
};
# Generate host key.
clan.core.vars.generators.data-mesher-host-key = {
files =
let
owner = config.users.users.data-mesher.name;
in
{
private_key = {
inherit owner;
};
public_key.secret = false;
};
runtimeInputs = [
config.services.data-mesher.package
];
script = ''
data-mesher generate keypair \
--public-key-path "$out"/public_key \
--private-key-path "$out"/private_key
'';
};
clan.core.vars.generators.data-mesher-network-key = {
# generated once per clan
share = true;
files =
let
owner = config.users.users.data-mesher.name;
in
{
private_key = {
inherit owner;
};
public_key.secret = false;
};
runtimeInputs = [
config.services.data-mesher.package
];
script = ''
data-mesher generate keypair \
--public-key-path "$out"/public_key \
--private-key-path "$out"/private_key
'';
};
};
}

View File

@@ -1,17 +0,0 @@
---
description = "Email-based instant messaging for Desktop."
categories = ["Social"]
features = [ "inventory", "deprecated" ]
---
!!! info
This module will automatically configure an email server on the machine for handling the e-mail messaging seamlessly.
## Features
- [x] **Email-based**: Uses any email account as its backend.
- [x] **End-to-End Encryption**: Supports Autocrypt to automatically encrypt messages.
- [x] **No Phone Number Required**: Uses your email address instead of a phone number.
- [x] **Cross-Platform**: Available on desktop and mobile platforms.
- [x] **Automatic Server Setup**: Includes your own DeltaChat server for enhanced control and privacy.
- [ ] **Bake a cake**: This module cannot bake a cake.

View File

@@ -1,3 +0,0 @@
{
imports = [ ./roles/default.nix ];
}

View File

@@ -1,153 +0,0 @@
{
config,
pkgs,
...
}:
{
warnings = [
"The clan.deltachat module is deprecated and will be removed on 2025-07-15.
Please migrate to user-maintained configuration or the new equivalent clan services
(https://docs.clan.lol/reference/clanServices)."
];
networking.firewall.interfaces."zt+".allowedTCPPorts = [ 25 ]; # smtp with other hosts
environment.systemPackages = [ pkgs.deltachat-desktop ];
services.maddy =
let
domain = "${config.clan.core.settings.machine.name}.local";
in
{
enable = true;
primaryDomain = domain;
config = ''
# Minimal configuration with TLS disabled, adapted from upstream example
# configuration here https://github.com/foxcpp/maddy/blob/master/maddy.conf
# Do not use this in unencrypted networks!
auth.pass_table local_authdb {
table sql_table {
driver sqlite3
dsn credentials.db
table_name passwords
}
}
storage.imapsql local_mailboxes {
driver sqlite3
dsn imapsql.db
}
table.chain local_rewrites {
optional_step regexp "(.+)\+(.+)@(.+)" "$1@$3"
optional_step static {
entry postmaster postmaster@$(primary_domain)
}
optional_step file /etc/maddy/aliases
}
msgpipeline local_routing {
destination postmaster $(local_domains) {
modify {
replace_rcpt &local_rewrites
}
deliver_to &local_mailboxes
}
default_destination {
reject 550 5.1.1 "User doesn't exist"
}
}
smtp tcp://[::]:25 {
limits {
all rate 20 1s
all concurrency 10
}
dmarc yes
check {
require_mx_record
dkim
spf
}
source $(local_domains) {
reject 501 5.1.8 "Use Submission for outgoing SMTP"
}
default_source {
destination postmaster $(local_domains) {
deliver_to &local_routing
}
default_destination {
reject 550 5.1.1 "User doesn't exist"
}
}
}
submission tcp://[::1]:587 {
limits {
all rate 50 1s
}
auth &local_authdb
source $(local_domains) {
check {
authorize_sender {
prepare_email &local_rewrites
user_to_email identity
}
}
destination postmaster $(local_domains) {
deliver_to &local_routing
}
default_destination {
modify {
dkim $(primary_domain) $(local_domains) default
}
deliver_to &remote_queue
}
}
default_source {
reject 501 5.1.8 "Non-local sender domain"
}
}
target.remote outbound_delivery {
limits {
destination rate 20 1s
destination concurrency 10
}
mx_auth {
dane
mtasts {
cache fs
fs_dir mtasts_cache/
}
local_policy {
min_tls_level encrypted
min_mx_level none
}
}
}
target.queue remote_queue {
target &outbound_delivery
autogenerated_msg_domain $(primary_domain)
bounce {
destination postmaster $(local_domains) {
deliver_to &local_routing
}
default_destination {
reject 550 5.0.0 "Refusing to send DSNs to non-local addresses"
}
}
}
imap tcp://[::1]:143 {
auth &local_authdb
storage &local_mailboxes
}
'';
ensureAccounts = [ "user@${domain}" ];
ensureCredentials = {
"user@${domain}".passwordFile = pkgs.writeText "dummy" "foobar";
};
};
}

View File

@@ -1,5 +0,0 @@
---
description = "Generates a uuid for use in disk device naming"
features = [ "inventory" ]
categories = [ "System" ]
---

View File

@@ -1,6 +0,0 @@
# Don't import this file
# It is only here for backwards compatibility.
# Don't author new modules with this file.
{
imports = [ ./roles/default.nix ];
}

View File

@@ -1,20 +0,0 @@
#!/usr/bin/env bash
# Read 16 bytes from /dev/urandom
uuid=$(dd if=/dev/urandom bs=1 count=16 2>/dev/null | od -An -tx1 | tr -d ' \n')
# Break the UUID into pieces and apply the required modifications
byte6=${uuid:12:2}
byte8=${uuid:16:2}
# Construct the correct version and variant
hex_byte6=$(printf "%x" $((0x$byte6 & 0x0F | 0x40)))
hex_byte8=$(printf "%x" $((0x$byte8 & 0x3F | 0x80)))
# Rebuild the UUID with the correct fields
uuid_v4="${uuid:0:12}${hex_byte6}${uuid:14:2}${hex_byte8}${uuid:18:14}"
# Format the UUID correctly 8-4-4-4-12
uuid_formatted="${uuid_v4:0:8}-${uuid_v4:8:4}-${uuid_v4:12:4}-${uuid_v4:16:4}-${uuid_v4:20:12}"
echo -n "$uuid_formatted"

View File

@@ -1,6 +0,0 @@
---
description = "A dynamic DNS service to update domain IPs"
---
To understand the possible options that can be set visit the documentation of [ddns-updater](https://github.com/qdm12/ddns-updater?tab=readme-ov-file#versioned-documentation)
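A minimal configuration sketch using the options defined by the module below; the provider, domain, and host are placeholders, and the secret itself is supplied through the generated vars prompt rather than written into the configuration:
{ self, ... }:
{
  imports = [ self.clanModules.dyndns ];
  clan.dyndns.settings."home.example.com" = {
    provider = "namecheap";
    domain = "example.com";
    # Non-secret, provider-specific settings passed through to ddns-updater
    extraSettings.host = "home";
  };
}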

View File

@@ -1,257 +0,0 @@
{
config,
pkgs,
lib,
...
}:
let
name = "dyndns";
cfg = config.clan.${name};
# We dedup secrets if they have the same provider + base domain
secret_id = opt: "${name}-${opt.provider}-${opt.domain}";
secret_path =
opt: config.clan.core.vars.generators."${secret_id opt}".files."${secret_id opt}".path;
# We check that a secret has not been set in extraSettings.
extraSettingsSafe =
opt:
if (builtins.hasAttr opt.secret_field_name opt.extraSettings) then
throw "Please do not set ${opt.secret_field_name} in extraSettings, it is automatically set by the dyndns module."
else
opt.extraSettings;
/*
We go from:
{home.example.com:{value:{domain:example.com,host:home, provider:namecheap}}}
To:
{settings: [{domain: example.com, host: home, provider: namecheap, password: dyndns-namecheap-example.com}]}
*/
service_config = {
settings = builtins.catAttrs "value" (
builtins.attrValues (
lib.mapAttrs (_: opt: {
value =
(extraSettingsSafe opt)
// {
domain = opt.domain;
provider = opt.provider;
}
// {
"${opt.secret_field_name}" = secret_id opt;
};
}) cfg.settings
)
);
};
secret_generator = _: opt: {
name = secret_id opt;
value = {
share = true;
migrateFact = "${secret_id opt}";
prompts.${secret_id opt} = {
type = "hidden";
persist = true;
};
};
};
in
{
options.clan.${name} = {
server = {
enable = lib.mkEnableOption "dyndns webserver";
domain = lib.mkOption {
type = lib.types.str;
description = "Domain to serve the webservice on";
};
port = lib.mkOption {
type = lib.types.int;
default = 54805;
description = "Port to listen on";
};
};
period = lib.mkOption {
type = lib.types.int;
default = 5;
description = "Domain update period in minutes";
};
settings = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule (
{ ... }:
{
options = {
provider = lib.mkOption {
example = "namecheap";
type = lib.types.str;
description = "The dyndns provider to use";
};
domain = lib.mkOption {
type = lib.types.str;
example = "example.com";
description = "The top level domain to update.";
};
secret_field_name = lib.mkOption {
example = [
"password"
"api_key"
];
type = lib.types.enum [
"password"
"token"
"api_key"
"secret_api_key"
];
default = "password";
description = "The field name for the secret";
};
# TODO: Ideally we would create a gigantic list of all possible settings / types
# optimally we would have a way to generate the options from the source code
extraSettings = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = { };
description = ''
Extra settings for the provider.
Provider specific settings: https://github.com/qdm12/ddns-updater#configuration
'';
};
};
}
)
);
default = { };
description = "Configuration for which domains to update";
};
};
imports = [
../nginx
];
config = lib.mkMerge [
(lib.mkIf (cfg.settings != { }) {
clan.core.vars.generators = lib.mapAttrs' secret_generator cfg.settings;
users.groups.${name} = { };
users.users.${name} = {
group = name;
isSystemUser = true;
description = "User for ${name} service";
home = "/var/lib/${name}";
createHome = true;
};
services.nginx = lib.mkIf cfg.server.enable {
enable = true;
virtualHosts = {
"${cfg.server.domain}" = {
forceSSL = true;
enableACME = true;
locations."/" = {
proxyPass = "http://localhost:${toString cfg.server.port}";
};
};
};
};
systemd.services.${name} = {
path = [ ];
description = "Dynamic DNS updater";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
environment = {
MYCONFIG = "${builtins.toJSON service_config}";
SERVER_ENABLED = if cfg.server.enable then "yes" else "no";
PERIOD = "${toString cfg.period}m";
LISTENING_ADDRESS = ":${toString cfg.server.port}";
};
serviceConfig =
let
pyscript =
pkgs.writers.writePython3Bin "generate_secret_config.py"
{
libraries = [ ];
doCheck = false;
}
''
import json
from pathlib import Path
import os
cred_dir = Path(os.getenv("CREDENTIALS_DIRECTORY"))
config_str = os.getenv("MYCONFIG")
def get_credential(name):
secret_p = cred_dir / name
with open(secret_p, 'r') as f:
return f.read().strip()
config = json.loads(config_str)
print(f"Config: {config}")
for attrset in config["settings"]:
if "password" in attrset:
attrset['password'] = get_credential(attrset['password'])
elif "token" in attrset:
attrset['token'] = get_credential(attrset['token'])
elif "secret_api_key" in attrset:
attrset['secret_api_key'] = get_credential(attrset['secret_api_key'])
elif "api_key" in attrset:
attrset['api_key'] = get_credential(attrset['api_key'])
else:
raise ValueError(f"Missing secret field in {attrset}")
# create directory data if it does not exist
data_dir = Path('data')
data_dir.mkdir(mode=0o770, exist_ok=True)
# Create a temporary config file
# with appropriate permissions
tmp_config_path = data_dir / '.config.json'
tmp_config_path.touch(mode=0o660, exist_ok=False)
# Write the config with secrets back
with open(tmp_config_path, 'w') as f:
f.write(json.dumps(config, indent=4))
# Move config into place
config_path = data_dir / 'config.json'
tmp_config_path.rename(config_path)
# Set file permissions to read
# and write only by the user and group
for file in data_dir.iterdir():
file.chmod(0o660)
'';
in
{
ExecStartPre = lib.getExe pyscript;
ExecStart = lib.getExe pkgs.ddns-updater;
LoadCredential = lib.mapAttrsToList (_: opt: "${secret_id opt}:${secret_path opt}") cfg.settings;
User = name;
Group = name;
NoNewPrivileges = true;
PrivateTmp = true;
ProtectSystem = "strict";
ReadOnlyPaths = "/";
PrivateDevices = "yes";
ProtectKernelModules = "yes";
ProtectKernelTunables = "yes";
WorkingDirectory = "/var/lib/${name}";
ReadWritePaths = [
"/proc/self"
"/var/lib/${name}"
];
Restart = "always";
RestartSec = 60;
};
};
})
];
}

View File

@@ -1,5 +0,0 @@
---
description = "A modern IRC server"
categories = ["Social"]
features = [ "inventory", "deprecated" ]
---

View File

@@ -1,3 +0,0 @@
{
imports = [ ./roles/default.nix ];
}

View File

@@ -1,21 +0,0 @@
_: {
warnings = [
"The clan.ergochat module is deprecated and will be removed on 2025-07-15.
Please migrate to user-maintained configuration or the new equivalent clan services
(https://docs.clan.lol/reference/clanServices)."
];
services.ergochat = {
enable = true;
settings = {
datastore = {
autoupgrade = true;
path = "/var/lib/ergo/ircd.db";
};
};
};
clan.core.state.ergochat.folders = [ "/var/lib/ergo" ];
}

View File

@@ -1,51 +1,23 @@
{ lib, ... }:
{ ... }:
let
inherit (lib)
filterAttrs
pathExists
;
error = builtins.throw ''
clanModules have been removed!
Refer to https://docs.clan.lol/guides/migrations/migrate-inventory-services for migration.
'';
in
{
# only import available files, as this allows to filter the files for tests.
flake.clanModules = filterAttrs (_name: pathExists) {
auto-upgrade = ./auto-upgrade;
admin = ./admin;
borgbackup = ./borgbackup;
borgbackup-static = ./borgbackup-static;
deltachat = ./deltachat;
data-mesher = ./data-mesher;
disk-id = ./disk-id;
dyndns = ./dyndns;
ergochat = ./ergochat;
garage = ./garage;
heisenbridge = ./heisenbridge;
importer = ./importer;
iwd = ./iwd;
localbackup = ./localbackup;
localsend = ./localsend;
matrix-synapse = ./matrix-synapse;
moonlight = ./moonlight;
mumble = ./mumble;
mycelium = ./mycelium;
nginx = ./nginx;
packages = ./packages;
postgresql = ./postgresql;
root-password = ./root-password;
single-disk = ./single-disk;
sshd = ./sshd;
state-version = ./state-version;
static-hosts = ./static-hosts;
sunshine = ./sunshine;
syncthing = ./syncthing;
syncthing-static-peers = ./syncthing-static-peers;
thelounge = ./thelounge;
trusted-nix-caches = ./trusted-nix-caches;
user-password = ./user-password;
vaultwarden = ./vaultwarden;
wifi = ./wifi;
xfce = ./xfce;
zerotier = ./zerotier;
zerotier-static-peers = ./zerotier-static-peers;
zt-tcp-relay = ./zt-tcp-relay;
flake.clanModules = {
outPath = "removed-clan-modules";
value = error;
};
# builtins.listToAttrs (
# map (name: {
# inherit name;
# value = error;
# }) modnames
# );
}

View File

@@ -1,11 +0,0 @@
---
description = "S3-compatible object store for small self-hosted geo-distributed deployments"
categories = ["System"]
features = [ "inventory", "deprecated" ]
---
This module generates garage specific keys automatically.
Also shares the `rpc_secret` between instances.
Options: [NixosModuleOptions](https://search.nixos.org/options?channel=unstable&size=50&sort=relevance&type=packages&query=garage)
Documentation: https://garagehq.deuxfleurs.fr/

View File

@@ -1,3 +0,0 @@
{
imports = [ ./roles/default.nix ];
}

View File

@@ -1,50 +0,0 @@
{ config, pkgs, ... }:
{
warnings = [
"The clan.ergochat module is deprecated and will be removed on 2025-07-15.
Please migrate to user-maintained configuration or the new equivalent clan services
(https://docs.clan.lol/reference/clanServices)."
];
systemd.services.garage.serviceConfig = {
LoadCredential = [
"rpc_secret_path:${config.clan.core.vars.generators.garage-shared.files.rpc_secret.path}"
"admin_token_path:${config.clan.core.vars.generators.garage.files.admin_token.path}"
"metrics_token_path:${config.clan.core.vars.generators.garage.files.metrics_token.path}"
];
Environment = [
"GARAGE_ALLOW_WORLD_READABLE_SECRETS=true"
"GARAGE_RPC_SECRET_FILE=%d/rpc_secret_path"
"GARAGE_ADMIN_TOKEN_FILE=%d/admin_token_path"
"GARAGE_METRICS_TOKEN_FILE=%d/metrics_token_path"
];
};
clan.core.vars.generators.garage = {
files.admin_token = { };
files.metrics_token = { };
runtimeInputs = [
pkgs.coreutils
pkgs.openssl
];
script = ''
openssl rand -base64 -out "$out"/admin_token 32
openssl rand -base64 -out "$out"/metrics_token 32
'';
};
clan.core.vars.generators.garage-shared = {
share = true;
files.rpc_secret = { };
runtimeInputs = [
pkgs.coreutils
pkgs.openssl
];
script = ''
openssl rand -hex -out "$out"/rpc_secret 32
'';
};
clan.core.state.garage.folders = [ config.services.garage.settings.metadata_dir ];
}

View File

@@ -1,5 +0,0 @@
---
description = "A matrix bridge to communicate with IRC"
categories = ["Social"]
features = [ "inventory", "deprecated" ]
---

View File

@@ -1,3 +0,0 @@
{
imports = [ ./roles/default.nix ];
}

View File

@@ -1,27 +0,0 @@
{
lib,
...
}:
{
imports = [
(lib.mkRemovedOptionModule [
"clan"
"heisenbridge"
"enable"
] "Importing the module will already enable the service.")
];
config = {
warnings = [
"The clan.heisenbridge module is deprecated and will be removed on 2025-07-15.
Please migrate to user-maintained configuration or the new equivalent clan services
(https://docs.clan.lol/reference/clanServices)."
];
services.heisenbridge = {
enable = true;
homeserver = "http://localhost:8008"; # TODO: Sync with matrix-synapse
};
services.matrix-synapse.settings.app_service_config_files = [
"/var/lib/heisenbridge/registration.yml"
];
};
}

View File

@@ -1 +0,0 @@
{ }

View File

@@ -1,9 +0,0 @@
---
description = "Automatically provisions wifi credentials"
features = [ "inventory", "deprecated" ]
categories = [ "Network" ]
---
!!! Warning
If you've been using network manager + wpa_supplicant and now are switching to IWD read this migration guide:
https://archive.kernel.org/oldwiki/iwd.wiki.kernel.org/networkmanager.html#converting_network_profiles
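A minimal usage sketch of the network options defined by the module below; the SSID is a placeholder and the passphrase is collected via the generated vars prompt:
{ self, ... }:
{
  imports = [ self.clanModules.iwd ];
  clan.iwd.networks."home-wifi" = {
    # ssid defaults to the attribute name; set it explicitly only if they differ
    AutoConnect = true;
  };
}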

View File

@@ -1,6 +0,0 @@
# Don't import this file
# It is only here for backwards compatibility.
# Don't author new modules with this file.
{
imports = [ ./roles/default.nix ];
}

View File

@@ -1,106 +0,0 @@
{
lib,
config,
pkgs,
...
}:
let
cfg = config.clan.iwd;
secret_path = ssid: config.clan.core.vars.generators."iwd.${ssid}".files."iwd.${ssid}".path;
secret_generator = name: value: {
name = "iwd.${value.ssid}";
value =
let
secret_name = "iwd.${value.ssid}";
in
{
prompts.${secret_name} = {
description = "Wifi password for '${value.ssid}'";
persist = true;
};
migrateFact = secret_name;
# ref. man iwd.network
script = ''
config="
[Settings]
AutoConnect=${if value.AutoConnect then "true" else "false"}
[Security]
Passphrase=$(echo -e "$prompt_value/${secret_name}" | ${lib.getExe pkgs.gnused} "s=\\\=\\\\\\\=g;s=\t=\\\t=g;s=\r=\\\r=g;s=^ =\\\s=")
"
echo "$config" > "$out/${secret_name}"
'';
};
};
in
{
options.clan.iwd = {
networks = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
ssid = lib.mkOption {
type = lib.types.str;
default = name;
description = "The name of the wifi network";
};
AutoConnect = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Automatically try to join this wifi network";
};
};
}
)
);
default = { };
description = "Wifi networks to predefine";
};
};
imports = [
(lib.mkRemovedOptionModule [
"clan"
"iwd"
"enable"
] "Just define clan.iwd.networks to enable it")
];
config = lib.mkMerge [
(lib.mkIf (cfg.networks != { }) {
# Systemd tmpfiles rule to create /var/lib/iwd/example.psk file
systemd.tmpfiles.rules = lib.mapAttrsToList (
_: value: "C /var/lib/iwd/${value.ssid}.psk 0600 root root - ${secret_path value.ssid}"
) cfg.networks;
clan.core.vars.generators = lib.mapAttrs' secret_generator cfg.networks;
# TODO: restart the iwd.service if something changes
})
{
warnings = [
"The clan.iwd module is deprecated and will be removed on 2025-07-15. Please migrate to a user-maintained configuration or use the wifi service."
];
# disable wpa supplicant
networking.wireless.enable = false;
# Set the network manager backend to iwd
networking.networkmanager.wifi.backend = "iwd";
# Use iwd instead of wpa_supplicant. It has a user friendly CLI
networking.wireless.iwd = {
enable = true;
settings = {
Network = {
EnableIPv6 = true;
RoutePriorityOffset = 300;
};
Settings.AutoConnect = true;
};
};
}
];
}

View File

@@ -1,3 +0,0 @@
---
description = "Automatically backups current machine to local directory."
---
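A minimal target definition sketch, following the `clan.localbackup.targets` usage from the backup test earlier in this diff; paths are placeholders:
{ self, ... }:
{
  imports = [ self.clanModules.localbackup ];
  clan.localbackup = {
    targets.hdd = {
      # Where snapshots are written
      directory = "/mnt/external-disk";
      # Optional: mount/unmount this path around each backup run
      mountpoint = "/mnt/external-disk";
    };
    # Number of snapshots to keep (module default is 20)
    snapshots = 20;
  };
}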

View File

@@ -1,242 +0,0 @@
{
config,
lib,
pkgs,
...
}:
let
cfg = config.clan.localbackup;
uniqueFolders = lib.unique (
lib.flatten (lib.mapAttrsToList (_name: state: state.folders) config.clan.core.state)
);
rsnapshotConfig = target: ''
config_version 1.2
snapshot_root ${target.directory}
sync_first 1
cmd_cp ${pkgs.coreutils}/bin/cp
cmd_rm ${pkgs.coreutils}/bin/rm
cmd_rsync ${pkgs.rsync}/bin/rsync
cmd_ssh ${pkgs.openssh}/bin/ssh
cmd_logger ${pkgs.inetutils}/bin/logger
cmd_du ${pkgs.coreutils}/bin/du
cmd_rsnapshot_diff ${pkgs.rsnapshot}/bin/rsnapshot-diff
${lib.optionalString (target.postBackupHook != null) ''
cmd_postexec ${pkgs.writeShellScript "postexec.sh" ''
set -efu -o pipefail
${target.postBackupHook}
''}
''}
retain snapshot ${builtins.toString config.clan.localbackup.snapshots}
${lib.concatMapStringsSep "\n" (folder: ''
backup ${folder} ${config.networking.hostName}/
'') uniqueFolders}
'';
in
{
options.clan.localbackup = {
targets = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
name = lib.mkOption {
type = lib.types.strMatching "^[a-zA-Z0-9._-]+$";
default = name;
description = "the name of the backup job";
};
directory = lib.mkOption {
type = lib.types.str;
description = "the directory to backup";
};
mountpoint = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "mountpoint of the directory to backup. If set, the directory will be mounted before the backup and unmounted afterwards";
};
preMountHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run before the directory is mounted";
};
postMountHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run after the directory is mounted";
};
preUnmountHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run before the directory is unmounted";
};
postUnmountHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run after the directory is unmounted";
};
preBackupHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run before the backup";
};
postBackupHook = lib.mkOption {
type = lib.types.nullOr lib.types.lines;
default = null;
description = "Shell commands to run after the backup";
};
};
}
)
);
default = { };
description = "List of directories where backups are stored";
};
snapshots = lib.mkOption {
type = lib.types.int;
default = 20;
description = "Number of snapshots to keep";
};
};
config =
let
mountHook = target: ''
if [[ -x /run/current-system/sw/bin/localbackup-mount-${target.name} ]]; then
/run/current-system/sw/bin/localbackup-mount-${target.name}
fi
if [[ -x /run/current-system/sw/bin/localbackup-unmount-${target.name} ]]; then
trap "/run/current-system/sw/bin/localbackup-unmount-${target.name}" EXIT
fi
'';
in
lib.mkIf (cfg.targets != { }) {
environment.systemPackages =
[
(pkgs.writeShellScriptBin "localbackup-create" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.rsnapshot
pkgs.coreutils
pkgs.util-linux
]
}
${lib.concatMapStringsSep "\n" (target: ''
${mountHook target}
echo "Creating backup '${target.name}'"
${lib.optionalString (target.preBackupHook != null) ''
(
${target.preBackupHook}
)
''}
declare -A preCommandErrors
${lib.concatMapStringsSep "\n" (
state:
lib.optionalString (state.preBackupCommand != null) ''
echo "Running pre-backup command for ${state.name}"
if ! /run/current-system/sw/bin/${state.preBackupCommand}; then
preCommandErrors["${state.name}"]=1
fi
''
) (builtins.attrValues config.clan.core.state)}
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" sync
rsnapshot -c "${pkgs.writeText "rsnapshot.conf" (rsnapshotConfig target)}" snapshot
'') (builtins.attrValues cfg.targets)}'')
(pkgs.writeShellScriptBin "localbackup-list" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.jq
pkgs.findutils
pkgs.coreutils
pkgs.util-linux
]
}
(${
lib.concatMapStringsSep "\n" (target: ''
(
${mountHook target}
find ${lib.escapeShellArg target.directory} -mindepth 1 -maxdepth 1 -name "snapshot.*" -print0 -type d \
| jq -Rs 'split("\u0000") | .[] | select(. != "") | { "name": ("${target.name}::" + .)}'
)
'') (builtins.attrValues cfg.targets)
}) | jq -s .
'')
(pkgs.writeShellScriptBin "localbackup-restore" ''
set -efu -o pipefail
export PATH=${
lib.makeBinPath [
pkgs.rsync
pkgs.coreutils
pkgs.util-linux
pkgs.gawk
]
}
if [[ "''${NAME:-}" == "" ]]; then
echo "No backup name given via NAME environment variable"
exit 1
fi
if [[ "''${FOLDERS:-}" == "" ]]; then
echo "No folders given via FOLDERS environment variable"
exit 1
fi
name=$(awk -F'::' '{print $1}' <<< $NAME)
backupname=''${NAME#$name::}
if command -v localbackup-mount-$name; then
localbackup-mount-$name
fi
if command -v localbackup-unmount-$name; then
trap "localbackup-unmount-$name" EXIT
fi
if [[ ! -d $backupname ]]; then
echo "No backup found $backupname"
exit 1
fi
IFS=':' read -ra FOLDER <<< "''$FOLDERS"
for folder in "''${FOLDER[@]}"; do
mkdir -p "$folder"
rsync -a "$backupname/${config.networking.hostName}$folder/" "$folder"
done
'')
]
++ (lib.mapAttrsToList (
name: target:
pkgs.writeShellScriptBin ("localbackup-mount-" + name) ''
set -efu -o pipefail
${lib.optionalString (target.preMountHook != null) target.preMountHook}
${lib.optionalString (target.mountpoint != null) ''
if ! ${pkgs.util-linux}/bin/mountpoint -q ${lib.escapeShellArg target.mountpoint}; then
${pkgs.util-linux}/bin/mount -o X-mount.mkdir ${lib.escapeShellArg target.mountpoint}
fi
''}
${lib.optionalString (target.postMountHook != null) target.postMountHook}
''
) cfg.targets)
++ lib.mapAttrsToList (
name: target:
pkgs.writeShellScriptBin ("localbackup-unmount-" + name) ''
set -efu -o pipefail
${lib.optionalString (target.preUnmountHook != null) target.preUnmountHook}
${lib.optionalString (
target.mountpoint != null
) "${pkgs.util-linux}/bin/umount ${lib.escapeShellArg target.mountpoint}"}
${lib.optionalString (target.postUnmountHook != null) target.postUnmountHook}
''
) cfg.targets;
clan.core.backups.providers.localbackup = {
# TODO list needs to run locally or on the remote machine
list = "localbackup-list";
create = "localbackup-create";
restore = "localbackup-restore";
};
};
}

View File

@@ -1,5 +0,0 @@
---
description = "Securely sharing files and messages over a local network without internet connectivity."
categories = ["Utility"]
features = [ "inventory", "deprecated" ]
---

View File

@@ -1,3 +0,0 @@
{
imports = [ ./roles/default.nix ];
}

View File

@@ -1,22 +0,0 @@
{
lib,
writers,
writeShellScriptBin,
localsend,
alias ? null,
}:
let
localsend-ensure-config = writers.writePython3 "localsend-ensure-config" {
flakeIgnore = [
# We don't live in the dark ages anymore.
# Languages like Python that are whitespace heavy will overrun
# 79 characters..
"E501"
];
} (builtins.readFile ./localsend-ensure-config.py);
in
writeShellScriptBin "localsend" ''
set -xeu
${localsend-ensure-config} ${lib.optionalString (alias != null) alias}
${lib.getExe localsend}
''

View File

@@ -1,64 +0,0 @@
import json
import sys
from pathlib import Path
def load_json(file_path: Path) -> dict[str, any]:
try:
with file_path.open("r") as file:
return json.load(file)
except FileNotFoundError:
return {}
def save_json(file_path: Path, data: dict[str, any]) -> None:
with file_path.open("w") as file:
json.dump(data, file, indent=4)
def update_json(file_path: Path, updates: dict[str, any]) -> None:
data = load_json(file_path)
data.update(updates)
save_json(file_path, data)
def config_location() -> Path:
config_file = "shared_preferences.json"
config_directory = ".local/share/org.localsend.localsend_app"
config_path = Path.home() / Path(config_directory) / Path(config_file)
return config_path
def ensure_config_directory() -> None:
config_directory = Path(config_location()).parent
config_directory.mkdir(parents=True, exist_ok=True)
def load_config() -> dict[str, any]:
return load_json(config_location())
def save_config(data: dict[str, any]) -> None:
save_json(config_location(), data)
def update_username(username: str, data: dict[str, any]) -> dict[str, any]:
data["flutter.ls_alias"] = username
return data
def main(argv: list[str]) -> None:
try:
display_name = argv[1]
except IndexError:
# This is not an error, just don't update the name
print("No display name provided.")
sys.exit(0)
ensure_config_directory()
updated_data = update_username(display_name, load_config())
save_config(updated_data)
if __name__ == "__main__":
main(sys.argv[:2])

View File

@@ -1,69 +0,0 @@
{
config,
pkgs,
lib,
...
}:
let
cfg = config.clan.localsend;
in
{
# Integration can be improved, if the following issues get implemented:
# - cli frontend: https://github.com/localsend/localsend/issues/11
# - ipv6 support: https://github.com/localsend/localsend/issues/549
options.clan.localsend = {
displayName = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "The name that localsend will use to display your instance.";
};
package = lib.mkPackageOption pkgs "localsend" { };
ipv4Addr = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
example = "192.168.56.2/24";
description = "Optional IPv4 address for ZeroTier network.";
};
};
imports = [
(lib.mkRemovedOptionModule [
"clan"
"localsend"
"enable"
] "Importing the module will already enable the service.")
];
config = {
warnings = [
"The clan.localsend module is deprecated and will be removed on 2025-07-15.
Please migrate to user-maintained configuration or the new equivalent clan services
(https://docs.clan.lol/reference/clanServices)."
];
clan.core.state.localsend.folders = [
"/var/localsend"
];
environment.systemPackages = [
(pkgs.callPackage ./localsend-ensure-config {
localsend = config.clan.localsend.package;
alias = config.clan.localsend.displayName;
})
];
networking.firewall.interfaces."zt+".allowedTCPPorts = [ 53317 ];
networking.firewall.interfaces."zt+".allowedUDPPorts = [ 53317 ];
#TODO: This is currently needed because there is no ipv6 multicasting support yet
systemd.network.networks = lib.mkIf (cfg.ipv4Addr != null) {
"09-zerotier" = {
networkConfig = {
Address = cfg.ipv4Addr;
};
};
};
};
}

View File

@@ -1,3 +0,0 @@
---
description = "A federated messaging server with end-to-end encryption."
---
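A minimal configuration sketch using the options defined by the module below; domains and the user name are placeholders:
{ self, ... }:
{
  imports = [ self.clanModules.matrix-synapse ];
  clan.matrix-synapse = {
    server_tld = "example.com";
    app_domain = "matrix.example.com";
    # Only new users are created; passwords come from the generated vars
    users.alice.admin = true;
  };
}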

View File

@@ -1,209 +0,0 @@
{
config,
lib,
pkgs,
...
}:
let
cfg = config.clan.matrix-synapse;
element-web =
pkgs.runCommand "element-web-with-config" { nativeBuildInputs = [ pkgs.buildPackages.jq ]; }
''
cp -r ${pkgs.element-web} $out
chmod -R u+w $out
jq '."default_server_config"."m.homeserver" = { "base_url": "https://${cfg.app_domain}:443", "server_name": "${cfg.server_tld}" }' \
> $out/config.json < ${pkgs.element-web}/config.json
ln -s $out/config.json $out/config.${cfg.app_domain}.json
'';
in
# FIXME: This was taken from upstream. Drop this when our patch is upstream
{
options.services.matrix-synapse.package = lib.mkOption { readOnly = false; };
options.clan.matrix-synapse = {
server_tld = lib.mkOption {
type = lib.types.str;
description = "The address that is suffixed after your username i.e @alice:example.com";
example = "example.com";
};
app_domain = lib.mkOption {
type = lib.types.str;
description = "The matrix server hostname also serves the element client";
example = "matrix.example.com";
};
users = lib.mkOption {
default = { };
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
name = lib.mkOption {
type = lib.types.str;
default = name;
description = "The name of the user";
};
admin = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Whether the user should be an admin";
};
};
}
)
);
description = "A list of users. Not that only new users will be created and existing ones are not modified.";
example.alice = {
admin = true;
};
};
};
imports = [
../postgresql
(lib.mkRemovedOptionModule [
"clan"
"matrix-synapse"
"enable"
] "Importing the module will already enable the service.")
../nginx
];
config = {
services.matrix-synapse = {
enable = true;
settings = {
server_name = cfg.server_tld;
database = {
args.user = "matrix-synapse";
args.database = "matrix-synapse";
name = "psycopg2";
};
turn_uris = [
"turn:turn.matrix.org?transport=udp"
"turn:turn.matrix.org?transport=tcp"
];
registration_shared_secret_path = "/run/synapse-registration-shared-secret";
listeners = [
{
port = 8008;
bind_addresses = [ "::1" ];
type = "http";
tls = false;
x_forwarded = true;
resources = [
{
names = [ "client" ];
compress = true;
}
{
names = [ "federation" ];
compress = false;
}
];
}
];
};
};
clan.postgresql.users.matrix-synapse = { };
clan.postgresql.databases.matrix-synapse.create.options = {
TEMPLATE = "template0";
LC_COLLATE = "C";
LC_CTYPE = "C";
ENCODING = "UTF8";
OWNER = "matrix-synapse";
};
clan.postgresql.databases.matrix-synapse.restore.stopOnRestore = [ "matrix-synapse" ];
clan.core.vars.generators =
{
"matrix-synapse" = {
files."synapse-registration_shared_secret" = { };
runtimeInputs = with pkgs; [
coreutils
pwgen
];
migrateFact = "matrix-synapse";
script = ''
echo -n "$(pwgen -s 32 1)" > "$out"/synapse-registration_shared_secret
'';
};
}
// lib.mapAttrs' (
name: user:
lib.nameValuePair "matrix-password-${user.name}" {
files."matrix-password-${user.name}" = { };
migrateFact = "matrix-password-${user.name}";
runtimeInputs = with pkgs; [ xkcdpass ];
script = ''
xkcdpass -n 4 -d - > "$out"/${lib.escapeShellArg "matrix-password-${user.name}"}
'';
}
) cfg.users;
systemd.services.matrix-synapse =
let
usersScript =
''
while ! ${pkgs.netcat}/bin/nc -z -v ::1 8008; do
if ! kill -0 "$MAINPID"; then exit 1; fi
sleep 1;
done
''
+ lib.concatMapStringsSep "\n" (user: ''
# only create user if it doesn't exist
/run/current-system/sw/bin/matrix-synapse-register_new_matrix_user --exists-ok --password-file ${
config.clan.core.vars.generators."matrix-password-${user.name}".files."matrix-password-${user.name}".path
} --user "${user.name}" ${if user.admin then "--admin" else "--no-admin"}
'') (lib.attrValues cfg.users);
in
{
path = [ pkgs.curl ];
serviceConfig.ExecStartPre = lib.mkBefore [
"+${pkgs.coreutils}/bin/install -o matrix-synapse -g matrix-synapse ${
lib.escapeShellArg
config.clan.core.vars.generators.matrix-synapse.files."synapse-registration_shared_secret".path
} /run/synapse-registration-shared-secret"
];
serviceConfig.ExecStartPost = [
''+${pkgs.writeShellScript "matrix-synapse-create-users" usersScript}''
];
};
services.nginx = {
enable = true;
virtualHosts = {
"${cfg.server_tld}" = {
locations."= /.well-known/matrix/server".extraConfig = ''
add_header Content-Type application/json;
return 200 '${builtins.toJSON { "m.server" = "${cfg.app_domain}:443"; }}';
'';
locations."= /.well-known/matrix/client".extraConfig = ''
add_header Content-Type application/json;
add_header Access-Control-Allow-Origin *;
return 200 '${
builtins.toJSON {
"m.homeserver" = {
"base_url" = "https://${cfg.app_domain}";
};
"m.identity_server" = {
"base_url" = "https://vector.im";
};
}
}';
'';
forceSSL = true;
enableACME = true;
};
"${cfg.app_domain}" = {
forceSSL = true;
enableACME = true;
locations."/".root = element-web;
locations."/_matrix".proxyPass = "http://localhost:8008"; # TODO: We should make the port configurable
locations."/_synapse".proxyPass = "http://localhost:8008";
};
};
};
};
}

View File

@@ -1,5 +0,0 @@
---
description = "A desktop streaming client optimized for remote gaming and synchronized movie viewing."
---
**Warning**: This module was written with our VM integration in mind and likely won't work outside of that context. It will be generalized in the future.

View File

@@ -1,91 +0,0 @@
{ pkgs, config, ... }:
let
ms-accept = pkgs.callPackage ../../pkgs/moonlight-sunshine-accept { };
defaultPort = 48011;
in
{
warnings = [
"The clan.moonlight module is deprecated and will be removed on 2025-07-15. Please migrate to user-maintained configuration."
];
hardware.opengl.enable = true;
environment.systemPackages = [
pkgs.moonlight-qt
ms-accept
];
systemd.tmpfiles.rules = [
"d '/var/lib/moonlight' 0770 'user' 'users' - -"
"C '/var/lib/moonlight/moonlight.cert' 0644 'user' 'users' - ${
config.clan.core.vars.generators.moonlight.files."moonlight.cert".path or ""
}"
"C '/var/lib/moonlight/moonlight.key' 0644 'user' 'users' - ${
config.clan.core.vars.generators.moonlight.files."moonlight.key".path or ""
}"
];
systemd.user.services.init-moonlight = {
enable = false;
description = "Initializes moonlight";
wantedBy = [ "graphical-session.target" ];
script = ''
${ms-accept}/bin/moonlight-sunshine-accept moonlight init-config --key /var/lib/moonlight/moonlight.key --cert /var/lib/moonlight/moonlight.cert
'';
serviceConfig = {
user = "user";
Type = "oneshot";
WorkingDirectory = "/home/user/";
RunTimeDirectory = "moonlight";
TimeoutSec = "infinity";
Restart = "on-failure";
RemainAfterExit = true;
ReadOnlyPaths = [
"/var/lib/moonlight/moonlight.key"
"/var/lib/moonlight/moonlight.cert"
];
};
};
systemd.user.services.moonlight-join = {
description = "Join sunshine hosts";
script = ''${ms-accept}/bin/moonlight-sunshine-accept moonlight join --port ${builtins.toString defaultPort} --cert '${
config.clan.core.vars.generators.moonlight.files."moonlight.cert".value or ""
}' --host fd2e:25da:6035:c98f:cd99:93e0:b9b8:9ca1'';
serviceConfig = {
Type = "oneshot";
TimeoutSec = "infinity";
Restart = "on-failure";
ReadOnlyPaths = [
"/var/lib/moonlight/moonlight.key"
"/var/lib/moonlight/moonlight.cert"
];
};
};
systemd.user.timers.moonlight-join = {
description = "Join sunshine hosts";
wantedBy = [ "timers.target" ];
timerConfig = {
OnUnitActiveSec = "5min";
OnBootSec = "0min";
Persistent = true;
Unit = "moonlight-join.service";
};
};
clan.core.vars.generators.moonlight = {
migrateFact = "moonlight";
files."moonlight.key" = { };
files."moonlight.cert" = { };
files."moonlight.cert".secret = false;
runtimeInputs = [
pkgs.coreutils
ms-accept
];
script = ''
moonlight-sunshine-accept moonlight init
mv credentials/cakey.pem "$out"/moonlight.key
mv credentials/cacert.pem "$out"/moonlight.cert
'';
};
}

View File

@@ -1,19 +0,0 @@
---
description = "Open Source, Low Latency, High Quality Voice Chat."
categories = ["Audio", "Social"]
features = [ "inventory" ]
[constraints]
roles.server.min = 1
---
The mumble clan module gives you:
- True low latency voice communication.
- Secure, authenticated encryption.
- Free software.
- Backed by a large and active open-source community.
This is all set up in a way that allows peer-to-peer hosting.
Every machine inside the clan can be a host for mumble,
and thus it doesn't matter who in the network is online - as long as two people are online they are able to chat with each other.
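An inventory sketch in the same style as the other module readmes; the `server` role is the one constrained above, and the machine names are placeholders:
```nix
mumble.default = {
  roles.server.machines = [
    "berlin"
    "munich"
  ];
};
```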

View File

@@ -1,6 +0,0 @@
# Don't import this file
# It is only here for backwards compatibility.
# Don't author new modules with this file.
{
imports = [ ./roles/server.nix ];
}

View File

@@ -1,247 +0,0 @@
import argparse
import json
import sqlite3
from pathlib import Path
def ensure_config(path: Path, db_path: Path) -> None:
# Default JSON structure if the file doesn't exist
default_json = {
"misc": {
"audio_wizard_has_been_shown": True,
"database_location": str(db_path),
"viewed_server_ping_consent_message": True,
},
"settings_version": 1,
}
# Check if the file exists
if path.exists():
data = json.loads(path.read_text())
else:
data = default_json
# Create the file with default JSON structure
with path.open("w") as file:
json.dump(data, file, indent=4)
# TODO: make sure to only update the diff
updated_data = {**default_json, **data}
# Write the modified JSON object back to the file
with path.open("w") as file:
json.dump(updated_data, file, indent=4)
def initialize_database(db_location: str) -> None:
"""
Initializes the database. If the database or the servers table does not exist, it creates them.
:param db_location: The path to the SQLite database
"""
conn = sqlite3.connect(db_location)
try:
cursor = conn.cursor()
# Create the servers table if it doesn't exist
cursor.execute("""
CREATE TABLE IF NOT EXISTS servers (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
hostname TEXT NOT NULL,
port INTEGER NOT NULL,
username TEXT NOT NULL,
password TEXT NOT NULL,
url TEXT
)
""")
# Commit the changes
conn.commit()
except sqlite3.Error as e:
print(f"An error occurred while initializing the database: {e}")
finally:
conn.close()
def initialize_certificates(
db_location: str, hostname: str, port: str, digest: str
) -> None:
# Connect to the SQLite database
conn = sqlite3.connect(db_location)
try:
# Create a cursor object
cursor = conn.cursor()
# TODO: check if cert already there
# if server_check(cursor, name, hostname):
# print(
# f"Server with name '{name}' and hostname '{hostname}' already exists."
# )
# return
# SQL command to insert data into the servers table
insert_query = """
INSERT INTO cert (hostname, port, digest)
VALUES (?, ?, ?)
"""
# Data to be inserted
data = (hostname, port, digest)
# Execute the insert command with the provided data
cursor.execute(insert_query, data)
# Commit the changes
conn.commit()
print("Data has been successfully inserted.")
except sqlite3.Error as e:
print(f"An error occurred: {e}")
finally:
# Close the connection
conn.close()
def calculate_digest(cert: str) -> str:
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
cert = cert.strip()
cert = cert.encode("utf-8")
cert = x509.load_pem_x509_certificate(cert, default_backend())
digest = cert.fingerprint(hashes.SHA1()).hex()
return digest
def server_check(cursor: sqlite3.Cursor, name: str, hostname: str) -> bool:
"""
Check if a server with the given name and hostname already exists.
:param cursor: The database cursor
:param name: The name of the server
:param hostname: The hostname of the server
:return: True if the server exists, False otherwise
"""
check_query = """
SELECT 1 FROM servers WHERE name = ? AND hostname = ?
"""
cursor.execute(check_query, (name, hostname))
return cursor.fetchone() is not None
def insert_server(
name: str,
hostname: str,
port: str,
username: str,
password: str,
url: str,
db_location: str,
) -> None:
"""
Inserts a new server record into the servers table.
:param name: The name of the server
:param hostname: The hostname of the server
:param port: The port number
:param username: The username
:param password: The password
:param url: The URL
"""
# Connect to the SQLite database
conn = sqlite3.connect(db_location)
try:
# Create a cursor object
cursor = conn.cursor()
if server_check(cursor, name, hostname):
print(
f"Server with name '{name}' and hostname '{hostname}' already exists."
)
return
# SQL command to insert data into the servers table
insert_query = """
INSERT INTO servers (name, hostname, port, username, password, url)
VALUES (?, ?, ?, ?, ?, ?)
"""
# Data to be inserted
data = (name, hostname, port, username, password, url)
# Execute the insert command with the provided data
cursor.execute(insert_query, data)
# Commit the changes
conn.commit()
print("Data has been successfully inserted.")
except sqlite3.Error as e:
print(f"An error occurred: {e}")
finally:
# Close the connection
conn.close()
if __name__ == "__main__":
port = 64738
password = ""
url = None
parser = argparse.ArgumentParser(
prog="initialize_mumble",
)
subparser = parser.add_subparsers(dest="certificates")
# cert_parser = subparser.add_parser("certificates")
parser.add_argument("--cert")
parser.add_argument("--digest")
parser.add_argument("--machines")
parser.add_argument("--servers")
parser.add_argument("--username")
parser.add_argument("--db-location")
parser.add_argument("--ensure-config", type=Path)
args = parser.parse_args()
print(args)
if args.ensure_config:
ensure_config(args.ensure_config, args.db_location)
print("Initialized config")
exit(0)
if args.servers:
print(args.servers)
servers = json.loads(f"{args.servers}")
db_location = args.db_location
for server in servers:
digest = calculate_digest(server.get("value"))
name = server.get("name")
initialize_certificates(db_location, name, port, digest)
print("Initialized certificates")
exit(0)
initialize_database(args.db_location)
# Insert the server into the database
print(args.machines)
machines = json.loads(f"{args.machines}")
print(machines)
print(list(machines))
for machine in list(machines):
print(f"Inserting {machine}.")
insert_server(
machine,
machine,
port,
args.username,
password,
url,
args.db_location,
)

View File

@@ -1,150 +0,0 @@
{
lib,
config,
pkgs,
...
}:
let
dir = config.clan.core.settings.directory;
# TODO: this should actually use the inventory to figure out which machines to use.
machineDir = dir + "/vars/per-machine";
machinesFileSet = builtins.readDir machineDir;
machines = lib.mapAttrsToList (name: _: name) machinesFileSet;
machineJson = builtins.toJSON machines;
certificateMachinePath = machine: machineDir + "/${machine}" + "/mumble/mumble-cert/value";
certificatesUnchecked = builtins.map (
machine:
let
fullPath = certificateMachinePath machine;
in
if builtins.pathExists fullPath then machine else null
) machines;
certificate = lib.filter (machine: machine != null) certificatesUnchecked;
machineCert = builtins.map (
machine: (lib.nameValuePair machine (builtins.readFile (certificateMachinePath machine)))
) certificate;
machineCertJson = builtins.toJSON machineCert;
in
{
options.clan.services.mumble = {
user = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
example = "alice";
description = "The user mumble should be set up for.";
};
};
config = {
warnings = [
"The clan.mumble module is deprecated and will be removed on 2025-07-15.
Please migrate to user-maintained configuration or the new equivalent clan services
(https://docs.clan.lol/reference/clanServices)."
];
services.murmur = {
enable = true;
logDays = -1;
registerName = config.clan.core.settings.machine.name;
openFirewall = true;
bonjour = true;
sslKey = "/var/lib/murmur/sslKey";
sslCert = "/var/lib/murmur/sslCert";
};
clan.core.state.mumble.folders = [
"/var/lib/mumble"
"/var/lib/murmur"
];
systemd.tmpfiles.rules = [
"d '/var/lib/mumble' 0770 '${config.clan.services.mumble.user}' 'users' - -"
];
systemd.tmpfiles.settings."murmur" = {
"/var/lib/murmur/sslKey" = {
C.argument = config.clan.core.vars.generators.mumble.files.mumble-key.path;
Z = {
mode = "0400";
user = "murmur";
};
};
"/var/lib/murmur/sslCert" = {
C.argument = config.clan.core.vars.generators.mumble.files.mumble-cert.path;
Z = {
mode = "0400";
user = "murmur";
};
};
};
environment.systemPackages =
let
mumbleCfgDir = "/var/lib/mumble";
mumbleDatabasePath = "${mumbleCfgDir}/mumble.sqlite";
mumbleCfgPath = "/var/lib/mumble/mumble_settings.json";
populate-channels = pkgs.writers.writePython3 "mumble-populate-channels" {
libraries = [
pkgs.python3Packages.cryptography
pkgs.python3Packages.pyopenssl
];
flakeIgnore = [
# We don't live in the dark ages anymore.
# Languages like Python that are whitespace heavy will overrun
# 79 characters..
"E501"
];
} (builtins.readFile ./mumble-populate-channels.py);
mumble = pkgs.writeShellScriptBin "mumble" ''
set -xeu
mkdir -p ${mumbleCfgDir}
pushd "${mumbleCfgDir}"
XDG_DATA_HOME=${mumbleCfgDir}
XDG_DATA_DIR=${mumbleCfgDir}
${populate-channels} --ensure-config '${mumbleCfgPath}' --db-location ${mumbleDatabasePath}
${populate-channels} --machines '${machineJson}' --username ${config.clan.core.settings.machine.name} --db-location ${mumbleDatabasePath}
${populate-channels} --servers '${machineCertJson}' --username ${config.clan.core.settings.machine.name} --db-location ${mumbleDatabasePath} --cert True
${pkgs.mumble}/bin/mumble --config ${mumbleCfgPath} "$@"
popd
'';
in
[ mumble ];
clan.core.vars.generators.mumble = {
migrateFact = "mumble";
files.mumble-key = { };
files.mumble-cert.secret = false;
runtimeInputs = [
pkgs.coreutils
pkgs.openssl
];
script = ''
openssl genrsa -out "$out/mumble-key" 2048
cat > mumble-cert.conf <<EOF
[ req ]
default_bits = 2048
distinguished_name = req_distinguished_name
req_extensions = req_ext
prompt = no
[ req_distinguished_name ]
C = "US"
ST = "California"
L = "San Francisco"
O = "Clan"
OU = "Clan"
CN = "${config.clan.core.settings.machine.name}"
[ req_ext ]
subjectAltName = @alt_names
[ alt_names ]
DNS.1 = "${config.clan.core.settings.machine.name}"
EOF
openssl req -new -x509 -config mumble-cert.conf -key "$out/mumble-key" -out "$out/mumble-cert" < /dev/null
'';
};
};
}

View File

@@ -1,30 +0,0 @@
---
description = "End-2-end encrypted IPv6 overlay network"
categories = ["System", "Network"]
features = [ "inventory", "deprecated" ]
---
Mycelium is an IPv6 overlay network written in Rust. Each node that joins the overlay network will receive an overlay network IP in the 400::/7 range.
Features:
- Mycelium is locality aware; it will look for the shortest path between nodes
- All traffic between the nodes is end-2-end encrypted
- Traffic can be routed over nodes of friends, in a location-aware way
- If a physical link goes down, Mycelium will automatically reroute your traffic
- The IP address is IPv6 and linked to the private key
- A simple, reliable message bus is implemented on top of Mycelium
- Mycelium has multiple ways to communicate (QUIC, TCP, ...) and we are working on holepunching for QUIC, which means P2P traffic without middlemen for NATted networks, e.g. most homes
- Scalability is very important for us; we tried many overlay networks before and got stuck on all of them, so we are trying to design a network which scales to a planetary level
- You can run Mycelium without TUN and only use it as a reliable message bus.
An example configuration might look like this in the inventory:
```nix
mycelium.default = {
roles.peer.machines = [
"berlin"
"munich"
];
};
```
This will add the machines named `berlin` and `munich` to the `mycelium` vpn.

View File

@@ -1,51 +0,0 @@
{
pkgs,
config,
lib,
...
}:
{
options = {
clan.mycelium.openFirewall = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Open the firewall for mycelium";
};
clan.mycelium.addHostedPublicNodes = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Add hosted Public nodes";
};
};
config.warnings = [
"The clan.mycelium module is deprecated and will be removed on 2025-07-15.
Please migrate to user-maintained configuration or the new equivalent clan services
(https://docs.clan.lol/reference/clanServices)."
];
config.services.mycelium = {
enable = true;
addHostedPublicNodes = lib.mkDefault config.clan.mycelium.addHostedPublicNodes;
openFirewall = lib.mkDefault config.clan.mycelium.openFirewall;
keyFile = config.clan.core.vars.generators.mycelium.files.key.path;
};
config.clan.core.vars.generators.mycelium = {
files."key" = { };
files."ip".secret = false;
files."pubkey".secret = false;
runtimeInputs = [
pkgs.mycelium
pkgs.coreutils
pkgs.jq
];
script = ''
timeout 5 mycelium --key-file "$out"/key || :
mycelium inspect --key-file "$out"/key --json | jq -r .publicKey > "$out"/pubkey
mycelium inspect --key-file "$out"/key --json | jq -r .address > "$out"/ip
'';
};
}

View File

@@ -1,3 +0,0 @@
---
description = "Good defaults for the nginx webserver"
---

View File

@@ -1,68 +0,0 @@
{ config, lib, ... }:
{
imports = [
(lib.mkRemovedOptionModule [
"clan"
"nginx"
"enable"
] "Importing the module will already enable the service.")
];
options = {
clan.nginx.acme.email = lib.mkOption {
type = lib.types.str;
description = ''
Email address for account creation and correspondence from the CA.
It is recommended to use the same email for all certs to avoid account
creation limits.
'';
};
};
config = {
security.acme.acceptTerms = true;
security.acme.defaults.email = config.clan.nginx.acme.email;
networking.firewall.allowedTCPPorts = [
443
80
];
services.nginx = {
enable = true;
statusPage = lib.mkDefault true;
recommendedBrotliSettings = lib.mkDefault true;
recommendedGzipSettings = lib.mkDefault true;
recommendedOptimisation = lib.mkDefault true;
recommendedProxySettings = lib.mkDefault true;
recommendedTlsSettings = lib.mkDefault true;
# Nginx sends all access logs to /var/log/nginx/access.log by default,
# instead of to the journal!
commonHttpConfig = "access_log syslog:server=unix:/dev/log;";
resolver.addresses =
let
isIPv6 = addr: builtins.match ".*:.*:.*" addr != null;
escapeIPv6 = addr: if isIPv6 addr then "[${addr}]" else addr;
cloudflare = [
"1.1.1.1"
"2606:4700:4700::1111"
];
resolvers =
if config.networking.nameservers == [ ] then cloudflare else config.networking.nameservers;
in
map escapeIPv6 resolvers;
sslDhparam = config.security.dhparams.params.nginx.path;
};
security.dhparams = {
enable = true;
params.nginx = { };
};
};
}

View File

@@ -1,5 +0,0 @@
---
description = "Define package sets from nixpkgs and install them on one or more machines"
categories = ["System"]
features = [ "inventory", "deprecated" ]
---
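A rough inventory sketch in the style of the other module readmes; the machine name and package names are placeholders, and the per-machine `config` placement mirrors the single-disk example further down:
```nix
packages.default = {
  roles.default.machines = [ "jon" ];
  machines.jon.config.packages = [
    "vim"
    "htop"
  ];
};
```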

View File

@@ -1,6 +0,0 @@
# Don't import this file
# It is only here for backwards compatibility.
# Don't author new modules with this file.
{
imports = [ ./roles/default.nix ];
}

View File

@@ -1,25 +0,0 @@
{
config,
lib,
pkgs,
...
}:
{
options.clan.packages = {
packages = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "The packages to install on the machine";
};
};
config = {
warnings = [
"The clan.packages module is deprecated and will be removed on 2025-07-15.
Please migrate to user-maintained configuration or the new equivalent clan services
(https://docs.clan.lol/reference/clanServices)."
];
environment.systemPackages = map (
pName: lib.getAttrFromPath (lib.splitString "." pName) pkgs
) config.clan.packages.packages;
};
}

View File

@@ -1,3 +0,0 @@
---
description = "A free and open-source relational database management system (RDBMS) emphasizing extensibility and SQL compliance."
---

View File

@@ -1,224 +0,0 @@
{
pkgs,
lib,
config,
...
}:
let
createDatabaseState =
db:
let
folder = "/var/backup/postgres/${db.name}";
current = "${folder}/pg-dump";
compression = lib.optionalString (lib.versionAtLeast config.services.postgresql.package.version "16") "--compress=zstd";
in
{
folders = [ folder ];
preBackupScript = ''
export PATH=${
lib.makeBinPath [
config.services.postgresql.package
config.systemd.package
pkgs.coreutils
pkgs.util-linux
pkgs.zstd
]
}
while [[ "$(systemctl is-active postgresql)" == activating ]]; do
sleep 1
done
mkdir -p "${folder}"
runuser -u postgres -- pg_dump ${compression} --dbname=${db.name} -Fc -c > "${current}.tmp"
mv "${current}.tmp" ${current}
'';
postRestoreScript = ''
export PATH=${
lib.makeBinPath [
config.services.postgresql.package
config.systemd.package
pkgs.coreutils
pkgs.util-linux
pkgs.zstd
pkgs.gnugrep
]
}
while [[ "$(systemctl is-active postgresql)" == activating ]]; do
sleep 1
done
echo "Waiting for postgres to be ready..."
while ! runuser -u postgres -- psql --port=${builtins.toString config.services.postgresql.settings.port} -d postgres -c "" ; do
if ! systemctl is-active postgresql; then exit 1; fi
sleep 0.1
done
if [[ -e "${current}" ]]; then
(
systemctl stop ${lib.concatStringsSep " " db.restore.stopOnRestore}
trap "systemctl start ${lib.concatStringsSep " " db.restore.stopOnRestore}" EXIT
mkdir -p "${folder}"
if runuser -u postgres -- psql -d postgres -c "SELECT 1 FROM pg_database WHERE datname = '${db.name}'" | grep -q 1; then
runuser -u postgres -- dropdb "${db.name}"
fi
runuser -u postgres -- pg_restore -C -d postgres "${current}"
)
else
echo No database backup found, skipping restore
fi
'';
};
createDatabase = db: ''
CREATE DATABASE "${db.name}" ${
lib.concatStringsSep " " (
lib.mapAttrsToList (name: value: "${name} = '${value}'") db.create.options
)
}
'';
cfg = config.clan.postgresql;
userClauses = lib.mapAttrsToList (
_: user:
''$PSQL -tAc "SELECT 1 FROM pg_roles WHERE rolname='${user.name}'" | grep -q 1 || $PSQL -tAc 'CREATE USER "${user.name}"' ''
) cfg.users;
databaseClauses = lib.mapAttrsToList (
name: db:
lib.optionalString db.create.enable ''$PSQL -d postgres -c "SELECT 1 FROM pg_database WHERE datname = '${name}'" | grep -q 1 || $PSQL -d postgres -c ${lib.escapeShellArg (createDatabase db)} ''
) cfg.databases;
in
{
options.clan.postgresql = {
# we are reimplementing the ensureDatabase and ensureUser options here to allow creating databases with options
databases = lib.mkOption {
description = "Databases to create";
default = { };
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
name = lib.mkOption {
type = lib.types.str;
default = name;
description = "Database name.";
};
service = lib.mkOption {
type = lib.types.str;
default = name;
description = "Service name that we associate with the database.";
};
# set to false, in case the upstream module uses ensureDatabase option
create.enable = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Create the database if it does not exist.";
};
create.options = lib.mkOption {
description = "Options to pass to the CREATE DATABASE command.";
type = lib.types.lazyAttrsOf lib.types.str;
default = { };
example = {
TEMPLATE = "template0";
LC_COLLATE = "C";
LC_CTYPE = "C";
ENCODING = "UTF8";
OWNER = "foo";
};
};
restore.stopOnRestore = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
description = "List of systemd services to stop before restoring the database.";
};
};
}
)
);
};
users = lib.mkOption {
description = "Users to create";
default = { };
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options.name = lib.mkOption {
description = "User name";
type = lib.types.str;
default = name;
};
}
)
);
};
};
config = {
services.postgresql.settings = {
wal_level = "replica";
max_wal_senders = 3;
};
services.postgresql.enable = true;
# We are duplicating the upstream module a bit here, but this allows creating databases with options
systemd.services.postgresql.postStart = ''
PSQL="psql --port=${builtins.toString config.services.postgresql.settings.port}"
while ! $PSQL -d postgres -c "" 2> /dev/null; do
if ! kill -0 "$MAINPID"; then exit 1; fi
sleep 0.1
done
${lib.concatStringsSep "\n" userClauses}
${lib.concatStringsSep "\n" databaseClauses}
'';
clan.core.state = lib.mapAttrs' (
_: db: lib.nameValuePair db.service (createDatabaseState db)
) config.clan.postgresql.databases;
environment.systemPackages = builtins.map (
db:
let
folder = "/var/backup/postgres/${db.name}";
current = "${folder}/pg-dump";
in
pkgs.writeShellScriptBin "postgres-db-restore-command-${db.name}" ''
export PATH=${
lib.makeBinPath [
config.services.postgresql.package
config.systemd.package
pkgs.coreutils
pkgs.util-linux
pkgs.zstd
pkgs.gnugrep
]
}
while [[ "$(systemctl is-active postgresql)" == activating ]]; do
sleep 1
done
echo "Waiting for postgres to be ready..."
while ! runuser -u postgres -- psql --port=${builtins.toString config.services.postgresql.settings.port} -d postgres -c "" ; do
if ! systemctl is-active postgresql; then exit 1; fi
sleep 0.1
done
if [[ -e "${current}" ]]; then
(
${lib.optionalString (db.restore.stopOnRestore != [ ]) ''
systemctl stop ${builtins.toString db.restore.stopOnRestore}
trap "systemctl start ${builtins.toString db.restore.stopOnRestore}" EXIT
''}
mkdir -p "${folder}"
if runuser -u postgres -- psql -d postgres -c "SELECT 1 FROM pg_database WHERE datname = '${db.name}'" | grep -q 1; then
runuser -u postgres -- dropdb "${db.name}"
fi
runuser -u postgres -- pg_restore -C -d postgres "${current}"
)
else
echo No database backup found, skipping restore
fi
''
) (builtins.attrValues config.clan.postgresql.databases);
};
}

View File

@@ -1,20 +0,0 @@
---
description = "Automatically generates and configures a password for the root user."
categories = ["System"]
features = ["inventory", "deprecated"]
---
This module is deprecated and will be removed in a future release. Its functionality has been replaced by the user-password service.
After the system was installed/deployed the following command can be used to display the root-password:
```bash
clan vars get [machine_name] root-password/root-password
```
See also: [Vars](../../guides/vars-backend.md)
To regenerate the password run:
```bash
clan vars generate --regenerate [machine_name] --generator root-password
```

View File

@@ -1,6 +0,0 @@
# Don't import this file
# It is only here for backwards compatibility.
# Don't author new modules with this file.
{
imports = [ ./roles/default.nix ];
}

View File

@@ -1,51 +0,0 @@
{
_class,
pkgs,
config,
lib,
...
}:
{
warnings = [
"The clan.root-password module is deprecated and will be removed on 2025-07-15.
Please migrate to user-maintained configuration or the new equivalent clan services
(https://docs.clan.lol/reference/clanServices)."
];
users.mutableUsers = false;
users.users.root.hashedPasswordFile =
config.clan.core.vars.generators.root-password.files.password-hash.path;
clan.core.vars.generators.root-password = {
files.password-hash =
{
neededFor = "users";
}
// (lib.optionalAttrs (_class == "nixos") {
restartUnits = lib.optional (config.services.userborn.enable) "userborn.service";
});
files.password = {
deploy = false;
};
migrateFact = "root-password";
runtimeInputs = [
pkgs.coreutils
pkgs.mkpasswd
pkgs.xkcdpass
];
prompts.password.type = "hidden";
prompts.password.persist = true;
prompts.password.description = "You can autogenerate a password if you leave this prompt blank.";
script = ''
prompt_value="$(cat "$prompts"/password)"
if [[ -n "''${prompt_value-}" ]]; then
echo "$prompt_value" | tr -d "\n" > "$out"/password
else
xkcdpass --numwords 4 --delimiter - --count 1 | tr -d "\n" > "$out"/password
fi
mkpasswd -s -m sha-512 < "$out"/password | tr -d "\n" > "$out"/password-hash
'';
};
}

View File

@@ -1,43 +0,0 @@
---
description = "Configures partitioning of the main disk"
categories = ["System"]
features = [ "inventory" ]
---
# Primary Disk Layout
A module for the "disk-layout" category MUST be chosen.
There is exactly one slot for this type of module in the UI, if you don't fill the slot, your machine cannot boot
This module is a good choice for most machines. In the future clan will offer a broader choice of disk-layouts
The UI will ask for the options of this module:
`device: "/dev/null"`
# Usage example
`inventory.json`
```json
"services": {
"single-disk": {
"default": {
"meta": {
"name": "single-disk"
},
"roles": {
"default": {
"machines": ["jon"]
}
},
"machines": {
"jon": {
"config": {
"device": "/dev/null"
}
}
}
}
}
}
```

View File

@@ -1,3 +0,0 @@
{
imports = [ ./roles/default.nix ];
}

View File

@@ -1,56 +0,0 @@
{ lib, config, ... }:
{
options.clan.single-disk = {
device = lib.mkOption {
default = null;
type = lib.types.nullOr lib.types.str;
description = "The primary disk device to install the system on";
};
};
config = {
warnings = [
"clanModules.single-disk is deprecated. Please copy the disko config from the module into your machine config."
];
boot.loader.grub.efiSupport = lib.mkDefault true;
boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
disko.devices = {
disk = {
main = {
type = "disk";
# This is set through the UI
device = config.clan.single-disk.device;
content = {
type = "gpt";
partitions = {
boot = {
size = "1M";
type = "EF02"; # for grub MBR
priority = 1;
};
ESP = {
size = "512M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
mountOptions = [ "umask=0077" ];
};
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};
};
};
};
};
}

View File

@@ -1,11 +0,0 @@
---
description = "Enables secure remote access to the machine over ssh."
categories = ["System", "Network"]
features = [ "inventory", "deprecated" ]
---
This module will set up the OpenSSH daemon (sshd).
It will generate a host key for each machine.
## Roles
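The module ships a `server` role (see `roles/server.nix` below). A minimal inventory sketch; the machine names are placeholders:
```nix
sshd.default = {
  roles.server.machines = [
    "jon"
    "sara"
  ];
};
```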

View File

@@ -1,6 +0,0 @@
# Don't import this file
# It is only here for backwards compatibility.
# Don't author new modules with this file.
{
imports = [ ./roles/server.nix ];
}

View File

@@ -1,6 +0,0 @@
{ ... }:
{
imports = [
../shared.nix
];
}

View File

@@ -1,18 +0,0 @@
---
description = "Automatically generate the state version of the nixos installation."
features = [ "inventory", "deprecated" ]
---
This module generates the `system.stateVersion` of the nixos installation automatically.
Options: [system.stateVersion](https://search.nixos.org/options?channel=unstable&show=system.stateVersion&from=0&size=50&sort=relevance&type=packages&query=stateVersion)
Migration:
If you are already setting `system.stateVersion`, then import the module and either let the automatic generation happen or trigger the generation manually for the machine. The module will use the specified version if one is already supplied through the config.
To manually generate the version for a specified machine run:
```bash
clan vars generate [MACHINE]
```
If the setting was already set you can then remove `system.stateVersion` from your machine configuration. For new machines, just import the module.
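A minimal migration sketch, using the replacement option named in the module's own deprecation warning (shown further below):
```nix
{
  # Replaces importing the deprecated state-version module.
  clan.core.settings.state-version.enable = true;
}
```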

View File

@@ -1,6 +0,0 @@
# Don't import this file
# It is only here for backwards compatibility.
# Don't author new modules with this file.
{
imports = [ ./roles/default.nix ];
}

View File

@@ -1,28 +0,0 @@
{ config, lib, ... }:
let
var = config.clan.core.vars.generators.state-version.files.version or { };
in
{
warnings = [
''
The clan.state-version service is deprecated and will be
removed on 2025-07-15 in favor of a nix option.
Please migrate your configuration to use `clan.core.settings.state-version.enable = true` instead.
''
];
system.stateVersion = lib.mkDefault (lib.removeSuffix "\n" var.value);
clan.core.vars.generators.state-version = {
files.version = {
secret = false;
value = lib.mkDefault config.system.nixos.release;
};
runtimeInputs = [ ];
script = ''
echo -n ${config.system.stateVersion} > "$out"/version
'';
};
}

View File

@@ -1,3 +0,0 @@
---
description = "Statically configure the host names of machines based on their respective zerotier-ip."
---

View File

@@ -1,63 +0,0 @@
{ lib, config, ... }:
{
options.clan.static-hosts = {
excludeHosts = lib.mkOption {
type = lib.types.listOf lib.types.str;
default =
if config.clan.static-hosts.topLevelDomain != "" then
[ ]
else
[ config.clan.core.settings.machine.name ];
defaultText = lib.literalExpression ''
if config.clan.static-hosts.topLevelDomain != "" then
[ ]
else
[ config.clan.core.settings.machine.name ];
'';
description = "Hosts that should be excluded";
};
topLevelDomain = lib.mkOption {
type = lib.types.str;
default = "";
description = "Top level domain to reach hosts";
};
};
config.networking.hosts =
let
dir = config.clan.core.settings.directory;
machineDir = "${dir}/vars/per-machine";
zerotierIpMachinePath = machine: "${machineDir}/${machine}/zerotier/zerotier-ip/value";
machinesFileSet = builtins.readDir machineDir;
machines = lib.mapAttrsToList (name: _: name) machinesFileSet;
networkIpsUnchecked = builtins.map (
machine:
let
fullPath = zerotierIpMachinePath machine;
in
if builtins.pathExists fullPath then machine else null
) machines;
networkIps = lib.filter (machine: machine != null) networkIpsUnchecked;
machinesWithIp = lib.filterAttrs (name: _: (lib.elem name networkIps)) machinesFileSet;
filteredMachines = lib.filterAttrs (
name: _: !(lib.elem name config.clan.static-hosts.excludeHosts)
) machinesWithIp;
in
lib.filterAttrs (_: value: value != null) (
lib.mapAttrs' (
machine: _:
let
path = zerotierIpMachinePath machine;
in
if builtins.pathExists path then
lib.nameValuePair (builtins.readFile path) (
if (config.clan.static-hosts.topLevelDomain == "") then
[ machine ]
else
[ "${machine}.${config.clan.static-hosts.topLevelDomain}" ]
)
else
{ }
) filteredMachines
);
}

View File

@@ -1,5 +0,0 @@
---
description = "A desktop streaming server optimized for remote gaming and synchronized movie viewing."
---
**Warning**: This module was written with our VM integration in mind and likely won't work outside of that context. It will be generalized in the future.

View File

@@ -1,203 +0,0 @@
{
pkgs,
config,
lib,
...
}:
let
ms-accept = pkgs.callPackage ../../pkgs/moonlight-sunshine-accept { };
sunshineConfiguration = pkgs.writeText "sunshine.conf" ''
address_family = both
channels = 5
pkey = /var/lib/sunshine/sunshine.key
cert = /var/lib/sunshine/sunshine.cert
file_state = /var/lib/sunshine/state.json
credentials_file = /var/lib/sunshine/credentials.json
'';
listenPort = 48011;
in
{
warnings = [
"The clan.sunshine module is deprecated and will be removed on 2025-07-15. Please migrate to user-maintained configuration."
];
networking.firewall = {
allowedTCPPorts = [
47984
47989
47990
48010
48011
];
allowedUDPPorts = [
47998
47999
48000
48002
48010
];
};
networking.firewall.allowedTCPPortRanges = [
{
from = 47984;
to = 48010;
}
];
networking.firewall.allowedUDPPortRanges = [
{
from = 47998;
to = 48010;
}
];
environment.systemPackages = [
ms-accept
pkgs.sunshine
pkgs.avahi
# Convenience script, until we find a better UX
(pkgs.writers.writeDashBin "sun" ''
${pkgs.sunshine}/bin/sunshine -0 ${sunshineConfiguration} "$@"
'')
# Create a dummy account, for easier setup,
# don't use this account in actual production yet.
(pkgs.writers.writeDashBin "init-sun" ''
${pkgs.sunshine}/bin/sunshine \
--creds "sunshine" "sunshine"
'')
];
# Required to simulate input
boot.kernelModules = [ "uinput" ];
services.udev.extraRules = ''
KERNEL=="uinput", SUBSYSTEM=="misc", OPTIONS+="static_node=uinput", TAG+="uaccess"
'';
security = {
rtkit.enable = true;
wrappers.sunshine = {
owner = "root";
group = "root";
capabilities = "cap_sys_admin+p";
source = "${pkgs.sunshine}/bin/sunshine";
};
};
systemd.tmpfiles.rules = [
"d '/var/lib/sunshine' 0770 'user' 'users' - -"
"C '/var/lib/sunshine/sunshine.cert' 0644 'user' 'users' - ${
config.clan.core.vars.generators.sunshine.files."sunshine.cert".path or ""
}"
"C '/var/lib/sunshine/sunshine.key' 0644 'user' 'users' - ${
config.clan.core.vars.generators.sunshine.files."sunshine.key".path or ""
}"
];
hardware.graphics.enable = true;
systemd.user.services.sunshine = {
enable = true;
description = "Sunshine self-hosted game stream host for Moonlight";
startLimitBurst = 5;
startLimitIntervalSec = 500;
script = "/run/current-system/sw/bin/env /run/wrappers/bin/sunshine ${sunshineConfiguration}";
serviceConfig = {
Restart = "on-failure";
RestartSec = "5s";
ReadWritePaths = [ "/var/lib/sunshine" ];
ReadOnlyPaths = [
(config.clan.core.vars.services.sunshine.files."sunshine.key".path or "")
(config.clan.core.vars.services.sunshine.files."sunshine.cert".path or "")
];
};
wantedBy = [ "graphical-session.target" ];
partOf = [ "graphical-session.target" ];
wants = [ "graphical-session.target" ];
after = [
"sunshine-init-state.service"
"sunshine-init-credentials.service"
];
};
systemd.user.services.sunshine-init-state = {
enable = true;
description = "Sunshine self-hosted game stream host for Moonlight";
startLimitBurst = 5;
startLimitIntervalSec = 500;
script = ''
${ms-accept}/bin/moonlight-sunshine-accept sunshine init-state \
--uuid ${config.clan.core.vars.generators.sunshine.files.sunshine-uuid.value} \
--state-file /var/lib/sunshine/state.json
'';
serviceConfig = {
Restart = "on-failure";
RestartSec = "5s";
Type = "oneshot";
ReadWritePaths = [ "/var/lib/sunshine" ];
};
wantedBy = [ "graphical-session.target" ];
};
systemd.user.services.sunshine-init-credentials = {
enable = true;
description = "Sunshine self-hosted game stream host for Moonlight";
startLimitBurst = 5;
startLimitIntervalSec = 500;
script = ''
${lib.getExe pkgs.sunshine} ${sunshineConfiguration} --creds sunshine sunshine
'';
serviceConfig = {
Restart = "on-failure";
RestartSec = "5s";
Type = "oneshot";
ReadWritePaths = [ "/var/lib/sunshine" ];
};
wantedBy = [ "graphical-session.target" ];
};
systemd.user.services.sunshine-listener = {
enable = true;
description = "Sunshine self-hosted game stream host for Moonlight";
startLimitBurst = 5;
startLimitIntervalSec = 500;
script = ''
${ms-accept}/bin/moonlight-sunshine-accept sunshine listen --port ${builtins.toString listenPort} \
--uuid ${config.clan.core.vars.generators.sunshine.files.sunshine-uuid.value} \
--state /var/lib/sunshine/state.json --cert '${
config.clan.core.vars.generators.sunshine.files."sunshine.cert".value
}'
'';
serviceConfig = {
# );
Restart = "on-failure";
RestartSec = 5;
ReadWritePaths = [ "/var/lib/sunshine" ];
};
wantedBy = [ "graphical-session.target" ];
};
clan.core.vars.generators.sunshine = {
# generator was named incorrectly in the past
migrateFact = "ergochat";
files."sunshine.key" = { };
files."sunshine.cert" = { };
files."sunshine-uuid".secret = false;
files."sunshine.cert".secret = false;
runtimeInputs = [
pkgs.coreutils
ms-accept
];
script = ''
moonlight-sunshine-accept sunshine init
mv credentials/cakey.pem "$out"/sunshine.key
mv credentials/cacert.pem "$out"/sunshine.cert
mv uuid "$out"/sunshine-uuid
'';
};
}

View File

@@ -1,3 +0,0 @@
---
description = "Statically configure syncthing peers through clan"
---

View File

@@ -1,110 +0,0 @@
{
lib,
config,
pkgs,
...
}:
let
dir = config.clan.core.settings.directory;
machineVarDir = "${dir}/vars/per-machine/";
syncthingPublicKeyPath = machine: "${machineVarDir}/${machine}/syncthing/id/value";
machinesFileSet = builtins.readDir machineVarDir;
machines = lib.mapAttrsToList (name: _: name) machinesFileSet;
syncthingPublicKeysUnchecked = builtins.map (
machine:
let
fullPath = syncthingPublicKeyPath machine;
in
if builtins.pathExists fullPath then machine else null
) machines;
syncthingPublicKeyMachines = lib.filter (machine: machine != null) syncthingPublicKeysUnchecked;
zerotierIpMachinePath = machine: "${machineVarDir}/${machine}/zerotier/zerotier-ip/value";
networkIpsUnchecked = builtins.map (
machine:
let
fullPath = zerotierIpMachinePath machine;
in
if builtins.pathExists fullPath then machine else null
) machines;
networkIpMachines = lib.filter (machine: machine != null) networkIpsUnchecked;
devices = builtins.map (machine: {
name = machine;
value = {
name = machine;
id = (lib.removeSuffix "\n" (builtins.readFile (syncthingPublicKeyPath machine)));
addresses =
[ "dynamic" ]
++ (
if (lib.elem machine networkIpMachines) then
[ "tcp://[${(lib.removeSuffix "\n" (builtins.readFile (zerotierIpMachinePath machine)))}]:22000" ]
else
[ ]
);
};
}) syncthingPublicKeyMachines;
in
{
options.clan.syncthing-static-peers = {
excludeMachines = lib.mkOption {
type = lib.types.listOf lib.types.str;
example = lib.literalExpression "[ config.clan.core.settings.machine.name ]";
default = [ ];
description = ''
Machines that should not be added.
'';
};
};
config.services.syncthing.settings.devices = (builtins.listToAttrs devices);
imports = [
{
# Syncthing ports: 8384 for remote access to GUI
# 22000 TCP and/or UDP for sync traffic
# 21027/UDP for discovery
# source: https://docs.syncthing.net/users/firewall.html
networking.firewall.interfaces."zt+".allowedTCPPorts = [
8384
22000
];
networking.firewall.allowedTCPPorts = [ 8384 ];
networking.firewall.interfaces."zt+".allowedUDPPorts = [
22000
21027
];
# Activates inotify compatibility on syncthing
# use mkOverride 900 here as it otherwise would collide with the default of the
# upstream nixos xserver.nix
boot.kernel.sysctl."fs.inotify.max_user_watches" = lib.mkOverride 900 524288;
services.syncthing = {
enable = true;
configDir = "/var/lib/syncthing";
group = "syncthing";
key = lib.mkDefault config.clan.core.vars.generators.syncthing.files.key.path or null;
cert = lib.mkDefault config.clan.core.vars.generators.syncthing.files.cert.path or null;
};
clan.core.vars.generators.syncthing = {
files.key = { };
files.cert = { };
files.api = { };
files.id.secret = false;
runtimeInputs = [
pkgs.coreutils
pkgs.gnugrep
pkgs.syncthing
];
script = ''
syncthing generate --config "$out"
mv "$out"/key.pem "$out"/key
mv "$out"/cert.pem "$out"/cert
cat "$out"/config.xml | grep -oP '(?<=<device id=")[^"]+' | uniq > "$out"/id
cat "$out"/config.xml | grep -oP '<apikey>\K[^<]+' | uniq > "$out"/api
'';
};
}
];
}

View File

@@ -1,40 +0,0 @@
---
description = "A secure, file synchronization app for devices over networks, offering a private alternative to cloud services."
features = [ "inventory" ]
[constraints]
roles.introducer.min = 1
roles.introducer.max = 1
---
**Warning**: This module was written with our VM integration in mind and likely won't work outside of that context. It will be generalized in the future.
## Usage
We recommend configuring this module as a sync service through the provided options, although it also provides a Web GUI through which more usage scenarios are supported.
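A minimal sketch of how the module options (defined in `shared.nix` further below) might be used on the introducer machine; the folder ID and path are placeholders:
```nix
{
  clan.syncthing.autoAcceptDevices = true;
  clan.syncthing.autoShares = [ "documents" ];

  # The module asserts that auto-shared folders are configured
  # on the sharing device, so declare the folder as well.
  services.syncthing.settings.folders.documents.path = "/var/lib/syncthing/documents";
}
```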
## Features
- **Private and Secure**: Syncthing uses TLS encryption to secure data transfer between devices, ensuring that only the intended devices can read your data.
- **Decentralized**: No central server is involved in the data transfer. Each device communicates directly with others.
- **Open Source**: The source code is openly available for audit and contribution, fostering trust and continuous improvement.
- **Cross-Platform**: Syncthing supports multiple platforms including Windows, macOS, Linux, BSD, and Android.
- **Real-time Synchronization**: Changes made to files are synchronized in real-time across all connected devices.
- **Web GUI**: It includes a user-friendly web interface for managing devices and configurations. (`127.0.0.1:8384`)
## Configuration
- **Share Folders**: Select folders to share with connected devices and configure permissions and synchronization parameters.
!!! info
Clan automatically discovers other devices. Automatic discovery requires one machine to be an [introducer](#clan.syncthing.introducer).
If that is not the case, you can add the other device by its Device ID manually.
You can find and share Device IDs under the "Add Device" button in the Web GUI. (`127.0.0.1:8384`)
## Troubleshooting
- **Sync Conflicts**: Resolve synchronization conflicts manually by reviewing file versions and modification times in the Web GUI (`127.0.0.1:8384`).
## Support
- **Documentation**: Extensive documentation is available on the [Syncthing website](https://docs.syncthing.net/).

View File

@@ -1,6 +0,0 @@
# Don't import this file
# It is only here for backwards compatibility.
# Don't author new modules with this file.
{
imports = [ ./roles/peer.nix ];
}

View File

@@ -1,6 +0,0 @@
{ ... }:
{
imports = [
../shared.nix
];
}

View File

@@ -1,21 +0,0 @@
{ config, lib, ... }:
let
instanceNames = builtins.attrNames config.clan.inventory.services.syncthing;
instanceName = builtins.head instanceNames;
instance = config.clan.inventory.services.syncthing.${instanceName};
introducer = builtins.head instance.roles.introducer.machines;
introducerId = "${config.clan.core.settings.directory}/vars/per-machine/${introducer}/syncthing/id/value";
in
{
imports = [
../shared.nix
];
clan.syncthing.introducer = lib.strings.removeSuffix "\n" (
if builtins.pathExists introducerId then
builtins.readFile introducerId
else
throw "${introducerId} does not exists. Please run `clan vars generate ${introducer}` to generate the introducer device id"
);
}

View File

@@ -1,214 +0,0 @@
{
config,
pkgs,
lib,
...
}:
{
options.clan.syncthing = {
id = lib.mkOption {
description = ''
The ID of the machine.
It is generated automatically by default.
'';
type = lib.types.nullOr lib.types.str;
example = "BABNJY4-G2ICDLF-QQEG7DD-N3OBNGF-BCCOFK6-MV3K7QJ-2WUZHXS-7DTW4AS";
default = config.clan.core.vars.generators.syncthing.files."id".value;
defaultText = "config.clan.core.vars.generators.syncthing.files.\"id\".value";
};
introducer = lib.mkOption {
description = ''
The introducer for the machine.
'';
type = lib.types.nullOr lib.types.str;
default = null;
};
autoAcceptDevices = lib.mkOption {
description = ''
Auto accept incoming device requests.
Should only be used on the introducer.
'';
type = lib.types.bool;
default = false;
};
autoShares = lib.mkOption {
description = ''
Auto share the following folders by their IDs with introduced devices.
Should only be used on the introducer.
'';
type = lib.types.listOf lib.types.str;
default = [ ];
example = [
"folder1"
"folder2"
];
};
};
imports = [
{
# Syncthing ports: 8384 for remote access to GUI
# 22000 TCP and/or UDP for sync traffic
# 21027/UDP for discovery
# source: https://docs.syncthing.net/users/firewall.html
networking.firewall.interfaces."zt+".allowedTCPPorts = [
8384
22000
];
networking.firewall.allowedTCPPorts = [ 8384 ];
networking.firewall.interfaces."zt+".allowedUDPPorts = [
22000
21027
];
assertions = [
{
assertion = lib.all (
attr: builtins.hasAttr attr config.services.syncthing.settings.folders
) config.clan.syncthing.autoShares;
message = ''
Syncthing: If you want to AutoShare a folder, you need to have it configured on the sharing device.
'';
}
];
# Activates inotify compatibility on syncthing
# use mkOverride 900 here as it otherwise would collide with the default of the
# upstream nixos xserver.nix
boot.kernel.sysctl."fs.inotify.max_user_watches" = lib.mkOverride 900 524288;
services.syncthing = {
enable = true;
overrideFolders = lib.mkDefault (
if (config.clan.syncthing.introducer == null) then true else false
);
overrideDevices = lib.mkDefault (
if (config.clan.syncthing.introducer == null) then true else false
);
key = lib.mkDefault config.clan.core.vars.generators.syncthing.files."key".path or null;
cert = lib.mkDefault config.clan.core.vars.generators.syncthing.files."cert".path or null;
settings = {
options = {
urAccepted = -1;
allowedNetworks = [ ];
};
devices =
{ }
// (
if (config.clan.syncthing.introducer == null) then
{ }
else
{
"${config.clan.syncthing.introducer}" = {
name = "introducer";
id = config.clan.syncthing.introducer;
introducer = true;
autoAcceptFolders = true;
};
}
);
};
};
systemd.services.syncthing-auto-accept =
let
baseAddress = "127.0.0.1:8384";
getPendingDevices = "/rest/cluster/pending/devices";
postNewDevice = "/rest/config/devices";
SharedFolderById = "/rest/config/folders/";
apiKey = config.clan.core.vars.generators.syncthing.files."apikey".path;
in
lib.mkIf config.clan.syncthing.autoAcceptDevices {
description = "Syncthing auto accept devices";
requisite = [ "syncthing.service" ];
after = [ "syncthing.service" ];
wantedBy = [ "multi-user.target" ];
script = ''
set -x
# query pending deviceID's
APIKEY=$(cat ${apiKey})
PENDING=$(${lib.getExe pkgs.curl} -X GET -H "X-API-Key: $APIKEY" ${baseAddress}${getPendingDevices})
PENDING=$(echo $PENDING | ${lib.getExe pkgs.jq} keys[])
# accept pending deviceID's
for ID in $PENDING;do
${lib.getExe pkgs.curl} -X POST -d "{\"deviceId\": $ID}" -H "Content-Type: application/json" -H "X-API-Key: $APIKEY" ${baseAddress}${postNewDevice}
# get all shared folders by their ID
for folder in ${builtins.toString config.clan.syncthing.autoShares}; do
SHARED_IDS=$(${lib.getExe pkgs.curl} -X GET -H "X-API-Key: $APIKEY" ${baseAddress}${SharedFolderById}"$folder" | ${lib.getExe pkgs.jq} ."devices")
PATCHED_IDS=$(echo $SHARED_IDS | ${lib.getExe pkgs.jq} ".+= [{\"deviceID\": $ID, \"introducedBy\": \"\", \"encryptionPassword\": \"\"}]")
${lib.getExe pkgs.curl} -X PATCH -d "{\"devices\": $PATCHED_IDS}" -H "X-API-Key: $APIKEY" ${baseAddress}${SharedFolderById}"$folder"
done
done
'';
};
systemd.timers.syncthing-auto-accept = lib.mkIf config.clan.syncthing.autoAcceptDevices {
description = "Syncthing Auto Accept";
wantedBy = [ "syncthing-auto-accept.service" ];
timerConfig = {
OnActiveSec = lib.mkDefault 60;
OnUnitActiveSec = lib.mkDefault 60;
};
};
systemd.services.syncthing-init-api-key =
let
apiKey = config.clan.core.vars.generators.syncthing.files."apikey".path;
in
lib.mkIf config.clan.syncthing.autoAcceptDevices {
description = "Set the api key";
after = [ "syncthing-init.service" ];
wantedBy = [ "multi-user.target" ];
script = ''
# set -x
set -efu -o pipefail
APIKEY=$(cat ${apiKey})
${lib.getExe pkgs.gnused} -i "s/<apikey>.*<\/apikey>/<apikey>$APIKEY<\/apikey>/" ${config.services.syncthing.configDir}/config.xml
# sudo systemctl restart syncthing.service
systemctl restart syncthing.service
'';
serviceConfig = {
BindReadOnlyPaths = [ apiKey ];
Type = "oneshot";
};
};
clan.core.vars.generators.syncthing = {
migrateFact = "syncthing";
files."key".group = config.services.syncthing.group;
files."key".owner = config.services.syncthing.user;
files."cert".group = config.services.syncthing.group;
files."cert".owner = config.services.syncthing.user;
files."apikey".group = config.services.syncthing.group;
files."apikey".owner = config.services.syncthing.user;
files."id".secret = false;
runtimeInputs = [
pkgs.coreutils
pkgs.gnugrep
pkgs.syncthing
];
script = ''
syncthing generate --config "$out"
mv "$out"/key.pem "$out"/key
mv "$out"/cert.pem "$out"/cert
cat "$out"/config.xml | grep -oP '(?<=<device id=")[^"]+' | uniq > "$out"/id
cat "$out"/config.xml | grep -oP '<apikey>\K[^<]+' | uniq > "$out"/apikey
'';
};
}
];
}

View File

@@ -1,3 +0,0 @@
---
description = "Modern web IRC client"
---

View File

@@ -1,19 +0,0 @@
_: {
warnings = [
"The clan.thelounge module is deprecated and will be removed on 2025-07-15. Please migrate to user-maintained configuration."
];
services.thelounge = {
enable = true;
public = true;
extraConfig = {
prefetch = true;
defaults = {
port = 6667;
tls = false;
};
};
};
clan.core.state.thelounge.folders = [ "/var/lib/thelounge" ];
}

View File

@@ -1,5 +0,0 @@
---
description = "This module sets the `clan.lol` and `nix-community` cache up as a trusted cache."
categories = ["System", "Network"]
features = [ "deprecated" ]
---

Some files were not shown because too many files have changed in this diff