Compare commits


1 Commit

Author: Johannes Kirschbauer
SHA1: 1f13ce2732
Message: WIP: dont merge
Date: 2025-10-09 15:53:21 +02:00
420 changed files with 5079 additions and 13301 deletions

View File

@@ -17,4 +17,4 @@ jobs:
- name: Build clan-app for x86_64-darwin
run: |
nix build .#packages.x86_64-darwin.clan-app --log-format bar-with-logs
nix build .#packages.x86_64-darwin.clan-app --system x86_64-darwin --log-format bar-with-logs

View File

@@ -1,10 +1,8 @@
clanServices/.* @pinpox @kenji
lib/test/container-test-driver/.* @DavHau @mic92
lib/inventory/.* @hsjobeki
lib/inventoryClass/.* @hsjobeki
modules/.* @hsjobeki
lib/modules/inventory/.* @hsjobeki
lib/modules/inventoryClass/.* @hsjobeki
pkgs/clan-app/ui/.* @hsjobeki @brianmcgee
pkgs/clan-app/clan_app/.* @qubasa @hsjobeki

View File

@@ -1,4 +1,4 @@
Copyright 2023-2025 Clan contributors
Copyright 2023-2024 Clan contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in

View File

@@ -19,19 +19,28 @@ let
nixosLib = import (self.inputs.nixpkgs + "/nixos/lib") { };
in
{
imports = filter pathExists [
./devshell/flake-module.nix
./flash/flake-module.nix
./installation/flake-module.nix
./update/flake-module.nix
./morph/flake-module.nix
./nixos-documentation/flake-module.nix
./dont-depend-on-repo-root.nix
# clan core submodule tests
../nixosModules/clanCore/machine-id/tests/flake-module.nix
../nixosModules/clanCore/postgresql/tests/flake-module.nix
../nixosModules/clanCore/state-version/tests/flake-module.nix
];
imports =
let
clanCoreModulesDir = ../nixosModules/clanCore;
getClanCoreTestModules =
let
moduleNames = attrNames (builtins.readDir clanCoreModulesDir);
testPaths = map (
moduleName: clanCoreModulesDir + "/${moduleName}/tests/flake-module.nix"
) moduleNames;
in
filter pathExists testPaths;
in
getClanCoreTestModules
++ filter pathExists [
./devshell/flake-module.nix
./flash/flake-module.nix
./installation/flake-module.nix
./update/flake-module.nix
./morph/flake-module.nix
./nixos-documentation/flake-module.nix
./dont-depend-on-repo-root.nix
];
flake.check = genAttrs [ "x86_64-linux" "aarch64-darwin" ] (
system:
let
@@ -86,13 +95,11 @@ in
# Container Tests
nixos-test-container = self.clanLib.test.containerTest ./container nixosTestArgs;
nixos-systemd-abstraction = self.clanLib.test.containerTest ./systemd-abstraction nixosTestArgs;
nixos-llm-test = self.clanLib.test.containerTest ./llm nixosTestArgs;
nixos-test-user-firewall-iptables = self.clanLib.test.containerTest ./user-firewall/iptables.nix nixosTestArgs;
nixos-test-user-firewall-nftables = self.clanLib.test.containerTest ./user-firewall/nftables.nix nixosTestArgs;
nixos-test-extra-python-packages = self.clanLib.test.containerTest ./test-extra-python-packages nixosTestArgs;
service-dummy-test = import ./service-dummy-test nixosTestArgs;
wireguard = import ./wireguard nixosTestArgs;
service-dummy-test-from-flake = import ./service-dummy-test-from-flake nixosTestArgs;
};
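
The hunk above replaces the hand-maintained list of clanCore test modules with directory discovery. A minimal standalone sketch of the same pattern, under the assumption of a hypothetical `./modules` layout (evaluable with `nix eval -f sketch.nix`):

```nix
let
  # One subdirectory per module (hypothetical layout).
  modulesDir = ./modules;
  # builtins.readDir returns { <entry name> = <type>; ... }.
  moduleNames = builtins.attrNames (builtins.readDir modulesDir);
  # Candidate test entry point for every module.
  testPaths = builtins.map (
    name: modulesDir + "/${name}/tests/flake-module.nix"
  ) moduleNames;
in
# Keep only the modules that actually ship a tests/flake-module.nix.
builtins.filter builtins.pathExists testPaths
```

New modules are then picked up automatically instead of requiring an edit to the imports list.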

View File

@@ -15,6 +15,7 @@ let
networking.useNetworkd = true;
services.openssh.enable = true;
services.openssh.settings.UseDns = false;
services.openssh.settings.PasswordAuthentication = false;
system.nixos.variant_id = "installer";
environment.systemPackages = [
pkgs.nixos-facter

View File

@@ -1,82 +0,0 @@
{ self, pkgs, ... }:
let
cli = self.packages.${pkgs.hostPlatform.system}.clan-cli-full;
ollama-model = pkgs.callPackage ./qwen3-4b-instruct.nix { };
in
{
name = "llm";
nodes = {
peer1 =
{ pkgs, ... }:
{
users.users.text-user = {
isNormalUser = true;
linger = true;
uid = 1000;
extraGroups = [ "systemd-journal" ];
};
# Set environment variables for user systemd
environment.extraInit = ''
if [ "$(id -u)" = "1000" ]; then
export XDG_RUNTIME_DIR="/run/user/1000"
export DBUS_SESSION_BUS_ADDRESS="unix:path=/run/user/1000/bus"
ollama_dir="$HOME/.ollama"
mkdir -p "$ollama_dir"
ln -sf ${ollama-model}/models "$ollama_dir"/models
fi
'';
# Enable PAM for user systemd sessions
security.pam.services.systemd-user = {
startSession = true;
# Workaround for containers - use pam_permit to avoid helper binary issues
text = pkgs.lib.mkForce ''
account required pam_permit.so
session required pam_permit.so
session required pam_env.so conffile=/etc/pam/environment readenv=0
session required ${pkgs.systemd}/lib/security/pam_systemd.so
'';
};
environment.systemPackages = [
cli
pkgs.ollama
(cli.pythonRuntime.withPackages (
ps: with ps; [
pytest
pytest-xdist
(cli.pythonRuntime.pkgs.toPythonModule cli)
self.legacyPackages.${pkgs.hostPlatform.system}.nixosTestLib
]
))
];
};
};
testScript =
{ ... }:
''
start_all()
peer1.wait_for_unit("multi-user.target")
peer1.wait_for_unit("user@1000.service")
# Fix user journal permissions so text-user can read their own logs
peer1.succeed("chown text-user:systemd-journal /var/log/journal/*/user-1000.journal*")
peer1.succeed("chmod 640 /var/log/journal/*/user-1000.journal*")
# the -o addopts="" is needed to overwrite any args coming from pyproject.toml
# -p no:cacheprovider disables pytest's cacheprovider which tries to write to the nix store in this case
cmd = "su - text-user -c 'pytest -s -n0 -m service_runner -p no:cacheprovider -o addopts="" ${cli.passthru.sourceWithTests}/clan_lib/llm'"
print("Running tests with command: " + cmd)
# Run tests as text-user (environment variables are set automatically)
peer1.succeed(cmd)
'';
}

View File

@@ -1,70 +0,0 @@
{ pkgs }:
let
# Got them from https://github.com/Gholamrezadar/ollama-direct-downloader
# Download manifest
manifest = pkgs.fetchurl {
url = "https://registry.ollama.ai/v2/library/qwen3/manifests/4b-instruct";
# You'll need to calculate this hash - run the derivation once and it will tell you the correct hash
hash = "sha256-Dtze80WT6sGqK+nH0GxDLc+BlFrcpeyi8nZiwY8Wi6A=";
};
# Download blobs
blob1 = pkgs.fetchurl {
url = "https://registry.ollama.ai/v2/library/qwen3/blobs/sha256:b72accf9724e93698c57cbd3b1af2d3341b3d05ec2089d86d273d97964853cd2";
hash = "sha256-tyrM+XJOk2mMV8vTsa8tM0Gz0F7CCJ2G0nPZeWSFPNI=";
};
blob2 = pkgs.fetchurl {
url = "https://registry.ollama.ai/v2/library/qwen3/blobs/sha256:85e4a5b7b8ef0e48af0e8658f5aaab9c2324c76c1641493f4d1e25fce54b18b9";
hash = "sha256-heSlt7jvDkivDoZY9aqrnCMkx2wWQUk/TR4l/OVLGLk=";
};
blob3 = pkgs.fetchurl {
url = "https://registry.ollama.ai/v2/library/qwen3/blobs/sha256:eade0a07cac7712787bbce23d12f9306adb4781d873d1df6e16f7840fa37afec";
hash = "sha256-6t4KB8rHcSeHu84j0S+TBq20eB2HPR324W94QPo3r+w=";
};
blob4 = pkgs.fetchurl {
url = "https://registry.ollama.ai/v2/library/qwen3/blobs/sha256:d18a5cc71b84bc4af394a31116bd3932b42241de70c77d2b76d69a314ec8aa12";
hash = "sha256-0YpcxxuEvErzlKMRFr05MrQiQd5wx30rdtaaMU7IqhI=";
};
blob5 = pkgs.fetchurl {
url = "https://registry.ollama.ai/v2/library/qwen3/blobs/sha256:0914c7781e001948488d937994217538375b4fd8c1466c5e7a625221abd3ea7a";
hash = "sha256-CRTHeB4AGUhIjZN5lCF1ODdbT9jBRmxeemJSIavT6no=";
};
in
pkgs.stdenv.mkDerivation {
pname = "ollama-qwen3-4b-instruct";
version = "1.0";
dontUnpack = true;
buildPhase = ''
mkdir -p $out/models/manifests/registry.ollama.ai/library/qwen3
mkdir -p $out/models/blobs
# Copy manifest
cp ${manifest} $out/models/manifests/registry.ollama.ai/library/qwen3/4b-instruct
# Copy blobs with correct names
cp ${blob1} $out/models/blobs/sha256-b72accf9724e93698c57cbd3b1af2d3341b3d05ec2089d86d273d97964853cd2
cp ${blob2} $out/models/blobs/sha256-85e4a5b7b8ef0e48af0e8658f5aaab9c2324c76c1641493f4d1e25fce54b18b9
cp ${blob3} $out/models/blobs/sha256-eade0a07cac7712787bbce23d12f9306adb4781d873d1df6e16f7840fa37afec
cp ${blob4} $out/models/blobs/sha256-d18a5cc71b84bc4af394a31116bd3932b42241de70c77d2b76d69a314ec8aa12
cp ${blob5} $out/models/blobs/sha256-0914c7781e001948488d937994217538375b4fd8c1466c5e7a625221abd3ea7a
'';
installPhase = ''
# buildPhase already created everything in $out
:
'';
meta = with pkgs.lib; {
description = "Qwen3 4B Instruct model for Ollama";
license = "apache-2.0";
platforms = platforms.all;
};
}

View File

@@ -27,7 +27,6 @@
modules.new-service = {
_class = "clan.service";
manifest.name = "new-service";
manifest.readme = "Just a sample readme to not trigger the warning.";
roles.peer = {
description = "A peer that uses the new-service to generate some files.";
};

View File

@@ -34,7 +34,6 @@ nixosLib.runTest (
modules.new-service = {
_class = "clan.service";
manifest.name = "new-service";
manifest.readme = "Just a sample readme to not trigger the warning.";
roles.peer = {
description = "A peer that uses the new-service to generate some files.";
};

View File

@@ -1,67 +0,0 @@
{ self, pkgs, ... }:
let
cli = self.packages.${pkgs.hostPlatform.system}.clan-cli-full;
in
{
name = "systemd-abstraction";
nodes = {
peer1 = {
users.users.text-user = {
isNormalUser = true;
linger = true;
uid = 1000;
extraGroups = [ "systemd-journal" ];
};
# Set environment variables for user systemd
environment.extraInit = ''
if [ "$(id -u)" = "1000" ]; then
export XDG_RUNTIME_DIR="/run/user/1000"
export DBUS_SESSION_BUS_ADDRESS="unix:path=/run/user/1000/bus"
fi
'';
# Enable PAM for user systemd sessions
security.pam.services.systemd-user = {
startSession = true;
# Workaround for containers - use pam_permit to avoid helper binary issues
text = pkgs.lib.mkForce ''
account required pam_permit.so
session required pam_permit.so
session required pam_env.so conffile=/etc/pam/environment readenv=0
session required ${pkgs.systemd}/lib/security/pam_systemd.so
'';
};
environment.systemPackages = [
cli
(cli.pythonRuntime.withPackages (
ps: with ps; [
pytest
pytest-xdist
]
))
];
};
};
testScript =
{ ... }:
''
start_all()
peer1.wait_for_unit("multi-user.target")
peer1.wait_for_unit("user@1000.service")
# Fix user journal permissions so text-user can read their own logs
peer1.succeed("chown text-user:systemd-journal /var/log/journal/*/user-1000.journal*")
peer1.succeed("chmod 640 /var/log/journal/*/user-1000.journal*")
# Run tests as text-user (environment variables are set automatically)
peer1.succeed("su - text-user -c 'pytest -p no:cacheprovider -o addopts="" -s -n0 ${cli.passthru.sourceWithTests}/clan_lib/service_runner'")
'';
}

View File

@@ -1,26 +0,0 @@
(
{ ... }:
{
name = "test-extra-python-packages";
extraPythonPackages = ps: [ ps.numpy ];
nodes.machine =
{ ... }:
{
networking.hostName = "machine";
};
testScript = ''
import numpy as np
start_all()
machine.wait_for_unit("multi-user.target")
# Test availability of numpy
arr = np.array([1, 2, 3])
print(f"Numpy array: {arr}")
assert len(arr) == 3
'';
}
)

View File

@@ -0,0 +1,115 @@
{
pkgs,
nixosLib,
clan-core,
lib,
...
}:
nixosLib.runTest (
{ ... }:
let
machines = [
"controller1"
"controller2"
"peer1"
"peer2"
"peer3"
];
in
{
imports = [
clan-core.modules.nixosTest.clanTest
];
hostPkgs = pkgs;
name = "wireguard";
clan = {
directory = ./.;
modules."@clan/wireguard" = import ../../clanServices/wireguard/default.nix;
inventory = {
machines = lib.genAttrs machines (_: { });
instances = {
/*
Topology of instance wg-test-one:
controllers: controller1, controller2
peers: peer1, peer2, peer3
*/
wg-test-one = {
module.name = "@clan/wireguard";
module.input = "self";
roles.controller.machines."controller1".settings = {
endpoint = "192.168.1.1";
};
roles.controller.machines."controller2".settings = {
endpoint = "192.168.1.2";
};
roles.peer.machines = {
peer1.settings.controller = "controller1";
peer2.settings.controller = "controller2";
peer3.settings.controller = "controller1";
};
};
# TODO: Will this actually work with conflicting ports? Can we re-use interfaces?
#wg-test-two = {
# module.name = "@clan/wireguard";
# roles.controller.machines."controller1".settings = {
# endpoint = "192.168.1.1";
# port = 51922;
# };
# roles.peer.machines = {
# peer1 = { };
# };
#};
};
};
};
testScript = ''
start_all()
# Show all addresses
machines = [peer1, peer2, peer3, controller1, controller2]
for m in machines:
m.systemctl("start network-online.target")
for m in machines:
m.wait_for_unit("network-online.target")
m.wait_for_unit("systemd-networkd.service")
print("\n\n" + "="*60)
print("STARTING PING TESTS")
print("="*60)
for m1 in machines:
for m2 in machines:
if m1 != m2:
print(f"\n--- Pinging from {m1.name} to {m2.name}.wg-test-one ---")
m1.wait_until_succeeds(f"ping -c1 {m2.name}.wg-test-one >&2")
'';
}
)

View File

@@ -1,25 +0,0 @@
The admin service aggregates components that allow an administrator to log in to and manage the machine.
The following configuration:
1. Enables OpenSSH with root login and adds an SSH public key named `myusersKey` to the machine's authorized_keys via the `allowedKeys` setting.
2. Automatically generates a password for the root user.
```nix
instances = {
admin = {
roles.default.tags = {
all = { };
};
roles.default.settings = {
allowedKeys = {
myusersKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEFDNnynMbFWatSFdANzbJ8iiEKL7+9ZpDaMLrWRQjyH lhebendanz@wintux";
};
};
};
};
```

View File

@@ -3,7 +3,6 @@
manifest.name = "clan-core/admin";
manifest.description = "Adds a root user with ssh access";
manifest.categories = [ "Utility" ];
manifest.readme = builtins.readFile ./README.md;
roles.default = {
description = "Placeholder role to apply the admin service";

View File

@@ -2,7 +2,7 @@ let
public-key = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII6zj7ubTg6z/aDwRNwvM/WlQdUocMprQ8E92NWxl6t+ test@test";
in
{
name = "admin";
name = "service-admin";
clan = {
directory = ./.;

View File

@@ -3,7 +3,7 @@
...
}:
{
name = "borgbackup";
name = "service-borgbackup";
clan = {
directory = ./.;

View File

@@ -1,7 +1,4 @@
{
clanLib,
...
}:
{ ... }:
let
sharedInterface =
{ lib, ... }:
@@ -54,15 +51,15 @@ let
builtins.foldl' (
urls: name:
let
ip = clanLib.vars.getPublicValue {
flake = config.clan.core.settings.directory;
machine = name;
generator = "zerotier";
file = "zerotier-ip";
default = null;
};
ipPath = "${config.clan.core.settings.directory}/vars/per-machine/${name}/zerotier/zerotier-ip/value";
in
if ip != null then urls ++ [ "[${ip}]:${builtins.toString settings.network.port}" ] else urls
if builtins.pathExists ipPath then
let
ip = builtins.readFile ipPath;
in
urls ++ [ "[${ip}]:${builtins.toString settings.network.port}" ]
else
urls
) [ ] (builtins.attrNames ((roles.admin.machines or { }) // (roles.signer.machines or { })))
);
@@ -159,14 +156,9 @@ in
readHostKey =
machine:
let
publicKey = clanLib.vars.getPublicValue {
flake = config.clan.core.settings.directory;
inherit machine;
generator = "data-mesher-host-key";
file = "public_key";
};
path = "${config.clan.core.settings.directory}/vars/per-machine/${machine}/data-mesher-host-key/public_key/value";
in
builtins.elemAt (lib.splitString "\n" publicKey) 1;
builtins.elemAt (lib.splitString "\n" (builtins.readFile path)) 1;
in
{
enable = true;
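
The hunks above drop the `clanLib.vars.getPublicValue` indirection in favour of reading the vars tree directly at `vars/per-machine/<machine>/<generator>/<file>/value`. A hedged sketch of that pattern as a reusable helper (the function name and example arguments are hypothetical):

```nix
let
  # Read a machine's public var straight from the on-disk vars tree,
  # falling back to `default` when the value has not been generated yet.
  readPublicVar =
    { dir, machine, generator, file, default ? null }:
    let
      path = "${dir}/vars/per-machine/${machine}/${generator}/${file}/value";
    in
    if builtins.pathExists path then builtins.readFile path else default;
in
readPublicVar {
  dir = ./.;
  machine = "peer1";
  generator = "zerotier";
  file = "zerotier-ip";
}
```

Note that `builtins.readFile` keeps any trailing newline; callers that need a trimmed string can wrap the result in `lib.removeSuffix "\n"`.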

View File

@@ -9,7 +9,7 @@ in
perSystem =
{ ... }:
{
clan.nixosTests.data-mesher = {
clan.nixosTests.service-data-mesher = {
imports = [ ./tests/vm/default.nix ];
clan.modules."@clan/data-mesher" = module;
};

View File

@@ -2,7 +2,7 @@
...
}:
{
name = "data-mesher";
name = "service-data-mesher";
clan = {
directory = ./.;

View File

@@ -3,7 +3,7 @@
...
}:
{
name = "dyndns";
name = "service-dyndns";
clan = {
directory = ./.;

View File

@@ -1,6 +0,0 @@
[
{
"publickey": "age164wrhlnake7f7duhzs936lq6w49dtg53hcdyxqwxj0agad6tqg2s2u4yta",
"type": "age"
}
]

View File

@@ -1,14 +0,0 @@
{
"data": "ENC[AES256_GCM,data:seLxbv590dO0KvMJmtN7WVvUcH27VYwAc3rmyD7q6ZmwCgswOKx55LFnh0stRDKSZa8K7Dq1x7D9adhZtPAMWX8tbJswBeNMPt8=,iv:G52eugxfTi0tTzH4EN4CWmpyv6feSL34++UVSjb0aAo=,tag:6r10/a7kD2hBAmae0nz2OQ==,type:str]",
"sops": {
"age": [
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBHVC8wZUZJYUl5MXVNa2k5\ndGV1MnFWbUNLNVdxeEtCVUc3MTd0ck9aeFFBCnFhZW40amVYc3FlN1FPRTFSWTJR\nQzhNOERKbnRnSlJVeElNSEM5ZUJsZGsKLS0tIG1uNnlNN3MweHlYczNRTW9xSytu\neThzUmxKZTJBT2lCcTdiNUI4N3paTVEKgS9j2/GVt1KBoggUj9d6UK/mIlK4niLQ\nzVq2BHt3irxQpkpGUogXH2b86zSAOEJFzsL1Rk8HM1mogTG8jqf0qA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-10-19T12:49:11Z",
"mac": "ENC[AES256_GCM,data:T/2xw2mvUi8YALyxz78qG/g/xguoUTeHNzcZfXwwSyCXMg9ircsGGLO9SOVWy/QNkibnw3Yp80tXNJyr4oJH28PhFH7RrRp8jzNdopF49ZNJb2IqJ3C7xNYRZMHfjOCd/raka+ehZq8YGilEpXUWLRk1ere9lbBMh1ycL7jJS3c=,iv:FZbY/jTNPM+p4qD41FD0K7B9zoppGuvnUY5hL/EkmYM=,tag:IF5QTyUkHXWthlAGBn9R8w==,type:str]",
"version": "3.11.0"
}
}

View File

@@ -1,18 +0,0 @@
{
"data": "ENC[AES256_GCM,data:Zu+n+DDYP7rQRTS17PJ6Apo=,iv:5WOs81Pj+S85kdC1AlOXSyPMGDfwM5UD8x7nyRZtRYQ=,tag:2JYkGnLugAni49Upv43o2g==,type:str]",
"sops": {
"age": [
{
"recipient": "age164wrhlnake7f7duhzs936lq6w49dtg53hcdyxqwxj0agad6tqg2s2u4yta",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBlR3RGQ2ZLTkR3ZWxNVCsv\naXJHRjBiVUVYZVRIY2swY2xubGhmb3pLRkNvCldhQUV2WDlqYjZ4ZUFWYXkvUEEw\nZi9XRWw0Mi9mRENDcnI0aENDR2Z4MHcKLS0tIGFQU3Q4WEErbnBjOHpNR1BSR2cr\nRFg0anE1cHExT0sySmxuUks1R05nczAKZO3R6+f9co2+YGO8HPufoq1fLqqrdTWD\n4zqemMmG2BjMRDumxtcKp8CLaZWlJoP4e/+tonfdoe42qmNF5NJcFw==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBzZWo4WGh1cWxKeDhDdlBm\nTVFjVFBIUU9xaGRkanNHaUVUUHN1czNRSUhNCkp5MmwzSGdycmsrZGhaRUhEbXBF\nNUhtdEF6bHZQOGJYUVhFVHlYc3FPODAKLS0tIDBRQ2VGT2IvU1F4MEVabzhYSFJq\nOWZmbGpkQmNSMnNKa0s4K2JXdGgwRlkKUQRREpG5H1mNHSc/cZrdMiSz0veJFR4N\n+W49XL/wQUZwajykwYj++G+dWDO7DQ+fpbB9w4mzbsAmCsXirseTLA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2025-10-19T12:49:11Z",
"mac": "ENC[AES256_GCM,data:0msda7WbQQxXQ+juT7yErgT7NADgnzqEZLTQw+4JPuAE4xcqRIYwrrAALaA0GCCM2aIWlICzJigLCuzQUfSUbIzeP79tEHiKez+NOt/xgSM9ljz7GlsmLd0vzkxdt3WSxP+sHxy0S866N2sLMUkLqPGdqeTjB+Jji5ghGhzk9ys=,iv:8UU7iA4SdR6ZlVolm708l2Iea0sQYRT+5wPBBP5tpS0=,tag:VQXslAlqLqs1QEkwW6x6qg==,type:str]",
"version": "3.11.0"
}
}

View File

@@ -1,12 +0,0 @@
[Garage](https://garagehq.deuxfleurs.fr/) is an open-source, S3-compatible distributed object storage service for self-hosting.
This module provisions a single-instance S3 bucket. To customize its behavior, set `services.garage.settings` in your Nix configuration.
Example configuration:
```nix
instances = {
garage = {
roles.default.machines."server" = {};
};
};
```
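
Since the module itself only provisions a single instance, further customisation goes through the freeform `services.garage.settings` option mentioned above. A sketch of a per-machine override; the key names mirror Garage's `garage.toml` and are illustrative, so check them against your Garage version:

```nix
{
  services.garage.settings = {
    replication_mode = "none"; # single node, no replication
    rpc_bind_addr = "[::]:3901"; # internal RPC endpoint
    s3_api = {
      s3_region = "garage"; # region label reported to S3 clients
      api_bind_addr = "[::]:3900"; # where the S3 API listens
    };
  };
}
```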

View File

@@ -4,7 +4,6 @@
manifest.name = "clan-core/garage";
manifest.description = "S3-compatible object store for small self-hosted geo-distributed deployments";
manifest.categories = [ "System" ];
manifest.readme = builtins.readFile ./README.md;
roles.default = {
description = "Placeholder role to apply the garage service";

View File

@@ -3,7 +3,7 @@
...
}:
{
name = "garage";
name = "service-garage";
clan = {
directory = ./.;

View File

@@ -1 +0,0 @@
This is a test README just to appease the eval warnings if we don't have one

View File

@@ -9,7 +9,6 @@
_class = "clan.service";
manifest.name = "clan-core/hello-word";
manifest.description = "This is a test";
manifest.readme = builtins.readFile ./README.md;
# This service provides two roles: "morning" and "evening". Roles can be
# defined in this file directly (e.g. the "morning" role) or split up into a
@@ -35,13 +34,10 @@
settings,
# The name of this instance of the service
instanceName,
# The current machine
machine,
# All roles of this service, with their assigned machines
roles,
...
}:
{

View File

@@ -1,5 +1,5 @@
{
name = "hello-service";
name = "service-hello-service";
clan = {
directory = ./.;

View File

@@ -1,27 +0,0 @@
🚧🚧🚧 Experimental 🚧🚧🚧
Use at your own risk.
We are still refining its interfaces, instability and breakages are expected.
---
This module is part of Clan's [networking interface](https://docs.clan.lol/guides/networking/networking/).
Clan's networking module automatically manages connections across available network transports and falls back intelligently. When you run `clan ssh` or `clan machines update`, Clan attempts each configured network in priority order until a connection succeeds.
The example below shows how to configure direct SSH targets: `server1` is reachable via a public domain and `server2` via a static IP address. By default, the `internet` module has the highest priority among networks.
```nix
inventory.instances = {
# Direct SSH with fallback support
internet = {
roles.default.machines.server1 = {
settings.host = "server1.example.com";
};
roles.default.machines.server2 = {
settings.host = "192.168.1.100";
};
};
};
```

View File

@@ -7,7 +7,6 @@
"System"
"Network"
];
manifest.readme = builtins.readFile ./README.md;
roles.default = {
description = "Placeholder role to apply the internet service";
interface =
@@ -16,7 +15,6 @@
options = {
host = lib.mkOption {
type = lib.types.str;
default = "";
description = ''
ip address or hostname (domain) of the machine
'';

View File

@@ -1,28 +0,0 @@
This module sets up the [KDE Plasma](https://kde.org) Desktop environment.
!!! Note "Customisation"
This service intentionally does not provide any settings or customisation
options, as desktop preferences are highly subjective. Clan currently
supports only this default desktop configuration. Any additional
customisation can be done via the `extraModules` option. Furthermore, if you
want to use a different desktop environment or compositor (e.g. Gnome or
sway), we encourage you to build your own
[Clan Service](https://docs.clan.lol/guides/services/community/) or have a
look at the [Community Services](https://docs.clan.lol/services/community/).
## Example Usage
```nix
inventory = {
instances = {
kde = {
# Deploy on all machines
roles.default.tags.all = { };
# Or individual hosts
roles.default.machines.laptop = { };
};
};
};
```
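
For the customisation route mentioned above, a sketch that layers personal tweaks on top of the stock desktop via `extraModules` (assuming the inventory role option of that name; the module path is hypothetical):

```nix
inventory.instances.kde = {
  roles.default.tags.all = { };
  # Extra NixOS modules applied alongside the service's own configuration.
  roles.default.extraModules = [ ./kde-tweaks.nix ];
};
```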

View File

@@ -1,19 +0,0 @@
{ ... }:
{
_class = "clan.service";
manifest.name = "clan-core/kde";
manifest.description = "Sets up a graphical desktop environment";
manifest.categories = [ "Desktop" ];
manifest.readme = builtins.readFile ./README.md;
roles.default = {
description = "KDE/Plasma (wayland): Full-featured desktop environment with modern Qt-based interface";
perInstance.nixosModule = {
services = {
displayManager.sddm.enable = true;
displayManager.sddm.wayland.enable = true;
desktopManager.plasma6.enable = true;
};
};
};
}

View File

@@ -1,24 +0,0 @@
{
self,
lib,
...
}:
let
module = lib.modules.importApply ./default.nix {
inherit (self) packages;
};
in
{
clan.modules = {
kde = module;
};
perSystem =
{ ... }:
{
clan.nixosTests.kde = {
imports = [ ./tests/vm/default.nix ];
clan.modules.kde = module;
};
};
}

View File

@@ -1,30 +0,0 @@
{
name = "kde";
clan = {
directory = ./.;
inventory = {
machines.client = { };
instances = {
kde = {
module.name = "kde";
module.input = "self";
roles.default.machines."client" = { };
};
};
};
};
testScript = ''
start_all()
client.systemctl("start network-online.target")
client.wait_for_unit("network-online.target")
client.wait_for_unit("graphical.target")
client.wait_for_unit("display-manager.service")
client.succeed("systemctl status display-manager.service")
'';
}

View File

@@ -1,4 +0,0 @@
{
"publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"type": "age"
}

View File

@@ -1,6 +1,6 @@
{ ... }:
{
name = "localbackup";
name = "service-localbackup";
clan = {
directory = ./.;

View File

@@ -1,4 +0,0 @@
{
"publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"type": "age"
}

View File

@@ -1,23 +0,0 @@
This NixOS module installs and configures Synapse — a federated Matrix homeserver with end-to-end encryption — and optionally provides the Element web client.
The example below demonstrates a minimal setup that includes:
- Element web client.
- Synapse backed by PostgreSQL and nginx.
- An admin user and an additional regular user.
Example configuration:
```nix
instances = {
matrix-synapse = {
roles.default.machines."jon".settings = {
acmeEmail = "admins@clan.lol";
server_tld = "clan.test";
app_domain = "matrix.clan.test";
users.admin.admin = true;
users.someuser = { };
};
};
};
```

View File

@@ -4,7 +4,6 @@
manifest.name = "clan-core/matrix-synapese";
manifest.description = "A federated messaging server with end-to-end encryption.";
manifest.categories = [ "Social" ];
manifest.readme = builtins.readFile ./README.md;
roles.default = {
description = "Placeholder role to apply the matrix-synapse service";

View File

@@ -44,10 +44,8 @@
pkgs.openssl
];
# TODO: Implement automated certificate rotation instead of using a 100-year expiration
script = ''
openssl req -x509 -nodes -newkey rsa:4096 \
-days 36500 \
-keyout "$out"/key \
-out "$out"/crt \
-subj "/C=US/ST=CA/L=San Francisco/O=Example Corp/OU=IT/CN=example.com"

Some files were not shown because too many files have changed in this diff.