Compare commits

..

1 Commit

Author SHA1 Message Date
pinpox
99270051cd Add prometheus role to monitoring 2025-08-15 11:22:15 +02:00
430 changed files with 6934 additions and 14760 deletions

View File

@@ -0,0 +1,9 @@
name: checks
on:
pull_request:
jobs:
checks-impure:
runs-on: nix
steps:
- uses: actions/checkout@v4
- run: nix run .#impure-checks

1
.gitignore vendored
View File

@@ -39,6 +39,7 @@ select
# Generated files
pkgs/clan-app/ui/api/API.json
pkgs/clan-app/ui/api/API.ts
pkgs/clan-app/ui/api/Inventory.ts
pkgs/clan-app/ui/api/modules_schemas.json
pkgs/clan-app/ui/api/schema.json
pkgs/clan-app/ui/.fonts

View File

@@ -1,20 +0,0 @@
clanServices/.* @pinpox @kenji
lib/test/container-test-driver/.* @DavHau @mic92
lib/modules/inventory/.* @hsjobeki
lib/modules/inventoryClass/.* @hsjobeki
pkgs/clan-app/ui/.* @hsjobeki @brianmcgee
pkgs/clan-app/clan_app/.* @qubasa @hsjobeki
pkgs/clan-cli/clan_cli/.* @lassulus @mic92 @kenji
pkgs/clan-cli/clan_cli/(secrets|vars)/.* @DavHau @lassulus
pkgs/clan-cli/clan_lib/log_machines/.* @Qubasa
pkgs/clan-cli/clan_lib/ssh/.* @Qubasa @Mic92 @lassulus
pkgs/clan-cli/clan_lib/tags/.* @hsjobeki
pkgs/clan-cli/clan_lib/persist/.* @hsjobeki
pkgs/clan-cli/clan_lib/flake/.* @lassulus
pkgs/clan-cli/api.py @hsjobeki
pkgs/clan-cli/openapi.py @hsjobeki

View File

@@ -8,7 +8,7 @@ Our mission is simple: to democratize computing by providing tools that empower
## Features of Clan
- **Full-Stack System Deployment:** Utilize Clan's toolkit alongside Nix's reliability to build and manage systems effortlessly.
- **Full-Stack System Deployment:** Utilize Clan's toolkit alongside Nix's reliability to build and manage systems effortlessly.
- **Overlay Networks:** Secure, private communication channels between devices.
- **Virtual Machine Integration:** Seamless operation of VM applications within the main operating system.
- **Robust Backup Management:** Long-term, self-hosted data preservation.

View File

@@ -36,6 +36,7 @@ in
++ filter pathExists [
./devshell/flake-module.nix
./flash/flake-module.nix
./impure/flake-module.nix
./installation/flake-module.nix
./update/flake-module.nix
./morph/flake-module.nix
@@ -138,6 +139,33 @@ in
nixosTests
// flakeOutputs
// {
# TODO: Automatically provide this check to downstream users to check their modules
clan-modules-json-compatible =
let
allSchemas = lib.mapAttrs (
_n: m:
let
schema =
(self.clanLib.evalService {
modules = [ m ];
prefix = [
"checks"
system
];
}).config.result.api.schema;
in
schema
) self.clan.modules;
in
pkgs.runCommand "combined-result"
{
schemaFile = builtins.toFile "schemas.json" (builtins.toJSON allSchemas);
}
''
mkdir -p $out
cat $schemaFile > $out/allSchemas.json
'';
clan-core-for-checks = pkgs.runCommand "clan-core-for-checks" { } ''
cp -r ${privateInputs.clan-core-for-checks} $out
chmod -R +w $out

View File

@@ -0,0 +1,51 @@
{
perSystem =
{
pkgs,
lib,
self',
...
}:
{
# a script that executes all other checks
packages.impure-checks = pkgs.writeShellScriptBin "impure-checks" ''
#!${pkgs.bash}/bin/bash
set -euo pipefail
unset CLAN_DIR
export PATH="${
lib.makeBinPath (
[
pkgs.gitMinimal
pkgs.nix
pkgs.coreutils
pkgs.rsync # needed to have rsync installed on the dummy ssh server
]
++ self'.packages.clan-cli-full.runtimeDependencies
)
}"
ROOT=$(git rev-parse --show-toplevel)
cd "$ROOT/pkgs/clan-cli"
# Set up custom git configuration for tests
export GIT_CONFIG_GLOBAL=$(mktemp)
git config --file "$GIT_CONFIG_GLOBAL" user.name "Test User"
git config --file "$GIT_CONFIG_GLOBAL" user.email "test@example.com"
export GIT_CONFIG_SYSTEM=/dev/null
# this disables dynamic dependency loading in clan-cli
export CLAN_NO_DYNAMIC_DEPS=1
jobs=$(nproc)
# Spawning worker in pytest is relatively slow, so we limit the number of jobs to 13
# (current number of impure tests)
jobs="$((jobs > 13 ? 13 : jobs))"
nix develop "$ROOT#clan-cli" -c bash -c "TMPDIR=/tmp python -m pytest -n $jobs -m impure ./clan_cli $@"
# Clean up temporary git config
rm -f "$GIT_CONFIG_GLOBAL"
'';
};
}

View File

@@ -232,7 +232,6 @@
"-i", ssh_conn.ssh_key,
"--option", "store", os.environ['CLAN_TEST_STORE'],
"--update-hardware-config", "nixos-facter",
"--no-persist-state",
]
subprocess.run(clan_cmd, check=True)
@@ -242,7 +241,7 @@
target.shutdown()
except BrokenPipeError:
# qemu has already exited
target.connected = False
pass
# Create a new machine instance that boots from the installed system
installed_machine = create_test_machine(target, "${pkgs.qemu_test}", name="after_install")
@@ -276,7 +275,7 @@
"${self.checks.x86_64-linux.clan-core-for-checks}",
"${closureInfo}"
)
# Set up SSH connection
ssh_conn = setup_ssh_connection(
target,

View File

@@ -24,5 +24,12 @@
};
};
imports = [ ./telegraf.nix ];
# roles.prometheus = {
# interface = { lib, ... }: { };
# };
imports = [
./telegraf.nix
./prometheus.nix
];
}

View File

@@ -0,0 +1,182 @@
{
roles.prometheus.perInstance =
{ settings, roles, ... }:
{
nixosModule =
{ pkgs, lib, ... }:
{
# imports = [
# # ./matrix-alertmanager.nix
# # ./irc-alertmanager.nix
# # ./rules.nix
# ];
services.prometheus = {
# webExternalUrl = "https://prometheus.thalheim.io";
extraFlags = [ "--storage.tsdb.retention.time=30d" ];
scrapeConfigs = [
{
job_name = "telegraf";
scrape_interval = "60s";
metrics_path = "/metrics";
static_configs = [
(map (host: {
labels.host = host;
# labels.org = "TODO";
targets = [ "${host}.clan:9273" ];
}) (lib.attrNames roles.telegraf.machines))
# {
# # labels.host = "rauter.r:9273";
# # labels.org = "TODO";
# targets = map (host: "${host}.clan:9273") lib.attrNames roles.telegraf.machines;
# }
];
}
# {
# job_name = "gitea";
# scrape_interval = "60s";
# metrics_path = "/metrics";
#
# scheme = "https";
# static_configs = [ { targets = [ "git.thalheim.io:443" ]; } ];
# }
];
alertmanagers = [ { static_configs = [ { targets = [ "localhost:9093" ]; } ]; } ];
};
services.prometheus.alertmanager = {
enable = true;
# environmentFile = config.sops.secrets.alertmanager.path;
# webExternalUrl = "https://alertmanager.thalheim.io";
# listenAddress = "[::1]";
# configuration = {
# global = {
# # The smarthost and SMTP sender used for mail notifications.
# smtp_smarthost = "mail.thalheim.io:587";
# smtp_from = "alertmanager@thalheim.io";
# smtp_auth_username = "alertmanager@thalheim.io";
# smtp_auth_password = "$SMTP_PASSWORD";
# };
# route = {
# receiver = "default";
# routes = [
# {
# group_by = [ "host" ];
# match_re.org = "krebs";
# group_wait = "5m";
# group_interval = "5m";
# repeat_interval = "4h";
# receiver = "krebs";
# }
# {
# group_by = [ "host" ];
# match_re.org = "nixos-wiki";
# group_wait = "5m";
# group_interval = "5m";
# repeat_interval = "4h";
# receiver = "nixos-wiki";
# }
# {
# group_by = [ "host" ];
# match_re.org = "numtide";
# group_wait = "5m";
# group_interval = "5m";
# repeat_interval = "4h";
# receiver = "numtide";
# }
# {
# group_by = [ "host" ];
# match_re.org = "clan-lol";
# group_wait = "5m";
# group_interval = "5m";
# repeat_interval = "4h";
# receiver = "clan-lol";
# }
# {
# group_by = [ "host" ];
# match_re.org = "dave";
# group_wait = "5m";
# group_interval = "5m";
# repeat_interval = "4h";
# receiver = "dave";
# }
# {
# group_by = [ "host" ];
# group_wait = "30s";
# group_interval = "2m";
# repeat_interval = "2h";
# receiver = "all";
# }
# ];
# };
# receivers = [
# {
# name = "krebs";
# webhook_configs = [
# {
# url = "http://127.0.0.1:9223/";
# max_alerts = 5;
# }
# ];
# }
# {
# name = "numtide";
# webhook_configs = [
# # TODO
# #{
# # send_resolved = true;
# # url = "https://chat.ntd.one/plugins/alertmanager/api/webhook?token='xxxxxxxxxxxxxxxxxxx-yyyyyyy'";
# #}
# ];
# }
# {
# name = "nixos-wiki";
# webhook_configs = [
# {
# url = "http://localhost:9088/alert";
# max_alerts = 5;
# }
# ];
# }
# {
# name = "clan-lol";
# webhook_configs = [
# # TODO
# #{
# # url = "http://localhost:4050/services/hooks/YWxlcnRtYW5hZ2VyX3NlcnZpY2U";
# # max_alerts = 5;
# #}
# ];
# }
# {
# name = "dave";
# telegram_configs = [
# {
# chat_id = 42927997;
# bot_token = "$TELEGRAM_BOT_TOKEN";
# }
# ];
# }
# {
# name = "all";
# # pushover_configs = [
# # {
# # user_key = "$PUSHOVER_USER_KEY";
# # token = "$PUSHOVER_TOKEN";
# # priority = "0";
# # }
# # ];
# }
# { name = "default"; }
# ];
# };
};
};
};
}

View File

@@ -4,66 +4,22 @@
{
nixosModule =
{
config,
pkgs,
lib,
...
}:
let
jsonpath = "/tmp/telegraf.json";
auth_user = "prometheus";
in
{ pkgs, lib, ... }:
{
networking.firewall.interfaces = lib.mkIf (settings.allowAllInterfaces == false) (
builtins.listToAttrs (
map (name: {
inherit name;
value.allowedTCPPorts = [
9273
9990
];
value.allowedTCPPorts = [ 9273 ];
}) settings.interfaces
)
);
networking.firewall.allowedTCPPorts = lib.mkIf (settings.allowAllInterfaces == true) [
9273
9990
];
clan.core.vars.generators."telegraf" = {
files.password.restartUnits = [ "telegraf.service" ];
files.password-env.restartUnits = [ "telegraf.service" ];
files.miniserve-auth.restartUnits = [ "telegraf.service" ];
runtimeInputs = [
pkgs.coreutils
pkgs.xkcdpass
pkgs.mkpasswd
];
script = ''
PASSWORD=$(xkcdpass --numwords 4 --delimiter - --count 1 | tr -d "\n")
echo "BASIC_AUTH_PWD=$PASSWORD" > "$out"/password-env
echo "${auth_user}:$PASSWORD" > "$out"/miniserve-auth
echo "$PASSWORD" | tr -d "\n" > "$out"/password
'';
};
systemd.services.telegraf-json = {
enable = true;
wantedBy = [ "multi-user.target" ];
script = "${pkgs.miniserve}/bin/miniserve -p 9990 ${jsonpath} --auth-file ${config.clan.core.vars.generators.telegraf.files.miniserve-auth.path}";
};
networking.firewall.allowedTCPPorts = lib.mkIf (settings.allowAllInterfaces == true) [ 9273 ];
services.telegraf = {
enable = true;
environmentFiles = [
(builtins.toString config.clan.core.vars.generators.telegraf.files.password-env.path)
];
extraConfig = {
agent.interval = "60s";
inputs = {
@@ -77,34 +33,22 @@
exec =
let
nixosSystems = pkgs.writeShellScript "current-system" ''
printf "nixos_systems,current_system=%s,booted_system=%s,current_kernel=%s,booted_kernel=%s present=0\n" \
"$(readlink /run/current-system)" "$(readlink /run/booted-system)" \
"$(basename $(echo /run/current-system/kernel-modules/lib/modules/*))" \
"$(basename $(echo /run/booted-system/kernel-modules/lib/modules/*))"
currentSystemScript = pkgs.writeShellScript "current-system" ''
printf "current_system,path=%s present=0\n" $(readlink /run/current-system)
'';
in
[
{
# Expose the path to current-system as metric. We use
# this to check if the machine is up-to-date.
commands = [ nixosSystems ];
commands = [ currentSystemScript ];
data_format = "influx";
}
];
};
# sadly there doesn't seem to be a telegraf http_client output plugin
outputs.prometheus_client = {
listen = ":9273";
metric_version = 2;
basic_username = "${auth_user}";
basic_password = "$${BASIC_AUTH_PWD}";
};
outputs.file = {
files = [ jsonpath ];
data_format = "json";
json_timestamp_units = "1s";
};
};
};

View File

@@ -17,20 +17,6 @@
};
};
# Deploy user Carol on all machines. Prompt only once and use the
# same password on all machines. (`share = true`)
user-carol = {
module = {
name = "users";
input = "clan";
};
roles.default.tags.all = { };
roles.default.settings = {
user = "carol";
share = true;
};
};
# Deploy user bob only on his laptop. Prompt for a password.
user-bob = {
module = {
@@ -43,44 +29,3 @@
};
}
```
## Migration from `root-password` module
The deprecated `clan.root-password` module has been replaced by the `users` module. Here's how to migrate:
### 1. Update your flake configuration
Replace the `root-password` module import with a `users` service instance:
```nix
# OLD - Remove this from your nixosModules:
imports = [
self.inputs.clan-core.clanModules.root-password
];
# NEW - Add to inventory.instances or machines/flake-module.nix:
instances = {
users-root = {
module.name = "users";
module.input = "clan-core";
roles.default.tags.nixos = { };
roles.default.settings = {
user = "root";
prompt = false; # Set to true if you want to be prompted
groups = [ ];
};
};
};
```
### 2. Migrate vars
The vars structure has changed from `root-password` to `user-password-root`:
```bash
# For each machine, rename the vars directories:
cd vars/per-machine/<machine-name>/
mv root-password user-password-root
mv user-password-root/password-hash user-password-root/user-password-hash
mv user-password-root/password user-password-root/user-password
```

View File

@@ -59,17 +59,6 @@
- "input" - Allows the user to access input devices.
'';
};
share = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = ''
Whether the user should have the same password on all machines.
By default, you will be prompted for a new password for every host.
Unless `generate` is set to `true`.
'';
};
};
};
@@ -93,6 +82,7 @@
};
clan.core.vars.generators."user-password-${settings.user}" = {
files.user-password-hash.neededFor = "users";
files.user-password-hash.restartUnits = lib.optional (config.services.userborn.enable) "userborn.service";
files.user-password.deploy = false;
@@ -117,8 +107,6 @@
pkgs.mkpasswd
];
share = settings.share;
script =
(
if settings.prompt then

View File

@@ -1,5 +1,6 @@
#!/usr/bin/env python3
"""IPv6 address allocator for WireGuard networks.
"""
IPv6 address allocator for WireGuard networks.
Network layout:
- Base network: /40 ULA prefix (fd00::/8 + 32 bits from hash)
@@ -19,7 +20,8 @@ def hash_string(s: str) -> str:
def generate_ula_prefix(instance_name: str) -> ipaddress.IPv6Network:
"""Generate a /40 ULA prefix from instance name.
"""
Generate a /40 ULA prefix from instance name.
Format: fd{32-bit hash}/40
This gives us fd00:0000:0000::/40 through fdff:ffff:ff00::/40
@@ -44,10 +46,10 @@ def generate_ula_prefix(instance_name: str) -> ipaddress.IPv6Network:
def generate_controller_subnet(
base_network: ipaddress.IPv6Network,
controller_name: str,
base_network: ipaddress.IPv6Network, controller_name: str
) -> ipaddress.IPv6Network:
"""Generate a /56 subnet for a controller from the base /40 network.
"""
Generate a /56 subnet for a controller from the base /40 network.
We have 16 bits (40 to 56) to allocate controller subnets.
This allows for 65,536 possible controller subnets.
@@ -66,7 +68,8 @@ def generate_controller_subnet(
def generate_peer_suffix(peer_name: str) -> str:
"""Generate a unique 64-bit host suffix for a peer.
"""
Generate a unique 64-bit host suffix for a peer.
This suffix will be used in all controller subnets to create unique addresses.
Format: :xxxx:xxxx:xxxx:xxxx (64 bits)
@@ -83,7 +86,7 @@ def generate_peer_suffix(peer_name: str) -> str:
def main() -> None:
if len(sys.argv) < 4:
print(
"Usage: ipv6_allocator.py <output_dir> <instance_name> <controller|peer> <machine_name>",
"Usage: ipv6_allocator.py <output_dir> <instance_name> <controller|peer> <machine_name>"
)
sys.exit(1)
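The address layout described in the docstrings above can be hard to picture from the diff alone. Below is a minimal, self-contained sketch of the scheme, assuming SHA-256 as the hash and hypothetical helper names (`ula_prefix_for`, `controller_subnet_for`, `peer_address_for`); the real allocator may differ in hashing and bit-packing details:

```python
# Illustrative sketch only: fd (8 bits) + 32 hash bits -> /40 base prefix,
# 16 more bits -> /56 controller subnet, 64-bit suffix -> peer address.
import hashlib
import ipaddress


def ula_prefix_for(instance_name: str) -> ipaddress.IPv6Network:
    # First 32 bits of a SHA-256 digest appended to the fd00::/8 ULA space.
    h = hashlib.sha256(instance_name.encode()).hexdigest()[:8]
    return ipaddress.IPv6Network(f"fd{h[:2]}:{h[2:6]}:{h[6:8]}00::/40")


def controller_subnet_for(base: ipaddress.IPv6Network, controller: str) -> ipaddress.IPv6Network:
    # The 16 bits between /40 and /56 select one of 65,536 controller subnets.
    index = int(hashlib.sha256(controller.encode()).hexdigest()[:4], 16)
    return ipaddress.IPv6Network((int(base.network_address) + (index << (128 - 56)), 56))


def peer_address_for(subnet: ipaddress.IPv6Network, peer: str) -> ipaddress.IPv6Address:
    # The same 64-bit suffix is reused in every controller subnet.
    suffix = int(hashlib.sha256(peer.encode()).hexdigest()[:16], 16)
    return ipaddress.IPv6Address(int(subnet.network_address) + suffix)


base = ula_prefix_for("wg-instance")
subnet = controller_subnet_for(base, "controller1")
print(base, subnet, peer_address_for(subnet, "peer1"))
```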

24
devFlake/flake.lock generated
View File

@@ -3,10 +3,10 @@
"clan-core-for-checks": {
"flake": false,
"locked": {
"lastModified": 1756081310,
"narHash": "sha256-wj1H5Pr6w4AsB+nG3K07SgSIDZ7jDCkGnh5XXWLdtk8=",
"lastModified": 1755093452,
"narHash": "sha256-NKBss7QtNnOqYVyJmYCgaCvYZK0mpQTQc9fLgE1mGyk=",
"ref": "main",
"rev": "7b926d43dc361cd8d3ad3c14a2e7e75375b7d215",
"rev": "7e97734797f0c6bd3c2d3a51cf54a2a6b371c222",
"shallow": true,
"type": "git",
"url": "https://git.clan.lol/clan/clan-core"
@@ -84,11 +84,11 @@
},
"nixpkgs-dev": {
"locked": {
"lastModified": 1756050191,
"narHash": "sha256-lMtTT4rv5On7D0P4Z+k7UkvbAKKuVGRbJi/VJeRCQwI=",
"lastModified": 1755166611,
"narHash": "sha256-sk8pK8kWz4IE4ErAjKE1d8tMChY6VQR32U4yS68FIog=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "759dcc6981cd4aa222d36069f78fe7064d563305",
"rev": "1a341e3c908f4a3105e737bd13af0318dc06fbe3",
"type": "github"
},
"original": {
@@ -107,11 +107,11 @@
]
},
"locked": {
"lastModified": 1755555503,
"narHash": "sha256-WiOO7GUOsJ4/DoMy2IC5InnqRDSo2U11la48vCCIjjY=",
"lastModified": 1754869408,
"narHash": "sha256-G1zNuxiCDfqNQVoL9j5v+ZYfUER7AI158ev98/JC8LI=",
"owner": "NuschtOS",
"repo": "search",
"rev": "6f3efef888b92e6520f10eae15b86ff537e1d2ea",
"rev": "2f5478267557a0f7a70d953b6c0867a5b4282739",
"type": "github"
},
"original": {
@@ -165,11 +165,11 @@
"nixpkgs": []
},
"locked": {
"lastModified": 1755934250,
"narHash": "sha256-CsDojnMgYsfshQw3t4zjRUkmMmUdZGthl16bXVWgRYU=",
"lastModified": 1754847726,
"narHash": "sha256-2vX8QjO5lRsDbNYvN9hVHXLU6oMl+V/PsmIiJREG4rE=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "74e1a52d5bd9430312f8d1b8b0354c92c17453e5",
"rev": "7d81f6fb2e19bf84f1c65135d1060d829fae2408",
"type": "github"
},
"original": {

2
docs/.gitignore vendored
View File

@@ -1,5 +1,5 @@
/site/reference
/site/static
/site/options
/site/options-page
/site/openapi.json
!/site/static/extra.css

View File

@@ -6,7 +6,7 @@ edit_uri: _edit/main/docs/docs/
validation:
omitted_files: warn
absolute_links: ignore
absolute_links: warn
unrecognized_links: warn
markdown_extensions:
@@ -64,7 +64,7 @@ nav:
- Disk Encryption: guides/disk-encryption.md
- Age Plugins: guides/age-plugins.md
- Secrets management: guides/secrets.md
- Networking: guides/networking.md
- Target Host: guides/target-host.md
- Zerotier VPN: guides/mesh-vpn.md
- Secure Boot: guides/secure-boot.md
- Flake-parts: guides/flake-parts.md
@@ -78,7 +78,7 @@ nav:
- Writing a Disko Template: guides/disko-templates/community.md
- Migrations:
- Migrate existing Flakes: guides/migrations/migration-guide.md
- Migrate from clan modules to services: guides/migrations/migrate-inventory-services.md
- Migrate inventory Services: guides/migrations/migrate-inventory-services.md
- Facts Vars Migration: guides/migrations/migration-facts-vars.md
- Disk id: guides/migrations/disk-id.md
- Concepts:
@@ -88,7 +88,7 @@ nav:
- Templates: concepts/templates.md
- Reference:
- Overview: reference/index.md
- Browse Options: "/options"
- Clan Options: options.md
- Services:
- Overview:
- reference/clanServices/index.md
@@ -155,7 +155,6 @@ nav:
- 05-deployment-parameters: decisions/05-deployment-parameters.md
- Template: decisions/_template.md
- Glossary: reference/glossary.md
- Browse Options: "/options"
docs_dir: site
site_dir: out

View File

@@ -54,9 +54,9 @@ pkgs.stdenv.mkDerivation {
chmod -R +w ./site/reference
echo "Generated API documentation in './site/reference/' "
rm -rf ./site/options
cp -r ${docs-options} ./site/options
chmod -R +w ./site/options
rm -r ./site/options-page || true
cp -r ${docs-options} ./site/options-page
chmod -R +w ./site/options-page
mkdir -p ./site/static/asciinema-player
ln -snf ${asciinema-player-js} ./site/static/asciinema-player/asciinema-player.min.js

View File

@@ -40,7 +40,6 @@ writeShellScriptBin "deploy-docs" ''
rsync \
--checksum \
--delete \
-e "ssh -o StrictHostKeyChecking=no $sshExtraArgs" \
-a ${docs}/ \
www@clan.lol:/var/www/docs.clan.lol

View File

@@ -18,8 +18,27 @@
inherit (self) clanModules;
clan-core = self;
inherit pkgs;
evalClanModules = self.clanLib.evalClan.evalClanModules;
modulesRolesOptions = self.clanLib.evalClan.evalClanModulesWithRoles {
allModules = self.clanModules;
inherit pkgs;
clan-core = self;
};
};
# Frontmatter for clanModules
clanModulesFrontmatter =
let
docs = pkgs.nixosOptionsDoc {
options = self.clanLib.modules.frontmatterOptions;
transformOptions = self.clanLib.docs.stripStorePathsFromDeclarations;
};
in
docs.optionsJSON;
# Options available when imported via ` inventory.${moduleName}....${rolesName} `
clanModulesViaRoles = pkgs.writeText "info.json" (builtins.toJSON jsonDocs.clanModulesViaRoles);
# clan service options
clanModulesViaService = pkgs.writeText "info.json" (builtins.toJSON jsonDocs.clanModulesViaService);
@@ -69,10 +88,12 @@
}
}
export CLAN_CORE_DOCS=${jsonDocs.clanCore}/share/doc/nixos/options.json
# A file that contains the links to all clanModule docs
export CLAN_MODULES_VIA_ROLES=${clanModulesViaRoles}
export CLAN_MODULES_VIA_SERVICE=${clanModulesViaService}
export CLAN_SERVICE_INTERFACE=${self'.legacyPackages.clan-service-module-interface}/share/doc/nixos/options.json
# Frontmatter format for clanModules
export CLAN_MODULES_FRONTMATTER_DOCS=${clanModulesFrontmatter}/share/doc/nixos/options.json
export BUILD_CLAN_PATH=${buildClanOptions}/share/doc/nixos/options.json
@@ -86,6 +107,7 @@
legacyPackages = {
inherit
jsonDocs
clanModulesViaRoles
clanModulesViaService
;
};

View File

@@ -1,5 +1,7 @@
{
modulesRolesOptions,
nixosOptionsDoc,
evalClanModules,
lib,
pkgs,
clan-core,
@@ -8,36 +10,21 @@
let
inherit (clan-core.clanLib.docs) stripStorePathsFromDeclarations;
transformOptions = stripStorePathsFromDeclarations;
nixosConfigurationWithClan =
let
evaled = lib.evalModules {
class = "nixos";
modules = [
# Basemodule
(
{ config, ... }:
{
imports = (import (pkgs.path + "/nixos/modules/module-list.nix"));
nixpkgs.pkgs = pkgs;
clan.core.name = "dummy";
system.stateVersion = config.system.nixos.release;
# Set this to work around a bug where `clan.core.settings.machine.name`
# is forced due to `networking.interfaces` being forced
# somewhere in the nixpkgs options
facter.detected.dhcp.enable = lib.mkForce false;
}
)
{
clan.core.settings.directory = clan-core;
}
clan-core.nixosModules.clanCore
];
};
in
evaled;
in
{
clanModulesViaRoles = lib.mapAttrs (
_moduleName: rolesOptions:
lib.mapAttrs (
_roleName: options:
(nixosOptionsDoc {
inherit options;
warningsAreErrors = true;
inherit transformOptions;
}).optionsJSON
) rolesOptions
) modulesRolesOptions;
# Test with:
# nix build .\#legacyPackages.x86_64-linux.clanModulesViaService
clanModulesViaService = lib.mapAttrs (
@@ -51,6 +38,7 @@ in
{
roles = lib.mapAttrs (
_roleName: role:
(nixosOptionsDoc {
transformOptions =
opt:
@@ -66,13 +54,20 @@ in
warningsAreErrors = true;
}).optionsJSON
) evaluatedService.config.roles;
manifest = evaluatedService.config.manifest;
}
) clan-core.clan.modules;
clanCore =
(nixosOptionsDoc {
options = nixosConfigurationWithClan.options.clan.core;
options =
((evalClanModules {
modules = [ ];
inherit pkgs clan-core;
}).options
).clan.core or { };
warningsAreErrors = true;
inherit transformOptions;
}).optionsJSON;

View File

@@ -25,7 +25,7 @@
serviceModules = self.clan.modules;
baseHref = "/options/";
baseHref = "/options-page/";
getRoles =
module:
@@ -126,7 +126,7 @@
nestedSettingsOption = mkOption {
type = types.raw;
description = ''
See [instances.${name}.roles.${roleName}.settings](${baseHref}?option_scope=0&option=inventory.instances.${name}.roles.${roleName}.settings)
See [instances.${name}.roles.${roleName}.settings](${baseHref}?option_scope=0&option=instances.${name}.roles.${roleName}.settings)
'';
};
settingsOption = mkOption {
@@ -161,42 +161,6 @@
}
];
baseModule =
# Module
{ config, ... }:
{
imports = (import (pkgs.path + "/nixos/modules/module-list.nix"));
nixpkgs.pkgs = pkgs;
clan.core.name = "dummy";
system.stateVersion = config.system.nixos.release;
# Set this to work around a bug where `clan.core.settings.machine.name`
# is forced due to `networking.interfaces` being forced
# somewhere in the nixpkgs options
facter.detected.dhcp.enable = lib.mkForce false;
};
evalClanModules =
let
evaled = lib.evalModules {
class = "nixos";
modules = [
baseModule
{
clan.core.settings.directory = self;
}
self.nixosModules.clanCore
];
};
in
evaled;
coreOptions =
(pkgs.nixosOptionsDoc {
options = (evalClanModules.options).clan.core or { };
warningsAreErrors = true;
transformOptions = self.clanLib.docs.stripStorePathsFromDeclarations;
}).optionsJSON;
in
{
# Uncomment for debugging
@@ -211,17 +175,10 @@
# scopes = mapAttrsToList mkScope serviceModules;
scopes = [
{
inherit baseHref;
name = "Flake Options (clan.nix file)";
name = "Clan";
modules = docModules;
urlPrefix = "https://git.clan.lol/clan/clan-core/src/branch/main/";
}
{
name = "Machine Options (clan.core NixOS options)";
optionsJSON = "${coreOptions}/share/doc/nixos/options.json";
urlPrefix = "https://git.clan.lol/clan/clan-core/src/branch/main/";
}
];
};
};

View File

@@ -32,14 +32,23 @@ from typing import Any
from clan_lib.errors import ClanError
from clan_lib.services.modules import (
CategoryInfo,
ModuleManifest,
Frontmatter,
extract_frontmatter,
get_roles,
)
# Get environment variables
CLAN_CORE_PATH = Path(os.environ["CLAN_CORE_PATH"])
CLAN_CORE_DOCS = Path(os.environ["CLAN_CORE_DOCS"])
CLAN_MODULES_FRONTMATTER_DOCS = os.environ.get("CLAN_MODULES_FRONTMATTER_DOCS")
BUILD_CLAN_PATH = os.environ.get("BUILD_CLAN_PATH")
## Clan modules ##
# Some modules can be imported via nix natively
CLAN_MODULES_VIA_NIX = os.environ.get("CLAN_MODULES_VIA_NIX")
# Some modules can be imported via inventory
CLAN_MODULES_VIA_ROLES = os.environ.get("CLAN_MODULES_VIA_ROLES")
# Options how to author clan.modules
# perInstance, perMachine, ...
CLAN_SERVICE_INTERFACE = os.environ.get("CLAN_SERVICE_INTERFACE")
@@ -66,7 +75,8 @@ def render_option_header(name: str) -> str:
def join_lines_with_indentation(lines: list[str], indent: int = 4) -> str:
"""Joins multiple lines with a specified number of whitespace characters as indentation.
"""
Joins multiple lines with a specified number of whitespace characters as indentation.
Args:
lines (list of str): The lines of text to join.
@@ -74,7 +84,6 @@ def join_lines_with_indentation(lines: list[str], indent: int = 4) -> str:
Returns:
str: The indented and concatenated string.
"""
# Create the indentation string (e.g., four spaces)
indent_str = " " * indent
@@ -161,10 +170,7 @@ def render_option(
def print_options(
options_file: str,
head: str,
no_options: str,
replace_prefix: str | None = None,
options_file: str, head: str, no_options: str, replace_prefix: str | None = None
) -> str:
res = ""
with (Path(options_file) / "share/doc/nixos/options.json").open() as f:
@@ -179,8 +185,26 @@ def print_options(
return res
def module_header(module_name: str) -> str:
return f"# {module_name}\n\n"
def module_header(module_name: str, has_inventory_feature: bool = False) -> str:
indicator = " 🔹" if has_inventory_feature else ""
return f"# {module_name}{indicator}\n\n"
def module_nix_usage(module_name: str) -> str:
return f"""## Usage via Nix
**This module can also be imported directly in your NixOS configuration, although it is recommended to use the [inventory](../../concepts/inventory.md) interface if available.**
Some modules are considered 'low-level' or 'expert modules' and are not available via the inventory interface.
```nix
{{config, lib, inputs, ...}}: {{
imports = [ inputs.clan-core.clanModules.{module_name} ];
# ...
}}
```
"""
clan_core_descr = """
@@ -199,6 +223,68 @@ The following options are available for this module.
"""
def produce_clan_modules_frontmatter_docs() -> None:
if not CLAN_MODULES_FRONTMATTER_DOCS:
msg = f"Environment variables are not set correctly: $CLAN_CORE_DOCS={CLAN_CORE_DOCS}"
raise ClanError(msg)
if not OUT:
msg = f"Environment variables are not set correctly: $out={OUT}"
raise ClanError(msg)
with Path(CLAN_MODULES_FRONTMATTER_DOCS).open() as f:
options: dict[str, dict[str, Any]] = json.load(f)
# header
output = """# Frontmatter
Every clan module has a `frontmatter` section within its readme. It provides
machine readable metadata about the module.
!!! example
The used format is `TOML`
The content is separated by `---` and the frontmatter must be placed at the very top of the `README.md` file.
```toml
---
description = "A description of the module"
categories = ["category1", "category2"]
[constraints]
roles.client.max = 10
roles.server.min = 1
---
# Readme content
...
```
"""
output += """## Overview
This provides an overview of the available attributes of the `frontmatter`
within the `README.md` of a clan module.
"""
# for option_name, info in options.items():
# if option_name == "_module.args":
# continue
# output += render_option(option_name, info)
root = options_to_tree(options, debug=True)
for option in root.suboptions:
output += options_docs_from_tree(option, init_level=2)
outfile = Path(OUT) / "clanModules/frontmatter/index.md"
outfile.parent.mkdir(
parents=True,
exist_ok=True,
)
with outfile.open("w") as of:
of.write(output)
def produce_clan_core_docs() -> None:
if not CLAN_CORE_DOCS:
msg = f"Environment variables are not set correctly: $CLAN_CORE_DOCS={CLAN_CORE_DOCS}"
@@ -238,7 +324,7 @@ def produce_clan_core_docs() -> None:
for submodule_name, split_options in split.items():
outfile = f"{module_name}/{submodule_name}.md"
print(
f"[clan_core.{submodule_name}] Rendering option of: {submodule_name}... {outfile}",
f"[clan_core.{submodule_name}] Rendering option of: {submodule_name}... {outfile}"
)
init_level = 1
root = options_to_tree(split_options, debug=True)
@@ -273,9 +359,56 @@ def produce_clan_core_docs() -> None:
of.write(output)
def render_roles(roles: list[str] | None, module_name: str) -> str:
if roles:
roles_list = "\n".join([f"- `{r}`" for r in roles])
return (
f"""
### Roles
This module can be used via predefined roles
{roles_list}
"""
"""
Every role has its own configuration options, which are each listed below.
For more information, see the [inventory guide](../../concepts/inventory.md).
??? Example
For example the `admin` module adds the following options globally to all machines where it is used.
`clan.admin.allowedkeys`
```nix
clan-core.lib.clan {
inventory.services = {
admin.me = {
roles.default.machines = [ "jon" ];
config.allowedkeys = [ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQD..." ];
};
};
};
```
"""
)
return ""
clan_modules_descr = """
Clan modules are [NixOS modules](https://wiki.nixos.org/wiki/NixOS_modules)
which have been enhanced with additional features provided by Clan, with
certain option types restricted to enable configuration through a graphical
interface.
!!! note "🔹"
Modules with this indicator support the [inventory](../../concepts/inventory.md) feature.
"""
def render_categories(
categories: list[str],
categories_info: dict[str, CategoryInfo],
categories: list[str], categories_info: dict[str, CategoryInfo]
) -> str:
res = """<div style="display: grid; grid-template-columns: repeat(3, 1fr); gap: 10px;">"""
for cat in categories:
@@ -340,10 +473,10 @@ Learn how to use `clanServices` in practice in the [Using clanServices guide](..
# output += f"`clan.modules.{module_name}`\n"
output += f"*{module_info['manifest']['description']}*\n"
fm = Frontmatter("")
# output += "## Categories\n\n"
output += render_categories(
module_info["manifest"]["categories"],
ModuleManifest.categories_info(),
module_info["manifest"]["categories"], fm.categories_info
)
output += f"{module_info['manifest']['readme']}\n"
@@ -352,7 +485,7 @@ Learn how to use `clanServices` in practice in the [Using clanServices guide](..
output += f"The {module_name} module has the following roles:\n\n"
for role_name in module_info["roles"]:
for role_name, _ in module_info["roles"].items():
output += f"- {role_name}\n"
for role_name, role_filename in module_info["roles"].items():
@@ -372,8 +505,183 @@ Learn how to use `clanServices` in practice in the [Using clanServices guide](..
of.write(output)
def produce_clan_modules_docs() -> None:
if not CLAN_MODULES_VIA_NIX:
msg = f"Environment variables are not set correctly: $CLAN_MODULES_VIA_NIX={CLAN_MODULES_VIA_NIX}"
raise ClanError(msg)
if not CLAN_MODULES_VIA_ROLES:
msg = f"Environment variables are not set correctly: $CLAN_MODULES_VIA_ROLES={CLAN_MODULES_VIA_ROLES}"
raise ClanError(msg)
if not CLAN_CORE_PATH:
msg = f"Environment variables are not set correctly: $CLAN_CORE_PATH={CLAN_CORE_PATH}"
raise ClanError(msg)
if not OUT:
msg = f"Environment variables are not set correctly: $out={OUT}"
raise ClanError(msg)
modules_index = "# Modules Overview\n\n"
modules_index += clan_modules_descr
modules_index += "## Overview\n\n"
modules_index += '<div class="grid cards" markdown>\n\n'
with Path(CLAN_MODULES_VIA_ROLES).open() as f2:
role_links: dict[str, dict[str, str]] = json.load(f2)
with Path(CLAN_MODULES_VIA_NIX).open() as f:
links: dict[str, str] = json.load(f)
for module_name, options_file in links.items():
print(f"Rendering ClanModule: {module_name}")
readme_file = CLAN_CORE_PATH / "clanModules" / module_name / "README.md"
with readme_file.open() as f:
readme = f.read()
frontmatter: Frontmatter
frontmatter, readme_content = extract_frontmatter(readme, str(readme_file))
# skip if experimental feature enabled
if "experimental" in frontmatter.features:
print(f"Skipping {module_name}: Experimental feature")
continue
modules_index += build_option_card(module_name, frontmatter)
##### Print module documentation #####
# 1. Header
output = module_header(module_name, "inventory" in frontmatter.features)
# 2. Description from README.md
if frontmatter.description:
output += f"*{frontmatter.description}*\n\n"
# 2. Deprecation note if the module is deprecated
if "deprecated" in frontmatter.features:
output += f"""
!!! Warning "Deprecated"
The `{module_name}` module is deprecated.
Use 'clanServices/{module_name}' or a similar successor instead
"""
else:
output += f"""
!!! Warning "Will be deprecated"
The `{module_name}` module might eventually be migrated to 'clanServices'.
See: [clanServices](../../guides/clanServices.md)
"""
# 3. Categories from README.md
output += "## Categories\n\n"
output += render_categories(frontmatter.categories, frontmatter.categories_info)
output += "\n---\n\n"
# 3. README.md content
output += f"{readme_content}\n"
# 4. Usage
##### Print usage via Inventory #####
# get_roles(str) -> list[str] | None
# if not isinstance(options_file, str):
roles = get_roles(CLAN_CORE_PATH / "clanModules" / module_name)
if roles:
# Render inventory usage
output += """## Usage via Inventory\n\n"""
output += render_roles(roles, module_name)
for role in roles:
role_options_file = role_links[module_name][role]
# Abort if the options file is not found
if not isinstance(role_options_file, str):
print(
f"Error: module: {module_name} in role: {role} - options file not found, Got {role_options_file}"
)
exit(1)
no_options = f"""### Options of `{role}` role
**The `{module_name}` `{role}` doesn't offer or require any options to be set.**
"""
heading = f"""### Options of `{role}` role
The following options are available when using the `{role}` role.
"""
output += print_options(
role_options_file,
heading,
no_options,
replace_prefix=f"clan.{module_name}",
)
else:
# No roles means no inventory usage
output += """## Usage via Inventory
**This module cannot be used via the inventory interface.**
"""
##### Print usage via Nix / nixos #####
if not isinstance(options_file, str):
print(
f"Skipping {module_name}: Cannot be used via import clanModules.{module_name}"
)
output += """## Usage via Nix
**This module cannot be imported directly in your nixos configuration.**
"""
else:
output += module_nix_usage(module_name)
no_options = "** This module doesnt require any options to be set.**"
output += print_options(options_file, options_head, no_options)
outfile = Path(OUT) / f"clanModules/{module_name}.md"
outfile.parent.mkdir(
parents=True,
exist_ok=True,
)
with outfile.open("w") as of:
of.write(output)
modules_index += "</div>"
modules_index += "\n"
modules_outfile = Path(OUT) / "clanModules/index.md"
with modules_outfile.open("w") as of:
of.write(modules_index)
def build_option_card(module_name: str, frontmatter: Frontmatter) -> str:
"""
Build the overview index card for each reference target option.
"""
def indent_all(text: str, indent_size: int = 4) -> str:
"""
Indent all lines in a string.
"""
indent = " " * indent_size
lines = text.split("\n")
indented_text = indent + ("\n" + indent).join(lines)
return indented_text
def to_md_li(module_name: str, frontmatter: Frontmatter) -> str:
md_li = (
f"""- **[{module_name}](./{"-".join(module_name.split(" "))}.md)**\n\n"""
)
md_li += f"""{indent_all("---", 4)}\n\n"""
fmd = f"\n{frontmatter.description.strip()}" if frontmatter.description else ""
md_li += f"""{indent_all(fmd, 4)}"""
return md_li
return f"{to_md_li(module_name, frontmatter)}\n\n"
def split_options_by_root(options: dict[str, Any]) -> dict[str, dict[str, Any]]:
"""Split the flat dictionary of options into a dict of which each entry will construct complete option trees.
"""
Split the flat dictionary of options into a dict of which each entry will construct complete option trees.
{
"a": { Data }
"a.b": { Data }
@@ -457,7 +765,9 @@ def option_short_name(option_name: str) -> str:
def options_to_tree(options: dict[str, Any], debug: bool = False) -> Option:
"""Convert the options dictionary to a tree structure."""
"""
Convert the options dictionary to a tree structure.
"""
# Helper function to create nested structure
def add_to_tree(path_parts: list[str], info: Any, current_node: Option) -> None:
@@ -509,24 +819,22 @@ def options_to_tree(options: dict[str, Any], debug: bool = False) -> Option:
def options_docs_from_tree(
root: Option,
init_level: int = 1,
prefix: list[str] | None = None,
root: Option, init_level: int = 1, prefix: list[str] | None = None
) -> str:
"""Eender the options from the tree structure.
"""
eender the options from the tree structure.
Args:
root (Option): The root option node.
init_level (int): The initial level of indentation.
prefix (list str): Will be printed as common prefix of all attribute names.
"""
def render_tree(option: Option, level: int = init_level) -> str:
output = ""
should_render = not option.name.startswith("<") and not option.name.startswith(
"_",
"_"
)
if should_render:
# short_name = option_short_name(option.name)
@@ -551,8 +859,12 @@ def options_docs_from_tree(
return md
if __name__ == "__main__":
if __name__ == "__main__": #
produce_clan_core_docs()
produce_clan_service_author_docs()
# produce_clan_modules_docs()
produce_clan_service_docs()
# produce_clan_modules_frontmatter_docs()
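For orientation, here is a small, hypothetical sketch (not the actual implementation) of the flat-to-tree grouping that `split_options_by_root` and `options_to_tree` describe above: flat dotted option names are first grouped by their root attribute, from which the renderer then builds nested option trees.

```python
# Hypothetical sketch of grouping flat dotted option names by root attribute.
from collections import defaultdict
from typing import Any


def split_by_root(options: dict[str, Any]) -> dict[str, dict[str, Any]]:
    grouped: dict[str, dict[str, Any]] = defaultdict(dict)
    for name, info in options.items():
        grouped[name.split(".")[0]][name] = info
    return dict(grouped)


flat = {"a": {"doc": "root"}, "a.b": {"doc": "child"}, "c.d": {"doc": "other"}}
print(split_by_root(flat))
# -> {'a': {'a': ..., 'a.b': ...}, 'c': {'c.d': ...}} (abridged)
```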

View File

@@ -1,33 +1,15 @@
# Auto-included Files
Clan automatically imports specific files from each machine directory and registers them, reducing the need for manual configuration.
Clan automatically imports the following files from a directory and registers them.
## Machine Registration
## Machine registration
Every folder under `machines/{machineName}` is automatically registered as a Clan machine.
Every folder `machines/{machineName}` will be registered automatically as a Clan machine.
!!! info "Files loaded automatically for each machine"
!!! info "Automatically loaded files"
The following files are detected and imported for every Clan machine:
The following files are loaded automatically for each Clan machine:
- [x] `machines/{machineName}/configuration.nix`
Main configuration file for the machine.
- [x] `machines/{machineName}/hardware-configuration.nix`
Hardware-specific configuration generated by NixOS.
- [x] `machines/{machineName}/facter.json`
Contains system facts. Automatically generated — see [nixos-facter](https://clan.lol/blog/nixos-facter/) for details.
- [x] `machines/{machineName}/disko.nix`
Disk layout configuration. See the [disko quickstart](https://github.com/nix-community/disko/blob/master/docs/quickstart.md) for more info.
## Other Auto-included Files
* **`inventory.json`**
Managed by Clan's API.
Merges with `clan.inventory` to extend the inventory.
* **`.clan-flake`**
Sentinel file to be used to locate the root of a Clan repository.
Falls back to `.git`, `.hg`, `.svn`, or `flake.nix` if not found.
- [x] `machines/{machineName}/configuration.nix`
- [x] `machines/{machineName}/hardware-configuration.nix`
- [x] `machines/{machineName}/facter.json` Automatically configured, for further information see [nixos-facter](https://clan.lol/blog/nixos-facter/)
- [x] `machines/{machineName}/disko.nix` Automatically loaded, for further information see the [disko docs](https://github.com/nix-community/disko/blob/master/docs/quickstart.md).

View File

@@ -1,6 +1,6 @@
# Using `clanServices`
Clan's `clanServices` system is a composable way to define and deploy services across machines.
Clan's `clanServices` system is a composable way to define and deploy services across machines.
This guide shows how to **instantiate** a `clanService`, explains how service definitions are structured in your inventory, and how to pick or create services from modules exposed by flakes.
@@ -130,7 +130,7 @@ inventory.instances = {
## Picking a clanService
You can use services exposed by Clan's core module library, `clan-core`.
You can use services exposed by Clan's core module library, `clan-core`.
🔗 See: [List of Available Services in clan-core](../reference/clanServices/index.md)
@@ -152,7 +152,7 @@ You might expose your service module from your flake — this makes it easy for
---
## What's Next?
## What's Next?
* [Author your own clanService →](../guides/services/community.md)
* [Migrate from clanModules →](../guides/migrations/migrate-inventory-services.md)

View File

@@ -90,10 +90,13 @@ export CLAN_DEBUG_COMMANDS=1
These options help you pinpoint the source and context of print messages and debug logs during development.
## Analyzing Performance
To understand what's causing slow performance, set the environment variable `export CLAN_CLI_PERF=1`. When you complete a clan command, you'll see a summary of various performance metrics, helping you identify what's taking up time.
## See all possible packages and tests
To quickly show all possible packages and tests execute:
@@ -152,16 +155,28 @@ To test the CLI locally in a development environment and set breakpoints for deb
## Test Locally in a Nix Sandbox
To run tests in a Nix sandbox:
To run tests in a Nix sandbox, you have two options depending on whether your test functions have been marked as impure or not:
### Running Tests Marked as Impure
If your test functions need to execute `nix build` and have been marked as impure because you can't execute `nix build` inside a Nix sandbox, use the following command:
```bash
nix build .#checks.x86_64-linux.clan-pytest-with-core
nix run .#impure-checks -L
```
This command will run the impure test functions.
### Running Pure Tests
For test functions that have not been marked as impure and don't require executing `nix build`, you can use the following command:
```bash
nix build .#checks.x86_64-linux.clan-pytest-without-core
nix build .#checks.x86_64-linux.clan-pytest --rebuild
```
This command will run all pure test functions.
### Inspecting the Nix Sandbox
If you need to inspect the Nix sandbox while running tests, follow these steps:

View File

@@ -27,7 +27,7 @@ inputs = {
## Import the Clan flake-parts Module
After updating your flake inputs, the next step is to import the Clan flake-parts module. This will make the [Clan options](/options) available within `mkFlake`.
After updating your flake inputs, the next step is to import the Clan flake-parts module. This will make the [Clan options](../options.md) available within `mkFlake`.
```nix
{

View File

@@ -1,129 +1,110 @@
# :material-clock-fast: Getting Started
Ready to manage your fleet of machines?
Ready to create your own Clan and manage a fleet of machines? Follow these simple steps to get started.
We will create a declarative infrastructure using **clan**, **git**, and **nix flakes**.
This guide walks you through setting up your own declarative infrastructure using clan, git, and flakes. By the end of this, you will have one or more machines integrated and installed. You can then import your existing NixOS configuration into this setup if you wish.
You'll finish with a centrally managed fleet, ready to import your existing NixOS configuration.
The following steps are meant to be executed on the machine used to administer the infrastructure.
In order to get started you should have at least one machine with either physical or ssh access available as an installation target. Your local machine can also be used as an installation target if it is already running NixOS.
## Prerequisites
Make sure you have the following:
=== "**Linux**"
* 💻 **Administration Machine**: Run the setup commands from this machine.
* 🛠️ **Nix**: The Nix package manager, installed on your administration machine.
??? info "**How to install Nix (Linux / MacOS / NixOS)**"
**On Linux or macOS:**
1. Run the recommended installer:
```shellSession
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
```
2. After installation, ensure flakes are enabled by adding this line to `~/.config/nix/nix.conf`:
```
experimental-features = nix-command flakes
```
**On NixOS:**
Nix is already installed. You only need to enable flakes for your user in your `configuration.nix`:
```nix
{
nix.settings.experimental-features = [ "nix-command" "flakes" ];
}
```
Then, run `nixos-rebuild switch` to apply the changes.
* 🎯 **Target Machine(s)**: A remote machine with SSH, or your local machine (if NixOS).
## Create a New Clan
1. Navigate to your desired directory:
```shellSession
cd <your-directory>
```
2. Create a new clan flake:
**Note:** This creates a new directory in your current location
Clan requires Nix to be installed on your system. Run the following command to install Nix:
```shellSession
nix run https://git.clan.lol/clan/clan-core/archive/main.tar.gz#clan-cli --refresh -- flakes create
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
```
3. Enter a **name** in the prompt:
If you have previously installed Nix, make sure `experimental-features = nix-command flakes` is present in `~/.config/nix/nix.conf` or `/etc/nix/nix.conf`. If this is not the case, please add it to `~/.config/nix/nix.conf`.
```terminalSession
Enter a name for the new clan: my-clan
=== "**NixOS**"
If you run NixOS the `nix` binary is already installed.
You will also need to enable the `nix-command` and `flakes` experimental features in your `configuration.nix`:
```nix
{ nix.settings.experimental-features = [ "nix-command" "flakes" ]; }
```
## Project Structure
=== "**macOS**"
Your new directory, `my-clan`, should contain the following structure:
Clan requires Nix to be installed on your system. Run the following command to install Nix:
```
my-clan/
├── clan.nix
├── flake.lock
├── flake.nix
├── modules/
└── sops/
```
```shellSession
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
```
!!! note "Templates"
This is the structure for the `default` template.
If you have previously installed Nix, make sure `experimental-features = nix-command flakes` is present in `~/.config/nix/nix.conf` or `/etc/nix/nix.conf`. If this is not the case, please add it to `~/.config/nix/nix.conf`.
Use `clan templates list` and `clan templates --help` for available templates & more. Keep in mind that the exact files may change as templates evolve.
## Create a new clan
## Activate the Environment
To get started, `cd` into your new project directory.
Initialize a new clan flake
```shellSession
cd my-clan
nix run https://git.clan.lol/clan/clan-core/archive/main.tar.gz#clan-cli --refresh -- flakes create
```
Now, activate the environment using one of the following methods.
This should prompt for a *name*:
```terminalSession
Enter a name for the new clan: my-clan
```
Enter a *name*, confirm with *enter*. A directory with that name will be created and initialized.
!!! Note
This command uses the `default` template
See `clan templates list` and the `--help` reference for how to use other templates.
## Explore the Project Structure
Take a look at all project files:
For example, you might see something like:
```{ .console .no-copy }
$ cd my-clan
$ ls
clan.nix flake.lock flake.nix modules sops
```
Don't worry if your output looks different — Clan templates evolve over time.
To interact with your newly created clan, you need to load the `clan` CLI package into your environment by running:
=== "Automatic (direnv, recommended)"
**Prerequisite**: You must have [nix-direnv](https://github.com/nix-community/nix-direnv) installed.
- prerequisite: [install nix-direnv](https://github.com/nix-community/nix-direnv)
Run `direnv allow` to automatically load the environment whenever you enter this directory.
```shellSession
direnv allow
```
=== "Manual (nix develop)"
Run `nix develop` to load the environment for your current shell session.
```shellSession
nix develop
```
## Verify the Setup
Once your environment is active, verify that the clan command is available by running:
verify that you can run `clan` commands:
```shellSession
clan show
```
You should see the default metadata for your new clan:
You should see something like this:
```shellSession
Name: __CHANGE_ME__
Description: None
```
This confirms your setup is working correctly.
You can now change the default name by editing the `meta.name` field in your `clan.nix` file.
To change the name of your clan, edit `meta.name` in the `clan.nix` or `flake.nix` file.
```{.nix title="clan.nix" hl_lines="3"}
{

View File

@@ -271,7 +271,7 @@ The following table shows the migration status of each deprecated clanModule:
| `nginx` | ❌ Removed | |
| `packages` | ✅ [Migrated](../../reference/clanServices/packages.md) | |
| `postgresql` | ❌ Removed | Now an [option](../../reference/clan.core/settings.md) |
| `root-password` | ✅ [Migrated](../../reference/clanServices/users.md) | See [migration guide](../../reference/clanServices/users.md#migration-from-root-password-module) |
| `root-password` | ✅ [Migrated](../../reference/clanServices/users.md) | |
| `single-disk` | ❌ Removed | |
| `sshd` | ✅ [Migrated](../../reference/clanServices/sshd.md) | |
| `state-version` | ✅ [Migrated](../../reference/clanServices/state-version.md) | |

View File

@@ -1,184 +0,0 @@
# Connecting to Your Machines
Clan provides automatic networking with fallback mechanisms to reliably connect to your machines.
## Option 1: Automatic Networking with Fallback (Recommended)
Clan's networking module automatically manages connections through various network technologies with intelligent fallback. When you run `clan ssh` or `clan machines update`, Clan tries each configured network by priority until one succeeds.
### Basic Setup with Internet Service
For machines with public IPs or DNS names, use the `internet` service to configure direct SSH while keeping fallback options:
```{.nix title="flake.nix" hl_lines="7-10 14-16"}
{
outputs = { self, clan-core, ... }:
let
clan = clan-core.lib.clan {
inventory.instances = {
# Direct SSH with fallback support
internet = {
roles.default.machines.server1 = {
settings.address = "server1.example.com";
};
roles.default.machines.server2 = {
settings.address = "192.168.1.100";
};
};
# Fallback: Secure connections via Tor
tor = {
roles.server.tags.nixos = { };
};
};
};
in
{
inherit (clan.config) nixosConfigurations;
};
}
```
### Advanced Setup with Multiple Networks
```{.nix title="flake.nix" hl_lines="7-10 13-16 19-21"}
{
outputs = { self, clan-core, ... }:
let
clan = clan-core.lib.clan {
inventory.instances = {
# Priority 1: Try direct connection first
internet = {
roles.default.machines.publicserver = {
settings.address = "public.example.com";
};
};
# Priority 2: VPN for internal machines
zerotier = {
roles.controller.machines."controller" = { };
roles.peer.tags.nixos = { };
};
# Priority 3: Tor as universal fallback
tor = {
roles.server.tags.nixos = { };
};
};
};
in
{
inherit (clan.config) nixosConfigurations;
};
}
```
### How It Works
Clan automatically tries networks in order of priority:
1. Direct internet connections (if configured)
2. VPN networks (ZeroTier, Tailscale, etc.)
3. Tor hidden services
4. Any other configured networks
If one network fails, Clan automatically tries the next.
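The priority-based fallback just described can be summarised with a short sketch (a hypothetical helper, not Clan's actual implementation): each configured network exposes a reachability probe, and the first one that succeeds is used.

```python
# Hypothetical sketch of priority-ordered fallback across configured networks.
from collections.abc import Callable, Sequence


def pick_network(machine: str, networks: Sequence[tuple[str, Callable[[str], bool]]]) -> str:
    # networks is already sorted by priority: internet, VPN, Tor, ...
    for name, is_reachable in networks:
        if is_reachable(machine):
            return name  # first reachable network wins
    raise RuntimeError(f"no configured network can reach {machine}")


networks = [
    ("internet", lambda m: False),  # direct SSH fails in this example
    ("zerotier", lambda m: True),   # VPN succeeds
    ("tor", lambda m: True),        # would be tried last
]
print(pick_network("server1", networks))  # -> "zerotier"
```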
### Useful Commands
```bash
# View all configured networks and their status
clan network list
# Test connectivity through all networks
clan network ping machine1
# Show complete network topology
clan network overview
```
## Option 2: Manual targetHost (Bypasses Fallback!)
!!! warning
Setting `targetHost` directly **disables all automatic networking and fallback**. Only use this if you need complete control and don't want Clan's intelligent connection management.
### Using Inventory (For Static Addresses)
Use inventory-level `targetHost` when the address is **static** and doesn't depend on NixOS configuration:
```{.nix title="flake.nix" hl_lines="8"}
{
outputs = { self, clan-core, ... }:
let
clan = clan-core.lib.clan {
inventory.machines.server = {
# WARNING: This bypasses all networking modules!
# Use for: Static IPs, DNS names, known hostnames
deploy.targetHost = "root@192.168.1.100";
};
};
in
{
inherit (clan.config) nixosConfigurations;
};
}
```
**When to use inventory-level:**
- Static IP addresses: `"root@192.168.1.100"`
- DNS names: `"user@server.example.com"`
- Any address that doesn't change based on machine configuration
### Using NixOS Configuration (For Dynamic Addresses)
Use machine-level `targetHost` when you need to **interpolate values from the NixOS configuration**:
```{.nix title="flake.nix" hl_lines="7"}
{
outputs = { self, clan-core, ... }:
let
clan = clan-core.lib.clan {
machines.server = { config, ... }: {
# WARNING: This also bypasses all networking modules!
# REQUIRED for: Addresses that depend on NixOS config
clan.core.networking.targetHost = "root@${config.networking.hostName}.local";
};
};
in
{
inherit (clan.config) nixosConfigurations;
};
}
```
**When to use machine-level (NixOS config):**
- Using hostName from config: `"root@${config.networking.hostName}.local"`
- Building from multiple config values: `"${config.users.users.deploy.name}@${config.networking.hostName}"`
- Any address that depends on evaluated NixOS configuration
!!! info "Key Difference"
**Inventory-level** (`deploy.targetHost`) is evaluated immediately and works with static strings.
**Machine-level** (`clan.core.networking.targetHost`) is evaluated after NixOS configuration and can access `config.*` values.
## Quick Decision Guide
| Scenario | Recommended Approach | Why |
|----------|---------------------|-----|
| Public servers | `internet` service | Keeps fallback options |
| Mixed infrastructure | Multiple networks | Automatic failover |
| Machines behind NAT | ZeroTier/Tor | NAT traversal with fallback |
| Testing/debugging | Manual targetHost | Full control, no magic |
| Single static machine | Manual targetHost | Simple, no overhead |
## Command-Line Override
The `--target-host` flag bypasses ALL networking configuration:
```bash
# Emergency access - ignores all networking config
clan machines update server --target-host root@backup-ip.com
# Direct SSH - no fallback attempted
clan ssh laptop --target-host user@10.0.0.5
```
Use this for debugging or emergency access when automatic networking isn't working.

View File

@@ -0,0 +1,84 @@
# How to Set `targetHost` for a Machine
The `targetHost` defines where the machine can be reached for operations like SSH or deployment. You can set it in two ways, depending on your use case.
---
## ✅ Option 1: Use the Inventory (Recommended for Static Hosts)
If the hostname is **static**, like `server.example.com`, set it in the **inventory**:
```{.nix title="flake.nix" hl_lines="8"}
{
# elided
outputs =
{ self, clan-core, ... }:
let
# Sometimes this attribute set is defined in clan.nix
clan = clan-core.lib.clan {
inventory.machines.jon = {
deploy.targetHost = "root@server.example.com";
};
};
in
{
inherit (clan.config) nixosConfigurations nixosModules clanInternals;
# elided
};
}
```
This is fast, simple, and explicit, and doesn't require evaluating the NixOS config. It can also be displayed in the clan-cli or clan-app.
---
## ✅ Option 2: Use NixOS (Only for Dynamic Hosts)
If your target host depends on a **dynamic expression** (like using the machine's evaluated FQDN), set it inside the NixOS module:
```{.nix title="flake.nix" hl_lines="8"}
{
# elided
outputs =
{ self, clan-core, ... }:
let
# Sometimes this attribute set is defined in clan.nix
clan = clan-core.lib.clan {
machines.jon = {config, ...}: {
clan.core.networking.targetHost = "jon@${config.networking.fqdn}";
};
};
in
{
inherit (clan.config) nixosConfigurations nixosModules clanInternals;
# elided
};
}
```
Use this **only if the value cannot be made static**, because it's slower and won't be displayed in the clan-cli or clan-app yet.
---
## 📝 TL;DR
| Use Case | Use Inventory? | Example |
| ------------------------- | -------------- | -------------------------------- |
| Static hostname | ✅ Yes | `root@server.example.com` |
| Dynamic config expression | ❌ No | `jon@${config.networking.fqdn}` |
---
## 🚀 Coming Soon: Unified Networking Module
We're working on a new networking module that will automatically do all of this for you.
- Easier to use
- Sane defaults: You'll always be able to reach the machine — no need to worry about hostnames.
- ✨ Migration from **either method** will be supported and simple.
## Summary
- Ask: *Does this hostname dynamically change based on NixOS config?*
- If **no**, use the inventory.
- If **yes**, then use NixOS config.

6
docs/site/options.md Normal file
View File

@@ -0,0 +1,6 @@
---
template: options.html
---
<iframe src="/options-page/" height="1000" width="100%"></iframe>

View File

@@ -4,7 +4,7 @@ This section of the site provides an overview of available options and commands
---
- [Clan Configuration Option](/options) - for defining a Clan
- [Clan Configuration Option](../options.md) - for defining a Clan
- Learn how to use the [Clan CLI](./cli/index.md)
- Explore available [services](./clanServices/index.md)
- [NixOS Configuration Options](./clan.core/index.md) - Additional options available on a NixOS machine.

46
flake.lock generated
View File

@@ -13,11 +13,11 @@
]
},
"locked": {
"lastModified": 1756091210,
"narHash": "sha256-oEUEAZnLbNHi8ti4jY8x10yWcIkYoFc5XD+2hjmOS04=",
"rev": "eb831bca21476fa8f6df26cb39e076842634700d",
"lastModified": 1753067306,
"narHash": "sha256-jyoEbaXa8/MwVQ+PajUdT63y3gYhgD9o7snO/SLaikw=",
"rev": "18dfd42bdb2cfff510b8c74206005f733e38d8b9",
"type": "tarball",
"url": "https://git.clan.lol/api/v1/repos/clan/data-mesher/archive/eb831bca21476fa8f6df26cb39e076842634700d.tar.gz"
"url": "https://git.clan.lol/api/v1/repos/clan/data-mesher/archive/18dfd42bdb2cfff510b8c74206005f733e38d8b9.tar.gz"
},
"original": {
"type": "tarball",
@@ -31,11 +31,11 @@
]
},
"locked": {
"lastModified": 1755519972,
"narHash": "sha256-bU4nqi3IpsUZJeyS8Jk85ytlX61i4b0KCxXX9YcOgVc=",
"lastModified": 1754971456,
"narHash": "sha256-p04ZnIBGzerSyiY2dNGmookCldhldWAu03y0s3P8CB0=",
"owner": "nix-community",
"repo": "disko",
"rev": "4073ff2f481f9ef3501678ff479ed81402caae6d",
"rev": "8246829f2e675a46919718f9a64b71afe3bfb22d",
"type": "github"
},
"original": {
@@ -71,11 +71,11 @@
]
},
"locked": {
"lastModified": 1755825449,
"narHash": "sha256-XkiN4NM9Xdy59h69Pc+Vg4PxkSm9EWl6u7k6D5FZ5cM=",
"lastModified": 1751313918,
"narHash": "sha256-HsJM3XLa43WpG+665aGEh8iS8AfEwOIQWk3Mke3e7nk=",
"owner": "nix-darwin",
"repo": "nix-darwin",
"rev": "8df64f819698c1fee0c2969696f54a843b2231e8",
"rev": "e04a388232d9a6ba56967ce5b53a8a6f713cdfcf",
"type": "github"
},
"original": {
@@ -86,11 +86,11 @@
},
"nix-select": {
"locked": {
"lastModified": 1755887746,
"narHash": "sha256-lzWbpHKX0WAn/jJDoCijIDss3rqYIPawe46GDaE6U3g=",
"rev": "92c2574c5e113281591be01e89bb9ddb31d19156",
"lastModified": 1745005516,
"narHash": "sha256-IVaoOGDIvAa/8I0sdiiZuKptDldrkDWUNf/+ezIRhyc=",
"rev": "69d8bf596194c5c35a4e90dd02c52aa530caddf8",
"type": "tarball",
"url": "https://git.clan.lol/api/v1/repos/clan/nix-select/archive/92c2574c5e113281591be01e89bb9ddb31d19156.tar.gz"
"url": "https://git.clan.lol/api/v1/repos/clan/nix-select/archive/69d8bf596194c5c35a4e90dd02c52aa530caddf8.tar.gz"
},
"original": {
"type": "tarball",
@@ -99,11 +99,11 @@
},
"nixos-facter-modules": {
"locked": {
"lastModified": 1755504238,
"narHash": "sha256-mw7q5DPdmz/1au8mY0u1DztRgVyJToGJfJszxjKSNes=",
"lastModified": 1750412875,
"narHash": "sha256-uP9Xxw5XcFwjX9lNoYRpybOnIIe1BHfZu5vJnnPg3Jc=",
"owner": "nix-community",
"repo": "nixos-facter-modules",
"rev": "354ed498c9628f32383c3bf5b6668a17cdd72a28",
"rev": "14df13c84552a7d1f33c1cd18336128fbc43f920",
"type": "github"
},
"original": {
@@ -115,10 +115,10 @@
"nixpkgs": {
"locked": {
"lastModified": 315532800,
"narHash": "sha256-h8Sx4S+/0FpodZji6W9lHzwY5BcuUG85Aj3GfhvGC2o=",
"rev": "a650b5d0de99158323597f048667c4d914243224",
"narHash": "sha256-2ILJtWugqmMyZnaWnHh+5yyw8RZWbKu9rVdeWmrBVhY=",
"rev": "a595dde4d0d31606e19dcec73db02279db59d201",
"type": "tarball",
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre845298.a650b5d0de99/nixexprs.tar.xz"
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre844295.a595dde4d0d3/nixexprs.tar.xz"
},
"original": {
"type": "tarball",
@@ -181,11 +181,11 @@
]
},
"locked": {
"lastModified": 1755934250,
"narHash": "sha256-CsDojnMgYsfshQw3t4zjRUkmMmUdZGthl16bXVWgRYU=",
"lastModified": 1754847726,
"narHash": "sha256-2vX8QjO5lRsDbNYvN9hVHXLU6oMl+V/PsmIiJREG4rE=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "74e1a52d5bd9430312f8d1b8b0354c92c17453e5",
"rev": "7d81f6fb2e19bf84f1c65135d1060d829fae2408",
"type": "github"
},
"original": {

View File

@@ -67,6 +67,7 @@
clan = {
meta.name = "clan-core";
inventory = {
services = { };
machines = {
"test-darwin-machine" = {
machineClass = "darwin";
@@ -96,7 +97,6 @@
./nixosModules/flake-module.nix
./pkgs/flake-module.nix
./templates/flake-module.nix
./pkgs/clan-cli/clan_cli/tests/flake-module.nix
]
++ [
(if pathExists ./flakeModules/clan.nix then import ./flakeModules/clan.nix inputs.self else { })

View File

@@ -33,6 +33,7 @@ lib.fix (
evalService = clanLib.callLib ./modules/inventory/distributed-service/evalService.nix { };
# ------------------------------------
# ClanLib functions
evalClan = clanLib.callLib ./modules/inventory/eval-clan-modules { };
inventory = clanLib.callLib ./modules/inventory { };
modules = clanLib.callLib ./modules/inventory/frontmatter { };
test = clanLib.callLib ./test { };

View File

@@ -328,7 +328,7 @@ rec {
# To get the type of a Deferred modules we need to know the interface of the place where it is evaluated.
# i.e. in case of a clan.service this is the interface of the service which dynamically changes depending on the service
# We assign "type" = []
# This means any value is valid — or like TypeScript's unknown.
# This means any value is valid — or like TypeScripts unknown.
# We can assign the type later, when we know the exact interface.
# tsType = "unknown" is a type that we preload for json2ts, such that it gets the correct type in typescript
(option.type.name == "deferredModule")

View File

@@ -0,0 +1,108 @@
{
lib,
clanLib,
}:
let
baseModule =
{ pkgs }:
# Module
{ config, ... }:
{
imports = (import (pkgs.path + "/nixos/modules/module-list.nix"));
nixpkgs.pkgs = pkgs;
clan.core.name = "dummy";
system.stateVersion = config.system.nixos.release;
# Set this to work around a bug where `clan.core.settings.machine.name`
# is forced due to `networking.interfaces` being forced
# somewhere in the nixpkgs options
facter.detected.dhcp.enable = lib.mkForce false;
};
# This function takes a list of module names and evaluates them
# [ module ] -> { config, options, ... }
evalClanModulesLegacy =
{
modules,
pkgs,
clan-core,
}:
let
evaled = lib.evalModules {
class = "nixos";
modules = [
(baseModule { inherit pkgs; })
{
clan.core.settings.directory = clan-core;
}
clan-core.nixosModules.clanCore
]
++ modules;
};
in
# lib.warn ''
# This legacy evaluation doesn't respect role-specific interfaces.
# The following {module}/default.nix file is being imported.
# Modules: ${builtins.toJSON modulenames}
# This might result in incomplete or incorrect interfaces.
# FIX: Use evalClanModuleWithRole instead.
# ''
evaled;
/*
This function takes a list of module names and evaluates them
Returns a set of interfaces as described below:
Fn :: { ${moduleName} = Module; } -> {
${moduleName} :: {
${roleName}: JSONSchema
}
}
*/
evalClanModulesWithRoles =
{
allModules,
clan-core,
pkgs,
}:
let
res = builtins.mapAttrs (
moduleName: module:
let
frontmatter = clanLib.modules.getFrontmatter allModules.${moduleName} moduleName;
roles =
if builtins.elem "inventory" frontmatter.features or [ ] then
assert lib.isPath module;
clan-core.clanLib.modules.getRoles "Documentation: inventory.modules" allModules moduleName
else
[ ];
in
lib.listToAttrs (
lib.map (role: {
name = role;
value =
(lib.evalModules {
class = "nixos";
modules = [
(baseModule { inherit pkgs; })
clan-core.nixosModules.clanCore
{
clan.core.settings.directory = clan-core;
}
# Role interface
(module + "/roles/${role}.nix")
];
}).options.clan.${moduleName} or { };
}) roles
)
) allModules;
in
res;
in
{
evalClanModules = evalClanModulesLegacy;
inherit evalClanModulesWithRoles;
}

View File

@@ -1,8 +1,12 @@
{
self,
inputs,
options,
...
}:
let
inputOverrides = self.clanLib.flake-inputs.getOverrides inputs;
in
{
imports = [
./distributed-service/flake-module.nix
@@ -11,13 +15,16 @@
{
pkgs,
lib,
config,
system,
self',
...
}:
{
devShells.inventory-schema = pkgs.mkShell {
name = "clan-inventory-schema";
inputsFrom = [
inputsFrom = with config.checks; [
eval-lib-inventory
self'.devShells.default
];
};
@@ -44,5 +51,41 @@
warningsAreErrors = true;
transformOptions = self.clanLib.docs.stripStorePathsFromDeclarations;
}).optionsJSON;
# Run: nix-unit --extra-experimental-features flakes --flake .#legacyPackages.x86_64-linux.evalTests
legacyPackages.evalTests-inventory = import ./tests {
inherit lib;
clan-core = self;
inherit (self) clanLib;
inherit (self.inputs) nix-darwin;
};
checks = {
eval-lib-inventory = pkgs.runCommand "tests" { nativeBuildInputs = [ pkgs.nix-unit ]; } ''
export HOME="$(realpath .)"
export NIX_ABORT_ON_WARN=1
nix-unit --eval-store "$HOME" \
--extra-experimental-features flakes \
--show-trace \
${inputOverrides} \
--flake ${
lib.fileset.toSource {
root = ../../..;
fileset = lib.fileset.unions [
../../../flake.nix
../../../flake.lock
(lib.fileset.fileFilter (file: file.name == "flake-module.nix") ../../..)
../../../flakeModules
../../../lib
../../../nixosModules/clanCore
../../../machines
../../../inventory.json
];
}
}#legacyPackages.${system}.evalTests-inventory
touch $out
'';
};
};
}

View File

@@ -3,6 +3,51 @@ let
# Trim the .nix extension from a filename
trimExtension = name: builtins.substring 0 (builtins.stringLength name - 4) name;
jsonWithoutHeader = clanLib.jsonschema {
includeDefaults = true;
header = { };
};
getModulesSchema =
{
modules,
clan-core,
pkgs,
}:
lib.mapAttrs
(
_moduleName: rolesOptions:
lib.mapAttrs (_roleName: options: jsonWithoutHeader.parseOptions options { }) rolesOptions
)
(
clanLib.evalClan.evalClanModulesWithRoles {
allModules = modules;
inherit pkgs clan-core;
}
);
evalFrontmatter =
{
moduleName,
instanceName,
resolvedRoles,
allModules,
}:
lib.evalModules {
modules = [
(getFrontmatter allModules.${moduleName} moduleName)
./interface.nix
{
constraints.imports = [
(lib.modules.importApply ../constraints {
inherit moduleName resolvedRoles instanceName;
allRoles = getRoles "inventory.modules" allModules moduleName;
})
];
}
];
};
# For Documentation purposes only
frontmatterOptions =
(lib.evalModules {
@@ -74,12 +119,17 @@ let
builtins.readDir (checkedPath)
)
);
checkConstraints = args: (evalFrontmatter args).config.constraints.assertions;
getFrontmatter = _modulepath: _modulename: "clanModules are removed!";
in
{
inherit
frontmatterOptions
getModulesSchema
getFrontmatter
checkConstraints
getRoles
;
}

View File

@@ -1,14 +1,29 @@
{
self,
self',
lib,
pkgs,
flakeOptions,
...
}:
let
modulesSchema = self.clanLib.modules.getModulesSchema {
modules = self.clanModules;
inherit pkgs;
clan-core = self;
};
jsonLib = self.clanLib.jsonschema { inherit includeDefaults; };
includeDefaults = true;
frontMatterSchema = jsonLib.parseOptions self.clanLib.modules.frontmatterOptions { };
inventorySchema = jsonLib.parseModule ({
imports = [ ../../inventoryClass/interface.nix ];
_module.args = { inherit (self) clanLib; };
});
opts = (flakeOptions.flake.type.getSubOptions [ "flake" ]);
clanOpts = opts.clan.type.getSubOptions [ "clan" ];
include = [
@@ -23,6 +38,13 @@ let
];
clanSchema = jsonLib.parseOptions (lib.filterAttrs (n: _v: lib.elem n include) clanOpts) { };
renderSchema = pkgs.writers.writePython3Bin "render-schema" {
flakeIgnore = [
"F401"
"E501"
];
} ./render_schema.py;
clan-schema-abstract = pkgs.stdenv.mkDerivation {
name = "clan-schema-files";
buildInputs = [ pkgs.cue ];
@@ -41,7 +63,29 @@ in
{
inherit
flakeOptions
frontMatterSchema
clanSchema
inventorySchema
modulesSchema
renderSchema
clan-schema-abstract
;
# Inventory schema, with the modules schema added per role
inventory =
pkgs.runCommand "rendered"
{
buildInputs = [
pkgs.python3
self'.packages.clan-cli
];
}
''
export INVENTORY_SCHEMA_PATH=${builtins.toFile "inventory-schema.json" (builtins.toJSON inventorySchema)}
export MODULES_SCHEMA_PATH=${builtins.toFile "modules-schema.json" (builtins.toJSON modulesSchema)}
mkdir $out
# The python script will place the schemas in the output directory
exec python3 ${renderSchema}/bin/render-schema
'';
}

View File

@@ -0,0 +1,162 @@
"""
Python script to join the abstract inventory schema with the concrete clan modules.
Inventory has slots which are 'Any' type.
We don't want to evaluate the clanModules interface in Nix when evaluating the inventory.
"""
import json
import os
from pathlib import Path
from typing import Any
from clan_lib.errors import ClanError
# Get environment variables
INVENTORY_SCHEMA_PATH = Path(os.environ["INVENTORY_SCHEMA_PATH"])
# { [moduleName] :: { [roleName] :: SCHEMA }}
MODULES_SCHEMA_PATH = Path(os.environ["MODULES_SCHEMA_PATH"])
OUT = os.environ.get("out")
if not INVENTORY_SCHEMA_PATH:
msg = f"Environment variables are not set correctly: INVENTORY_SCHEMA_PATH={INVENTORY_SCHEMA_PATH}."
raise ClanError(msg)
if not MODULES_SCHEMA_PATH:
msg = f"Environment variables are not set correctly: MODULES_SCHEMA_PATH={MODULES_SCHEMA_PATH}."
raise ClanError(msg)
if not OUT:
msg = f"Environment variables are not set correctly: OUT={OUT}."
raise ClanError(msg)
def service_roles_to_schema(
schema: dict[str, Any],
service_name: str,
roles: list[str],
roles_schemas: dict[str, dict[str, Any]],
# Original service properties: {'config': Schema, 'machines': Schema, 'meta': Schema, 'extraModules': Schema, ...?}
orig: dict[str, Any],
) -> dict[str, Any]:
"""
Add roles to the service schema
"""
# collect all the roles for the service, to form a type union
all_roles_schema: list[dict[str, Any]] = []
for role_name, role_schema in roles_schemas.items():
role_schema["title"] = f"{module_name}-config-role-{role_name}"
all_roles_schema.append(role_schema)
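# Build per-role schemas: each role keeps the original role properties and gains a typed 'config' slot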
role_schema = {}
for role in roles:
role_schema[role] = {
"type": "object",
"additionalProperties": False,
"properties": {
**orig["roles"]["additionalProperties"]["properties"],
"config": {
**roles_schemas.get(role, {}),
"title": f"{service_name}-config-role-{role}",
"type": "object",
"default": {},
"additionalProperties": False,
},
},
}
machines_schema = {
"type": "object",
"additionalProperties": {
"type": "object",
"properties": {
**orig["machines"]["additionalProperties"]["properties"],
"config": {
"title": f"{service_name}-config",
"oneOf": all_roles_schema,
"type": "object",
"default": {},
"additionalProperties": False,
},
},
},
}
services["properties"][service_name] = {
"type": "object",
"additionalProperties": {
"type": "object",
"additionalProperties": False,
"properties": {
# Original inventory schema
**orig,
# Inject the roles schemas
"roles": {
"title": f"{service_name}-roles",
"type": "object",
"properties": role_schema,
"additionalProperties": False,
},
"machines": machines_schema,
"config": {
"title": f"{service_name}-config",
"oneOf": all_roles_schema,
"type": "object",
"default": {},
"additionalProperties": False,
},
},
},
}
return schema
if __name__ == "__main__":
print("Joining inventory schema with modules schema")
print(f"Inventory schema path: {INVENTORY_SCHEMA_PATH}")
print(f"Modules schema path: {MODULES_SCHEMA_PATH}")
modules_schema = {}
with Path.open(MODULES_SCHEMA_PATH) as f:
modules_schema = json.load(f)
inventory_schema = {}
with Path.open(INVENTORY_SCHEMA_PATH) as f:
inventory_schema = json.load(f)
services = inventory_schema["properties"]["services"]
original_service_props = services["additionalProperties"]["additionalProperties"][
"properties"
].copy()
# Init the outer services schema
# Properties (service names) will be filled in the next step
services = {
"type": "object",
"properties": {
# Service names
},
"additionalProperties": False,
}
for module_name, roles_schemas in modules_schema.items():
# Add the roles schemas to the service schema
roles = list(roles_schemas.keys())
if roles:
services = service_roles_to_schema(
services,
module_name,
roles,
roles_schemas,
original_service_props,
)
inventory_schema["properties"]["services"] = services
outpath = Path(OUT)
with (outpath / "schema.json").open("w") as f:
json.dump(inventory_schema, f, indent=2)
with (outpath / "modules_schemas.json").open("w") as f:
json.dump(modules_schema, f, indent=2)

View File

@@ -0,0 +1,90 @@
{
clan-core,
nix-darwin,
lib,
clanLib,
}:
let
# TODO: Unify these tests with clan tests
clan =
m:
lib.evalModules {
specialArgs = { inherit clan-core nix-darwin clanLib; };
modules = [
clan-core.modules.clan.default
{
self = { };
}
m
];
};
in
{
test_inventory_a =
let
eval = clan {
inventory = {
machines = {
A = { };
};
services = {
legacyModule = { };
};
modules = {
legacyModule = ./legacyModule;
};
};
directory = ./.;
};
in
{
inherit eval;
expr = {
legacyModule = lib.filterAttrs (
name: _: name == "isClanModule"
) eval.config.clanInternals.inventoryClass.machines.A.compiledServices.legacyModule;
};
expected = {
legacyModule = {
};
};
};
test_inventory_empty =
let
eval = clan {
inventory = { };
directory = ./.;
};
in
{
# Empty inventory should return an empty module
expr = eval.config.clanInternals.inventoryClass.machines;
expected = { };
};
test_inventory_module_doesnt_exist =
let
eval = clan {
directory = ./.;
inventory = {
services = {
fanatasy.instance_1 = {
roles.default.machines = [ "machine_1" ];
};
};
machines = {
"machine_1" = { };
};
};
};
in
{
inherit eval;
expr = eval.config.clanInternals.inventoryClass.machines.machine_1.machineImports;
expectedError = {
type = "ThrownError";
msg = "ClanModule not found*";
};
};
}

View File

@@ -0,0 +1,4 @@
---
features = [ "inventory" ]
---
Description

View File

@@ -0,0 +1,9 @@
{
lib,
clan-core,
...
}:
{
# Just some random stuff
options.test = lib.mapAttrs clan-core;
}

View File

@@ -0,0 +1,78 @@
# Integrity validation of the inventory
{ config, lib, ... }:
{
# Assertion must be of type
# { assertion :: bool, message :: string, severity :: "error" | "warning" }
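# Example: { assertion = false; message = "Machine 'foo' is not defined in the inventory."; severity = "warning"; }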
imports = [
# Check that each machine used in a service is defined in the top-level machines
{
assertions = lib.foldlAttrs (
ass1: serviceName: c:
ass1
++ lib.foldlAttrs (
ass2: instanceName: instanceConfig:
let
topLevelMachines = lib.attrNames config.machines;
# All machines must be defined in the top-level machines
assertions = lib.foldlAttrs (
assertions: roleName: role:
assertions
++ builtins.filter (a: !a.assertion) (
builtins.map (m: {
assertion = builtins.elem m topLevelMachines;
message = ''
Machine '${m}' is not defined in the inventory. This might still work, if the machine is defined via nix.
Defined in service: '${serviceName}' instance: '${instanceName}' role: '${roleName}'.
Inventory machines:
${builtins.concatStringsSep "\n" (map (n: "'${n}'") topLevelMachines)}
'';
severity = "warning";
}) role.machines
)
) [ ] instanceConfig.roles;
in
ass2 ++ assertions
) [ ] c
) [ ] config.services;
}
# Check that each tag used in a role is defined in at least one machines tags
{
assertions = lib.foldlAttrs (
ass1: serviceName: c:
ass1
++ lib.foldlAttrs (
ass2: instanceName: instanceConfig:
let
allTags = lib.foldlAttrs (
tags: _machineName: machine:
tags ++ machine.tags
) [ ] config.machines;
# All tags used in a role must be defined in at least one machine's tags
assertions = lib.foldlAttrs (
assertions: roleName: role:
assertions
++ builtins.filter (a: !a.assertion) (
builtins.map (m: {
assertion = builtins.elem m allTags;
message = ''
Tag '${m}' is not defined in the inventory.
Defined in service: '${serviceName}' instance: '${instanceName}' role: '${roleName}'.
Available tags:
${builtins.concatStringsSep "\n" (map (n: "'${n}'") allTags)}
'';
severity = "error";
}) role.tags
)
) [ ] instanceConfig.roles;
in
ass2 ++ assertions
) [ ] c
) [ ] config.services;
}
];
}

View File

@@ -1,5 +1,268 @@
{
lib,
config,
clanLib,
...
}:
let
inherit (config) inventory directory;
resolveTags =
# Inventory, { machines :: [string], tags :: [string] }
{
serviceName,
instanceName,
roleName,
inventory,
members,
}:
{
machines =
members.machines or [ ]
++ (builtins.foldl' (
acc: tag:
let
# For error printing
availableTags = lib.foldlAttrs (
acc: _: v:
v.tags or [ ] ++ acc
) [ ] (inventory.machines);
tagMembers = builtins.attrNames (
lib.filterAttrs (_n: v: builtins.elem tag v.tags or [ ]) inventory.machines
);
in
if tagMembers == [ ] then
lib.warn ''
inventory.services.${serviceName}.${instanceName}: - ${roleName} tags: no machine with tag '${tag}' found.
Available tags: ${builtins.toJSON (lib.unique availableTags)}
'' [ ]
else
acc ++ tagMembers
) [ ] members.tags or [ ]);
};
checkService =
modulepath: serviceName:
builtins.elem "inventory" (clanLib.modules.getFrontmatter modulepath serviceName).features or [ ];
compileMachine =
{ machineConfig }:
{
machineImports = [
(lib.optionalAttrs (machineConfig.deploy.targetHost or null != null) {
config.clan.core.networking.targetHost = lib.mkForce machineConfig.deploy.targetHost;
})
(lib.optionalAttrs (machineConfig.deploy.buildHost or null != null) {
config.clan.core.networking.buildHost = lib.mkForce machineConfig.deploy.buildHost;
})
];
assertions = { };
};
resolveImports =
{
supportedRoles,
resolvedRolesPerInstance,
serviceConfigs,
serviceName,
machineName,
getRoleFile,
}:
(lib.foldlAttrs (
# : [ Modules ] -> String -> ServiceConfig -> [ Modules ]
acc2: instanceName: serviceConfig:
let
resolvedRoles = resolvedRolesPerInstance.${instanceName};
isInService = builtins.any (members: builtins.elem machineName members.machines) (
builtins.attrValues resolvedRoles
);
# all roles where the machine is present
machineRoles = builtins.attrNames (
lib.filterAttrs (_role: roleConfig: builtins.elem machineName roleConfig.machines) resolvedRoles
);
machineServiceConfig = (serviceConfig.machines.${machineName} or { }).config or { };
globalConfig = serviceConfig.config or { };
globalExtraModules = serviceConfig.extraModules or [ ];
machineExtraModules = serviceConfig.machines.${machineName}.extraModules or [ ];
roleServiceExtraModules = builtins.foldl' (
acc: role: acc ++ serviceConfig.roles.${role}.extraModules or [ ]
) [ ] machineRoles;
# TODO: maybe optimize this; don't look up the role in inverse roles. Imports are not lazy.
roleModules = builtins.map (
role:
if builtins.elem role supportedRoles && inventory.modules ? ${serviceName} then
getRoleFile role
else
throw "Module ${serviceName} doesn't have role: '${role}'. Role: ${
inventory.modules.${serviceName}
}/roles/${role}.nix not found."
) machineRoles;
roleServiceConfigs = builtins.filter (m: m != { }) (
builtins.map (role: serviceConfig.roles.${role}.config or { }) machineRoles
);
extraModules = map (s: if builtins.typeOf s == "string" then "${directory}/${s}" else s) (
globalExtraModules ++ machineExtraModules ++ roleServiceExtraModules
);
features =
(clanLib.modules.getFrontmatter inventory.modules.${serviceName} serviceName).features or [ ];
deprecationWarning = lib.optionalAttrs (builtins.elem "deprecated" features) {
warnings = [
''
The '${serviceName}' module has been migrated from `inventory.services` to `inventory.instances`
See https://docs.clan.lol/guides/clanServices/ for usage.
''
];
};
in
if !(serviceConfig.enabled or true) then
acc2
else if isInService then
acc2
++ [
deprecationWarning
{
imports = roleModules ++ extraModules;
clan.inventory.services.${serviceName}.${instanceName} = {
roles = resolvedRoles;
# TODO: Add inverseRoles to the service config if needed
# inherit inverseRoles;
};
}
(lib.optionalAttrs (globalConfig != { } || machineServiceConfig != { } || roleServiceConfigs != [ ])
{
clan.${serviceName} = lib.mkMerge (
[
globalConfig
machineServiceConfig
]
++ roleServiceConfigs
);
}
)
]
else
acc2
) [ ] (serviceConfigs));
in
{
imports = [
./interface.nix
];
config = {
machines = builtins.mapAttrs (
machineName: machineConfig: m:
let
compiledServices = lib.mapAttrs (
_: serviceConfigs:
(
{ config, ... }:
let
serviceName = config.serviceName;
getRoleFile = role: builtins.seq role inventory.modules.${serviceName} + "/roles/${role}.nix";
in
{
_file = "inventory/builder.nix";
_module.args = {
inherit
resolveTags
inventory
clanLib
machineName
serviceConfigs
;
};
imports = [
./roles.nix
];
machineImports = resolveImports {
supportedRoles = config.supportedRoles;
resolvedRolesPerInstance = config.resolvedRolesPerInstance;
inherit
serviceConfigs
serviceName
machineName
getRoleFile
;
};
# Assertions
assertions = {
"checkservice.${serviceName}" = {
assertion = checkService inventory.modules.${serviceName} serviceName;
message = ''
Service ${serviceName} cannot be used in inventory. It does not declare the 'inventory' feature.
To allow it add the following to the beginning of the README.md of the module:
---
...
features = [ "inventory" ]
---
Also make sure to test the module with the 'inventory' feature enabled.
'';
};
};
}
)
) (config.inventory.services or { });
compiledMachine = compileMachine {
inherit
machineConfig
;
};
machineImports = (
compiledMachine.machineImports
++ builtins.foldl' (
acc: service:
let
failedAssertions = (lib.filterAttrs (_: v: !v.assertion) service.assertions);
failedAssertionsImports =
if failedAssertions != { } then
[
{
clan.inventory.assertions = failedAssertions;
}
]
else
[
{
clan.inventory.assertions = {
"alive.assertion.inventory" = {
assertion = true;
message = ''
No failed assertions found for machine ${machineName}. This will never be displayed.
It is here for testing purposes.
'';
};
};
}
];
in
acc
++ service.machineImports
# Import failed assertions
++ failedAssertionsImports
) [ ] (builtins.attrValues m.config.compiledServices)
);
in
{
inherit machineImports compiledServices compiledMachine;
}
) (inventory.machines or { });
};
}

View File

@@ -16,13 +16,76 @@ in
type = types.raw;
};
machines = mkOption {
type = types.attrsOf (submodule ({
options = {
machineImports = mkOption {
type = types.listOf types.raw;
};
};
}));
type = types.attrsOf (
submodule (
{ name, ... }:
let
machineName = name;
in
{
options = {
compiledMachine = mkOption {
type = types.raw;
};
compiledServices = mkOption {
# type = types.attrsOf;
type = types.attrsOf (
types.submoduleWith {
modules = [
(
{ name, ... }:
let
serviceName = name;
in
{
options = {
machineName = mkOption {
default = machineName;
readOnly = true;
};
serviceName = mkOption {
default = serviceName;
readOnly = true;
};
# Outputs
machineImports = mkOption {
type = types.listOf types.raw;
};
supportedRoles = mkOption {
type = types.listOf types.str;
};
matchedRoles = mkOption {
type = types.listOf types.str;
};
machinesRoles = mkOption {
type = types.attrsOf (types.listOf types.str);
};
resolvedRolesPerInstance = mkOption {
type = types.attrsOf (
types.attrsOf (submodule {
options.machines = mkOption {
type = types.listOf types.str;
};
})
);
};
assertions = mkOption {
type = types.attrsOf types.raw;
};
};
}
)
];
}
);
};
machineImports = mkOption {
type = types.listOf types.raw;
};
};
}
)
);
};
};
}

View File

@@ -0,0 +1,65 @@
{
lib,
config,
resolveTags,
inventory,
clanLib,
machineName,
serviceConfigs,
...
}:
let
serviceName = config.serviceName;
in
{
# Roles resolution
# : List String
supportedRoles = clanLib.modules.getRoles "inventory.modules" inventory.modules serviceName;
matchedRoles = builtins.attrNames (
lib.filterAttrs (_: ms: builtins.elem machineName ms) config.machinesRoles
);
resolvedRolesPerInstance = lib.mapAttrs (
instanceName: instanceConfig:
let
resolvedRoles = lib.genAttrs config.supportedRoles (
roleName:
resolveTags {
members = instanceConfig.roles.${roleName} or { };
inherit
instanceName
serviceName
roleName
inventory
;
}
);
usedRoles = builtins.attrNames instanceConfig.roles;
unmatchedRoles = builtins.filter (role: !builtins.elem role config.supportedRoles) usedRoles;
in
if unmatchedRoles != [ ] then
throw ''
Roles ${builtins.toJSON unmatchedRoles} are not defined in the service ${serviceName}.
Instance: '${instanceName}'
Please use one of available roles: ${builtins.toJSON config.supportedRoles}
''
else
resolvedRoles
) serviceConfigs;
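# Merge the per-instance role resolutions into a single role -> machines mapping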
machinesRoles = builtins.zipAttrsWith (
_n: vs:
let
flat = builtins.foldl' (acc: s: acc ++ s.machines) [ ] vs;
in
lib.unique flat
) (builtins.attrValues config.resolvedRolesPerInstance);
assertions = lib.concatMapAttrs (
instanceName: resolvedRoles:
clanLib.modules.checkConstraints {
moduleName = serviceName;
allModules = inventory.modules;
inherit resolvedRoles instanceName;
}
) config.resolvedRolesPerInstance;
}

View File

@@ -31,13 +31,70 @@ let
'';
};
};
moduleConfig = lib.mkOption {
default = { };
# TODO: use types.deferredModule
# clan.borgbackup MUST be defined as submodule
type = types.attrsOf types.anything;
description = ''
Configuration of the specific clanModule.
!!! Note
Configuration is passed to the nixos configuration scoped to the module.
```nix
clan.<serviceName> = { ... # Config }
```
'';
};
extraModulesOption = lib.mkOption {
description = ''
List of additionally imported `.nix` expressions.
Supported types:
- **Strings**: Interpreted relative to the 'directory' passed to `lib.clan`.
- **Paths**: should be relative to the current file.
- **Any**: Nix expression must be serializable to JSON.
!!! Note
**The import only happens if the machine is part of the service or role.**
Other types are passed through to the nixos configuration.
???+ Example
To import the `special.nix` file
```
. Clan Directory
flake.nix
...
modules
special.nix
...
```
```nix
{
extraModules = [ "modules/special.nix" ];
}
```
'';
apply = value: if lib.isString value then value else builtins.seq (builtins.toJSON value) value;
default = [ ];
type = types.listOf (
types.oneOf [
types.str
types.anything
]
);
};
in
{
imports = [
(lib.mkRemovedOptionModule [ "services" ] ''
The `inventory.services` option has been removed. Use `inventory.instances` instead.
See: https://docs.clan.lol/concepts/inventory/#services
'')
./assertions.nix
];
options = {
# Internal things
@@ -255,16 +312,6 @@ in
'';
};
installedAt = lib.mkOption {
type = types.nullOr types.int;
default = null;
description = ''
Indicates when the machine was first installed.
Timestamp is in unix time (seconds since epoch).
'';
};
tags = lib.mkOption {
description = ''
List of tags for the machine.
@@ -368,5 +415,160 @@ in
);
default = { };
};
services = lib.mkOption {
# TODO: deprecate these options
# services are deprecated in favor of `instances`
# visible = false;
description = ''
Services of the inventory.
- The first `<name>` is the moduleName. It must be a valid clanModule name.
- The second `<name>` is an arbitrary instance name.
???+ Example
```nix
# ClanModule name. See the module documentation for the available modules.
# Instance name, can be anything, some services might use it as a unique identifier.
services.borgbackup."instance_1" = {
roles.client.machines = ["machineA"];
};
```
!!! Note
Services MUST be added to machines via `roles` exclusively.
See [`roles.<rolename>.machines`](#inventory.services.roles.machines) or [`roles.<rolename>.tags`](#inventory.services.roles.tags) for more information.
'';
default = { };
type = types.attrsOf (
types.attrsOf (
types.submodule (
# instance name
{ name, ... }:
{
options.enabled = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Enable or disable the complete service.
If the service is disabled, it will not be added to any machine.
!!! Note
This flag is primarily used to temporarily disable a service.
E.g. a 'backup service' without any 'server' might be incomplete and would cause a failure if enabled.
'';
};
options.meta = metaOptionsWith name;
options.extraModules = extraModulesOption;
options.config = moduleConfig // {
description = ''
Configuration of the specific clanModule.
!!! Note
Configuration is passed to the nixos configuration scoped to the module.
```nix
clan.<serviceName> = { ... # Config }
```
???+ Example
For `services.borgbackup` the config is the passed to the machine with the prefix of `clan.borgbackup`.
This means all config values are mapped to the `borgbackup` clanModule exclusively (`config.clan.borgbackup`).
```nix
{
services.borgbackup."instance_1".config = {
destinations = [ ... ];
# See the 'borgbackup' module docs for all options
};
}
```
!!! Note
The module author is responsible for supporting multiple instance configurations in different roles.
See each clanModule's documentation for more information.
'';
};
options.machines = lib.mkOption {
description = ''
Attribute set of machines specific config for the service.
Will be merged with other service configs, such as the role config and the global config.
For machine specific overrides use `mkForce` or other higher priority methods.
???+ Example
```{.nix hl_lines="4-7"}
services.borgbackup."instance_1" = {
roles.client.machines = ["machineA"];
machines.machineA.config = {
# Additional specific config for the machine
# This is merged with all other config places
};
};
```
'';
default = { };
type = types.attrsOf (
types.submodule {
options.extraModules = extraModulesOption;
options.config = moduleConfig // {
description = ''
Additional configuration of the specific machine.
See how [`service.<name>.<name>.config`](#inventory.services.config) works in general for further information.
'';
};
}
);
};
options.roles = lib.mkOption {
default = { };
type = types.attrsOf (
types.submodule {
options.machines = lib.mkOption {
default = [ ];
type = types.listOf types.str;
example = [ "machineA" ];
description = ''
List of machines which are part of the role.
The machines are referenced by their `attributeName` in the `inventory.machines` attribute set.
Memberships are declared here to determine which machines are part of the service.
Alternatively, `tags` can be used to determine membership more dynamically.
'';
};
options.tags = lib.mkOption {
default = [ ];
apply = lib.unique;
type = types.listOf types.str;
description = ''
List of tags which are used to determine the membership of the role.
The tags are matched against the `inventory.machines.<machineName>.tags` attribute set.
If a machine has at least one tag of the role, it is part of the role.
'';
};
options.config = moduleConfig // {
description = ''
Additional configuration of the specific role.
See how [`service.<name>.<name>.config`](#inventory.services.config) works in general for further information.
'';
};
options.extraModules = extraModulesOption;
}
);
};
}
)
)
);
};
};
}

View File

@@ -11,10 +11,6 @@
default =
builtins.removeAttrs (clanLib.introspection.getPrios { options = config.inventory.options; })
# tags are freeformType which is not supported yet.
# services is removed and throws an error if accessed.
[
"tags"
"services"
];
[ "tags" ];
};
}

View File

@@ -32,15 +32,11 @@ def init_test_environment() -> None:
# Set up network bridge
subprocess.run(
["ip", "link", "add", "br0", "type", "bridge"],
check=True,
text=True,
["ip", "link", "add", "br0", "type", "bridge"], check=True, text=True
)
subprocess.run(["ip", "link", "set", "br0", "up"], check=True, text=True)
subprocess.run(
["ip", "addr", "add", "192.168.1.254/24", "dev", "br0"],
check=True,
text=True,
["ip", "addr", "add", "192.168.1.254/24", "dev", "br0"], check=True, text=True
)
# Set up minimal passwd file for unprivileged operations
@@ -115,7 +111,8 @@ def mount(
mountflags: int = 0,
data: str | None = None,
) -> None:
"""A Python wrapper for the mount system call.
"""
A Python wrapper for the mount system call.
:param source: The source of the file system (e.g., device name, remote filesystem).
:param target: The mount point (an existing directory).
@@ -132,11 +129,7 @@ def mount(
# Call the mount system call
result = libc.mount(
source_c,
target_c,
fstype_c,
ctypes.c_ulong(mountflags),
data_c,
source_c, target_c, fstype_c, ctypes.c_ulong(mountflags), data_c
)
if result != 0:
@@ -152,7 +145,7 @@ def prepare_machine_root(machinename: str, root: Path) -> None:
root.mkdir(parents=True, exist_ok=True)
root.joinpath("etc").mkdir(parents=True, exist_ok=True)
root.joinpath(".env").write_text(
"\n".join(f"{k}={v}" for k, v in os.environ.items()),
"\n".join(f"{k}={v}" for k, v in os.environ.items())
)
@@ -164,6 +157,7 @@ def retry(fn: Callable, timeout: int = 900) -> None:
"""Call the given function repeatedly, with 1 second intervals,
until it returns True or a timeout is reached.
"""
for _ in range(timeout):
if fn(False):
return
@@ -290,7 +284,8 @@ class Machine:
check_output: bool = True,
timeout: int | None = 900,
) -> subprocess.CompletedProcess:
"""Execute a shell command, returning a list `(status, stdout)`.
"""
Execute a shell command, returning a list `(status, stdout)`.
Commands are run with `set -euo pipefail` set:
@@ -321,6 +316,7 @@ class Machine:
`timeout` parameter, e.g., `execute(cmd, timeout=10)` or
`execute(cmd, timeout=None)`. The default is 900 seconds.
"""
# Always run command with shell opts
command = f"set -eo pipefail; source /etc/profile; set -xu; {command}"
@@ -334,9 +330,7 @@ class Machine:
return proc
def nested(
self,
msg: str,
attrs: dict[str, str] | None = None,
self, msg: str, attrs: dict[str, str] | None = None
) -> _GeneratorContextManager:
if attrs is None:
attrs = {}
@@ -345,7 +339,8 @@ class Machine:
return self.logger.nested(msg, my_attrs)
def systemctl(self, q: str) -> subprocess.CompletedProcess:
"""Runs `systemctl` commands with optional support for
"""
Runs `systemctl` commands with optional support for
`systemctl --user`
```py
@@ -360,7 +355,8 @@ class Machine:
return self.execute(f"systemctl {q}")
def wait_until_succeeds(self, command: str, timeout: int = 900) -> str:
"""Repeat a shell command with 1-second intervals until it succeeds.
"""
Repeat a shell command with 1-second intervals until it succeeds.
Has a default timeout of 900 seconds which can be modified, e.g.
`wait_until_succeeds(cmd, timeout=10)`. See `execute` for details on
command execution.
@@ -378,17 +374,18 @@ class Machine:
return output
def wait_for_open_port(
self,
port: int,
addr: str = "localhost",
timeout: int = 900,
self, port: int, addr: str = "localhost", timeout: int = 900
) -> None:
"""Wait for a port to be open on the given address."""
"""
Wait for a port to be open on the given address.
"""
command = f"nc -z {shlex.quote(addr)} {port}"
self.wait_until_succeeds(command, timeout=timeout)
def wait_for_file(self, filename: str, timeout: int = 30) -> None:
"""Waits until the file exists in the machine's file system."""
"""
Waits until the file exists in the machine's file system.
"""
def check_file(_last_try: bool) -> bool:
result = self.execute(f"test -e {filename}")
@@ -398,7 +395,8 @@ class Machine:
retry(check_file, timeout)
def wait_for_unit(self, unit: str, timeout: int = 900) -> None:
"""Wait for a systemd unit to get into "active" state.
"""
Wait for a systemd unit to get into "active" state.
Throws exceptions on "failed" and "inactive" states as well as after
timing out.
"""
@@ -443,7 +441,9 @@ class Machine:
return res.stdout
def shutdown(self) -> None:
"""Shut down the machine, waiting for the VM to exit."""
"""
Shut down the machine, waiting for the VM to exit.
"""
if self.process:
self.process.terminate()
self.process.wait()
@@ -557,7 +557,7 @@ class Driver:
rootdir=tempdir_path / container.name,
out_dir=self.out_dir,
logger=self.logger,
),
)
)
def start_all(self) -> None:
@@ -581,7 +581,7 @@ class Driver:
)
print(
f"To attach to container {machine.name} run on the same machine that runs the test:",
f"To attach to container {machine.name} run on the same machine that runs the test:"
)
print(
" ".join(
@@ -603,8 +603,8 @@ class Driver:
"-c",
"bash",
Style.RESET_ALL,
],
),
]
)
)
def test_symbols(self) -> dict[str, Any]:
@@ -623,7 +623,7 @@ class Driver:
"additionally exposed symbols:\n "
+ ", ".join(m.name for m in self.machines)
+ ",\n "
+ ", ".join(list(general_symbols.keys())),
+ ", ".join(list(general_symbols.keys()))
)
return {**general_symbols, **machine_symbols}

View File

@@ -25,18 +25,14 @@ class AbstractLogger(ABC):
@abstractmethod
@contextmanager
def subtest(
self,
name: str,
attributes: dict[str, str] | None = None,
self, name: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
pass
@abstractmethod
@contextmanager
def nested(
self,
message: str,
attributes: dict[str, str] | None = None,
self, message: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
pass
@@ -70,7 +66,7 @@ class JunitXMLLogger(AbstractLogger):
def __init__(self, outfile: Path) -> None:
self.tests: dict[str, JunitXMLLogger.TestCaseState] = {
"main": self.TestCaseState(),
"main": self.TestCaseState()
}
self.currentSubtest = "main"
self.outfile: Path = outfile
@@ -82,9 +78,7 @@ class JunitXMLLogger(AbstractLogger):
@contextmanager
def subtest(
self,
name: str,
attributes: dict[str, str] | None = None,
self, name: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
old_test = self.currentSubtest
self.tests.setdefault(name, self.TestCaseState())
@@ -96,9 +90,7 @@ class JunitXMLLogger(AbstractLogger):
@contextmanager
def nested(
self,
message: str,
attributes: dict[str, str] | None = None,
self, message: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
self.log(message)
yield
@@ -152,9 +144,7 @@ class CompositeLogger(AbstractLogger):
@contextmanager
def subtest(
self,
name: str,
attributes: dict[str, str] | None = None,
self, name: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
with ExitStack() as stack:
for logger in self.logger_list:
@@ -163,9 +153,7 @@ class CompositeLogger(AbstractLogger):
@contextmanager
def nested(
self,
message: str,
attributes: dict[str, str] | None = None,
self, message: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
with ExitStack() as stack:
for logger in self.logger_list:
@@ -212,24 +200,19 @@ class TerminalLogger(AbstractLogger):
@contextmanager
def subtest(
self,
name: str,
attributes: dict[str, str] | None = None,
self, name: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
with self.nested("subtest: " + name, attributes):
yield
@contextmanager
def nested(
self,
message: str,
attributes: dict[str, str] | None = None,
self, message: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
self._eprint(
self.maybe_prefix(
Style.BRIGHT + Fore.GREEN + message + Style.RESET_ALL,
attributes,
),
Style.BRIGHT + Fore.GREEN + message + Style.RESET_ALL, attributes
)
)
tic = time.time()
@@ -276,9 +259,7 @@ class XMLLogger(AbstractLogger):
return "".join(ch for ch in message if unicodedata.category(ch)[0] != "C")
def maybe_prefix(
self,
message: str,
attributes: dict[str, str] | None = None,
self, message: str, attributes: dict[str, str] | None = None
) -> str:
if attributes and "machine" in attributes:
return f"{attributes['machine']}: {message}"
@@ -328,18 +309,14 @@ class XMLLogger(AbstractLogger):
@contextmanager
def subtest(
self,
name: str,
attributes: dict[str, str] | None = None,
self, name: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
with self.nested("subtest: " + name, attributes):
yield
@contextmanager
def nested(
self,
message: str,
attributes: dict[str, str] | None = None,
self, message: str, attributes: dict[str, str] | None = None
) -> Iterator[None]:
if attributes is None:
attributes = {}

View File

@@ -1,17 +1,40 @@
{ ... }:
{
perSystem.clan.nixosTests.machine-id = {
perSystem =
{ ... }:
{
clan.nixosTests.machine-id = {
name = "service-machine-id";
name = "service-machine-id";
clan = {
directory = ./.;
machines.server = {
clan.core.settings.machine-id.enable = true;
clan = {
directory = ./.;
# Workaround until we can use nodes.server = { };
modules."@clan/importer" = ../../../../clanServices/importer;
inventory = {
machines.server = { };
instances.importer = {
module.name = "@clan/importer";
module.input = "self";
roles.default.tags.all = { };
roles.default.extraModules = [
{
# Test machine ID generation
clan.core.settings.machine-id.enable = true;
}
];
};
};
};
# TODO: Broken. Use instead of importer after fixing.
# nodes.server = { };
# This is not an actual vm test, this is a workaround to
# generate the needed vars for the eval test.
testScript = "";
};
};
# This is not an actual vm test, this is a workaround to
# generate the needed vars for the eval test.
testScript = "";
};
}

View File

@@ -10,14 +10,30 @@
clan = {
directory = ./.;
machines.machine = {
clan.core.postgresql.enable = true;
clan.core.postgresql.users.test = { };
clan.core.postgresql.databases.test.create.options.OWNER = "test";
clan.core.settings.directory = ./.;
# Workaround until we can use nodes.machine = { };
modules."@clan/importer" = ../../../../clanServices/importer;
inventory = {
machines.machine = { };
instances.importer = {
module.name = "@clan/importer";
module.input = "self";
roles.default.tags.all = { };
roles.default.extraModules = [
{
clan.core.postgresql.enable = true;
clan.core.postgresql.users.test = { };
clan.core.postgresql.databases.test.create.options.OWNER = "test";
clan.core.settings.directory = ./.;
}
];
};
};
};
# TODO: Broken. Use instead of importer after fixing.
# nodes.machine = { };
testScript =
let
runpg = "runuser -u postgres -- /run/current-system/sw/bin/psql";

View File

@@ -290,11 +290,9 @@ in
};
owner = mkOption {
description = "The user name or id that will own the file.";
type = str;
default = "root";
};
group = mkOption {
type = str;
description = "The group name or id that will own the file.";
default = if _class == "darwin" then "wheel" else "root";
defaultText = lib.literalExpression ''if _class == "darwin" then "wheel" else "root"'';
@@ -304,15 +302,6 @@ in
description = "The unix file mode of the file. Must be a 4-digit octal number.";
default = "0400";
};
exists = mkOption {
description = ''
Returns true if the file exists, This is used to guard against reading not set value in evaluation.
This currently only works for non secret files.
'';
type = bool;
default = if file.config.secret then throw "Cannot determine existance of secret file" else false;
defaultText = "Throws error because the existance of a secret file cannot be determined";
};
value =
mkOption {
description = ''

View File

@@ -25,7 +25,7 @@ in
);
value = mkIf (file.config.secret == false) (
# dynamically adjust priority to allow overriding with mkDefault in case the file is not found
if file.config.exists then
if (pathExists file.config.flakePath) then
# if the file is found it should have normal priority
readFile file.config.flakePath
else
@@ -34,7 +34,6 @@ in
throw "Please run `clan vars generate ${config.clan.core.settings.machine.name}` as file was not found: ${file.config.path}"
)
);
exists = mkIf (file.config.secret == false) (pathExists file.config.flakePath);
};
};
}

View File

@@ -1,116 +0,0 @@
# Standalone VM base module that can be imported independently
# This module contains the core VM configuration without the system extension
{
lib,
config,
pkgs,
modulesPath,
...
}:
let
# Flatten the list of state folders into a single list
stateFolders = lib.flatten (
lib.mapAttrsToList (_item: attrs: attrs.folders) config.clan.core.state
);
in
{
imports = [
(modulesPath + "/virtualisation/qemu-vm.nix")
./serial.nix
./waypipe.nix
];
clan.core.state.HOME.folders = [ "/home" ];
clan.services.waypipe = {
inherit (config.clan.core.vm.inspect.waypipe) enable command;
};
# required for issuing shell commands via qga
services.qemuGuest.enable = true;
# required to react to system_powerdown qmp command
# Some desktop managers like xfce override the poweroff signal and therefore
# make it impossible to handle it via 'logind' directly.
services.acpid.enable = true;
services.acpid.handlers.power.event = "button/power.*";
services.acpid.handlers.power.action = "poweroff";
# only works on x11
services.spice-vdagentd.enable = config.services.xserver.enable;
boot.initrd.systemd.enable = true;
boot.initrd.systemd.storePaths = [
pkgs.util-linux
pkgs.e2fsprogs
];
boot.initrd.systemd.emergencyAccess = true;
# userborn would be faster because it doesn't need perl, but it cannot create normal users
services.userborn.enable = true;
users.mutableUsers = false;
users.allowNoPasswordLogin = true;
boot.initrd.kernelModules = [ "virtiofs" ];
virtualisation.writableStore = false;
virtualisation.fileSystems = lib.mkForce (
{
"/nix/store" = {
device = "nix-store";
options = [
"x-systemd.requires=systemd-modules-load.service"
"ro"
];
fsType = "virtiofs";
};
"/" = {
device = "/dev/vda";
fsType = "ext4";
options = [
"defaults"
"x-systemd.makefs"
"nobarrier"
"noatime"
"nodiratime"
"data=writeback"
"discard"
];
};
"/vmstate" = {
device = "/dev/vdb";
options = [
"x-systemd.makefs"
"noatime"
"nodiratime"
"discard"
];
noCheck = true;
fsType = "ext4";
};
${config.clan.core.facts.secretUploadDirectory} = {
device = "secrets";
fsType = "9p";
neededForBoot = true;
options = [
"trans=virtio"
"version=9p2000.L"
"cache=loose"
];
};
}
// lib.listToAttrs (
map (
folder:
lib.nameValuePair folder {
device = "/vmstate${folder}";
fsType = "none";
options = [ "bind" ];
}
) stateFolders
)
);
}

View File

@@ -4,11 +4,116 @@
pkgs,
options,
extendModules,
modulesPath,
...
}:
let
# Import the standalone VM base module
vmModule = import ./vm-base.nix;
# Flatten the list of state folders into a single list
stateFolders = lib.flatten (
lib.mapAttrsToList (_item: attrs: attrs.folders) config.clan.core.state
);
vmModule = {
imports = [
(modulesPath + "/virtualisation/qemu-vm.nix")
./serial.nix
./waypipe.nix
];
clan.core.state.HOME.folders = [ "/home" ];
clan.services.waypipe = {
inherit (config.clan.core.vm.inspect.waypipe) enable command;
};
# required for issuing shell commands via qga
services.qemuGuest.enable = true;
# required to react to system_powerdown qmp command
# Some desktop managers like xfce override the poweroff signal and therefore
# make it impossible to handle it via 'logind' directly.
services.acpid.enable = true;
services.acpid.handlers.power.event = "button/power.*";
services.acpid.handlers.power.action = "poweroff";
# only works on x11
services.spice-vdagentd.enable = config.services.xserver.enable;
boot.initrd.systemd.enable = true;
boot.initrd.systemd.storePaths = [
pkgs.util-linux
pkgs.e2fsprogs
];
boot.initrd.systemd.emergencyAccess = true;
# userborn would be faster because it doesn't need perl, but it cannot create normal users
services.userborn.enable = true;
users.mutableUsers = false;
users.allowNoPasswordLogin = true;
boot.initrd.kernelModules = [ "virtiofs" ];
virtualisation.writableStore = false;
virtualisation.fileSystems = lib.mkForce (
{
"/nix/store" = {
device = "nix-store";
options = [
"x-systemd.requires=systemd-modules-load.service"
"ro"
];
fsType = "virtiofs";
};
"/" = {
device = "/dev/vda";
fsType = "ext4";
options = [
"defaults"
"x-systemd.makefs"
"nobarrier"
"noatime"
"nodiratime"
"data=writeback"
"discard"
];
};
"/vmstate" = {
device = "/dev/vdb";
options = [
"x-systemd.makefs"
"noatime"
"nodiratime"
"discard"
];
noCheck = true;
fsType = "ext4";
};
${config.clan.core.facts.secretUploadDirectory} = {
device = "secrets";
fsType = "9p";
neededForBoot = true;
options = [
"trans=virtio"
"version=9p2000.L"
"cache=loose"
];
};
}
// lib.listToAttrs (
map (
folder:
lib.nameValuePair folder {
device = "/vmstate${folder}";
fsType = "none";
options = [ "bind" ];
}
) stateFolders
)
);
};
# We cannot simply merge the VM config into the current system config, because
# it is not necessarily a VM.

View File

@@ -195,7 +195,7 @@ def compute_zerotier_ip(network_id: str, identity: Identity) -> ipaddress.IPv6Ad
(node_id >> 16) & 0xFF,
(node_id >> 8) & 0xFF,
(node_id) & 0xFF,
],
]
)
return ipaddress.IPv6Address(bytes(addr_parts))
@@ -203,10 +203,7 @@ def compute_zerotier_ip(network_id: str, identity: Identity) -> ipaddress.IPv6Ad
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument(
"--mode",
choices=["network", "identity"],
required=True,
type=str,
"--mode", choices=["network", "identity"], required=True, type=str
)
parser.add_argument("--ip", type=Path, required=True)
parser.add_argument("--identity-secret", type=Path, required=True)

View File

@@ -17,7 +17,7 @@ def main() -> None:
moon_json = json.loads(Path(moon_json_path).read_text())
moon_json["roots"][0]["stableEndpoints"] = json.loads(
Path(endpoint_config).read_text(),
Path(endpoint_config).read_text()
)
with NamedTemporaryFile("w") as f:

View File

@@ -34,7 +34,4 @@ in
flake.nixosModules.clanCore = clanCore;
flake.darwinModules.clanCore = clanCore;
# Standalone VM base module that can be imported for VM testing
flake.nixosModules.clan-vm-base = ./clanCore/vm-base.nix;
}

View File

@@ -38,7 +38,8 @@ def get_gitea_api_url(remote: str = "origin") -> str:
host_and_path = remote_url.split("@")[1] # git.clan.lol:clan/clan-core.git
host = host_and_path.split(":")[0] # git.clan.lol
repo_path = host_and_path.split(":")[1] # clan/clan-core.git
repo_path = repo_path.removesuffix(".git") # clan/clan-core
if repo_path.endswith(".git"):
repo_path = repo_path[:-4] # clan/clan-core
elif remote_url.startswith("https://"):
# HTTPS format: https://git.clan.lol/clan/clan-core.git
url_parts = remote_url.replace("https://", "").split("/")
@@ -85,10 +86,7 @@ def get_repo_info_from_api_url(api_url: str) -> tuple[str, str]:
def fetch_pr_statuses(
repo_owner: str,
repo_name: str,
commit_sha: str,
host: str,
repo_owner: str, repo_name: str, commit_sha: str, host: str
) -> list[dict]:
"""Fetch CI statuses for a specific commit SHA."""
status_url = (
@@ -185,7 +183,7 @@ def run_git_command(command: list) -> tuple[int, str, str]:
def get_current_branch_name() -> str:
exit_code, branch_name, error = run_git_command(
["git", "rev-parse", "--abbrev-ref", "HEAD"],
["git", "rev-parse", "--abbrev-ref", "HEAD"]
)
if exit_code != 0:
@@ -198,7 +196,7 @@ def get_current_branch_name() -> str:
def get_latest_commit_info() -> tuple[str, str]:
"""Get the title and body of the latest commit."""
exit_code, commit_msg, error = run_git_command(
["git", "log", "-1", "--pretty=format:%B"],
["git", "log", "-1", "--pretty=format:%B"]
)
if exit_code != 0:
@@ -227,7 +225,7 @@ def get_commits_since_main() -> list[tuple[str, str]]:
"main..HEAD",
"--no-merges",
"--pretty=format:%s|%b|---END---",
],
]
)
if exit_code != 0:
@@ -265,9 +263,7 @@ def open_editor_for_pr() -> tuple[str, str]:
commits_since_main = get_commits_since_main()
with tempfile.NamedTemporaryFile(
mode="w+",
suffix="COMMIT_EDITMSG",
delete=False,
mode="w+", suffix="COMMIT_EDITMSG", delete=False
) as temp_file:
temp_file.flush()
temp_file_path = temp_file.name
@@ -284,7 +280,7 @@ def open_editor_for_pr() -> tuple[str, str]:
temp_file.write("# The first line will be used as the PR title.\n")
temp_file.write("# Everything else will be used as the PR description.\n")
temp_file.write(
"# To abort creation of the PR, close editor with an error code.\n",
"# To abort creation of the PR, close editor with an error code.\n"
)
temp_file.write("# In vim for example you can use :cq!\n")
temp_file.write("#\n")
@@ -377,7 +373,7 @@ def create_agit_push(
print(
f" Description: {description[:50]}..."
if len(description) > 50
else f" Description: {description}",
else f" Description: {description}"
)
print()
@@ -534,26 +530,19 @@ Examples:
)
create_parser.add_argument(
"-t",
"--topic",
help="Set PR topic (default: current branch name)",
"-t", "--topic", help="Set PR topic (default: current branch name)"
)
create_parser.add_argument(
"--title",
help="Set the PR title (default: last commit title)",
"--title", help="Set the PR title (default: last commit title)"
)
create_parser.add_argument(
"--description",
help="Override the PR description (default: commit body)",
"--description", help="Override the PR description (default: commit body)"
)
create_parser.add_argument(
"-f",
"--force",
action="store_true",
help="Force push the changes",
"-f", "--force", action="store_true", help="Force push the changes"
)
create_parser.add_argument(

View File

@@ -13,9 +13,7 @@ log = logging.getLogger(__name__)
def main(argv: list[str] = sys.argv) -> int:
parser = argparse.ArgumentParser(description="Clan App")
parser.add_argument(
"--content-uri",
type=str,
help="The URI of the content to display",
"--content-uri", type=str, help="The URI of the content to display"
)
parser.add_argument("--debug", action="store_true", help="Enable debug mode")
parser.add_argument(

View File

@@ -56,23 +56,18 @@ class ApiBridge(ABC):
for middleware in self.middleware_chain:
try:
log.debug(
f"{middleware.__class__.__name__} => {request.method_name}",
f"{middleware.__class__.__name__} => {request.method_name}"
)
middleware.process(context)
except Exception as e:
# If middleware fails, handle error
self.send_api_error_response(
request.op_key or "unknown",
str(e),
["middleware_error"],
request.op_key or "unknown", str(e), ["middleware_error"]
)
return
def send_api_error_response(
self,
op_key: str,
error_message: str,
location: list[str],
self, op_key: str, error_message: str, location: list[str]
) -> None:
"""Send an error response."""
from clan_lib.api import ApiError, ErrorDataClass
@@ -85,7 +80,7 @@ class ApiBridge(ABC):
message="An internal error occured",
description=error_message,
location=location,
),
)
],
)
@@ -112,7 +107,6 @@ class ApiBridge(ABC):
thread_name: Name for the thread (for debugging)
wait_for_completion: Whether to wait for the thread to complete
timeout: Timeout in seconds when waiting for completion
"""
op_key = request.op_key or "unknown"
@@ -122,7 +116,7 @@ class ApiBridge(ABC):
try:
log.debug(
f"Processing {request.method_name} with args {request.args} "
f"and header {request.header} in thread {thread_name}",
f"and header {request.header} in thread {thread_name}"
)
self.process_request(request)
finally:
@@ -130,9 +124,7 @@ class ApiBridge(ABC):
stop_event = threading.Event()
thread = threading.Thread(
target=thread_task,
args=(stop_event,),
name=thread_name,
target=thread_task, args=(stop_event,), name=thread_name
)
thread.start()
@@ -146,7 +138,5 @@ class ApiBridge(ABC):
if thread.is_alive():
stop_event.set() # Cancel the thread
self.send_api_error_response(
op_key,
"Request timeout",
["api_bridge", request.method_name],
op_key, "Request timeout", ["api_bridge", request.method_name]
)

View File

@@ -26,7 +26,8 @@ RESULT: dict[str, SuccessDataClass[list[str] | None] | ErrorDataClass] = {}
def get_clan_folder() -> SuccessDataClass[Flake] | ErrorDataClass:
"""Opens the clan folder using the GTK file dialog.
"""
Opens the clan folder using the GTK file dialog.
Returns the path to the clan folder or an error if it fails.
"""
file_request = FileRequest(
@@ -51,7 +52,7 @@ def get_clan_folder() -> SuccessDataClass[Flake] | ErrorDataClass:
message="No folder selected",
description="You must select a folder to open.",
location=["get_clan_folder"],
),
)
],
)
@@ -65,7 +66,7 @@ def get_clan_folder() -> SuccessDataClass[Flake] | ErrorDataClass:
message="Invalid clan folder",
description=f"The selected folder '{clan_folder}' is not a valid clan folder.",
location=["get_clan_folder"],
),
)
],
)
@@ -101,10 +102,8 @@ def gtk_open_file(file_request: FileRequest, op_key: str) -> bool:
selected_path = remove_none([gfile.get_path()])
returns(
SuccessDataClass(
op_key=op_key,
data=selected_path,
status="success",
),
op_key=op_key, data=selected_path, status="success"
)
)
except Exception as e:
log.exception("Error opening file")
@@ -117,9 +116,9 @@ def gtk_open_file(file_request: FileRequest, op_key: str) -> bool:
message=e.__class__.__name__,
description=str(e),
location=["get_system_file"],
),
)
],
),
)
)
def on_file_select_multiple(file_dialog: Gtk.FileDialog, task: Gio.Task) -> None:
@@ -129,10 +128,8 @@ def gtk_open_file(file_request: FileRequest, op_key: str) -> bool:
selected_paths = remove_none([gfile.get_path() for gfile in gfiles])
returns(
SuccessDataClass(
op_key=op_key,
data=selected_paths,
status="success",
),
op_key=op_key, data=selected_paths, status="success"
)
)
else:
returns(SuccessDataClass(op_key=op_key, data=None, status="success"))
@@ -147,9 +144,9 @@ def gtk_open_file(file_request: FileRequest, op_key: str) -> bool:
message=e.__class__.__name__,
description=str(e),
location=["get_system_file"],
),
)
],
),
)
)
def on_folder_select(file_dialog: Gtk.FileDialog, task: Gio.Task) -> None:
@@ -159,10 +156,8 @@ def gtk_open_file(file_request: FileRequest, op_key: str) -> bool:
selected_path = remove_none([gfile.get_path()])
returns(
SuccessDataClass(
op_key=op_key,
data=selected_path,
status="success",
),
op_key=op_key, data=selected_path, status="success"
)
)
else:
returns(SuccessDataClass(op_key=op_key, data=None, status="success"))
@@ -177,9 +172,9 @@ def gtk_open_file(file_request: FileRequest, op_key: str) -> bool:
message=e.__class__.__name__,
description=str(e),
location=["get_system_file"],
),
)
],
),
)
)
def on_save_finish(file_dialog: Gtk.FileDialog, task: Gio.Task) -> None:
@@ -189,10 +184,8 @@ def gtk_open_file(file_request: FileRequest, op_key: str) -> bool:
selected_path = remove_none([gfile.get_path()])
returns(
SuccessDataClass(
op_key=op_key,
data=selected_path,
status="success",
),
op_key=op_key, data=selected_path, status="success"
)
)
else:
returns(SuccessDataClass(op_key=op_key, data=None, status="success"))
@@ -207,9 +200,9 @@ def gtk_open_file(file_request: FileRequest, op_key: str) -> bool:
message=e.__class__.__name__,
description=str(e),
location=["get_system_file"],
),
)
],
),
)
)
dialog = Gtk.FileDialog()

View File

@@ -39,7 +39,7 @@ class ArgumentParsingMiddleware(Middleware):
except Exception as e:
log.exception(
f"Error while parsing arguments for {context.request.method_name}",
f"Error while parsing arguments for {context.request.method_name}"
)
context.bridge.send_api_error_response(
context.request.op_key or "unknown",

View File

@@ -23,9 +23,7 @@ class Middleware(ABC):
"""Process the request through this middleware."""
def register_context_manager(
self,
context: MiddlewareContext,
cm: AbstractContextManager[Any],
self, context: MiddlewareContext, cm: AbstractContextManager[Any]
) -> Any:
"""Register a context manager with the exit stack."""
return context.exit_stack.enter_context(cm)

View File

@@ -25,26 +25,23 @@ class LoggingMiddleware(Middleware):
try:
# Handle log group configuration
log_group: list[str] | None = context.request.header.get("logging", {}).get(
"group_path",
None,
"group_path", None
)
if log_group is not None:
if not isinstance(log_group, list):
msg = f"Expected log_group to be a list, got {type(log_group)}"
raise TypeError(msg) # noqa: TRY301
log.warning(
f"Using log group {log_group} for {context.request.method_name} with op_key {context.request.op_key}",
f"Using log group {log_group} for {context.request.method_name} with op_key {context.request.op_key}"
)
# Create log file
log_file = self.log_manager.create_log_file(
method,
op_key=context.request.op_key or "unknown",
group_path=log_group,
method, op_key=context.request.op_key or "unknown", group_path=log_group
).get_file_path()
except Exception as e:
log.exception(
f"Error while handling request header of {context.request.method_name}",
f"Error while handling request header of {context.request.method_name}"
)
context.bridge.send_api_error_response(
context.request.op_key or "unknown",
@@ -79,8 +76,7 @@ class LoggingMiddleware(Middleware):
line_buffering=True,
)
self.handler = setup_logging(
log.getEffectiveLevel(),
log_file=handler_stream,
log.getEffectiveLevel(), log_file=handler_stream
)
return self

View File

@@ -32,7 +32,7 @@ class MethodExecutionMiddleware(Middleware):
except Exception as e:
log.exception(
f"Error while handling result of {context.request.method_name}",
f"Error while handling result of {context.request.method_name}"
)
context.bridge.send_api_error_response(
context.request.op_key or "unknown",

View File

@@ -48,7 +48,7 @@ def app_run(app_opts: ClanAppOptions) -> int:
# Add a log group ["clans", <dynamic_name>, "machines", <dynamic_name>]
log_manager = LogManager(base_dir=user_data_dir() / "clan-app" / "logs")
clan_log_group = LogGroupConfig("clans", "Clans").add_child(
LogGroupConfig("machines", "Machines"),
LogGroupConfig("machines", "Machines")
)
log_manager = log_manager.add_root_group_config(clan_log_group)
# Init LogManager global in log_manager_api module
@@ -89,7 +89,7 @@ def app_run(app_opts: ClanAppOptions) -> int:
# HTTP-only mode - keep the server running
log.info("HTTP API server running...")
log.info(
f"Swagger: http://{app_opts.http_host}:{app_opts.http_port}/api/swagger",
f"Swagger: http://{app_opts.http_host}:{app_opts.http_port}/api/swagger"
)
log.info("Press Ctrl+C to stop the server")

View File

@@ -63,9 +63,7 @@ class HttpBridge(ApiBridge, BaseHTTPRequestHandler):
self.send_header("Access-Control-Allow-Headers", "Content-Type")
def _send_json_response_with_status(
self,
data: dict[str, Any],
status_code: int = 200,
self, data: dict[str, Any], status_code: int = 200
) -> None:
"""Send a JSON response with the given status code."""
try:
@@ -84,13 +82,11 @@ class HttpBridge(ApiBridge, BaseHTTPRequestHandler):
response_dict = dataclass_to_dict(response)
self._send_json_response_with_status(response_dict, 200)
log.debug(
f"HTTP response for {response._op_key}: {json.dumps(response_dict, indent=2)}", # noqa: SLF001
f"HTTP response for {response._op_key}: {json.dumps(response_dict, indent=2)}" # noqa: SLF001
)
def _create_success_response(
self,
op_key: str,
data: dict[str, Any],
self, op_key: str, data: dict[str, Any]
) -> BackendResponse:
"""Create a successful API response."""
return BackendResponse(
@@ -102,16 +98,14 @@ class HttpBridge(ApiBridge, BaseHTTPRequestHandler):
def _send_info_response(self) -> None:
"""Send server information response."""
response = self._create_success_response(
"info",
{"message": "Clan API Server", "version": "1.0.0"},
"info", {"message": "Clan API Server", "version": "1.0.0"}
)
self.send_api_response(response)
def _send_methods_response(self) -> None:
"""Send available API methods response."""
response = self._create_success_response(
"methods",
{"methods": list(self.api.functions.keys())},
"methods", {"methods": list(self.api.functions.keys())}
)
self.send_api_response(response)
@@ -185,7 +179,7 @@ class HttpBridge(ApiBridge, BaseHTTPRequestHandler):
json_data = json.loads(file_data.decode("utf-8"))
server_address = getattr(self.server, "server_address", ("localhost", 80))
json_data["servers"] = [
{"url": f"http://{server_address[0]}:{server_address[1]}/api/v1/"},
{"url": f"http://{server_address[0]}:{server_address[1]}/api/v1/"}
]
file_data = json.dumps(json_data, indent=2).encode("utf-8")
@@ -219,9 +213,7 @@ class HttpBridge(ApiBridge, BaseHTTPRequestHandler):
# Validate API path
if not path.startswith("/api/v1/"):
self.send_api_error_response(
"post",
f"Path not found: {path}",
["http_bridge", "POST"],
"post", f"Path not found: {path}", ["http_bridge", "POST"]
)
return
@@ -229,9 +221,7 @@ class HttpBridge(ApiBridge, BaseHTTPRequestHandler):
method_name = path[len("/api/v1/") :]
if not method_name:
self.send_api_error_response(
"post",
"Method name required",
["http_bridge", "POST"],
"post", "Method name required", ["http_bridge", "POST"]
)
return
@@ -299,26 +289,19 @@ class HttpBridge(ApiBridge, BaseHTTPRequestHandler):
# Create API request
api_request = BackendRequest(
method_name=method_name,
args=body,
header=header,
op_key=op_key,
method_name=method_name, args=body, header=header, op_key=op_key
)
except Exception as e:
self.send_api_error_response(
gen_op_key,
str(e),
["http_bridge", method_name],
gen_op_key, str(e), ["http_bridge", method_name]
)
return
self._process_api_request_in_thread(api_request, method_name)
def _parse_request_data(
self,
request_data: dict[str, Any],
gen_op_key: str,
self, request_data: dict[str, Any], gen_op_key: str
) -> tuple[dict[str, Any], dict[str, Any], str]:
"""Parse and validate request data components."""
header = request_data.get("header", {})
@@ -361,9 +344,7 @@ class HttpBridge(ApiBridge, BaseHTTPRequestHandler):
pass
def _process_api_request_in_thread(
self,
api_request: BackendRequest,
method_name: str,
self, api_request: BackendRequest, method_name: str
) -> None:
"""Process the API request in a separate thread."""
stop_event = threading.Event()
@@ -377,7 +358,7 @@ class HttpBridge(ApiBridge, BaseHTTPRequestHandler):
log.debug(
f"Processing {request.method_name} with args {request.args} "
f"and header {request.header}",
f"and header {request.header}"
)
self.process_request(request)

View File

@@ -64,8 +64,7 @@ def mock_log_manager() -> Mock:
@pytest.fixture
def http_bridge(
mock_api: MethodRegistry,
mock_log_manager: Mock,
mock_api: MethodRegistry, mock_log_manager: Mock
) -> tuple[MethodRegistry, tuple]:
"""Create HTTP bridge dependencies for testing."""
middleware_chain = (
@@ -257,9 +256,7 @@ class TestIntegration:
"""Integration tests for HTTP API components."""
def test_full_request_flow(
self,
mock_api: MethodRegistry,
mock_log_manager: Mock,
self, mock_api: MethodRegistry, mock_log_manager: Mock
) -> None:
"""Test complete request flow from server to bridge to middleware."""
server: HttpApiServer = HttpApiServer(
@@ -304,9 +301,7 @@ class TestIntegration:
server.stop()
def test_blocking_task(
self,
mock_api: MethodRegistry,
mock_log_manager: Mock,
self, mock_api: MethodRegistry, mock_log_manager: Mock
) -> None:
shared_threads: dict[str, tasks.WebThread] = {}
tasks.BAKEND_THREADS = shared_threads

View File

@@ -21,7 +21,7 @@ def _get_lib_names() -> list[str]:
machine = platform.machine().lower()
if system == "windows":
if machine in {"amd64", "x86_64"}:
if machine == "amd64" or machine == "x86_64":
return ["webview.dll", "WebView2Loader.dll"]
if machine == "arm64":
msg = "arm64 is not supported on Windows"
@@ -36,6 +36,7 @@ def _get_lib_names() -> list[str]:
def _be_sure_libraries() -> list[Path] | None:
"""Ensure libraries exist and return paths."""
lib_dir = os.environ.get("WEBVIEW_LIB_DIR")
if not lib_dir:
msg = "WEBVIEW_LIB_DIR environment variable is not set"

View File

@@ -144,9 +144,7 @@ class Webview:
)
else:
bridge = WebviewBridge(
webview=self,
middleware_chain=tuple(self._middleware),
threads={},
webview=self, middleware_chain=tuple(self._middleware), threads={}
)
self._bridge = bridge
@@ -156,10 +154,7 @@ class Webview:
def set_size(self, value: Size) -> None:
"""Set the webview size (legacy compatibility)."""
_webview_lib.webview_set_size(
self.handle,
value.width,
value.height,
value.hint,
self.handle, value.width, value.height, value.hint
)
def set_title(self, value: str) -> None:
@@ -199,10 +194,7 @@ class Webview:
self._callbacks[name] = c_callback
_webview_lib.webview_bind(
self.handle,
_encode_c_string(name),
c_callback,
None,
self.handle, _encode_c_string(name), c_callback, None
)
def bind(self, name: str, callback: Callable[..., Any]) -> None:
@@ -227,10 +219,7 @@ class Webview:
def return_(self, seq: str, status: int, result: str) -> None:
_webview_lib.webview_return(
self.handle,
_encode_c_string(seq),
status,
_encode_c_string(result),
self.handle, _encode_c_string(seq), status, _encode_c_string(result)
)
def eval(self, source: str) -> None:

View File

@@ -26,9 +26,7 @@ class WebviewBridge(ApiBridge):
def send_api_response(self, response: BackendResponse) -> None:
"""Send response back to the webview client."""
serialized = json.dumps(
dataclass_to_dict(response),
indent=4,
ensure_ascii=False,
dataclass_to_dict(response), indent=4, ensure_ascii=False
)
log.debug(f"Sending response: {serialized}")
@@ -42,6 +40,7 @@ class WebviewBridge(ApiBridge):
arg: int,
) -> None:
"""Handle a call from webview's JavaScript bridge."""
try:
op_key = op_key_bytes.decode()
raw_args = json.loads(request_data.decode())
@@ -69,10 +68,7 @@ class WebviewBridge(ApiBridge):
# Create API request
api_request = BackendRequest(
method_name=method_name,
args=args,
header=header,
op_key=op_key,
method_name=method_name, args=args, header=header, op_key=op_key
)
except Exception as e:
@@ -81,9 +77,7 @@ class WebviewBridge(ApiBridge):
)
log.exception(msg)
self.send_api_error_response(
op_key,
str(e),
["webview_bridge", method_name],
op_key, str(e), ["webview_bridge", method_name]
)
return

View File

@@ -54,7 +54,8 @@ class Command:
@pytest.fixture
def command() -> Iterator[Command]:
"""Starts a background command. The process is automatically terminated in the end.
"""
Starts a background command. The process is automatically terminated in the end.
>>> p = command.run(["some", "daemon"])
>>> print(p.pid)
"""

View File

@@ -2,15 +2,12 @@ from __future__ import annotations
import logging
import subprocess
from typing import TYPE_CHECKING
from pathlib import Path
import pytest
from clan_lib.custom_logger import setup_logging
from clan_lib.nix import nix_shell
if TYPE_CHECKING:
from pathlib import Path
pytest_plugins = [
"temporary_dir",
"root",

View File

@@ -13,17 +13,23 @@ else:
@pytest.fixture(scope="session")
def project_root() -> Path:
"""Root directory the clan-cli"""
"""
Root directory the clan-cli
"""
return PROJECT_ROOT
@pytest.fixture(scope="session")
def test_root() -> Path:
"""Root directory of the tests"""
"""
Root directory of the tests
"""
return TEST_ROOT
@pytest.fixture(scope="session")
def clan_core() -> Path:
"""Directory of the clan-core flake"""
"""
Directory of the clan-core flake
"""
return CLAN_CORE

View File

@@ -24,11 +24,7 @@ def app() -> Generator[GtkProc]:
cmd = [sys.executable, "-m", "clan_app"]
print(f"Running: {cmd}")
rapp = Popen(
cmd,
text=True,
stdout=sys.stdout,
stderr=sys.stderr,
start_new_session=True,
cmd, text=True, stdout=sys.stdout, stderr=sys.stderr, start_new_session=True
)
yield GtkProc(rapp)
# Cleanup: Terminate your application

View File

@@ -2,5 +2,4 @@ app/api
app/.fonts
.vite
storybook-static
*.css.d.ts
storybook-static

File diff suppressed because it is too large

View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor"><path d="M9.223 38.777h8.444V43H5V30.333h4.223zM43 43h-4.223v-8.444h-8.444V43h-4.222V21.889H43zM30.333 30.333h8.444v-4.222h-8.444zM17.667 9.223H9.223v4.221h8.444v4.223H9.223v4.222h8.444v4.222H5V5h12.667zm4.222 12.666h-4.222v-4.222h4.222zM43 17.667h-4.223V9.223h-8.444V5H43zm-21.111-4.223h-4.222V9.223h4.222z"/></svg>


View File

@@ -1 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor"><path d="M27 38H6V17h4v-4h3.5V9h24v4H41v11H27v3h7v4h-3.5v3.5H27zM16.5 20.5H20V17h-3.5z"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="0 0 48 48" fill="currentColor">
<path d="M27 38H6V17H10V13H13.5V9H37.5V13H41V24H27V27H34V31H30.5V34.5H27V38ZM16.5 20.5H20V17H16.5V20.5Z"/>
</svg>


View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor"><path d="M46 46H2V2h44zM16.667 33.777h4.889V28.89h-4.889zm-4.89-4.888h4.89V24h-4.89zm9.779 0h4.888V24h-4.888zM26.444 24h4.889v-4.889h-4.889zm4.889-9.777v4.888h4.89v-4.888z"/></svg>


View File

@@ -1 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor"><path d="M36.888 11H41.3v4.413h-4.412zm-4.413 8.825v-4.412h4.413v4.412zm-4.413 4.413v-4.413h4.413v4.413zM23.65 28.65h4.413v-4.412H23.65zm-4.412 4.413h4.412V28.65h-4.412zm-4.413 0v4.412h4.413v-4.413zm-4.412-4.413h4.412v4.413h-4.412zm0 0H6v-4.412h4.413z"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor">
<path d="M36.888 11H41.3v4.413h-4.412zm-4.413 8.825v-4.412h4.413v4.412zm-4.413 4.413v-4.413h4.413v4.413zM23.65 28.65h4.413v-4.412H23.65zm-4.412 4.413h4.412V28.65h-4.412zm-4.413 0v4.412h4.413v-4.413zm-4.412-4.413h4.412v4.413h-4.412zm0 0H6v-4.412h4.413z"/>
</svg>


View File

@@ -1 +1,10 @@
<svg xmlns="http://www.w3.org/2000/svg" width="72" height="89" fill="currentColor"><g clip-path="url(#a)"><path d="M57.709 20.105H68.62c1.157 0 2.099-.94 2.099-2.095V9.632a2.1 2.1 0 0 0-2.099-2.095h-3.439c-1.111 0-2.014-.9-2.014-2.01V2.095A2.1 2.1 0 0 0 61.07 0H30.02a2.1 2.1 0 0 0-2.098 2.095v3.432c0 1.11-.903 2.01-2.014 2.01H22.47c-1.157 0-2.098.94-2.098 2.095v3.432c0 1.11-.903 2.01-2.014 2.01h-3.439c-1.157 0-2.099.94-2.099 2.094 0 0-.503-1.272-.503 22.493 0 21.247.503 19.38.503 19.38 0 1.156.942 2.096 2.1 2.096h3.438c1.111 0 2.014.9 2.014 2.01v3.517c0 1.109.902 2.01 2.013 2.01h3.524c1.111 0 2.014.9 2.014 2.01v3.432a2.1 2.1 0 0 0 2.098 2.094h30.211c1.157 0 2.099-.94 2.099-2.094v-3.433c0-1.11.902-2.01 2.013-2.01h5.557c1.158 0 2.099-.94 2.099-2.094v-9.984a2.1 2.1 0 0 0-2.099-2.095h-13.03c-1.157 0-2.098.94-2.098 2.095v5.044c0 1.11-.902 2.01-2.014 2.01H37.488c-1.111 0-2.013-.9-2.013-2.01v-5.11a2.1 2.1 0 0 0-2.099-2.094h-5.119c-1.111 0-1.739.163-2.014-2.01-.085-.698-.13-1.553-.196-2.695-.163-2.878-.307-1.723-.307-10.369 0-12.085.314-15.563.503-17.24.19-1.677.903-2.01 2.014-2.01h5.12c1.156 0 2.098-.94 2.098-2.094v-3.433c0-1.109.902-2.01 2.013-2.01h16.116c1.111 0 2.014.901 2.014 2.01v3.433c0 1.155.94 2.094 2.098 2.094zM18.626 73.757h-2.478a.87.87 0 0 1-.87-.868v-2.473c0-.96-.777-1.743-1.745-1.743H6.838c-.96 0-1.745.777-1.745 1.743v2.473a.87.87 0 0 1-.87.868H1.746c-.961 0-1.746.776-1.746 1.742v6.682c0 .96.778 1.742 1.746 1.742h2.477c.484 0 .87.392.87.868v2.473c0 .96.778 1.743 1.745 1.743h6.695c.961 0 1.746-.777 1.746-1.743v-2.473c0-.483.392-.868.87-.868h2.477c.961 0 1.746-.776 1.746-1.742v-6.682c0-.96-.778-1.742-1.746-1.742"/></g><defs><clipPath id="a"><path d="M0 0h72v89H0z"/></clipPath></defs></svg>
<svg xmlns="http://www.w3.org/2000/svg" width="72" height="89" fill="currentColor">
<g clip-path="url(#a)">
<path d="M57.709 20.105H68.62c1.157 0 2.099-.94 2.099-2.095V9.632a2.1 2.1 0 0 0-2.099-2.095h-3.439c-1.111 0-2.014-.9-2.014-2.01V2.095A2.1 2.1 0 0 0 61.07 0H30.02a2.1 2.1 0 0 0-2.098 2.095v3.432c0 1.11-.903 2.01-2.014 2.01H22.47c-1.157 0-2.098.94-2.098 2.095v3.432c0 1.11-.903 2.01-2.014 2.01h-3.439c-1.157 0-2.099.94-2.099 2.094 0 0-.503-1.272-.503 22.493 0 21.247.503 19.38.503 19.38 0 1.156.942 2.096 2.1 2.096h3.438c1.111 0 2.014.9 2.014 2.01v3.517c0 1.109.902 2.01 2.013 2.01h3.524c1.111 0 2.014.9 2.014 2.01v3.432a2.1 2.1 0 0 0 2.098 2.094h30.211c1.157 0 2.099-.94 2.099-2.094v-3.433c0-1.11.902-2.01 2.013-2.01h5.557c1.158 0 2.099-.94 2.099-2.094v-9.984a2.1 2.1 0 0 0-2.099-2.095h-13.03c-1.157 0-2.098.94-2.098 2.095v5.044c0 1.11-.902 2.01-2.014 2.01H37.488c-1.111 0-2.013-.9-2.013-2.01v-5.11a2.1 2.1 0 0 0-2.099-2.094h-5.119c-1.111 0-1.739.163-2.014-2.01-.085-.698-.13-1.553-.196-2.695-.163-2.878-.307-1.723-.307-10.369 0-12.085.314-15.563.503-17.24.19-1.677.903-2.01 2.014-2.01h5.12c1.156 0 2.098-.94 2.098-2.094v-3.433c0-1.109.902-2.01 2.013-2.01h16.116c1.111 0 2.014.901 2.014 2.01v3.433c0 1.155.94 2.094 2.098 2.094zM18.626 73.757h-2.478a.87.87 0 0 1-.87-.868v-2.473c0-.96-.777-1.743-1.745-1.743H6.838c-.96 0-1.745.777-1.745 1.743v2.473a.87.87 0 0 1-.87.868H1.746c-.961 0-1.746.776-1.746 1.742v6.682c0 .96.778 1.742 1.746 1.742h2.477c.484 0 .87.392.87.868v2.473c0 .96.778 1.743 1.745 1.743h6.695c.961 0 1.746-.777 1.746-1.743v-2.473c0-.483.392-.868.87-.868h2.477c.961 0 1.746-.776 1.746-1.742v-6.682c0-.96-.778-1.742-1.746-1.742"/>
</g>
<defs>
<clipPath id="a">
<path d="M0 0h72v89H0z"/>
</clipPath>
</defs>
</svg>


View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="223" height="89" fill="currentColor"><g clip-path="url(#a)"><path d="M55.503 18.696h10.104a1.946 1.946 0 0 0 1.943-1.948v-7.79c0-1.075-.87-1.947-1.943-1.947h-3.186a1.863 1.863 0 0 1-1.866-1.87V1.947C60.555.872 59.685 0 58.612 0h-27.98a1.946 1.946 0 0 0-1.944 1.947v3.194c0 1.036-.832 1.87-1.865 1.87h-3.187a1.946 1.946 0 0 0-1.943 1.947v3.194c0 1.036-.832 1.87-1.866 1.87h-3.186a1.946 1.946 0 0 0-1.943 1.947s-.467 1.153-.467 23.253c0 19.763.467 21.913.467 21.913 0 1.075.87 1.948 1.943 1.948h3.186c1.034 0 1.866.833 1.866 1.87v3.271c0 1.036.831 1.87 1.865 1.87h3.265c1.033 0 1.865.833 1.865 1.87v3.193c0 1.075.87 1.948 1.943 1.948h27.981a1.946 1.946 0 0 0 1.943-1.948v-3.194c0-1.036.832-1.87 1.866-1.87h5.145a1.946 1.946 0 0 0 1.943-1.947v-9.285c0-1.075-.87-1.948-1.943-1.948H55.503a1.946 1.946 0 0 0-1.943 1.948v4.69c0 1.035-.832 1.869-1.866 1.869H37.55a1.863 1.863 0 0 1-1.866-1.87v-4.752c0-1.075-.87-1.947-1.943-1.947H29c-1.034 0-1.609.148-1.865-1.87-.078-.646-.125-1.44-.18-2.508-.147-2.68-.287-5.5-.287-13.539 0-11.24.288-16.81.466-18.369.18-1.558.832-1.87 1.866-1.87h4.741a1.946 1.946 0 0 0 1.943-1.947v-3.193c0-1.037.832-1.87 1.866-1.87h14.145c1.034 0 1.866.833 1.866 1.87v3.193c0 1.075.87 1.948 1.943 1.948M20.247 74.822h-2.293a.814.814 0 0 1-.808-.81v-2.298c0-.896-.723-1.62-1.617-1.62H9.327c-.894 0-1.617.724-1.617 1.62v2.298c0 .444-.365.81-.808.81H4.609c-.894 0-1.617.725-1.617 1.62v6.217c0 .896.723 1.62 1.617 1.62h2.293c.443 0 .808.366.808.81v2.299c0 .895.723 1.62 1.617 1.62h6.202c.894 0 1.617-.725 1.617-1.62v-2.299c0-.444.365-.81.808-.81h2.293c.894 0 1.617-.724 1.617-1.62v-6.216c0-.896-.723-1.62-1.617-1.62M221.135 35.04h-1.71a1.863 1.863 0 0 1-1.866-1.87v-3.272c0-1.036-.831-1.87-1.865-1.87h-3.265a1.863 1.863 0 0 1-1.865-1.87v-3.271c0-1.036-.832-1.87-1.865-1.87h-20.971a1.863 1.863 0 0 0-1.865 1.87v3.965c0 .514-.42.935-.933.935h-3.559c-.513 0-.84-.32-.933-.935l-.622-3.918c-.148-1.099-.676-1.777-1.788-1.777l-3.653-.14h-2.052a3.736 3.736 0 0 0-3.73 3.74V61.68a3.736 3.736 0 0 1-3.731 3.739h-8.394a1.863 1.863 0 0 1-1.866-1.87V36.714c0-11.825-7.461-18.813-22.556-18.813-13.718 0-20.325 5.04-21.203 14.443-.109 1.153.552 1.815 1.702 1.815l7.757.569c1.143.1 1.594-.554 1.811-1.652.77-3.74 4.174-5.827 9.933-5.827 7.081 0 10.042 3.358 10.042 9.076v3.014c0 1.036-.831 1.87-1.865 1.87l-.342-.024h-9.715c-15.421 0-22.984 5.983-22.984 17.956 0 3.802.778 7.058 2.254 9.738h-.59c-1.765-1.27-2.457-2.236-3.055-2.93-.256-.295-.653-.537-1.345-.537h-1.717l-5.993.008h-3.264a3.736 3.736 0 0 1-3.731-3.74V1.769C89.74.654 89.072 0 87.969 0H79.55c-1.034 0-1.865.732-1.865 1.768l-.024 54.304v13.554c0 4.13 3.343 7.479 7.462 7.479h50.84c8.448-.429 8.604-3.42 9.436-4.542.645 3.56 1.865 4.347 4.71 4.518 8.137.117 18.343.032 18.49.024h4.975c4.119 0 6.684-3.35 6.684-7.479l.777-27.264c0-1.036.832-1.87 1.866-1.87h2.021a1.56 1.56 0 0 0 1.554-1.558v-3.583c0-1.036.832-1.87 1.866-1.87h11.868a3.37 3.37 0 0 1 3.366 3.373v3.249c0 1.075.87 1.947 1.943 1.947h4.119c.513 0 .933.42.933.935v32.25c0 1.036.831 1.87 1.865 1.87h6.84a3.736 3.736 0 0 0 3.731-3.74V36.91c0-1.036-.832-1.87-1.866-1.87zM142.64 54.225c0 8.927-6.132 14.715-15.335 14.715-6.606 0-9.793-2.953-9.793-8.748 0-6.442 3.832-9.636 11.62-9.636h13.508v3.669"/></g><defs><clipPath id="a"><path d="M0 0h223v89H0z"/></clipPath></defs></svg>


View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor"><path d="M35.667 7.667h4.666v4.666H45v23.334h-4.667v4.666h-4.666V45H12.333v-4.667H7.667v-4.666H3V12.333h4.667V7.667h4.666V3h23.334zM15 29.4V33h3.6v-3.6zm14.4 0V33H33v-3.6zm-10.8-3.6v3.6h3.6v-3.6zm7.2 0v3.6h3.6v-3.6zm-3.6-3.6v3.6h3.6v-3.6zm-3.6-3.6v3.6h3.6v-3.6zm7.2 0v3.6h3.6v-3.6zM15 15v3.6h3.6V15zm14.4 0v3.6H33V15z"/></svg>


View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor"><path d="M15.6 9h4.2v4.286h-4.2zM11.4 13.286h4.2v4.285h-4.2zM7.2 17.571h4.2v4.286H7.2zM3 21.857h4.2v4.286H3zM7.2 26.143h4.2v4.286H7.2zM11.4 30.429h4.2v4.285h-4.2zM15.6 34.714h4.2V39h-4.2zM32.4 9h-4.2v4.286h4.2zM36.6 13.286h-4.2v4.285h4.2zM40.8 17.571h-4.2v4.286h4.2zM45 21.857h-4.2v4.286H45zM40.8 26.143h-4.2v4.286h4.2z"/><path d="M36.6 30.429h-4.2v4.285h4.2zM32.4 34.714h-4.2V39h4.2z"/></svg>


View File

@@ -1 +1,25 @@
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor"><path d="M8 9h6v6H8zM14 9h6v6h-6zM20 9h6v6h-6zM14 15h6v6h-6zM26 21h6v6h-6zM26 15h6v6h-6zM20 27h6v6h-6zM20 21h6v6h-6zM20 15h6v6h-6zM8 3h6v6H8zM14 3h6v6h-6zM32 21h6v6h-6zM8 15h6v6H8zM14 21h6v6h-6zM8 21h6v6H8zM8 27h6v6H8zM8 33h6v6H8zM8 39h6v6H8zM14 27h6v6h-6zM26 27h6v6h-6zM32 27h6v6h-6z"/><path d="M37 27h6v6h-6zM14 33h6v6h-6z"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" width="35" height="42" viewBox="0 0 35 42" fill="currentColor">
<rect y="6" width="6" height="6"/>
<rect x="6" y="6" width="6" height="6"/>
<rect x="12" y="6" width="6" height="6"/>
<rect x="6" y="12" width="6" height="6"/>
<rect x="18" y="18" width="6" height="6"/>
<rect x="18" y="12" width="6" height="6"/>
<rect x="12" y="24" width="6" height="6"/>
<rect x="12" y="18" width="6" height="6"/>
<rect x="12" y="12" width="6" height="6"/>
<rect width="6" height="6"/>
<rect x="6" width="6" height="6"/>
<rect x="24" y="18" width="6" height="6"/>
<rect y="12" width="6" height="6"/>
<rect x="6" y="18" width="6" height="6"/>
<rect y="18" width="6" height="6"/>
<rect y="24" width="6" height="6"/>
<rect y="30" width="6" height="6"/>
<rect y="36" width="6" height="6"/>
<rect x="6" y="24" width="6" height="6"/>
<rect x="18" y="24" width="6" height="6"/>
<rect x="24" y="24" width="6" height="6"/>
<rect x="29" y="24" width="6" height="6"/>
<rect x="6" y="30" width="6" height="6"/>
</svg>


View File

@@ -1 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor"><path fill-rule="evenodd" d="M11.7 9h8.1v5.625H42v3.75H19.8V24h-8.1v-5.625H5v-3.75h6.7zm15.5 15h8.1v5.625H42v3.75h-6.7V39h-8.1v-5.625H5v-3.75h22.2z" clip-rule="evenodd"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor"><path fill-rule="evenodd" d="M8.7 9h11.1v5.625H42v3.75H19.8V24H8.7v-5.625H5v-3.75h3.7zm3.7 3.75v7.5h3.7v-7.5zM27.2 24h11.1v5.625H42v3.75h-3.7V39H27.2v-5.625H5v-3.75h22.2zm3.7 3.75v7.5h3.7v-7.5z" clip-rule="evenodd"/></svg>


View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor"><path d="M34.25 9.3H45v34.4H2.001v-4.3H2V13.6h.001V9.3H12.75V5h21.5zM19.201 30.8v4.3h8.6v-4.3zm-4.3-4.3v4.3h4.3v-4.3zm12.9 0v4.3h4.3v-4.3zm-12.9-8.6v4.3h4.3v-4.3zm12.9 0v4.3h4.3v-4.3z"/></svg>


View File

@@ -1 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor"><path d="M20.2 12.8H23v2.8h2.8v-1.4h2.8V10h5.6v2.8H37v2.8h2.8V24H37v2.8h-2.8v2.8h-2.8v2.8h-2.8v2.8h-2.8V38H23v-2.8h-2.8v-2.8h-2.8v-2.8h-2.8v-2.8h-2.8V24H9v-8.4h2.8v-2.8h2.8V10h5.6z"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="0 0 48 48" fill="currentColor">
<path d="M20.2002 12.7998H23V15.5996H25.8008V14.2002H28.6006V10H34.2002V12.7998H37V15.5996H39.8008V24H37V26.7998H34.2002V29.5996H31.4004V32.4004H28.6006V35.2002H25.8008V38H23V35.2002H20.2002V32.4004H17.4004V29.5996H14.6006V26.7998H11.8008V24H9V15.5996H11.8008V12.7998H14.6006V10H20.2002V12.7998Z"/>
</svg>


View File

@@ -1 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor"><path d="M38.666 5v34.667h.001V5H43v39H4V5zm-26 30.334h4.333V31h-4.333zm17.333 0h4.334V31h-4.334zm-8.666-8.667h4.333v-4.333h-4.333zM12.666 18h4.333v-4.333h-4.333zm17.333 0h4.334v-4.333h-4.334z"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="0 0 48 48" fill="currentColor">
<path d="M38.666 5V39.667H38.667V5H43V44H4V5H38.666ZM12.666 35.334H16.999V31H12.666V35.334ZM29.999 35.334H34.333V31H29.999V35.334ZM21.333 26.667H25.666V22.334H21.333V26.667ZM12.666 18H16.999V13.667H12.666V18ZM29.999 18H34.333V13.667H29.999V18Z"/>
</svg>


View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor"><path d="M7.667 45H3v-4.667h4.667zM17 45h-4.667v-4.667H17zm9.333 0h-4.666v-4.667h4.666zm9.334 0H31v-4.667h4.667zM45 45h-4.667v-4.667H45zM7.667 35.667H3V31h4.667zm37.333 0h-4.667V31H45zM7.667 26.333H3v-4.666h4.667zm37.333 0h-4.667V7.667H21.667V3H45zM7.667 17H3v-4.667h4.667zm0-9.333H3V3h4.667zm9.333 0h-4.667V3H17z"/></svg>


View File

@@ -1 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor"><path d="M38 42H10v-4H6V10h4V6h28v4h4v28h-4zM18 32h12v-4H18zm-4-4h4v-4h-4zm16 0h4v-4h-4zm-14-8h4v-4h-4zm12 0h4v-4h-4z"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="0 0 48 48" fill="currentColor">
<path d="M38 42H10V38H6V10H10V6H38V10H42V38H38V42ZM18 32H30V28H18V32ZM14 28H18V24H14V28ZM30 28H34V24H30V28ZM16 20H20V16H16V20ZM28 20H32V16H28V20Z"/>
</svg>


View File

@@ -1 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor"><path d="M42 42H14v-4h24V14h4zM34 6v28H6V6zM18 18h-4v4h4v4h4v-4h4v-4h-4v-4h-4z"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="0 0 48 48" fill="currentColor">
<path d="M42 42H14V38H38V14H42V42ZM34 6V34H6V6H34ZM18 18H14V22H18V26H22V22H26V18H22V14H18V18Z"/>
</svg>


View File

@@ -1 +1,13 @@
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor"><path d="M39.247 38H8.753v-4.148h30.494zM9.223 22.923H5v-4.308h4.223zm8.444 0h-4.223v-4.308h4.223zm16.889 0h-4.223v-4.308h4.223zm8.444 0h-4.223v-4.308H43zm-29.556-4.308H9.223v-4.307h4.221zm25.333 0h-4.221v-4.307h4.221zM9.223 14.308H5V10h4.223zm8.444 0h-4.223V10h4.223zm16.889 0h-4.223V10h4.223zm8.444 0h-4.223V10H43z"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" width="38" height="27" viewBox="0 0 38 27" fill="currentColor">
<rect x="4.46155" y="4.15381" width="4.15385" height="4.15385"/>
<rect x="29.3846" y="4.15381" width="4.15385" height="4.15385"/>
<rect x="8.61539" width="4.15385" height="4.15385"/>
<rect x="33.5385" width="4.15385" height="4.15385"/>
<rect x="0.307678" width="4.15385" height="4.15385"/>
<rect x="25.2308" width="4.15385" height="4.15385"/>
<rect x="0.307678" y="8.30762" width="4.15385" height="4.15385"/>
<rect x="25.2308" y="8.30762" width="4.15385" height="4.15385"/>
<rect x="8.61539" y="8.30762" width="4.15385" height="4.15385"/>
<rect x="33.5385" y="8.30762" width="4.15385" height="4.15385"/>
<rect x="4" y="23" width="30" height="4"/>
</svg>


View File

@@ -1 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor"><path d="M39.2 39.2H43V43h-3.8v-3.798h-3.8v-3.8h3.8zM27.8 8.8h3.8v22.802h-3.8V35.4H12.6V12.602h7.6V8.8h-7.6V5h15.2zm7.6 26.6h-3.8v-3.8h3.8zM12.6 12.6H8.8v7.6h3.8v11.402H8.8V27.8H5V12.6h3.8V8.8h3.8zm22.8 15.2h-3.8V12.6h3.8z"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="0 0 48 48" fill="currentColor">
<path d="M39.2002 39.2002H43V43H39.2002V39.2021H35.3994V35.4014H39.2002V39.2002ZM27.7998 8.80078H31.5996V31.6016H27.7998V35.4004H12.6006V12.6016H20.2002V8.80078H12.6006V5H27.7998V8.80078ZM35.4004 35.4004H31.6006V31.6006H35.4004V35.4004ZM12.5996 12.5996H8.7998V20.2002H12.5996V31.6016H8.7998V27.8008H5V12.5996H8.7998V8.80078H12.5996V12.5996ZM35.4004 27.8008H31.6006V12.5996H35.4004V27.8008Z"/>
</svg>


View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor"><path d="M43 43H5V20.2h38zm-3.8-26.6H8.8v-3.8h30.4zm-3.8-7.6H12.6V5h22.8z"/></svg>


Some files were not shown because too many files have changed in this diff