Merge pull request 'chore(lib/select): move into subfolder with a test file' (#3175) from hsjobeki/clan-core:lib-cleanup into main

Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/3175
hsjobeki
2025-04-04 12:32:32 +00:00
19 changed files with 558 additions and 54 deletions

View File

@@ -56,7 +56,9 @@ nav:
- Vars Backend: manual/vars-backend.md
- Facts Backend: manual/secrets.md
- Autoincludes: manual/adding-machines.md
- Inventory: manual/inventory.md
- Inventory:
- Inventory: manual/inventory.md
- Services: manual/distributed-services.md
- Secure Boot: manual/secure-boot.md
- Flake-parts: manual/flake-parts.md
- Authoring:

View File

@@ -0,0 +1,282 @@
# Services
First of all, let us clarify what we mean by the term *distributed service*.
!!! Note
    Currently there are two ways of using such services:

    1. via `inventory.services`, which **will be deprecated**
    2. via `inventory.instances`, which **will become the new `inventory.services` once everyone has migrated**
## What is considered a service?
A **distributed service** is a system where multiple machines work together to provide a certain functionality, abstracting complexity and allowing for declarative configuration and management.
A VPN service in a closed mesh network is a good example of a distributed service — each machine needs to know the addresses and cryptographic keys of the other machines in advance to establish secure, direct connections, enabling private and encrypted communication without relying on a central server.
The term **Multi-host-service-abstractions** was introduced previously in the [nixus repository](https://github.com/infinisil/nixus) and represents a similar concept.
## How to use such a Service in Clan?
In Clan, anyone can provide services via modules. Those modules must comply with a certain [specification](#service-module-specification), which is discussed later.
To use a service you need to create an instance of it via the `clan.inventory.instances` attribute. The source of the module is specified as a simple string:
```nix
{
  inventory = {
    instances = {
      "my-vpn" = {
        # service source
        module.name = "zerotier";
        # ...
      };
    };
  };
}
```
After specifying the *service source* for an instance, the next step is to configure the service.
Services operate on a strict *role-based membership model*, meaning machines are added by assigning them specific *roles*.
The following example shows a *zerotier service* which consists of a `controller` and some `peer` machines.
```nix
{
  inventory = {
    instances = {
      "my-vpn" = {
        # service source
        module.name = "zerotier";
        roles.peer.machines = {
          # Right side needs to be an attribute set. Its purpose will become clear later
          "my-machine-name" = {};
        };
        roles.controller.machines = {
          "some-server-name" = {};
        };
      };
    };
  };
}
```
The next step is optional for some services: it might be desired to pass some service-specific settings, either to affect all machines of a given role, or to affect one very specific machine.
For example:
In ZeroTier, `roles.peer.settings` could specify the allowed IP ranges, and `roles.controller.settings` could define how to manage dynamic IP assignments for devices that are not statically configured.
```nix
{
  inventory = {
    instances = {
      "my-vpn" = {
        # service source
        module.name = "zerotier";
        roles.peer.machines = {
          # Right side needs to be an attribute set. Its purpose will become clear later
          "my-machine-name" = {};
        };
        roles.peer.settings = {
          # Allow all ranges
          ipRanges = [ "all" ];
        };
        roles.controller.machines = {
          "some-server-name" = {
            settings = {
              # Enable the dynamic IP controller feature on this machine only
              dynamicIp.enable = true;
            };
          };
        };
      };
    };
  };
}
```
Following all the steps described above results in consistent machine configurations that can be *installed* or *updated* via the [Clan CLI](../reference/cli/index.md).
### Using `clan.modules` from other people (optional)
The following example shows how to use remote modules and configure them for use in your clan.
!!! Note
    Typically you would just use the `import` builtin, but we wanted to provide a JSON-compatible interface to allow for external API integrations.
```nix title="flake.nix"
{
  inputs = {
    # ...
    libstd.url = "github:libfoo/libfoo";
    # ...
  };
  outputs =
    inputs:
    inputs.flake-parts.lib.mkFlake { inherit inputs; } ({
      imports = [ inputs.clan-core.flakeModules.default ];
      clan = {
        inventory.instances = {
          "my-foo" = {
            # Imports clan.modules."mod-A" from inputs.libstd
            module.input = "libstd";
            module.name = "mod-A";
          };
        };
      };
    });
}
```
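On the provider side, `module.input = "libstd"` together with `module.name = "mod-A"` is resolved against the `clan.modules` output of that flake input. A sketch of what such an upstream flake might expose (the names `libfoo` and `mod-A` are illustrative):
```nix title="flake.nix of libfoo (sketch)"
{
  outputs = _inputs: {
    # Downstream clans reference this attribute via 'module.input' / 'module.name'
    clan.modules."mod-A" = {
      _class = "clan.service";
      manifest.name = "mod-A";
      # roles, perInstance, perMachine, ...
    };
  };
}
```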
## Service Module Specification
This section explains how to author a clan service module, as decided in [01-clan-service-modules](https://git.clan.lol/clan/clan-core/src/branch/main/decisions/01-ClanModules.md).
!!! Warning
    The described modules are fundamentally different from the existing [clanModules](../clanmodules/index.md).
    Most of the clanModules will be migrated into the described format. We actively welcome contributions here.
### Minimal module
!!! Tip
    Unlike before, modules can now be inlined; there is no file-system structure needed anymore.

First of all, we need to hook our module into the `inventory.modules` attribute. Make sure to choose a unique name so the module doesn't collide with any of the core modules.
While not required, we recommend prefixing your module attribute name, e.g. `@hsjobeki/customNetworking`.
If you export the module from your flake, other people will be able to import it and use it within their clan.
```nix title="flake.nix"
{
  # ...
  outputs =
    inputs:
    inputs.flake-parts.lib.mkFlake { inherit inputs; } ({
      imports = [ inputs.clan-core.flakeModules.default ];
      # ...
      clan = {
        inventory = {
          # We could also inline the complete module spec here
          # For example:
          # {...}: { _class = "clan.service"; ... };
          modules."@hsjobeki/customNetworking" = import ./service-modules/networking.nix;
        };
        # If needed: export the module for other people
        modules."@hsjobeki/customNetworking" = import ./service-modules/networking.nix;
      };
    });
}
```
The imported module file must fulfill at least the following requirements:
- It must be an actual module, i.e. either an attribute set or a function that returns an attribute set.
- It must set `_class = "clan.service"`.
- It must set `manifest.name = "<name of the provided service>"`.
```nix title="/service-modules/networking.nix"
{
  _class = "clan.service";
  manifest.name = "zerotier-networking";
  # ...
}
```
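As the tip above mentions, the same module can also be inlined directly instead of living in its own file. A minimal inlined sketch:
```nix title="flake.nix (inlined variant)"
{
  clan.inventory.modules."@hsjobeki/customNetworking" = {
    _class = "clan.service";
    manifest.name = "zerotier-networking";
    # roles, perInstance, perMachine, ...
  };
}
```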
### Adding functionality to the module
While the minimal module above is valid in itself, it has no way of adding any machines, because it doesn't specify any roles.
The next logical step is to think about the interactions between the machines and define *roles* for them.
Here is a short guide with some conventions:

- If the machines all have the same relation to each other, `peer` is commonly used; `peers` can often talk to each other directly.
- Often machines don't have a direct relation to each other and there is one elevated machine in the middle, classically known as `client-server`; `clients` are less likely to talk directly to each other than `peers`.
- If your machines don't have any relation or interaction with each other, you should reconsider whether the desired functionality is really a multi-host service.
```nix title="/service-modules/networking.nix"
{
  _class = "clan.service";
  manifest.name = "zerotier-networking";
  # Define what roles exist
  roles.peer = { };
  roles.controller = { };
  # ...
}
```
Next we need to define the settings and the behavior of these distinct roles.
```nix title="/service-modules/networking.nix"
{
  _class = "clan.service";
  manifest.name = "zerotier-networking";
  # Define what roles exist
  roles.peer = {
    interface =
      { lib, ... }:
      {
        # These options can be set via 'roles.peer.settings'
        options.ipRanges = lib.mkOption { type = lib.types.listOf lib.types.str; };
      };
    # Maps over all instances and produces one result per instance.
    perInstance =
      { instanceName, settings, machine, roles, ... }:
      {
        # Analogous to 'perSystem' of flake-parts.
        # For every instance of this service we add a nixosModule to each peer machine
        nixosModule =
          { config, ... }:
          {
            # Examples of what you could do here:
            # - Get some settings of this machine
            #   settings.ipRanges
            #
            # - Get all controller names:
            #   allControllerNames = lib.attrNames roles.controller.machines
            #
            # - Get all roles of the machine:
            #   machine.roles
            #
            # - Get the settings that were applied to a specific controller machine:
            #   roles.controller.machines.jon.settings
            #
            # Add one systemd service for every instance
            systemd.services."zerotier-client-${instanceName}" = {
              # ... may depend on 'config' and the 'perInstance' arguments
            };
          };
      };
  };
  roles.controller = {
    interface =
      { lib, ... }:
      {
        # These options can be set via 'roles.controller.settings'
        options.dynamicIp.enable = lib.mkOption { type = lib.types.bool; };
      };
    perInstance = { ... }: { };
  };
  # Maps over all machines and produces one result per machine.
  perMachine =
    { instances, machine, ... }:
    {
      # Analogous to 'perSystem' of flake-parts.
      # For every machine of this service we add exactly one nixosModule to that machine
      nixosModule =
        { config, ... }:
        {
          # Examples of what you could do here:
          # - Get the name of this machine
          #   machine.name
          #
          # - Get all roles of this machine across all instances:
          #   machine.roles
          #
          # - Get the settings of a specific instance of a specific machine
          #   instances.foo.roles.peer.machines.jon.settings
          #
          # Globally enable something
          networking.enable = true;
        };
    };
  # ...
}
```
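Tying this back to the consumer side from the beginning of this document: the options declared under `roles.<role>.interface` are exactly what users set via `roles.<role>.settings` (or per machine) in their inventory instance. A consumer sketch for the module above, assuming it was registered under the attribute name `@hsjobeki/customNetworking`:
```nix
{
  inventory.instances."my-network" = {
    # Refers to the attribute name chosen in 'inventory.modules'
    module.name = "@hsjobeki/customNetworking";
    roles.peer.machines."my-machine-name" = { };
    # Becomes 'settings.ipRanges' inside 'perInstance' of the peer role
    roles.peer.settings.ipRanges = [ "all" ];
    roles.controller.machines."some-server-name" = {
      # Becomes 'roles.controller.machines."some-server-name".settings'
      settings.dynamicIp.enable = true;
    };
  };
}
```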

View File

@@ -72,6 +72,14 @@ in
'';
};
modules = lib.mkOption {
type = types.attrsOf types.raw;
default = { };
description = ''
An attribute set of exported modules.
'';
};
templates = lib.mkOption {
type = types.submodule { imports = [ ./templates/interface.nix ]; };
default = { };

View File

@@ -205,7 +205,7 @@ in
# TODO: unify this interface
# We should have only clan.modules. (consistent with clan.templates)
inherit (clan-core) clanModules clanLib;
modules = clan-core.clanModules;
modules = config.modules;
inherit inventoryFile;
inventoryValuesPrios =

View File

@@ -8,6 +8,7 @@
# flake.clan.{name} <- clanInternals.{name}
clan = [
"templates"
"modules"
];
# flake.{name} <- clan.{name}
topLevel = [

View File

@@ -29,6 +29,6 @@ lib.fix (clanLib: {
modules = clanLib.callLib ./inventory/frontmatter { };
values = import ./introspection { inherit lib; };
jsonschema = import ./jsonschema { inherit lib; };
select = import ./select.nix;
select = import select/default.nix;
facts = import ./facts.nix { inherit lib; };
})

View File

@@ -8,9 +8,10 @@ rec {
# TODO: automatically generate this from the directory conventions
imports = [
./build-clan/flake-module.nix
./introspection/flake-module.nix
./inventory/flake-module.nix
./jsonschema/flake-module.nix
./introspection/flake-module.nix
./select/flake-module.nix
];
flake.clanLib = import ./default.nix {
inherit lib inputs self;

View File

@@ -33,6 +33,9 @@ let
attrName: prioSet:
let
# Evaluate the submodule
# Remove once: https://github.com/NixOS/nixpkgs/pull/391544 lands
# This is currently a workaround to get the submodule options
# It also has a certain loss of information, on nested attrsOf, which is rare, but not ideal.
options = filteredSubOptions;
modules = (
[

View File

@@ -167,6 +167,7 @@ in
};
# TODO(@hsjobeki): Cover this edge case
# Blocked by: https://github.com/NixOS/nixpkgs/pull/390952 check back once that is merged
# test_freeform =
# let
# evaluated = (

View File

@@ -39,4 +39,45 @@
acc ++ tagMembers
) [ ] members.tags or [ ]);
};
/**
Checks whether a module has a specific class
# Arguments
- `module` The module to check.
# Returns
- `string` | null: The specified class, or null if the class is not set
# Throws
- If the module is not a valid module
- If the module has a type that is not supported
*/
getModuleClass =
module:
let
loadModuleForClassCheck =
m:
# Logic path adapted from nixpkgs/lib/modules.nix
if lib.isFunction m then
let
args = lib.functionArgs m;
in
m args
else if lib.isAttrs m then
# module doesn't have a _type attribute
if m._type or "module" == "module" then
m
# module has a _type set but it is not "module"
else if m._type == "if" || m._type == "override" then
throw "Module modifiers are not supported yet. Got: ${m._type}"
else
throw "Unsupported module type ${lib.typeOf m}"
else if lib.isList m then
throw "Invalid or unsupported module type ${lib.typeOf m}"
else
import m;
loaded = loadModuleForClassCheck module;
in
if loaded ? _class then loaded._class else null;
}
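Based on the docstring and branches above, a small usage sketch of `getModuleClass` (hypothetical module values, assuming it is exposed as `clanLib.inventory.getModuleClass` as used in the next file):
```nix
{
  # An attribute-set module carrying a '_class' returns that class
  classOfService = clanLib.inventory.getModuleClass {
    _class = "clan.service";
    manifest.name = "example";
  }; # => "clan.service"

  # A plain module without '_class' returns null
  classOfPlainModule = clanLib.inventory.getModuleClass {
    manifest.name = "example";
  }; # => null
}
```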

View File

@@ -63,6 +63,7 @@ let
resolvedModule =
resolvedModuleSet.${instance.module.name}
or (throw "flake doesn't provide clan-module with name ${instance.module.name}");
moduleClass = clanLib.inventory.getModuleClass resolvedModule;
# Every instance includes machines via roles
# :: { client :: ... }
@@ -86,13 +87,13 @@ let
machineName:
let
machineSettings = instance.roles.${roleName}.machines.${machineName}.settings or { };
# TODO: tag settings
# Wait for this feature until option introspection for 'settings' is done.
# This might get too complex to handle otherwise.
# settingsViaTags = lib.filterAttrs (
# tagName: _: machineHasTag machineName tagName
# ) instance.roles.${roleName}.tags;
in
# TODO: tag settings
# Wait for this feature until option introspection for 'settings' is done.
# This might get too complex to handle otherwise.
# settingsViaTags = lib.filterAttrs (
# tagName: _: machineHasTag machineName tagName
# ) instance.roles.${roleName}.tags;
{
# TODO: Do we want to wrap settings with
# setDefaultModuleLocation "inventory.instances.${instanceName}.roles.${roleName}.tags.${tagName}";
@@ -112,20 +113,29 @@ let
in
{
inherit (instance) module;
inherit resolvedModule instanceRoles;
inherit resolvedModule instanceRoles moduleClass;
}
) inventory.instances;
# TODO: Eagerly check the _class of the resolved module
importedModulesEvaluated = lib.mapAttrs (
_module_ident: instances:
let
matchedClass = "clan.service";
instance = (builtins.head instances).instance;
classCheckedModule =
if instance.moduleClass == matchedClass then
instance.resolvedModule
else
(throw ''Module '${instance.module.name}' is not a valid '${matchedClass}' module. Got module with class:${builtins.toJSON instance.moduleClass}'');
in
(lib.evalModules {
class = "clan.service";
class = matchedClass;
modules =
[
./service-module.nix
# Import the resolved module
(builtins.head instances).instance.resolvedModule
classCheckedModule
]
# Include all the instances that correlate to the resolved module
++ (builtins.map (v: {

View File

@@ -317,20 +317,24 @@ in
*/
v: instanceName: machineName:
(lib.evalModules {
specialArgs = {
inherit instanceName;
machine = {
name = machineName;
specialArgs =
let
roles = applySettings instanceName config.instances.${instanceName};
in
{
inherit instanceName roles;
machine = {
name = machineName;
roles = lib.attrNames (lib.filterAttrs (_n: v: v.machines ? ${machineName}) roles);
};
settings = (
makeExtensibleConfig evalMachineSettings {
inherit roleName instanceName machineName;
settings =
config.instances.${instanceName}.roles.${roleName}.machines.${machineName}.settings or { };
}
);
};
settings = (
makeExtensibleConfig evalMachineSettings {
inherit roleName instanceName machineName;
settings =
config.instances.${instanceName}.roles.${roleName}.machines.${machineName}.settings or { };
}
);
};
modules = [ v ];
}).config;
};

View File

@@ -22,6 +22,15 @@ let
}).config;
flakeInputsFixture = {
# Example upstream module
upstream.clan.modules = {
uzzi = {
_class = "clan.service";
manifest = {
name = "uzzi-from-upstream";
};
};
};
};
callInventoryAdapter =
@@ -32,6 +41,7 @@ let
};
in
{
resolve_module_spec = import ./import_module_spec.nix { inherit lib callInventoryAdapter; };
test_simple =
let
res = callInventoryAdapter {
@@ -57,6 +67,7 @@ in
# We might change the attribute name in the future
expr = res.importedModulesEvaluated ? "self-simple-module";
expected = true;
inherit res;
};
# A module can be imported multiple times

View File

@@ -0,0 +1,65 @@
{ callInventoryAdapter, ... }:
let
# Authored module
# A minimal module looks like this
# It isn't exactly doing anything but it's a valid module that produces an output
modules."A" = {
_class = "clan.service";
manifest = {
name = "network";
};
};
modules."B" =
{ ... }:
{
options.stuff = "legacy-clan-service";
};
machines = {
jon = { };
sara = { };
};
resolve =
spec:
callInventoryAdapter {
inherit modules machines;
instances."instance_foo" = {
module = spec;
};
};
in
{
test_import_local_module_by_name = {
expr = (resolve { name = "A"; }).importedModuleWithInstances.instance_foo.resolvedModule;
expected = {
_class = "clan.service";
manifest = {
name = "network";
};
};
};
test_import_remote_module_by_name = {
expr =
(resolve {
name = "uzzi";
input = "upstream";
}).importedModuleWithInstances.instance_foo.resolvedModule;
expected = {
_class = "clan.service";
manifest = {
name = "uzzi-from-upstream";
};
};
};
# Currently this should fail
# TODO: Can we implement a default wrapper to make migration easy?
test_import_local_legacy_module = {
expr = (resolve { name = "B"; }).allMachines;
expectedError = {
type = "ThrownError";
msg = "Module 'B' is not a valid 'clan.service' module.*";
};
};
}

View File

@@ -10,6 +10,7 @@ let
};
# Define two roles with unmergeable interfaces
# Both define some 'timeout' but with completely different types.
roles.controller = { };
roles.peer.interface =
{ lib, ... }:
{
@@ -23,6 +24,7 @@ let
instanceName,
settings,
machine,
roles,
...
}:
let
@@ -35,7 +37,12 @@ let
in
{
nixosModule = {
inherit instanceName settings machine;
inherit
instanceName
settings
machine
roles
;
# We are double vendoring the settings
# To test that we can do it indefinitely
@@ -64,6 +71,7 @@ let
roles.peer = {
settings.timeout = "foo-peer";
};
roles.controller.machines.jon = { };
};
instances."instance_bar" = {
module = {
@@ -73,6 +81,8 @@ let
settings.timeout = "bar-peer-jon";
};
};
# TODO: move this into a separate test.
# Separate out the check that this module is never imported
# import the module "B" (undefined)
# All machines have this instance
instances."instance_zaza" = {
@@ -108,17 +118,9 @@ in
# roles = peer
# machines = jon
settings = filterInternals res.importedModulesEvaluated.self-A.config.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.nixosModule.settings;
machine = mapInternalsRecursive res.importedModulesEvaluated.self-A.config.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.nixosModule.machine;
# hasRoleSettings =
# res.importedModulesEvaluated.self-A.config.result.allMachines.jon.nixosModule.instance_foo.roles.peer ? settings;
# # settings are specific.
# # Below we access:
# # instance = instance_foo
# # roles = peer
# # machines = *
# specificRoleSettings = filterInternals res.importedModulesEvaluated.self-A.config.result.allMachines.jon.nixosModule.instance_foo.roles.peer.settings;
machine =
res.importedModulesEvaluated.self-A.config.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.nixosModule.machine;
roles = mapInternalsRecursive res.importedModulesEvaluated.self-A.config.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.nixosModule.roles;
};
expected = {
instanceName = "instance_foo";
@@ -127,21 +129,37 @@ in
};
machine = {
name = "jon";
roles = {
peer = {
machines = {
jon = {
settings = {
__functor = "__functor";
timeout = "foo-peer-jon";
};
roles = [
"controller"
"peer"
];
};
roles = {
controller = {
machines = {
jon = {
settings = {
__functor = "__functor";
};
};
settings = {
__functor = "__functor";
timeout = "foo-peer";
};
settings = {
__functor = "__functor";
};
};
peer = {
machines = {
jon = {
settings = {
__functor = "__functor";
timeout = "foo-peer-jon";
};
};
};
settings = {
__functor = "__functor";
timeout = "foo-peer";
};
};
};
};

View File

@@ -26,9 +26,11 @@ let
};
perMachine =
{ instances, ... }:
{ instances, machine, ... }:
{
nixosModule = instances;
nixosModule = {
inherit instances machine;
};
};
};
machines = {
@@ -71,9 +73,10 @@ in
# settings should evaluate
test_per_machine_receives_instance_settings = {
inherit res;
expr = {
hasMachineSettings =
res.importedModulesEvaluated.self-A.config.result.allMachines.jon.nixosModule.instance_foo.roles.peer.machines.jon
res.importedModulesEvaluated.self-A.config.result.allMachines.jon.nixosModule.instances.instance_foo.roles.peer.machines.jon
? settings;
# settings are specific.
@@ -81,10 +84,10 @@ in
# instance = instance_foo
# roles = peer
# machines = jon
specificMachineSettings = filterInternals res.importedModulesEvaluated.self-A.config.result.allMachines.jon.nixosModule.instance_foo.roles.peer.machines.jon.settings;
specificMachineSettings = filterInternals res.importedModulesEvaluated.self-A.config.result.allMachines.jon.nixosModule.instances.instance_foo.roles.peer.machines.jon.settings;
hasRoleSettings =
res.importedModulesEvaluated.self-A.config.result.allMachines.jon.nixosModule.instance_foo.roles.peer
res.importedModulesEvaluated.self-A.config.result.allMachines.jon.nixosModule.instances.instance_foo.roles.peer
? settings;
# settings are specific.
@@ -92,7 +95,7 @@ in
# instance = instance_foo
# roles = peer
# machines = *
specificRoleSettings = filterInternals res.importedModulesEvaluated.self-A.config.result.allMachines.jon.nixosModule.instance_foo.roles.peer.settings;
specificRoleSettings = filterInternals res.importedModulesEvaluated.self-A.config.result.allMachines.jon.nixosModule.instances.instance_foo.roles.peer.settings;
};
expected = {
hasMachineSettings = true;

View File

@@ -0,0 +1,44 @@
{ self, inputs, ... }:
let
inputOverrides = builtins.concatStringsSep " " (
builtins.map (input: " --override-input ${input} ${inputs.${input}}") (builtins.attrNames inputs)
);
in
{
perSystem =
{
pkgs,
lib,
system,
...
}:
{
# Run: nix-unit --extra-experimental-features flakes --flake .#legacyPackages.x86_64-linux.evalTests-select
legacyPackages.evalTests-select = import ./tests.nix {
inherit lib;
inherit (self) clanLib;
};
checks = {
lib-select-eval = pkgs.runCommand "tests" { nativeBuildInputs = [ pkgs.nix-unit ]; } ''
export HOME="$(realpath .)"
export NIX_ABORT_ON_WARN=1
nix-unit --eval-store "$HOME" \
--extra-experimental-features flakes \
${inputOverrides} \
--flake ${
self.filter {
include = [
"flakeModules"
"lib"
"clanModules/flake-module.nix"
"clanModules/borgbackup"
];
}
}#legacyPackages.${system}.evalTests-select
touch $out
'';
};
};
}

lib/select/tests.nix
View File

@@ -0,0 +1,10 @@
{ clanLib, ... }:
let
inherit (clanLib) select;
in
{
test_simple_1 = {
expr = select "a" { a = 1; };
expected = 1;
};
}
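The new test file covers only a flat, single-attribute lookup so far. Assuming `select` also accepts dotted paths that descend into nested attribute sets (an assumption about the selector syntax, not verified here), an additional case might look like this:
```nix
{
  # Assumed behavior: "a.b" descends into nested attribute sets
  test_nested_path = {
    expr = select "a.b" { a.b = 2; };
    expected = 2;
  };
}
```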