Compare commits

..

1 Commits

Author SHA1 Message Date
pinpox
99270051cd Add prometheus role to monitoring 2025-08-15 11:22:15 +02:00
80 changed files with 3340 additions and 325 deletions

1
.gitignore vendored
View File

@@ -39,6 +39,7 @@ select
# Generated files
pkgs/clan-app/ui/api/API.json
pkgs/clan-app/ui/api/API.ts
pkgs/clan-app/ui/api/Inventory.ts
pkgs/clan-app/ui/api/modules_schemas.json
pkgs/clan-app/ui/api/schema.json
pkgs/clan-app/ui/.fonts

View File

@@ -139,6 +139,33 @@ in
nixosTests
// flakeOutputs
// {
# TODO: Automatically provide this check to downstream users to check their modules
clan-modules-json-compatible =
let
allSchemas = lib.mapAttrs (
_n: m:
let
schema =
(self.clanLib.evalService {
modules = [ m ];
prefix = [
"checks"
system
];
}).config.result.api.schema;
in
schema
) self.clan.modules;
in
pkgs.runCommand "combined-result"
{
schemaFile = builtins.toFile "schemas.json" (builtins.toJSON allSchemas);
}
''
mkdir -p $out
cat $schemaFile > $out/allSchemas.json
'';
clan-core-for-checks = pkgs.runCommand "clan-core-for-checks" { } ''
cp -r ${privateInputs.clan-core-for-checks} $out
chmod -R +w $out

View File

@@ -40,7 +40,7 @@
jobs=$(nproc)
# Spawning worker in pytest is relatively slow, so we limit the number of jobs to 13
# (current number of impure tests)
jobs="$((jobs > 6 ? 6 : jobs))"
jobs="$((jobs > 13 ? 13 : jobs))"
nix develop "$ROOT#clan-cli" -c bash -c "TMPDIR=/tmp python -m pytest -n $jobs -m impure ./clan_cli $@"

View File

@@ -241,7 +241,7 @@
target.shutdown()
except BrokenPipeError:
# qemu has already exited
target.connected = False
pass
# Create a new machine instance that boots from the installed system
installed_machine = create_test_machine(target, "${pkgs.qemu_test}", name="after_install")

View File

@@ -0,0 +1,4 @@
{
"publickey": "age1qm0p4vf9jvcnn43s6l4prk8zn6cx0ep9gzvevxecv729xz540v8qa742eg",
"type": "age"
}

View File

@@ -0,0 +1,62 @@
{ ... }:
let
error = builtins.throw ''
###############################################################################
# #
# Clan modules (clanModules) have been deprecated and removed in favor of #
# Clan services! #
# #
# Refer to https://docs.clan.lol/guides/migrations/migrate-inventory-services #
# for migration instructions. #
# #
###############################################################################
'';
modnames = [
"admin"
"borgbackup"
"borgbackup-static"
"deltachat"
"disk-id"
"dyndns"
"ergochat"
"garage"
"heisenbridge"
"iwd"
"localbackup"
"localsend"
"matrix-synapse"
"moonlight"
"mumble"
"nginx"
"packages"
"postgresql"
"root-password"
"single-disk"
"sshd"
"state-version"
"static-hosts"
"sunshine"
"syncthing"
"syncthing-static-peers"
"thelounge"
"trusted-nix-caches"
"user-password"
"vaultwarden"
"xfce"
"zerotier-static-peers"
"zt-tcp-relay"
];
in
{
flake.clanModules = builtins.listToAttrs (
map (name: {
inherit name;
value = error;
}) modnames
);
}

View File

@@ -0,0 +1,55 @@
# We don't have a way of specifying dependencies between clanServices for now.
# When it gets added this file should be removed and the users module used instead.
{
roles.default.perInstance =
{ ... }:
{
nixosModule =
{
config,
pkgs,
...
}:
{
users.mutableUsers = false;
users.users.root.hashedPasswordFile =
config.clan.core.vars.generators.root-password.files.password-hash.path;
clan.core.vars.generators.root-password = {
files.password-hash.neededFor = "users";
files.password.deploy = false;
runtimeInputs = [
pkgs.coreutils
pkgs.mkpasswd
pkgs.xkcdpass
];
prompts.password.display = {
group = "Root User";
label = "Password";
required = false;
helperText = ''
Your password will be encrypted and stored securely using the secret store you've configured.
'';
};
prompts.password.type = "hidden";
prompts.password.persist = true;
prompts.password.description = "Leave empty to generate automatically";
script = ''
prompt_value="$(cat "$prompts"/password)"
if [[ -n "''${prompt_value-}" ]]; then
echo "$prompt_value" | tr -d "\n" > "$out"/password
else
xkcdpass --numwords 5 --delimiter - --count 1 | tr -d "\n" > "$out"/password
fi
mkpasswd -s -m sha-512 < "$out"/password | tr -d "\n" > "$out"/password-hash
'';
};
};
};
}

View File

@@ -18,11 +18,4 @@
imports = map (name: ./. + "/${name}/flake-module.nix") validModuleDirs;
in
imports;
flake.clanModules = builtins.throw ''
clanModules have been removed!
Refer to https://docs.clan.lol/guides/migrations/migrate-inventory-services for migration.
'';
}

View File

@@ -24,5 +24,12 @@
};
};
imports = [ ./telegraf.nix ];
# roles.prometheus = {
# interface = { lib, ... }: { };
# };
imports = [
./telegraf.nix
./prometheus.nix
];
}

View File

@@ -0,0 +1,182 @@
{
roles.prometheus.perInstance =
{ settings, roles, ... }:
{
nixosModule =
{ pkgs, lib, ... }:
{
# imports = [
# # ./matrix-alertmanager.nix
# # ./irc-alertmanager.nix
# # ./rules.nix
# ];
services.prometheus = {
# webExternalUrl = "https://prometheus.thalheim.io";
extraFlags = [ "--storage.tsdb.retention.time=30d" ];
scrapeConfigs = [
{
job_name = "telegraf";
scrape_interval = "60s";
metrics_path = "/metrics";
static_configs = [
(map (host: {
labels.host = host;
# labels.org = "TODO";
targets = [ "${host}.clan:9273" ];
}) lib.attrNames roles.telegraf.machines)
# {
# # labels.host = "rauter.r:9273";
# # labels.org = "TODO";
# targets = map (host: "${host}.clan:9273") lib.attrNames roles.telegraf.machines;
# }
];
}
# {
# job_name = "gitea";
# scrape_interval = "60s";
# metrics_path = "/metrics";
#
# scheme = "https";
# static_configs = [ { targets = [ "git.thalheim.io:443" ]; } ];
# }
];
alertmanagers = [ { static_configs = [ { targets = [ "localhost:9093" ]; } ]; } ];
};
services.prometheus.alertmanager = {
enable = true;
# environmentFile = config.sops.secrets.alertmanager.path;
# webExternalUrl = "https://alertmanager.thalheim.io";
# listenAddress = "[::1]";
# configuration = {
# global = {
# # The smarthost and SMTP sender used for mail notifications.
# smtp_smarthost = "mail.thalheim.io:587";
# smtp_from = "alertmanager@thalheim.io";
# smtp_auth_username = "alertmanager@thalheim.io";
# smtp_auth_password = "$SMTP_PASSWORD";
# };
# route = {
# receiver = "default";
# routes = [
# {
# group_by = [ "host" ];
# match_re.org = "krebs";
# group_wait = "5m";
# group_interval = "5m";
# repeat_interval = "4h";
# receiver = "krebs";
# }
# {
# group_by = [ "host" ];
# match_re.org = "nixos-wiki";
# group_wait = "5m";
# group_interval = "5m";
# repeat_interval = "4h";
# receiver = "nixos-wiki";
# }
# {
# group_by = [ "host" ];
# match_re.org = "numtide";
# group_wait = "5m";
# group_interval = "5m";
# repeat_interval = "4h";
# receiver = "numtide";
# }
# {
# group_by = [ "host" ];
# match_re.org = "clan-lol";
# group_wait = "5m";
# group_interval = "5m";
# repeat_interval = "4h";
# receiver = "clan-lol";
# }
# {
# group_by = [ "host" ];
# match_re.org = "dave";
# group_wait = "5m";
# group_interval = "5m";
# repeat_interval = "4h";
# receiver = "dave";
# }
# {
# group_by = [ "host" ];
# group_wait = "30s";
# group_interval = "2m";
# repeat_interval = "2h";
# receiver = "all";
# }
# ];
# };
# receivers = [
# {
# name = "krebs";
# webhook_configs = [
# {
# url = "http://127.0.0.1:9223/";
# max_alerts = 5;
# }
# ];
# }
# {
# name = "numtide";
# webhook_configs = [
# # TODO
# #{
# # send_resolved = true;
# # url = "https://chat.ntd.one/plugins/alertmanager/api/webhook?token='xxxxxxxxxxxxxxxxxxx-yyyyyyy'";
# #}
# ];
# }
# {
# name = "nixos-wiki";
# webhook_configs = [
# {
# url = "http://localhost:9088/alert";
# max_alerts = 5;
# }
# ];
# }
# {
# name = "clan-lol";
# webhook_configs = [
# # TODO
# #{
# # url = "http://localhost:4050/services/hooks/YWxlcnRtYW5hZ2VyX3NlcnZpY2U";
# # max_alerts = 5;
# #}
# ];
# }
# {
# name = "dave";
# telegram_configs = [
# {
# chat_id = 42927997;
# bot_token = "$TELEGRAM_BOT_TOKEN";
# }
# ];
# }
# {
# name = "all";
# # pushover_configs = [
# # {
# # user_key = "$PUSHOVER_USER_KEY";
# # token = "$PUSHOVER_TOKEN";
# # priority = "0";
# # }
# # ];
# }
# { name = "default"; }
# ];
# };
};
};
};
}

View File

@@ -4,12 +4,7 @@
{
nixosModule =
{
config,
pkgs,
lib,
...
}:
{ pkgs, lib, ... }:
{
networking.firewall.interfaces = lib.mkIf (settings.allowAllInterfaces == false) (
@@ -23,29 +18,8 @@
networking.firewall.allowedTCPPorts = lib.mkIf (settings.allowAllInterfaces == true) [ 9273 ];
clan.core.vars.generators."telegraf-password" = {
files.telegraf-password.neededFor = "users";
files.telegraf-password.restartUnits = [ "telegraf.service" ];
runtimeInputs = [
pkgs.coreutils
pkgs.xkcdpass
pkgs.mkpasswd
];
script = ''
PASSWORD=$(xkcdpass --numwords 4 --delimiter - --count 1 | tr -d "\n")
echo "BASIC_AUTH_PWD=$PASSWORD" > "$out"/telegraf-password
'';
};
services.telegraf = {
enable = true;
environmentFiles = [
(builtins.toString
config.clan.core.vars.generators."telegraf-password".files.telegraf-password.path
)
];
extraConfig = {
agent.interval = "60s";
inputs = {
@@ -75,8 +49,6 @@
outputs.prometheus_client = {
listen = ":9273";
metric_version = 2;
basic_username = "prometheus";
basic_password = "$${BASIC_AUTH_PWD}";
};
};
};

6
devFlake/flake.lock generated
View File

@@ -84,11 +84,11 @@
},
"nixpkgs-dev": {
"locked": {
"lastModified": 1755375481,
"narHash": "sha256-43PgCQFgFD1nM/7dncytV0c5heNHe/gXrEud18ZWcZU=",
"lastModified": 1755166611,
"narHash": "sha256-sk8pK8kWz4IE4ErAjKE1d8tMChY6VQR32U4yS68FIog=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "35f1742e4f1470817ff8203185e2ce0359947f12",
"rev": "1a341e3c908f4a3105e737bd13af0318dc06fbe3",
"type": "github"
},
"original": {

View File

@@ -40,7 +40,6 @@ writeShellScriptBin "deploy-docs" ''
rsync \
--checksum \
--delete \
-e "ssh -o StrictHostKeyChecking=no $sshExtraArgs" \
-a ${docs}/ \
www@clan.lol:/var/www/docs.clan.lol

View File

@@ -18,8 +18,27 @@
inherit (self) clanModules;
clan-core = self;
inherit pkgs;
evalClanModules = self.clanLib.evalClan.evalClanModules;
modulesRolesOptions = self.clanLib.evalClan.evalClanModulesWithRoles {
allModules = self.clanModules;
inherit pkgs;
clan-core = self;
};
};
# Frontmatter for clanModules
clanModulesFrontmatter =
let
docs = pkgs.nixosOptionsDoc {
options = self.clanLib.modules.frontmatterOptions;
transformOptions = self.clanLib.docs.stripStorePathsFromDeclarations;
};
in
docs.optionsJSON;
# Options available when imported via ` inventory.${moduleName}....${rolesName} `
clanModulesViaRoles = pkgs.writeText "info.json" (builtins.toJSON jsonDocs.clanModulesViaRoles);
# clan service options
clanModulesViaService = pkgs.writeText "info.json" (builtins.toJSON jsonDocs.clanModulesViaService);
@@ -69,10 +88,12 @@
}
}
export CLAN_CORE_DOCS=${jsonDocs.clanCore}/share/doc/nixos/options.json
# A file that contains the links to all clanModule docs
export CLAN_MODULES_VIA_ROLES=${clanModulesViaRoles}
export CLAN_MODULES_VIA_SERVICE=${clanModulesViaService}
export CLAN_SERVICE_INTERFACE=${self'.legacyPackages.clan-service-module-interface}/share/doc/nixos/options.json
# Frontmatter format for clanModules
export CLAN_MODULES_FRONTMATTER_DOCS=${clanModulesFrontmatter}/share/doc/nixos/options.json
export BUILD_CLAN_PATH=${buildClanOptions}/share/doc/nixos/options.json
@@ -86,6 +107,7 @@
legacyPackages = {
inherit
jsonDocs
clanModulesViaRoles
clanModulesViaService
;
};

View File

@@ -1,5 +1,7 @@
{
modulesRolesOptions,
nixosOptionsDoc,
evalClanModules,
lib,
pkgs,
clan-core,
@@ -8,36 +10,21 @@
let
inherit (clan-core.clanLib.docs) stripStorePathsFromDeclarations;
transformOptions = stripStorePathsFromDeclarations;
nixosConfigurationWithClan =
let
evaled = lib.evalModules {
class = "nixos";
modules = [
# Basemodule
(
{ config, ... }:
{
imports = (import (pkgs.path + "/nixos/modules/module-list.nix"));
nixpkgs.pkgs = pkgs;
clan.core.name = "dummy";
system.stateVersion = config.system.nixos.release;
# Set this to work around a bug where `clan.core.settings.machine.name`
# is forced due to `networking.interfaces` being forced
# somewhere in the nixpkgs options
facter.detected.dhcp.enable = lib.mkForce false;
}
)
{
clan.core.settings.directory = clan-core;
}
clan-core.nixosModules.clanCore
];
};
in
evaled;
in
{
clanModulesViaRoles = lib.mapAttrs (
_moduleName: rolesOptions:
lib.mapAttrs (
_roleName: options:
(nixosOptionsDoc {
inherit options;
warningsAreErrors = true;
inherit transformOptions;
}).optionsJSON
) rolesOptions
) modulesRolesOptions;
# Test with:
# nix build .\#legacyPackages.x86_64-linux.clanModulesViaService
clanModulesViaService = lib.mapAttrs (
@@ -51,6 +38,7 @@ in
{
roles = lib.mapAttrs (
_roleName: role:
(nixosOptionsDoc {
transformOptions =
opt:
@@ -66,13 +54,20 @@ in
warningsAreErrors = true;
}).optionsJSON
) evaluatedService.config.roles;
manifest = evaluatedService.config.manifest;
}
) clan-core.clan.modules;
clanCore =
(nixosOptionsDoc {
options = nixosConfigurationWithClan.options.clan.core;
options =
((evalClanModules {
modules = [ ];
inherit pkgs clan-core;
}).options
).clan.core or { };
warningsAreErrors = true;
inherit transformOptions;
}).optionsJSON;

View File

@@ -33,13 +33,22 @@ from clan_lib.errors import ClanError
from clan_lib.services.modules import (
CategoryInfo,
Frontmatter,
extract_frontmatter,
get_roles,
)
# Get environment variables
CLAN_CORE_PATH = Path(os.environ["CLAN_CORE_PATH"])
CLAN_CORE_DOCS = Path(os.environ["CLAN_CORE_DOCS"])
CLAN_MODULES_FRONTMATTER_DOCS = os.environ.get("CLAN_MODULES_FRONTMATTER_DOCS")
BUILD_CLAN_PATH = os.environ.get("BUILD_CLAN_PATH")
## Clan modules ##
# Some modules can be imported via nix natively
CLAN_MODULES_VIA_NIX = os.environ.get("CLAN_MODULES_VIA_NIX")
# Some modules can be imported via inventory
CLAN_MODULES_VIA_ROLES = os.environ.get("CLAN_MODULES_VIA_ROLES")
# Options how to author clan.modules
# perInstance, perMachine, ...
CLAN_SERVICE_INTERFACE = os.environ.get("CLAN_SERVICE_INTERFACE")
@@ -181,6 +190,23 @@ def module_header(module_name: str, has_inventory_feature: bool = False) -> str:
return f"# {module_name}{indicator}\n\n"
def module_nix_usage(module_name: str) -> str:
return f"""## Usage via Nix
**This module can be also imported directly in your nixos configuration. Although it is recommended to use the [inventory](../../concepts/inventory.md) interface if available.**
Some modules are considered 'low-level' or 'expert modules' and are not available via the inventory interface.
```nix
{{config, lib, inputs, ...}}: {{
imports = [ inputs.clan-core.clanModules.{module_name} ];
# ...
}}
```
"""
clan_core_descr = """
`clan.core` is always present in a clan machine
@@ -197,6 +223,68 @@ The following options are available for this module.
"""
def produce_clan_modules_frontmatter_docs() -> None:
if not CLAN_MODULES_FRONTMATTER_DOCS:
msg = f"Environment variables are not set correctly: $CLAN_CORE_DOCS={CLAN_CORE_DOCS}"
raise ClanError(msg)
if not OUT:
msg = f"Environment variables are not set correctly: $out={OUT}"
raise ClanError(msg)
with Path(CLAN_MODULES_FRONTMATTER_DOCS).open() as f:
options: dict[str, dict[str, Any]] = json.load(f)
# header
output = """# Frontmatter
Every clan module has a `frontmatter` section within its readme. It provides
machine readable metadata about the module.
!!! example
The used format is `TOML`
The content is separated by `---` and the frontmatter must be placed at the very top of the `README.md` file.
```toml
---
description = "A description of the module"
categories = ["category1", "category2"]
[constraints]
roles.client.max = 10
roles.server.min = 1
---
# Readme content
...
```
"""
output += """## Overview
This provides an overview of the available attributes of the `frontmatter`
within the `README.md` of a clan module.
"""
# for option_name, info in options.items():
# if option_name == "_module.args":
# continue
# output += render_option(option_name, info)
root = options_to_tree(options, debug=True)
for option in root.suboptions:
output += options_docs_from_tree(option, init_level=2)
outfile = Path(OUT) / "clanModules/frontmatter/index.md"
outfile.parent.mkdir(
parents=True,
exist_ok=True,
)
with outfile.open("w") as of:
of.write(output)
def produce_clan_core_docs() -> None:
if not CLAN_CORE_DOCS:
msg = f"Environment variables are not set correctly: $CLAN_CORE_DOCS={CLAN_CORE_DOCS}"
@@ -417,6 +505,154 @@ Learn how to use `clanServices` in practice in the [Using clanServices guide](..
of.write(output)
def produce_clan_modules_docs() -> None:
if not CLAN_MODULES_VIA_NIX:
msg = f"Environment variables are not set correctly: $CLAN_MODULES_VIA_NIX={CLAN_MODULES_VIA_NIX}"
raise ClanError(msg)
if not CLAN_MODULES_VIA_ROLES:
msg = f"Environment variables are not set correctly: $CLAN_MODULES_VIA_ROLES={CLAN_MODULES_VIA_ROLES}"
raise ClanError(msg)
if not CLAN_CORE_PATH:
msg = f"Environment variables are not set correctly: $CLAN_CORE_PATH={CLAN_CORE_PATH}"
raise ClanError(msg)
if not OUT:
msg = f"Environment variables are not set correctly: $out={OUT}"
raise ClanError(msg)
modules_index = "# Modules Overview\n\n"
modules_index += clan_modules_descr
modules_index += "## Overview\n\n"
modules_index += '<div class="grid cards" markdown>\n\n'
with Path(CLAN_MODULES_VIA_ROLES).open() as f2:
role_links: dict[str, dict[str, str]] = json.load(f2)
with Path(CLAN_MODULES_VIA_NIX).open() as f:
links: dict[str, str] = json.load(f)
for module_name, options_file in links.items():
print(f"Rendering ClanModule: {module_name}")
readme_file = CLAN_CORE_PATH / "clanModules" / module_name / "README.md"
with readme_file.open() as f:
readme = f.read()
frontmatter: Frontmatter
frontmatter, readme_content = extract_frontmatter(readme, str(readme_file))
# skip if experimental feature enabled
if "experimental" in frontmatter.features:
print(f"Skipping {module_name}: Experimental feature")
continue
modules_index += build_option_card(module_name, frontmatter)
##### Print module documentation #####
# 1. Header
output = module_header(module_name, "inventory" in frontmatter.features)
# 2. Description from README.md
if frontmatter.description:
output += f"*{frontmatter.description}*\n\n"
# 2. Deprecation note if the module is deprecated
if "deprecated" in frontmatter.features:
output += f"""
!!! Warning "Deprecated"
The `{module_name}` module is deprecated.*
Use 'clanServices/{module_name}' or a similar successor instead
"""
else:
output += f"""
!!! Warning "Will be deprecated"
The `{module_name}` module might eventually be migrated to 'clanServices'*
See: [clanServices](../../guides/clanServices.md)
"""
# 3. Categories from README.md
output += "## Categories\n\n"
output += render_categories(frontmatter.categories, frontmatter.categories_info)
output += "\n---\n\n"
# 3. README.md content
output += f"{readme_content}\n"
# 4. Usage
##### Print usage via Inventory #####
# get_roles(str) -> list[str] | None
# if not isinstance(options_file, str):
roles = get_roles(CLAN_CORE_PATH / "clanModules" / module_name)
if roles:
# Render inventory usage
output += """## Usage via Inventory\n\n"""
output += render_roles(roles, module_name)
for role in roles:
role_options_file = role_links[module_name][role]
# Abort if the options file is not found
if not isinstance(role_options_file, str):
print(
f"Error: module: {module_name} in role: {role} - options file not found, Got {role_options_file}"
)
exit(1)
no_options = f"""### Options of `{role}` role
**The `{module_name}` `{role}` doesnt offer / require any options to be set.**
"""
heading = f"""### Options of `{role}` role
The following options are available when using the `{role}` role.
"""
output += print_options(
role_options_file,
heading,
no_options,
replace_prefix=f"clan.{module_name}",
)
else:
# No roles means no inventory usage
output += """## Usage via Inventory
**This module cannot be used via the inventory interface.**
"""
##### Print usage via Nix / nixos #####
if not isinstance(options_file, str):
print(
f"Skipping {module_name}: Cannot be used via import clanModules.{module_name}"
)
output += """## Usage via Nix
**This module cannot be imported directly in your nixos configuration.**
"""
else:
output += module_nix_usage(module_name)
no_options = "** This module doesnt require any options to be set.**"
output += print_options(options_file, options_head, no_options)
outfile = Path(OUT) / f"clanModules/{module_name}.md"
outfile.parent.mkdir(
parents=True,
exist_ok=True,
)
with outfile.open("w") as of:
of.write(output)
modules_index += "</div>"
modules_index += "\n"
modules_outfile = Path(OUT) / "clanModules/index.md"
with modules_outfile.open("w") as of:
of.write(modules_index)
def build_option_card(module_name: str, frontmatter: Frontmatter) -> str:
"""
Build the overview index card for each reference target option.
@@ -627,4 +863,8 @@ if __name__ == "__main__": #
produce_clan_core_docs()
produce_clan_service_author_docs()
# produce_clan_modules_docs()
produce_clan_service_docs()
# produce_clan_modules_frontmatter_docs()

View File

@@ -1,129 +1,110 @@
# :material-clock-fast: Getting Started
Ready to manage your fleet of machines?
Ready to create your own Clan and manage a fleet of machines? Follow these simple steps to get started.
We will create a declarative infrastructure using **clan**, **git**, and **nix flakes**.
This guide walks you through setting up your own declarative infrastructure using clan, git and flakes. By the end of this, you will have one or more machines integrated and installed. You can then import your existing NixOS configuration into this setup if you wish.
You'll finish with a centrally managed fleet, ready to import your existing NixOS configuration.
The following steps are meant to be executed on the machine on which to administer the infrastructure.
In order to get started you should have at least one machine with either physical or ssh access available as an installation target. Your local machine can also be used as an installation target if it is already running NixOS.
## Prerequisites
Make sure you have the following:
=== "**Linux**"
* 💻 **Administration Machine**: Run the setup commands from this machine.
* 🛠️ **Nix**: The Nix package manager, installed on your administration machine.
??? info "**How to install Nix (Linux / MacOS / NixOS)**"
**On Linux or macOS:**
1. Run the recommended installer:
```shellSession
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
```
2. After installation, ensure flakes are enabled by adding this line to `~/.config/nix/nix.conf`:
```
experimental-features = nix-command flakes
```
**On NixOS:**
Nix is already installed. You only need to enable flakes for your user in your `configuration.nix`:
```nix
{
nix.settings.experimental-features = [ "nix-command" "flakes" ];
}
```
Then, run `nixos-rebuild switch` to apply the changes.
* 🎯 **Target Machine(s)**: A remote machine with SSH, or your local machine (if NixOS).
## Create a New Clan
1. Navigate to your desired directory:
```shellSession
cd <your-directory>
```
2. Create a new clan flake:
**Note:** This creates a new directory in your current location
Clan requires Nix to be installed on your system. Run the following command to install Nix:
```shellSession
nix run https://git.clan.lol/clan/clan-core/archive/main.tar.gz#clan-cli --refresh -- flakes create
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
```
3. Enter a **name** in the prompt:
If you have previously installed Nix, make sure `experimental-features = nix-command flakes` is present in `~/.config/nix/nix.conf` or `/etc/nix/nix.conf`. If this is not the case, please add it to `~/.config/nix/nix.conf`.
```terminalSession
Enter a name for the new clan: my-clan
=== "**NixOS**"
If you run NixOS the `nix` binary is already installed.
You will also need to enable the `nix-command` and `flakes` experimental features in your `configuration.nix`:
```nix
{ nix.settings.experimental-features = [ "nix-command" "flakes" ]; }
```
## Project Structure
=== "**macOS**"
Your new directory, `my-clan`, should contain the following structure:
Clan requires Nix to be installed on your system. Run the following command to install Nix:
```
my-clan/
├── clan.nix
├── flake.lock
├── flake.nix
├── modules/
└── sops/
```
```shellSession
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
```
!!! note "Templates"
This is the structure for the `default` template.
If you have previously installed Nix, make sure `experimental-features = nix-command flakes` is present in `~/.config/nix/nix.conf` or `/etc/nix/nix.conf`. If this is not the case, please add it to `~/.config/nix/nix.conf`.
Use `clan templates list` and `clan templates --help` for available templates & more. Keep in mind that the exact files may change as templates evolve.
## Create a new clan
## Activate the Environment
To get started, `cd` into your new project directory.
Initialize a new clan flake
```shellSession
cd my-clan
nix run https://git.clan.lol/clan/clan-core/archive/main.tar.gz#clan-cli --refresh -- flakes create
```
Now, activate the environment using one of the following methods.
This should prompt for a *name*:
```terminalSession
Enter a name for the new clan: my-clan
```
Enter a *name*, confirm with *enter*. A directory with that name will be created and initialized.
!!! Note
This command uses the `default` template
See `clan templates list` and the `--help` reference for how to use other templates.
## Explore the Project Structure
Take a look at all project files:
For example, you might see something like:
```{ .console .no-copy }
$ cd my-clan
$ ls
clan.nix flake.lock flake.nix modules sops
```
Don't worry if your output looks different — Clan templates evolve over time.
To interact with your newly created clan, you need to load the `clan` CLI package into your environment by running:
=== "Automatic (direnv, recommended)"
**Prerequisite**: You must have [nix-direnv](https://github.com/nix-community/nix-direnv) installed.
- prerequisite: [install nix-direnv](https://github.com/nix-community/nix-direnv)
Run `direnv allow` to automatically load the environment whenever you enter this directory.
```shellSession
direnv allow
```
=== "Manual (nix develop)"
Run nix develop to load the environment for your current shell session.
```shellSession
nix develop
```
## Verify the Setup
Once your environment is active, verify that the clan command is available by running:
verify that you can run `clan` commands:
```shellSession
clan show
```
You should see the default metadata for your new clan:
You should see something like this:
```shellSession
Name: __CHANGE_ME__
Description: None
```
This confirms your setup is working correctly.
You can now change the default name by editing the `meta.name` field in your `clan.nix` file.
To change the name of your clan edit `meta.name` in the `clan.nix` or `flake.nix` file
```{.nix title="clan.nix" hl_lines="3"}
{

12
flake.lock generated
View File

@@ -71,11 +71,11 @@
]
},
"locked": {
"lastModified": 1755275010,
"narHash": "sha256-lEApCoWUEWh0Ifc3k1JdVjpMtFFXeL2gG1qvBnoRc2I=",
"lastModified": 1751313918,
"narHash": "sha256-HsJM3XLa43WpG+665aGEh8iS8AfEwOIQWk3Mke3e7nk=",
"owner": "nix-darwin",
"repo": "nix-darwin",
"rev": "7220b01d679e93ede8d7b25d6f392855b81dd475",
"rev": "e04a388232d9a6ba56967ce5b53a8a6f713cdfcf",
"type": "github"
},
"original": {
@@ -115,10 +115,10 @@
"nixpkgs": {
"locked": {
"lastModified": 315532800,
"narHash": "sha256-moy1MfcGj+Pd+lU3PHYQUJq9OP0Evv9me8MjtmHlnRM=",
"rev": "32f313e49e42f715491e1ea7b306a87c16fe0388",
"narHash": "sha256-2ILJtWugqmMyZnaWnHh+5yyw8RZWbKu9rVdeWmrBVhY=",
"rev": "a595dde4d0d31606e19dcec73db02279db59d201",
"type": "tarball",
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre844992.32f313e49e42/nixexprs.tar.xz"
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre844295.a595dde4d0d3/nixexprs.tar.xz"
},
"original": {
"type": "tarball",

View File

@@ -67,6 +67,7 @@
clan = {
meta.name = "clan-core";
inventory = {
services = { };
machines = {
"test-darwin-machine" = {
machineClass = "darwin";

View File

@@ -46,6 +46,8 @@
"checks/lib/ssh/privkey"
"checks/lib/ssh/pubkey"
"checks/matrix-synapse/synapse-registration_shared_secret"
"checks/mumble/machines/peer1/facts/mumble-cert"
"checks/mumble/machines/peer2/facts/mumble-cert"
"checks/secrets/clan-secrets"
"checks/secrets/sops/groups/group/machines/machine"
"checks/syncthing/introducer/introducer_device_id"

View File

@@ -33,6 +33,7 @@ lib.fix (
evalService = clanLib.callLib ./modules/inventory/distributed-service/evalService.nix { };
# ------------------------------------
# ClanLib functions
evalClan = clanLib.callLib ./modules/inventory/eval-clan-modules { };
inventory = clanLib.callLib ./modules/inventory { };
modules = clanLib.callLib ./modules/inventory/frontmatter { };
test = clanLib.callLib ./test { };

View File

@@ -0,0 +1,108 @@
{
lib,
clanLib,
}:
let
baseModule =
{ pkgs }:
# Module
{ config, ... }:
{
imports = (import (pkgs.path + "/nixos/modules/module-list.nix"));
nixpkgs.pkgs = pkgs;
clan.core.name = "dummy";
system.stateVersion = config.system.nixos.release;
# Set this to work around a bug where `clan.core.settings.machine.name`
# is forced due to `networking.interfaces` being forced
# somewhere in the nixpkgs options
facter.detected.dhcp.enable = lib.mkForce false;
};
# This function takes a list of module names and evaluates them
# [ module ] -> { config, options, ... }
evalClanModulesLegacy =
{
modules,
pkgs,
clan-core,
}:
let
evaled = lib.evalModules {
class = "nixos";
modules = [
(baseModule { inherit pkgs; })
{
clan.core.settings.directory = clan-core;
}
clan-core.nixosModules.clanCore
]
++ modules;
};
in
# lib.warn ''
# doesn't respect role specific interfaces.
# The following {module}/default.nix file trying to be imported.
# Modules: ${builtins.toJSON modulenames}
# This might result in incomplete or incorrect interfaces.
# FIX: Use evalClanModuleWithRole instead.
# ''
evaled;
/*
This function takes a list of module names and evaluates them
Returns a set of interfaces as described below:
Fn :: { ${moduleName} = Module; } -> {
${moduleName} :: {
${roleName}: JSONSchema
}
}
*/
evalClanModulesWithRoles =
{
allModules,
clan-core,
pkgs,
}:
let
res = builtins.mapAttrs (
moduleName: module:
let
frontmatter = clanLib.modules.getFrontmatter allModules.${moduleName} moduleName;
roles =
if builtins.elem "inventory" frontmatter.features or [ ] then
assert lib.isPath module;
clan-core.clanLib.modules.getRoles "Documentation: inventory.modules" allModules moduleName
else
[ ];
in
lib.listToAttrs (
lib.map (role: {
name = role;
value =
(lib.evalModules {
class = "nixos";
modules = [
(baseModule { inherit pkgs; })
clan-core.nixosModules.clanCore
{
clan.core.settings.directory = clan-core;
}
# Role interface
(module + "/roles/${role}.nix")
];
}).options.clan.${moduleName} or { };
}) roles
)
) allModules;
in
res;
in
{
evalClanModules = evalClanModulesLegacy;
inherit evalClanModulesWithRoles;
}

View File

@@ -1,8 +1,12 @@
{
self,
inputs,
options,
...
}:
let
inputOverrides = self.clanLib.flake-inputs.getOverrides inputs;
in
{
imports = [
./distributed-service/flake-module.nix
@@ -11,13 +15,16 @@
{
pkgs,
lib,
config,
system,
self',
...
}:
{
devShells.inventory-schema = pkgs.mkShell {
name = "clan-inventory-schema";
inputsFrom = [
inputsFrom = with config.checks; [
eval-lib-inventory
self'.devShells.default
];
};
@@ -44,5 +51,41 @@
warningsAreErrors = true;
transformOptions = self.clanLib.docs.stripStorePathsFromDeclarations;
}).optionsJSON;
# Run: nix-unit --extra-experimental-features flakes --flake .#legacyPackages.x86_64-linux.evalTests
legacyPackages.evalTests-inventory = import ./tests {
inherit lib;
clan-core = self;
inherit (self) clanLib;
inherit (self.inputs) nix-darwin;
};
checks = {
eval-lib-inventory = pkgs.runCommand "tests" { nativeBuildInputs = [ pkgs.nix-unit ]; } ''
export HOME="$(realpath .)"
export NIX_ABORT_ON_WARN=1
nix-unit --eval-store "$HOME" \
--extra-experimental-features flakes \
--show-trace \
${inputOverrides} \
--flake ${
lib.fileset.toSource {
root = ../../..;
fileset = lib.fileset.unions [
../../../flake.nix
../../../flake.lock
(lib.fileset.fileFilter (file: file.name == "flake-module.nix") ../../..)
../../../flakeModules
../../../lib
../../../nixosModules/clanCore
../../../machines
../../../inventory.json
];
}
}#legacyPackages.${system}.evalTests-inventory
touch $out
'';
};
};
}

View File

@@ -3,6 +3,51 @@ let
# Trim the .nix extension from a filename
trimExtension = name: builtins.substring 0 (builtins.stringLength name - 4) name;
jsonWithoutHeader = clanLib.jsonschema {
includeDefaults = true;
header = { };
};
getModulesSchema =
{
modules,
clan-core,
pkgs,
}:
lib.mapAttrs
(
_moduleName: rolesOptions:
lib.mapAttrs (_roleName: options: jsonWithoutHeader.parseOptions options { }) rolesOptions
)
(
clanLib.evalClan.evalClanModulesWithRoles {
allModules = modules;
inherit pkgs clan-core;
}
);
evalFrontmatter =
{
moduleName,
instanceName,
resolvedRoles,
allModules,
}:
lib.evalModules {
modules = [
(getFrontmatter allModules.${moduleName} moduleName)
./interface.nix
{
constraints.imports = [
(lib.modules.importApply ../constraints {
inherit moduleName resolvedRoles instanceName;
allRoles = getRoles "inventory.modules" allModules moduleName;
})
];
}
];
};
# For Documentation purposes only
frontmatterOptions =
(lib.evalModules {
@@ -74,12 +119,17 @@ let
builtins.readDir (checkedPath)
)
);
checkConstraints = args: (evalFrontmatter args).config.constraints.assertions;
getFrontmatter = _modulepath: _modulename: "clanModules are removed!";
in
{
inherit
frontmatterOptions
getModulesSchema
getFrontmatter
checkConstraints
getRoles
;
}

View File

@@ -1,14 +1,29 @@
{
self,
self',
lib,
pkgs,
flakeOptions,
...
}:
let
modulesSchema = self.clanLib.modules.getModulesSchema {
modules = self.clanModules;
inherit pkgs;
clan-core = self;
};
jsonLib = self.clanLib.jsonschema { inherit includeDefaults; };
includeDefaults = true;
frontMatterSchema = jsonLib.parseOptions self.clanLib.modules.frontmatterOptions { };
inventorySchema = jsonLib.parseModule ({
imports = [ ../../inventoryClass/interface.nix ];
_module.args = { inherit (self) clanLib; };
});
opts = (flakeOptions.flake.type.getSubOptions [ "flake" ]);
clanOpts = opts.clan.type.getSubOptions [ "clan" ];
include = [
@@ -23,6 +38,13 @@ let
];
clanSchema = jsonLib.parseOptions (lib.filterAttrs (n: _v: lib.elem n include) clanOpts) { };
renderSchema = pkgs.writers.writePython3Bin "render-schema" {
flakeIgnore = [
"F401"
"E501"
];
} ./render_schema.py;
clan-schema-abstract = pkgs.stdenv.mkDerivation {
name = "clan-schema-files";
buildInputs = [ pkgs.cue ];
@@ -41,7 +63,29 @@ in
{
inherit
flakeOptions
frontMatterSchema
clanSchema
inventorySchema
modulesSchema
renderSchema
clan-schema-abstract
;
# Inventory schema, with the modules schema added per role
inventory =
pkgs.runCommand "rendered"
{
buildInputs = [
pkgs.python3
self'.packages.clan-cli
];
}
''
export INVENTORY_SCHEMA_PATH=${builtins.toFile "inventory-schema.json" (builtins.toJSON inventorySchema)}
export MODULES_SCHEMA_PATH=${builtins.toFile "modules-schema.json" (builtins.toJSON modulesSchema)}
mkdir $out
# The python script will place the schemas in the output directory
exec python3 ${renderSchema}/bin/render-schema
'';
}

View File

@@ -0,0 +1,162 @@
"""
Python script to join the abstract inventory schema, with the concrete clan modules
Inventory has slots which are 'Any' type.
We don't want to evaluate the clanModules interface in Nix when evaluating the inventory.
"""
import json
import os
from pathlib import Path
from typing import Any
from clan_lib.errors import ClanError
# Get environment variables
# Path to the abstract inventory JSON schema (set by the Nix derivation).
INVENTORY_SCHEMA_PATH = Path(os.environ["INVENTORY_SCHEMA_PATH"])
# { [moduleName] :: { [roleName] :: SCHEMA }}
MODULES_SCHEMA_PATH = Path(os.environ["MODULES_SCHEMA_PATH"])
# Output directory; "out" is provided by the Nix builder environment.
OUT = os.environ.get("out")
# NOTE(review): the two Path checks below can never fire — os.environ[...]
# above already raises KeyError when the variable is missing, and a
# constructed Path object is always truthy. Only the OUT check is effective.
if not INVENTORY_SCHEMA_PATH:
    msg = f"Environment variables are not set correctly: INVENTORY_SCHEMA_PATH={INVENTORY_SCHEMA_PATH}."
    raise ClanError(msg)
if not MODULES_SCHEMA_PATH:
    msg = f"Environment variables are not set correctly: MODULES_SCHEMA_PATH={MODULES_SCHEMA_PATH}."
    raise ClanError(msg)
if not OUT:
    msg = f"Environment variables are not set correctly: OUT={OUT}."
    raise ClanError(msg)
def service_roles_to_schema(
    schema: dict[str, Any],
    service_name: str,
    roles: list[str],
    roles_schemas: dict[str, dict[str, Any]],
    # Original service properties: {'config': Schema, 'machines': Schema, 'meta': Schema, 'extraModules': Schema, ...?}
    orig: dict[str, Any],
) -> dict[str, Any]:
    """
    Inject the per-role config schemas of one clan module into the services schema.

    Mutates ``schema`` in place by adding ``schema["properties"][service_name]``
    and returns ``schema`` for convenience.

    Fix over the previous revision: the body now uses the parameters
    ``schema`` and ``service_name`` instead of the module-level globals
    ``services`` and ``module_name``.  The old code only worked by accident
    inside the ``__main__`` loop and raised NameError when called standalone.
    Behavior at the existing call site is unchanged.
    """
    # Collect all role config schemas of the service to form a type union
    # (oneOf).  Intentionally mutates the dicts in roles_schemas by injecting
    # a "title", matching the previous behavior.
    all_roles_schema: list[dict[str, Any]] = []
    for role_name, role_schema in roles_schemas.items():
        role_schema["title"] = f"{service_name}-config-role-{role_name}"
        all_roles_schema.append(role_schema)

    # Per-role entry: the generic role properties (machines/tags/...) plus the
    # role-specific "config" schema.
    role_properties: dict[str, Any] = {}
    for role in roles:
        role_properties[role] = {
            "type": "object",
            "additionalProperties": False,
            "properties": {
                **orig["roles"]["additionalProperties"]["properties"],
                "config": {
                    **roles_schemas.get(role, {}),
                    "title": f"{service_name}-config-role-{role}",
                    "type": "object",
                    "default": {},
                    "additionalProperties": False,
                },
            },
        }

    # Machine-specific overrides: a machine's "config" may match any role's
    # config schema, hence the oneOf union.
    machines_schema = {
        "type": "object",
        "additionalProperties": {
            "type": "object",
            "properties": {
                **orig["machines"]["additionalProperties"]["properties"],
                "config": {
                    "title": f"{service_name}-config",
                    "oneOf": all_roles_schema,
                    "type": "object",
                    "default": {},
                    "additionalProperties": False,
                },
            },
        },
    }

    # Register the assembled service schema under its service name.
    # Outer additionalProperties maps: instanceName -> instance definition.
    schema["properties"][service_name] = {
        "type": "object",
        "additionalProperties": {
            "type": "object",
            "additionalProperties": False,
            "properties": {
                # Original inventory schema
                **orig,
                # Inject the roles schemas
                "roles": {
                    "title": f"{service_name}-roles",
                    "type": "object",
                    "properties": role_properties,
                    "additionalProperties": False,
                },
                "machines": machines_schema,
                "config": {
                    "title": f"{service_name}-config",
                    "oneOf": all_roles_schema,
                    "type": "object",
                    "default": {},
                    "additionalProperties": False,
                },
            },
        },
    }
    return schema
if __name__ == "__main__":
    # Join the abstract inventory schema with the concrete per-module role
    # schemas and write the combined results into $out.
    print("Joining inventory schema with modules schema")
    print(f"Inventory schema path: {INVENTORY_SCHEMA_PATH}")
    print(f"Modules schema path: {MODULES_SCHEMA_PATH}")
    # Per-module role schemas: { moduleName: { roleName: SCHEMA } }
    modules_schema = {}
    # NOTE(review): Path.open called unbound with the path as first argument —
    # works, but plain MODULES_SCHEMA_PATH.open() would be conventional.
    with Path.open(MODULES_SCHEMA_PATH) as f:
        modules_schema = json.load(f)
    # Abstract inventory schema; role configs are untyped 'Any' slots here.
    inventory_schema = {}
    with Path.open(INVENTORY_SCHEMA_PATH) as f:
        inventory_schema = json.load(f)
    services = inventory_schema["properties"]["services"]
    # Generic per-service properties (config/machines/meta/...) shared by
    # every service entry; copied so later mutation doesn't alias the source.
    original_service_props = services["additionalProperties"]["additionalProperties"][
        "properties"
    ].copy()
    # Init the outer services schema
    # Properties (service names) will be filled in the next step
    services = {
        "type": "object",
        "properties": {
            # Service names
        },
        "additionalProperties": False,
    }
    for module_name, roles_schemas in modules_schema.items():
        # Add the roles schemas to the service schema
        roles = list(roles_schemas.keys())
        # Modules without roles are not inventory services; skip them.
        if roles:
            services = service_roles_to_schema(
                services,
                module_name,
                roles,
                roles_schemas,
                original_service_props,
            )
    inventory_schema["properties"]["services"] = services
    # Emit both the joined schema and the raw per-module schemas.
    outpath = Path(OUT)
    with (outpath / "schema.json").open("w") as f:
        json.dump(inventory_schema, f, indent=2)
    with (outpath / "modules_schemas.json").open("w") as f:
        json.dump(modules_schema, f, indent=2)

View File

@@ -0,0 +1,90 @@
{
clan-core,
nix-darwin,
lib,
clanLib,
}:
let
# TODO: Unify these tests with clan tests
clan =
m:
lib.evalModules {
specialArgs = { inherit clan-core nix-darwin clanLib; };
modules = [
clan-core.modules.clan.default
{
self = { };
}
m
];
};
in
{
test_inventory_a =
let
eval = clan {
inventory = {
machines = {
A = { };
};
services = {
legacyModule = { };
};
modules = {
legacyModule = ./legacyModule;
};
};
directory = ./.;
};
in
{
inherit eval;
expr = {
legacyModule = lib.filterAttrs (
name: _: name == "isClanModule"
) eval.config.clanInternals.inventoryClass.machines.A.compiledServices.legacyModule;
};
expected = {
legacyModule = {
};
};
};
test_inventory_empty =
let
eval = clan {
inventory = { };
directory = ./.;
};
in
{
# Empty inventory should return an empty module
expr = eval.config.clanInternals.inventoryClass.machines;
expected = { };
};
test_inventory_module_doesnt_exist =
let
eval = clan {
directory = ./.;
inventory = {
services = {
fanatasy.instance_1 = {
roles.default.machines = [ "machine_1" ];
};
};
machines = {
"machine_1" = { };
};
};
};
in
{
inherit eval;
expr = eval.config.clanInternals.inventoryClass.machines.machine_1.machineImports;
expectedError = {
type = "ThrownError";
msg = "ClanModule not found*";
};
};
}

View File

@@ -0,0 +1,4 @@
---
features = [ "inventory" ]
---
Description

View File

@@ -0,0 +1,9 @@
{
  lib,
  clan-core,
  ...
}:
{
  # Just some random stuff
  # NOTE(review): `lib.mapAttrs clan-core` merely partially applies mapAttrs
  # (one of two arguments) — presumably intentional nonsense for this
  # legacy-module test fixture; confirm it is never evaluated as an option type.
  options.test = lib.mapAttrs clan-core;
}

View File

@@ -0,0 +1,78 @@
# Integrity validation of the inventory
{ config, lib, ... }:
{
  # Assertion must be of type
  # { assertion :: bool, message :: string, severity :: "error" | "warning" }
  imports = [
    # Check that each machine used in a service is defined in the top-level machines
    {
      # Fold over services -> instances -> roles, keeping only failed checks.
      assertions = lib.foldlAttrs (
        ass1: serviceName: c:
        ass1
        ++ lib.foldlAttrs (
          ass2: instanceName: instanceConfig:
          let
            topLevelMachines = lib.attrNames config.machines;
            # All machines must be defined in the top-level machines
            assertions = lib.foldlAttrs (
              assertions: roleName: role:
              assertions
              # Only failed assertions are collected; passing ones are dropped.
              ++ builtins.filter (a: !a.assertion) (
                builtins.map (m: {
                  assertion = builtins.elem m topLevelMachines;
                  message = ''
                    Machine '${m}' is not defined in the inventory. This might still work, if the machine is defined via nix.
                    Defined in service: '${serviceName}' instance: '${instanceName}' role: '${roleName}'.
                    Inventory machines:
                    ${builtins.concatStringsSep "\n" (map (n: "'${n}'") topLevelMachines)}
                  '';
                  # Warning only: the machine may exist outside the inventory.
                  severity = "warning";
                }) role.machines
              )
            ) [ ] instanceConfig.roles;
          in
          ass2 ++ assertions
        ) [ ] c
      ) [ ] config.services;
    }
    # Check that each tag used in a role is defined in at least one machines tags
    {
      assertions = lib.foldlAttrs (
        ass1: serviceName: c:
        ass1
        ++ lib.foldlAttrs (
          ass2: instanceName: instanceConfig:
          let
            # Union of every machine's tags across the whole inventory.
            allTags = lib.foldlAttrs (
              tags: _machineName: machine:
              tags ++ machine.tags
            ) [ ] config.machines;
            # Every tag referenced by a role must appear on at least one machine.
            assertions = lib.foldlAttrs (
              assertions: roleName: role:
              assertions
              ++ builtins.filter (a: !a.assertion) (
                builtins.map (m: {
                  assertion = builtins.elem m allTags;
                  message = ''
                    Tag '${m}' is not defined in the inventory.
                    Defined in service: '${serviceName}' instance: '${instanceName}' role: '${roleName}'.
                    Available tags:
                    ${builtins.concatStringsSep "\n" (map (n: "'${n}'") allTags)}
                  '';
                  # Hard error: an unknown tag can never match any machine.
                  severity = "error";
                }) role.tags
              )
            ) [ ] instanceConfig.roles;
          in
          ass2 ++ assertions
        ) [ ] c
      ) [ ] config.services;
    }
  ];
}

View File

@@ -1,5 +1,268 @@
{
lib,
config,
clanLib,
...
}:
let
inherit (config) inventory directory;
resolveTags =
# Inventory, { machines :: [string], tags :: [string] }
{
serviceName,
instanceName,
roleName,
inventory,
members,
}:
{
machines =
members.machines or [ ]
++ (builtins.foldl' (
acc: tag:
let
# For error printing
availableTags = lib.foldlAttrs (
acc: _: v:
v.tags or [ ] ++ acc
) [ ] (inventory.machines);
tagMembers = builtins.attrNames (
lib.filterAttrs (_n: v: builtins.elem tag v.tags or [ ]) inventory.machines
);
in
if tagMembers == [ ] then
lib.warn ''
inventory.services.${serviceName}.${instanceName}: - ${roleName} tags: no machine with tag '${tag}' found.
Available tags: ${builtins.toJSON (lib.unique availableTags)}
'' [ ]
else
acc ++ tagMembers
) [ ] members.tags or [ ]);
};
checkService =
modulepath: serviceName:
builtins.elem "inventory" (clanLib.modules.getFrontmatter modulepath serviceName).features or [ ];
compileMachine =
{ machineConfig }:
{
machineImports = [
(lib.optionalAttrs (machineConfig.deploy.targetHost or null != null) {
config.clan.core.networking.targetHost = lib.mkForce machineConfig.deploy.targetHost;
})
(lib.optionalAttrs (machineConfig.deploy.buildHost or null != null) {
config.clan.core.networking.buildHost = lib.mkForce machineConfig.deploy.buildHost;
})
];
assertions = { };
};
resolveImports =
{
supportedRoles,
resolvedRolesPerInstance,
serviceConfigs,
serviceName,
machineName,
getRoleFile,
}:
(lib.foldlAttrs (
# : [ Modules ] -> String -> ServiceConfig -> [ Modules ]
acc2: instanceName: serviceConfig:
let
resolvedRoles = resolvedRolesPerInstance.${instanceName};
isInService = builtins.any (members: builtins.elem machineName members.machines) (
builtins.attrValues resolvedRoles
);
# all roles where the machine is present
machineRoles = builtins.attrNames (
lib.filterAttrs (_role: roleConfig: builtins.elem machineName roleConfig.machines) resolvedRoles
);
machineServiceConfig = (serviceConfig.machines.${machineName} or { }).config or { };
globalConfig = serviceConfig.config or { };
globalExtraModules = serviceConfig.extraModules or [ ];
machineExtraModules = serviceConfig.machines.${machineName}.extraModules or [ ];
roleServiceExtraModules = builtins.foldl' (
acc: role: acc ++ serviceConfig.roles.${role}.extraModules or [ ]
) [ ] machineRoles;
# TODO: maybe optimize this don't lookup the role in inverse roles. Imports are not lazy
roleModules = builtins.map (
role:
if builtins.elem role supportedRoles && inventory.modules ? ${serviceName} then
getRoleFile role
else
throw "Module ${serviceName} doesn't have role: '${role}'. Role: ${
inventory.modules.${serviceName}
}/roles/${role}.nix not found."
) machineRoles;
roleServiceConfigs = builtins.filter (m: m != { }) (
builtins.map (role: serviceConfig.roles.${role}.config or { }) machineRoles
);
extraModules = map (s: if builtins.typeOf s == "string" then "${directory}/${s}" else s) (
globalExtraModules ++ machineExtraModules ++ roleServiceExtraModules
);
features =
(clanLib.modules.getFrontmatter inventory.modules.${serviceName} serviceName).features or [ ];
deprecationWarning = lib.optionalAttrs (builtins.elem "deprecated" features) {
warnings = [
''
The '${serviceName}' module has been migrated from `inventory.services` to `inventory.instances`
See https://docs.clan.lol/guides/clanServices/ for usage.
''
];
};
in
if !(serviceConfig.enabled or true) then
acc2
else if isInService then
acc2
++ [
deprecationWarning
{
imports = roleModules ++ extraModules;
clan.inventory.services.${serviceName}.${instanceName} = {
roles = resolvedRoles;
# TODO: Add inverseRoles to the service config if needed
# inherit inverseRoles;
};
}
(lib.optionalAttrs (globalConfig != { } || machineServiceConfig != { } || roleServiceConfigs != [ ])
{
clan.${serviceName} = lib.mkMerge (
[
globalConfig
machineServiceConfig
]
++ roleServiceConfigs
);
}
)
]
else
acc2
) [ ] (serviceConfigs));
in
{
imports = [
./interface.nix
];
config = {
machines = builtins.mapAttrs (
machineName: machineConfig: m:
let
compiledServices = lib.mapAttrs (
_: serviceConfigs:
(
{ config, ... }:
let
serviceName = config.serviceName;
getRoleFile = role: builtins.seq role inventory.modules.${serviceName} + "/roles/${role}.nix";
in
{
_file = "inventory/builder.nix";
_module.args = {
inherit
resolveTags
inventory
clanLib
machineName
serviceConfigs
;
};
imports = [
./roles.nix
];
machineImports = resolveImports {
supportedRoles = config.supportedRoles;
resolvedRolesPerInstance = config.resolvedRolesPerInstance;
inherit
serviceConfigs
serviceName
machineName
getRoleFile
;
};
# Assertions
assertions = {
"checkservice.${serviceName}" = {
assertion = checkService inventory.modules.${serviceName} serviceName;
message = ''
Service ${serviceName} cannot be used in inventory. It does not declare the 'inventory' feature.
To allow it add the following to the beginning of the README.md of the module:
---
...
features = [ "inventory" ]
---
Also make sure to test the module with the 'inventory' feature enabled.
'';
};
};
}
)
) (config.inventory.services or { });
compiledMachine = compileMachine {
inherit
machineConfig
;
};
machineImports = (
compiledMachine.machineImports
++ builtins.foldl' (
acc: service:
let
failedAssertions = (lib.filterAttrs (_: v: !v.assertion) service.assertions);
failedAssertionsImports =
if failedAssertions != { } then
[
{
clan.inventory.assertions = failedAssertions;
}
]
else
[
{
clan.inventory.assertions = {
"alive.assertion.inventory" = {
assertion = true;
message = ''
No failed assertions found for machine ${machineName}. This will never be displayed.
It is here for testing purposes.
'';
};
};
}
];
in
acc
++ service.machineImports
# Import failed assertions
++ failedAssertionsImports
) [ ] (builtins.attrValues m.config.compiledServices)
);
in
{
inherit machineImports compiledServices compiledMachine;
}
) (inventory.machines or { });
};
}

View File

@@ -16,13 +16,76 @@ in
type = types.raw;
};
machines = mkOption {
type = types.attrsOf (submodule ({
options = {
machineImports = mkOption {
type = types.listOf types.raw;
};
};
}));
type = types.attrsOf (
submodule (
{ name, ... }:
let
machineName = name;
in
{
options = {
compiledMachine = mkOption {
type = types.raw;
};
compiledServices = mkOption {
# type = types.attrsOf;
type = types.attrsOf (
types.submoduleWith {
modules = [
(
{ name, ... }:
let
serviceName = name;
in
{
options = {
machineName = mkOption {
default = machineName;
readOnly = true;
};
serviceName = mkOption {
default = serviceName;
readOnly = true;
};
# Outputs
machineImports = mkOption {
type = types.listOf types.raw;
};
supportedRoles = mkOption {
type = types.listOf types.str;
};
matchedRoles = mkOption {
type = types.listOf types.str;
};
machinesRoles = mkOption {
type = types.attrsOf (types.listOf types.str);
};
resolvedRolesPerInstance = mkOption {
type = types.attrsOf (
types.attrsOf (submodule {
options.machines = mkOption {
type = types.listOf types.str;
};
})
);
};
assertions = mkOption {
type = types.attrsOf types.raw;
};
};
}
)
];
}
);
};
machineImports = mkOption {
type = types.listOf types.raw;
};
};
}
)
);
};
};
}

View File

@@ -0,0 +1,65 @@
{
  lib,
  config,
  resolveTags,
  inventory,
  clanLib,
  machineName,
  serviceConfigs,
  ...
}:
let
  serviceName = config.serviceName;
in
{
  # Roles resolution
  # : List String
  # All roles shipped by the module, i.e. the files under <module>/roles/*.nix.
  supportedRoles = clanLib.modules.getRoles "inventory.modules" inventory.modules serviceName;
  # Roles of this service that the current machine actually participates in.
  matchedRoles = builtins.attrNames (
    lib.filterAttrs (_: ms: builtins.elem machineName ms) config.machinesRoles
  );
  # instanceName -> roleName -> { machines :: [string] }
  # Tag memberships are expanded into concrete machine names via resolveTags.
  resolvedRolesPerInstance = lib.mapAttrs (
    instanceName: instanceConfig:
    let
      resolvedRoles = lib.genAttrs config.supportedRoles (
        roleName:
        resolveTags {
          members = instanceConfig.roles.${roleName} or { };
          inherit
            instanceName
            serviceName
            roleName
            inventory
            ;
        }
      );
      usedRoles = builtins.attrNames instanceConfig.roles;
      # Roles referenced by the instance but not shipped by the module.
      unmatchedRoles = builtins.filter (role: !builtins.elem role config.supportedRoles) usedRoles;
    in
    if unmatchedRoles != [ ] then
      throw ''
        Roles ${builtins.toJSON unmatchedRoles} are not defined in the service ${serviceName}.
        Instance: '${instanceName}'
        Please use one of available roles: ${builtins.toJSON config.supportedRoles}
      ''
    else
      resolvedRoles
  ) serviceConfigs;
  # roleName -> [machineName], aggregated over all instances (deduplicated).
  machinesRoles = builtins.zipAttrsWith (
    _n: vs:
    let
      flat = builtins.foldl' (acc: s: acc ++ s.machines) [ ] vs;
    in
    lib.unique flat
  ) (builtins.attrValues config.resolvedRolesPerInstance);
  # Frontmatter constraint checks (e.g. role cardinality) for each instance.
  assertions = lib.concatMapAttrs (
    instanceName: resolvedRoles:
    clanLib.modules.checkConstraints {
      moduleName = serviceName;
      allModules = inventory.modules;
      inherit resolvedRoles instanceName;
    }
  ) config.resolvedRolesPerInstance;
}

View File

@@ -31,13 +31,70 @@ let
'';
};
};
moduleConfig = lib.mkOption {
default = { };
# TODO: use types.deferredModule
# clan.borgbackup MUST be defined as submodule
type = types.attrsOf types.anything;
description = ''
Configuration of the specific clanModule.
!!! Note
Configuration is passed to the nixos configuration scoped to the module.
```nix
clan.<serviceName> = { ... # Config }
```
'';
};
extraModulesOption = lib.mkOption {
description = ''
List of additionally imported `.nix` expressions.
Supported types:
- **Strings**: Interpreted relative to the 'directory' passed to `lib.clan`.
- **Paths**: should be relative to the current file.
- **Any**: Nix expression must be serializable to JSON.
!!! Note
**The import only happens if the machine is part of the service or role.**
Other types are passed through to the nixos configuration.
???+ Example
To import the `special.nix` file
```
. Clan Directory
flake.nix
...
modules
special.nix
...
```
```nix
{
extraModules = [ "modules/special.nix" ];
}
```
'';
apply = value: if lib.isString value then value else builtins.seq (builtins.toJSON value) value;
default = [ ];
type = types.listOf (
types.oneOf [
types.str
types.anything
]
);
};
in
{
imports = [
(lib.mkRemovedOptionModule [ "services" ] ''
The `inventory.services` option has been removed. Use `inventory.instances` instead.
See: https://docs.clan.lol/concepts/inventory/#services
'')
./assertions.nix
];
options = {
# Internal things
@@ -358,5 +415,160 @@ in
);
default = { };
};
services = lib.mkOption {
# TODO: deprecate these options
# services are deprecated in favor of `instances`
# visible = false;
description = ''
Services of the inventory.
- The first `<name>` is the moduleName. It must be a valid clanModule name.
- The second `<name>` is an arbitrary instance name.
???+ Example
```nix
# ClanModule name. See the module documentation for the available modules.
# Instance name, can be anything, some services might use it as a unique identifier.
services.borgbackup."instance_1" = {
roles.client.machines = ["machineA"];
};
```
!!! Note
Services MUST be added to machines via `roles` exclusively.
See [`roles.<rolename>.machines`](#inventory.services.roles.machines) or [`roles.<rolename>.tags`](#inventory.services.roles.tags) for more information.
'';
default = { };
type = types.attrsOf (
types.attrsOf (
types.submodule (
# instance name
{ name, ... }:
{
options.enabled = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Enable or disable the complete service.
If the service is disabled, it will not be added to any machine.
!!! Note
This flag is primarily used to temporarily disable a service.
I.e. A 'backup service' without any 'server' might be incomplete and would cause failure if enabled.
'';
};
options.meta = metaOptionsWith name;
options.extraModules = extraModulesOption;
options.config = moduleConfig // {
description = ''
Configuration of the specific clanModule.
!!! Note
Configuration is passed to the nixos configuration scoped to the module.
```nix
clan.<serviceName> = { ... # Config }
```
???+ Example
For `services.borgbackup` the config is the passed to the machine with the prefix of `clan.borgbackup`.
This means all config values are mapped to the `borgbackup` clanModule exclusively (`config.clan.borgbackup`).
```nix
{
services.borgbackup."instance_1".config = {
destinations = [ ... ];
# See the 'borgbackup' module docs for all options
};
}
```
!!! Note
The module author is responsible for supporting multiple instance configurations in different roles.
See each clanModule's documentation for more information.
'';
};
options.machines = lib.mkOption {
description = ''
Attribute set of machines specific config for the service.
Will be merged with other service configs, such as the role config and the global config.
For machine specific overrides use `mkForce` or other higher priority methods.
???+ Example
```{.nix hl_lines="4-7"}
services.borgbackup."instance_1" = {
roles.client.machines = ["machineA"];
machines.machineA.config = {
# Additional specific config for the machine
# This is merged with all other config places
};
};
```
'';
default = { };
type = types.attrsOf (
types.submodule {
options.extraModules = extraModulesOption;
options.config = moduleConfig // {
description = ''
Additional configuration of the specific machine.
See how [`service.<name>.<name>.config`](#inventory.services.config) works in general for further information.
'';
};
}
);
};
options.roles = lib.mkOption {
default = { };
type = types.attrsOf (
types.submodule {
options.machines = lib.mkOption {
default = [ ];
type = types.listOf types.str;
example = [ "machineA" ];
description = ''
List of machines which are part of the role.
The machines are referenced by their `attributeName` in the `inventory.machines` attribute set.
Memberships are declared here to determine which machines are part of the service.
Alternatively, `tags` can be used to determine the membership, more dynamically.
'';
};
options.tags = lib.mkOption {
default = [ ];
apply = lib.unique;
type = types.listOf types.str;
description = ''
List of tags which are used to determine the membership of the role.
The tags are matched against the `inventory.machines.<machineName>.tags` attribute set.
If a machine has at least one tag of the role, it is part of the role.
'';
};
options.config = moduleConfig // {
description = ''
Additional configuration of the specific role.
See how [`service.<name>.<name>.config`](#inventory.services.config) works in general for further information.
'';
};
options.extraModules = extraModulesOption;
}
);
};
}
)
)
);
};
};
}

View File

@@ -11,10 +11,6 @@
default =
builtins.removeAttrs (clanLib.introspection.getPrios { options = config.inventory.options; })
# tags are freeformType which is not supported yet.
# services is removed and throws an error if accessed.
[
"tags"
"services"
];
[ "tags" ];
};
}

View File

@@ -5,7 +5,7 @@
{
clan.nixosTests.machine-id = {
name = "machine-id";
name = "service-machine-id";
clan = {
directory = ./.;

View File

@@ -0,0 +1,236 @@
{
lib,
config,
pkgs,
...
}:
let
cfg = config.clan.core.postgresql;
createDatabaseState =
db:
let
folder = "/var/backup/postgres/${db.name}";
current = "${folder}/pg-dump";
compression = lib.optionalString (lib.versionAtLeast config.services.postgresql.package.version "16") "--compress=zstd";
in
{
folders = [ folder ];
preBackupScript = ''
export PATH=${
lib.makeBinPath [
config.services.postgresql.package
config.systemd.package
pkgs.coreutils
pkgs.util-linux
pkgs.zstd
]
}
while [[ "$(systemctl is-active postgresql)" == activating ]]; do
sleep 1
done
mkdir -p "${folder}"
runuser -u postgres -- pg_dump ${compression} --dbname=${db.name} -Fc -c > "${current}.tmp"
mv "${current}.tmp" ${current}
'';
postRestoreScript = ''
export PATH=${
lib.makeBinPath [
config.services.postgresql.package
config.systemd.package
pkgs.coreutils
pkgs.util-linux
pkgs.zstd
pkgs.gnugrep
]
}
while [[ "$(systemctl is-active postgresql)" == activating ]]; do
sleep 1
done
echo "Waiting for postgres to be ready..."
while ! runuser -u postgres -- psql --port=${builtins.toString config.services.postgresql.settings.port} -d postgres -c "" ; do
if ! systemctl is-active postgresql; then exit 1; fi
sleep 0.1
done
if [[ -e "${current}" ]]; then
(
systemctl stop ${lib.concatStringsSep " " db.restore.stopOnRestore}
trap "systemctl start ${lib.concatStringsSep " " db.restore.stopOnRestore}" EXIT
mkdir -p "${folder}"
if runuser -u postgres -- psql -d postgres -c "SELECT 1 FROM pg_database WHERE datname = '${db.name}'" | grep -q 1; then
runuser -u postgres -- dropdb "${db.name}"
fi
runuser -u postgres -- pg_restore -C -d postgres "${current}"
)
else
echo No database backup found, skipping restore
fi
'';
};
createDatabase = db: ''
CREATE DATABASE "${db.name}" ${
lib.concatStringsSep " " (
lib.mapAttrsToList (name: value: "${name} = '${value}'") db.create.options
)
}
'';
userClauses = lib.mapAttrsToList (
_: user:
''$PSQL -tAc "SELECT 1 FROM pg_roles WHERE rolname='${user.name}'" | grep -q 1 || $PSQL -tAc 'CREATE USER "${user.name}"' ''
) cfg.users;
databaseClauses = lib.mapAttrsToList (
name: db:
lib.optionalString db.create.enable ''$PSQL -d postgres -c "SELECT 1 FROM pg_database WHERE datname = '${name}'" | grep -q 1 || $PSQL -d postgres -c ${lib.escapeShellArg (createDatabase db)} ''
) cfg.databases;
in
{
options.clan.core.postgresql = {
enable = lib.mkEnableOption "Whether to enable PostgreSQL Server";
# we are reimplemeting ensureDatabase and ensureUser options here to allow to create databases with options
databases = lib.mkOption {
description = "Databases to create";
default = { };
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
name = lib.mkOption {
type = lib.types.str;
default = name;
description = "Database name.";
};
service = lib.mkOption {
type = lib.types.str;
default = name;
description = "Service name that we associate with the database.";
};
# set to false, in case the upstream module uses ensureDatabase option
create.enable = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Create the database if it does not exist.";
};
create.options = lib.mkOption {
description = "Options to pass to the CREATE DATABASE command.";
type = lib.types.lazyAttrsOf lib.types.str;
default = { };
example = {
TEMPLATE = "template0";
LC_COLLATE = "C";
LC_CTYPE = "C";
ENCODING = "UTF8";
OWNER = "foo";
};
};
restore.stopOnRestore = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
description = "List of systemd services to stop before restoring the database.";
};
};
}
)
);
};
users = lib.mkOption {
description = "Users to create";
default = { };
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options.name = lib.mkOption {
description = "User name";
type = lib.types.str;
default = name;
};
}
)
);
};
};
config = lib.mkIf (config.clan.core.postgresql.enable) {
clan.core.settings.state-version.enable = true;
# services.postgresql.package = lib.mkDefault pkgs.postgresql_16;
services.postgresql.enable = true;
services.postgresql.settings = {
wal_level = "replica";
max_wal_senders = 3;
};
# We are duplicating a bit the upstream module but allow to create databases with options
systemd.services.postgresql.postStart = ''
PSQL="psql --port=${builtins.toString config.services.postgresql.settings.port}"
while ! $PSQL -d postgres -c "" 2> /dev/null; do
if ! kill -0 "$MAINPID"; then exit 1; fi
sleep 0.1
done
${lib.concatStringsSep "\n" userClauses}
${lib.concatStringsSep "\n" databaseClauses}
'';
clan.core.state = lib.mapAttrs' (
_: db: lib.nameValuePair db.service (createDatabaseState db)
) config.clan.core.postgresql.databases;
environment.systemPackages = builtins.map (
db:
let
folder = "/var/backup/postgres/${db.name}";
current = "${folder}/pg-dump";
in
pkgs.writeShellScriptBin "postgres-db-restore-command-${db.name}" ''
export PATH=${
lib.makeBinPath [
config.services.postgresql.package
config.systemd.package
pkgs.coreutils
pkgs.util-linux
pkgs.zstd
pkgs.gnugrep
]
}
while [[ "$(systemctl is-active postgresql)" == activating ]]; do
sleep 1
done
echo "Waiting for postgres to be ready..."
while ! runuser -u postgres -- psql --port=${builtins.toString config.services.postgresql.settings.port} -d postgres -c "" ; do
if ! systemctl is-active postgresql; then exit 1; fi
sleep 0.1
done
if [[ -e "${current}" ]]; then
(
${lib.optionalString (db.restore.stopOnRestore != [ ]) ''
systemctl stop ${builtins.toString db.restore.stopOnRestore}
trap "systemctl start ${builtins.toString db.restore.stopOnRestore}" EXIT
''}
mkdir -p "${folder}"
if runuser -u postgres -- psql -d postgres -c "SELECT 1 FROM pg_database WHERE datname = '${db.name}'" | grep -q 1; then
runuser -u postgres -- dropdb "${db.name}"
fi
runuser -u postgres -- pg_restore -C -d postgres "${current}"
)
else
echo No database backup found, skipping restore
fi
''
) (builtins.attrValues config.clan.core.postgresql.databases);
};
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 8.8 KiB

View File

@@ -1,10 +1,17 @@
import { API } from "@/api/API";
import { Schema as Inventory } from "@/api/Inventory";
export type OperationNames = keyof API;
type Services = NonNullable<Inventory["services"]>;
type ServiceNames = keyof Services;
export type OperationArgs<T extends OperationNames> = API[T]["arguments"];
export type OperationResponse<T extends OperationNames> = API[T]["return"];
export type ClanServiceInstance<T extends ServiceNames> = NonNullable<
Services[T]
>[string];
export type SuccessQuery<T extends OperationNames> = Extract<
OperationResponse<T>,
{ status: "success" }

View File

@@ -64,66 +64,11 @@ const mockFetcher: Fetcher = <K extends OperationNames>(
description: "Name of the gritty",
prompt_type: "line",
display: {
label: "(1) Name",
label: "Gritty Name",
group: "User",
required: true,
},
},
{
name: "gritty.foo",
description: "Name of the gritty",
prompt_type: "line",
display: {
label: "(2) Password",
group: "Root",
required: true,
},
},
{
name: "gritty.bar",
description: "Name of the gritty",
prompt_type: "line",
display: {
label: "(3) Gritty",
group: "Root",
required: true,
},
},
],
},
{
name: "funny.dodo",
prompts: [
{
name: "gritty.name",
description: "Name of the gritty",
prompt_type: "line",
display: {
label: "(4) Name",
group: "User",
required: true,
},
},
{
name: "gritty.foo",
description: "Name of the gritty",
prompt_type: "line",
display: {
label: "(5) Password",
group: "Lonely",
required: true,
},
},
{
name: "gritty.bar",
description: "Name of the gritty",
prompt_type: "line",
display: {
label: "(6) Batty",
group: "Root",
required: true,
},
},
],
},
],

View File

@@ -19,7 +19,6 @@ import Icon from "@/src/components/Icon/Icon";
import { useSystemStorageOptions } from "@/src/hooks/queries";
import { useApiClient } from "@/src/hooks/ApiClient";
import { onMount } from "solid-js";
import cx from "classnames";
const Prose = () => (
<StepLayout
@@ -273,7 +272,6 @@ const ChooseDisk = () => {
)}
</Field>
<Alert
transparent
type="error"
icon="Info"
title="You're about to format this drive"
@@ -316,17 +314,8 @@ const FlashProgress = () => {
};
return (
<div
class={cx(
"relative flex size-full flex-col items-center justify-center bg-inv-4",
)}
>
<img
src="/logos/usb-stick-min.png"
alt="usb logo"
class="absolute z-0 top-2"
/>
<div class="mb-6 flex w-full max-w-md flex-col items-center gap-3 fg-inv-1 z-10">
<div class="flex size-full flex-col items-center justify-center bg-inv-4">
<div class="mb-6 flex w-full max-w-md flex-col items-center gap-3 fg-inv-1">
<Typography
hierarchy="title"
size="default"
@@ -338,7 +327,7 @@ const FlashProgress = () => {
<LoadingBar />
<Button
hierarchy="primary"
class="w-fit mt-3"
class="w-fit"
size="s"
onClick={handleCancel}
>

View File

@@ -60,7 +60,6 @@ const ConfigureAddress = () => {
});
const [isReachable, setIsReachable] = createSignal<string | null>(null);
const [loading, setLoading] = createSignal<boolean>(false);
const client = useApiClient();
// TODO: push values to the parent form Store
@@ -81,15 +80,12 @@ const ConfigureAddress = () => {
return;
}
setLoading(true);
const call = client.fetch("check_machine_ssh_login", {
remote: {
address,
},
});
const result = await call.result;
setLoading(false);
console.log("SSH login check result:", result);
if (result.status === "success") {
setIsReachable(address);
@@ -122,28 +118,28 @@ const ConfigureAddress = () => {
)}
</Field>
</Fieldset>
<Button
disabled={!getValue(formStore, "targetHost")}
endIcon="ArrowRight"
onClick={tryReachable}
hierarchy="secondary"
>
Test Connection
</Button>
</div>
}
footer={
<div class="flex justify-between">
<BackButton />
<Show
when={
<NextButton
type="submit"
disabled={
!isReachable() ||
isReachable() !== getValue(formStore, "targetHost")
}
fallback={<NextButton type="submit">Next</NextButton>}
>
<Button
endIcon="ArrowRight"
onClick={tryReachable}
hierarchy="secondary"
loading={loading()}
>
Test Connection
</Button>
</Show>
Next
</NextButton>
</div>
}
/>
@@ -439,7 +435,7 @@ const PromptsFields = (props: PromptsFieldsProps) => {
};
return (
<Form onSubmit={handleSubmit}>
<Form onSubmit={handleSubmit} class="h-full">
<StepLayout
body={
<div class="flex flex-col gap-2">
@@ -584,7 +580,7 @@ const InstallSummary = () => {
progress: runInstall,
}));
await runInstall.result;
await runInstall.result; // Wait for the installation to finish
stepSignal.setActiveStep("install:done");
};
@@ -649,13 +645,8 @@ const InstallProgress = () => {
);
return (
<div class="relative flex size-full flex-col items-center justify-center bg-inv-4">
<img
src="/logos/usb-stick-min.png"
alt="usb logo"
class="absolute z-0 top-2"
/>
<div class="mb-6 flex w-full max-w-md flex-col items-center gap-3 fg-inv-1 z-10">
<div class="flex size-full flex-col items-center justify-center bg-inv-4">
<div class="mb-6 flex w-full max-w-md flex-col items-center gap-3 fg-inv-1">
<Typography
hierarchy="title"
size="default"
@@ -664,11 +655,10 @@ const InstallProgress = () => {
>
Machine is beeing installed
</Typography>
<LoadingBar />
<Typography
hierarchy="label"
size="default"
class=""
class="py-2"
color="secondary"
inverted
>
@@ -704,9 +694,10 @@ const InstallProgress = () => {
</Match>
</Switch>
</Typography>
<LoadingBar />
<Button
hierarchy="primary"
class="w-fit mt-3"
class="w-fit"
size="s"
onClick={handleCancel}
>

View File

@@ -11,4 +11,5 @@ pytest_plugins = [
"clan_cli.tests.runtime",
"clan_cli.tests.fixtures_flakes",
"clan_cli.tests.stdout",
"clan_cli.tests.nix_config",
]

View File

@@ -173,6 +173,7 @@ class ClanFlake:
"git+https://git.clan.lol/clan/clan-core": clan_core_replacement,
"https://git.clan.lol/clan/clan-core/archive/main.tar.gz": clan_core_replacement,
}
self.clan_modules: list[str] = []
self.temporary_home = temporary_home
self.path = temporary_home / "flake"
if not suppress_tmp_home_warning:
@@ -234,6 +235,9 @@ class ClanFlake:
if self.inventory:
inventory_path = self.path / "inventory.json"
inventory_path.write_text(json.dumps(self.inventory, indent=2))
imports = "\n".join(
[f"clan-core.clanModules.{module}" for module in self.clan_modules]
)
for machine_name, machine_config in self.machines.items():
configuration_nix = (
self.path / "machines" / machine_name / "configuration.nix"
@@ -245,6 +249,7 @@ class ClanFlake:
{{
imports = [
(builtins.fromJSON (builtins.readFile ./configuration.json))
{imports}
];
}}
"""

View File

@@ -0,0 +1,24 @@
import json
import subprocess
from dataclasses import dataclass
import pytest
@dataclass
class ConfigItem:
    """One setting from the output of `nix config show --json`.

    Field names are camelCase because they mirror the JSON keys emitted by
    Nix (hence the `noqa: N815` suppressions).
    """

    aliases: list[str]  # alternative names Nix accepts for this setting
    defaultValue: bool  # noqa: N815
    description: str  # human-readable help text from Nix
    documentDefault: bool  # noqa: N815
    experimentalFeature: str  # noqa: N815
    value: str | bool | list[str] | dict[str, str]  # current effective value
@pytest.fixture(scope="session")
def nix_config() -> dict[str, ConfigItem]:
    """Session-wide snapshot of `nix config show --json`, parsed into ConfigItem records."""
    raw = subprocess.run(
        ["nix", "config", "show", "--json"],
        check=True,
        stdout=subprocess.PIPE,
    ).stdout
    settings = json.loads(raw)
    return {key: ConfigItem(**fields) for key, fields in settings.items()}

View File

@@ -3,7 +3,7 @@ from clan_cli.tests.fixtures_flakes import FlakeForTest
from clan_cli.tests.helpers import cli
@pytest.mark.with_core
@pytest.mark.impure
def test_backups(
test_flake_with_core: FlakeForTest,
) -> None:

View File

@@ -6,14 +6,12 @@
inputs':
let
# fake clan-core input
# TODO should this be removed as well?
# fake-clan-core = {
# clanModules.fake-module = ./fake-module.nix;
# };
inputs = inputs';
# inputs = inputs' // {
# clan-core = fake-clan-core;
# };
fake-clan-core = {
clanModules.fake-module = ./fake-module.nix;
};
inputs = inputs' // {
clan-core = fake-clan-core;
};
lib = inputs.nixpkgs.lib;
clan_attrs_json =
if lib.pathExists ./clan_attrs.json then

View File

@@ -9,7 +9,7 @@ if TYPE_CHECKING:
pass
@pytest.mark.with_core
@pytest.mark.impure
def test_flakes_inspect(
test_flake_with_core: FlakeForTest, capture_output: CaptureOutput
) -> None:

View File

@@ -0,0 +1,44 @@
from clan_lib.nix_models.clan import Inventory
from clan_lib.nix_models.clan import InventoryMachine as Machine
from clan_lib.nix_models.clan import InventoryMeta as Meta
from clan_lib.nix_models.clan import InventoryService as Service
def test_make_meta_minimal() -> None:
    """A Meta carrying just the mandatory name round-trips unchanged."""
    meta = Meta({"name": "foo"})
    assert meta == {"name": "foo"}
def test_make_inventory_minimal() -> None:
    """An Inventory needs nothing beyond a meta section with a name."""
    inventory = Inventory({"meta": Meta({"name": "foo"})})
    assert inventory == {"meta": {"name": "foo"}}
def test_make_machine_minimal() -> None:
    """Machines have no mandatory fields: the empty dict is a valid machine."""
    assert Machine({}) == {}
def test_make_service_minimal() -> None:
    """Services have no mandatory fields: the empty dict is a valid service."""
    assert Service({}) == {}

View File

@@ -53,6 +53,7 @@ def test_inventory_deserialize_variants(
# Check that all keys are present
assert "meta" in inventory
assert "machines" in inventory
assert "services" in inventory
# assert "tags" in inventory
# assert "modules" in inventory
assert "instances" in inventory

View File

@@ -14,7 +14,7 @@ if TYPE_CHECKING:
from .age_keys import KeyPair
@pytest.mark.with_core
@pytest.mark.impure
def test_generate_secret(
monkeypatch: pytest.MonkeyPatch,
test_flake_with_core: FlakeForTest,

View File

@@ -7,23 +7,24 @@ import pytest
from clan_cli.tests.age_keys import SopsSetup
from clan_cli.tests.fixtures_flakes import ClanFlake
from clan_cli.tests.helpers import cli
from clan_cli.tests.nix_config import ConfigItem
from clan_cli.vms.run import inspect_vm, spawn_vm
from clan_lib import cmd
from clan_lib.flake import Flake
from clan_lib.machines.machines import Machine
from clan_lib.nix import nix_config, nix_eval, run
from clan_lib.nix import nix_eval, run
@pytest.mark.impure
@pytest.mark.skipif(sys.platform == "darwin", reason="preload doesn't work on darwin")
def test_vm_deployment(
flake: ClanFlake,
nix_config: dict[str, ConfigItem],
sops_setup: SopsSetup,
) -> None:
# machine 1
config = nix_config()
machine1_config = flake.machines["m1_machine"]
machine1_config["nixpkgs"]["hostPlatform"] = config["system"]
machine1_config["nixpkgs"]["hostPlatform"] = nix_config["system"].value
machine1_config["clan"]["virtualisation"]["graphics"] = False
machine1_config["services"]["getty"]["autologinUser"] = "root"
machine1_config["services"]["openssh"]["enable"] = True
@@ -47,6 +48,19 @@ def test_vm_deployment(
echo hello > "$out"/shared_secret
echo hello > "$out"/no_deploy_secret
"""
# machine 2
machine2_config = flake.machines["m2_machine"]
machine2_config["nixpkgs"]["hostPlatform"] = nix_config["system"].value
machine2_config["clan"]["virtualisation"]["graphics"] = False
machine2_config["services"]["getty"]["autologinUser"] = "root"
machine2_config["services"]["openssh"]["enable"] = True
machine2_config["users"]["users"]["root"]["openssh"]["authorizedKeys"]["keys"] = [
# put your key here when debugging and pass ssh_port in run_vm_in_thread call below
]
machine2_config["networking"]["firewall"]["enable"] = False
machine2_config["clan"]["core"]["vars"]["generators"]["my_shared_generator"] = (
m1_shared_generator.copy()
)
flake.refresh()
@@ -54,16 +68,17 @@ def test_vm_deployment(
cli.run(["vars", "generate", "--flake", str(flake.path)])
# check sops secrets not empty
sops_secrets = json.loads(
run(
nix_eval(
[
f"{flake.path}#nixosConfigurations.m1_machine.config.sops.secrets",
]
)
).stdout.strip()
)
assert sops_secrets != {}
for machine in ["m1_machine", "m2_machine"]:
sops_secrets = json.loads(
run(
nix_eval(
[
f"{flake.path}#nixosConfigurations.{machine}.config.sops.secrets",
]
)
).stdout.strip()
)
assert sops_secrets != {}
my_secret_path = run(
nix_eval(
[
@@ -72,23 +87,28 @@ def test_vm_deployment(
)
).stdout.strip()
assert "no-such-path" not in my_secret_path
shared_secret_path = run(
nix_eval(
[
f"{flake.path}#nixosConfigurations.m1_machine.config.clan.core.vars.generators.my_shared_generator.files.shared_secret.path",
]
)
).stdout.strip()
assert "no-such-path" not in shared_secret_path
for machine in ["m1_machine", "m2_machine"]:
shared_secret_path = run(
nix_eval(
[
f"{flake.path}#nixosConfigurations.{machine}.config.clan.core.vars.generators.my_shared_generator.files.shared_secret.path",
]
)
).stdout.strip()
assert "no-such-path" not in shared_secret_path
# run nix flake lock
cmd.run(["nix", "flake", "lock"], cmd.RunOpts(cwd=flake.path))
vm1_config = inspect_vm(machine=Machine("m1_machine", Flake(str(flake.path))))
vm2_config = inspect_vm(machine=Machine("m2_machine", Flake(str(flake.path))))
with ExitStack() as stack:
vm1 = stack.enter_context(spawn_vm(vm1_config, stdin=subprocess.DEVNULL))
vm2 = stack.enter_context(spawn_vm(vm2_config, stdin=subprocess.DEVNULL))
qga_m1 = stack.enter_context(vm1.qga_connect())
qga_m2 = stack.enter_context(vm2.qga_connect())
# run these always successful commands to make sure all vms have started before continuing
qga_m1.run(["echo"])
qga_m2.run(["echo"])
# check my_secret is deployed
result = qga_m1.run(["cat", "/run/secrets/vars/m1_generator/my_secret"])
assert result.stdout == "hello\n"
@@ -97,6 +117,11 @@ def test_vm_deployment(
["cat", "/run/secrets/vars/my_shared_generator/shared_secret"]
)
assert result.stdout == "hello\n"
# check shared_secret is deployed on m2
result = qga_m2.run(
["cat", "/run/secrets/vars/my_shared_generator/shared_secret"]
)
assert result.stdout == "hello\n"
# check no_deploy_secret is not deployed
result = qga_m1.run(
["test", "-e", "/run/secrets/vars/my_shared_generator/no_deploy_secret"],

View File

@@ -15,7 +15,7 @@ if TYPE_CHECKING:
no_kvm = not Path("/dev/kvm").exists()
@pytest.mark.with_core
@pytest.mark.impure
def test_inspect(
test_flake_with_core: FlakeForTest, capture_output: CaptureOutput
) -> None:

View File

@@ -97,10 +97,18 @@ class InventoryMeta(TypedDict):
class InventoryService(TypedDict):
pass
InventoryInstancesType = dict[str, InventoryInstance]
InventoryMachinesType = dict[str, InventoryMachine]
InventoryMetaType = InventoryMeta
InventoryModulesType = dict[str, dict[str, Any] | list[Any] | bool | float | int | str | None]
InventoryServicesType = dict[str, InventoryService]
InventoryTagsType = dict[str, list[str]]
class Inventory(TypedDict):
@@ -108,6 +116,7 @@ class Inventory(TypedDict):
machines: NotRequired[InventoryMachinesType]
meta: NotRequired[InventoryMetaType]
modules: NotRequired[InventoryModulesType]
services: NotRequired[InventoryServicesType]
tags: NotRequired[InventoryTagsType]

View File

@@ -11,6 +11,7 @@ from clan_lib.nix_models.clan import (
InventoryInstancesType,
InventoryMachinesType,
InventoryMetaType,
InventoryServicesType,
InventoryTagsType,
)
@@ -105,6 +106,7 @@ class InventorySnapshot(TypedDict):
machines: NotRequired[InventoryMachinesType]
instances: NotRequired[InventoryInstancesType]
meta: NotRequired[InventoryMetaType]
services: NotRequired[InventoryServicesType]
tags: NotRequired[InventoryTagsType]
@@ -161,8 +163,7 @@ class InventoryStore:
return sanitized
def get_readonly_raw(self) -> Inventory:
attrs = "{" + ",".join(self._keys) + "}"
return self._flake.select(f"clanInternals.inventoryClass.inventory.{attrs}")
return self._flake.select("clanInternals.inventoryClass.inventory")
def _get_persisted(self) -> InventorySnapshot:
"""

View File

@@ -101,7 +101,7 @@ def test_simple_read_write(setup_test_files: Path) -> None:
store = InventoryStore(
flake=MockFlake(nix_file),
inventory_file_name=json_file.name,
_keys=["foo", "protected"],
_keys=[], # disable toplevel filtering
)
store._flake.invalidate_cache()
data: dict = store.read() # type: ignore
@@ -149,7 +149,7 @@ def test_simple_deferred(setup_test_files: Path) -> None:
inventory_file_name=json_file.name,
# Needed to allow auto-transforming deferred modules
_allowed_path_transforms=["foo.*"],
_keys=["foo"], # disable toplevel filtering
_keys=[], # disable toplevel filtering
)
data = store.read()
@@ -230,7 +230,7 @@ def test_manipulate_list(setup_test_files: Path) -> None:
store = InventoryStore(
flake=MockFlake(nix_file),
inventory_file_name=json_file.name,
_keys=["empty", "predefined"],
_keys=[], # disable toplevel filtering
)
data = store.read()
@@ -275,7 +275,7 @@ def test_static_list_items(setup_test_files: Path) -> None:
store = InventoryStore(
flake=MockFlake(nix_file),
inventory_file_name=json_file.name,
_keys=["empty", "predefined"],
_keys=[], # disable toplevel filtering
)
data = store.read()

View File

@@ -5,7 +5,7 @@ import shutil
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import cast
from typing import Any, cast
import clan_cli.clan.create
import pytest
@@ -26,6 +26,7 @@ from clan_lib.nix import nix_command
from clan_lib.nix_models.clan import (
InventoryInstancesType,
InventoryMachine,
InventoryServicesType,
Unknown,
)
from clan_lib.nix_models.clan import InventoryMachineDeploy as MachineDeploy
@@ -40,6 +41,7 @@ log = logging.getLogger(__name__)
@dataclass
class InventoryWrapper:
services: InventoryServicesType
instances: InventoryInstancesType
@@ -63,6 +65,8 @@ def create_base_inventory(ssh_keys_pairs: list[SSHKeyPair]) -> InventoryWrapper:
ssh_keys.append(InvSSHKeyEntry(f"user_{num}", ssh_key.public.read_text()))
"""Create the base inventory structure."""
legacy_services: dict[str, Any] = {}
instances = InventoryInstancesType(
{
"admin-inst": {
@@ -84,7 +88,7 @@ def create_base_inventory(ssh_keys_pairs: list[SSHKeyPair]) -> InventoryWrapper:
}
)
return InventoryWrapper(instances=instances)
return InventoryWrapper(services=legacy_services, instances=instances)
# TODO: We need a way to calculate the narHash of the current clan-core
@@ -208,6 +212,7 @@ def test_clan_create_api(
== "clan-core/admin"
)
set_value_by_path(inventory, "services", inventory_conf.services)
set_value_by_path(inventory, "instances", inventory_conf.instances)
store.write(
inventory,

View File

@@ -17,8 +17,6 @@
pythonRuntime,
setupNixInNix,
templateDerivation,
zerotierone,
minifakeroot,
}:
let
pyDeps = ps: [
@@ -217,10 +215,7 @@ pythonRuntime.pkgs.buildPythonApplication {
pkgs.mkpasswd
pkgs.xkcdpass
pkgs.pass
zerotierone
minifakeroot
nix-select
../../nixosModules/clanCore/zerotier/generate.py
# needed by flash list tests
nixpkgs.legacyPackages.x86_64-linux.kbd

View File

@@ -16,14 +16,13 @@
clanCoreWithVendoredDeps = self'.packages.clan-core-flake.override {
clanCore = self.filter {
include = [
"clanModules"
"flakeModules"
"lib"
"nixosModules"
"flake.lock"
"templates"
"clanServices"
"pkgs/zerotierone"
"pkgs/minifakeroot"
];
};
};
@@ -45,7 +44,6 @@
clan-cli = pkgs.callPackage ./default.nix {
inherit (inputs) nixpkgs nix-select;
inherit (self.legacyPackages.${system}) setupNixInNix;
inherit (self'.packages) zerotierone minifakeroot;
templateDerivation = templateDerivation;
pythonRuntime = pkgs.python3;
clan-core-path = clanCoreWithVendoredDeps;
@@ -57,7 +55,6 @@
clan-cli-full = pkgs.callPackage ./default.nix {
inherit (inputs) nixpkgs nix-select;
inherit (self.legacyPackages.${system}) setupNixInNix;
inherit (self'.packages) zerotierone minifakeroot;
clan-core-path = clanCoreWithVendoredDeps;
templateDerivation = templateDerivation;
pythonRuntime = pkgs.python3;
@@ -99,6 +96,12 @@
# It treats it not as the type of an empty object, but as non-nullish.
# Should be fixed in json2ts: https://github.com/bcherny/json-schema-to-typescript/issues/557
sed -i -e 's/{}/Record<string, never>/g' $out/API.ts
# Retrieve python API Typescript types
# delete the reserved tags from typechecking because the conversion library doesn't support them
jq 'del(.properties.tags.properties)' ${self'.legacyPackages.schemas.inventory}/schema.json > schema.json
json2ts --input schema.json > $out/Inventory.ts
cp ${self'.legacyPackages.schemas.inventory}/* $out
'';
};
clan-lib-openapi = pkgs.stdenv.mkDerivation {

View File

@@ -19,13 +19,13 @@
agit = pkgs.callPackage ./agit { };
tea-create-pr = pkgs.callPackage ./tea-create-pr { };
zerotier-members = pkgs.callPackage ./zerotier-members { };
moonlight-sunshine-accept = pkgs.callPackage ./moonlight-sunshine-accept { };
merge-after-ci = pkgs.callPackage ./merge-after-ci { inherit (config.packages) tea-create-pr; };
minifakeroot = pkgs.callPackage ./minifakeroot { };
pending-reviews = pkgs.callPackage ./pending-reviews { };
editor = pkgs.callPackage ./editor/clan-edit-codium.nix { };
classgen = pkgs.callPackage ./classgen { };
zerotierone = pkgs.callPackage ./zerotierone { };
update-clan-core-for-checks = pkgs.callPackage ./update-clan-core-for-checks { };
};
};
}

View File

@@ -0,0 +1,2 @@
# shellcheck shell=bash
use flake .#moonlight-sunshine-accept

View File

@@ -0,0 +1,37 @@
{
lib,
python3Packages,
makeDesktopItem,
copyDesktopItems,
}:
let
desktop-file = makeDesktopItem {
name = "org.clan.moonlight-sunset-accept";
exec = "moonlight-sunshine-accept moonlight join %u";
desktopName = "moonlight-handler";
startupWMClass = "moonlight-handler";
mimeTypes = [ "x-scheme-handler/moonlight" ];
};
in
python3Packages.buildPythonApplication {
name = "moonlight-sunshine-accept";
src = ./.;
format = "pyproject";
propagatedBuildInputs = [ python3Packages.cryptography ];
nativeBuildInputs = [
python3Packages.setuptools
copyDesktopItems
];
desktopItems = [ desktop-file ];
meta = with lib; {
description = "Moonlight Sunshine Bridge";
license = licenses.mit;
maintainers = with maintainers; [ a-kenji ];
mainProgram = "moonlight-sunshine-accept";
};
}

View File

@@ -0,0 +1,40 @@
#!/usr/bin/env python
import argparse
from . import moonlight, sunshine
def main() -> None:
    """CLI entry point: dispatch to the sunshine or moonlight sub-command."""
    parser = argparse.ArgumentParser(
        prog="moonlight-sunshine-accept",
        description="Manage moonlight machines",
    )
    subparsers = parser.add_subparsers()

    sunshine_cmd = subparsers.add_parser(
        "sunshine",
        aliases=["sun"],
        description="Sunshine configuration",
        help="Sunshine configuration",
    )
    sunshine.register_parser(sunshine_cmd)

    moonlight_cmd = subparsers.add_parser(
        "moonlight",
        aliases=["moon"],
        description="Moonlight configuration",
        help="Moonlight configuration",
    )
    moonlight.register_parser(moonlight_cmd)

    args = parser.parse_args()
    # Without a sub-command argparse leaves `func` unset; show usage instead.
    if hasattr(args, "func"):
        args.func(args)
    else:
        parser.print_help()


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,2 @@
class Error(Exception):
    """Base exception type for this package's errors."""

    pass

View File

@@ -0,0 +1,37 @@
import argparse
from .init_certificates import register_initialization_parser
from .init_config import register_config_initialization_parser
from .join import register_join_parser
def register_parser(parser: argparse.ArgumentParser) -> None:
    """Wire the moonlight sub-commands (init, init-config, join) onto `parser`."""
    commands = parser.add_subparsers(
        title="command",
        description="the command to run",
        help="the command to run",
        required=True,
    )

    init_cmd = commands.add_parser(
        "init",
        aliases=["i"],
        description="Initialize the moonlight credentials",
        help="Initialize the moonlight credentials",
    )
    register_initialization_parser(init_cmd)

    init_config_cmd = commands.add_parser(
        "init-config",
        description="Initialize the moonlight configuration",
        help="Initialize the moonlight configuration",
    )
    register_config_initialization_parser(init_config_cmd)

    join_cmd = commands.add_parser(
        "join",
        aliases=["j"],
        description="Join a sunshine host",
        help="Join a sunshine host",
    )
    register_join_parser(join_cmd)

View File

@@ -0,0 +1,74 @@
import argparse
import datetime
from datetime import timedelta
from pathlib import Path
from cryptography import hazmat, x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
def generate_private_key() -> rsa.RSAPrivateKey:
    """Create a fresh 2048-bit RSA key (public exponent 65537) for the client cert."""
    return rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048,
        backend=hazmat.backends.default_backend(),
    )
def generate_certificate(private_key: rsa.RSAPrivateKey) -> bytes:
    """Build a self-signed "NVIDIA GameStream Client" certificate, valid ~20 years.

    Returns the certificate serialized as PEM bytes.
    """
    name = x509.Name(
        [x509.NameAttribute(NameOID.COMMON_NAME, "NVIDIA GameStream Client")]
    )
    builder = x509.CertificateBuilder()
    builder = builder.subject_name(name)
    builder = builder.issuer_name(name)  # self-signed: subject == issuer
    builder = builder.public_key(private_key.public_key())
    builder = builder.serial_number(x509.random_serial_number())
    builder = builder.not_valid_before(datetime.datetime.now(tz=datetime.UTC))
    builder = builder.not_valid_after(
        datetime.datetime.now(tz=datetime.UTC) + timedelta(days=365 * 20)
    )
    builder = builder.add_extension(
        x509.SubjectAlternativeName([x509.DNSName("localhost")]),
        critical=False,
    )
    certificate = builder.sign(private_key, hashes.SHA256(), default_backend())
    return certificate.public_bytes(serialization.Encoding.PEM)
def private_key_to_pem(private_key: rsa.RSAPrivateKey) -> bytes:
    """Serialize an RSA private key as unencrypted PEM (traditional OpenSSL format)."""
    return private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    )
def init_credentials() -> tuple[bytes, bytes]:
    """Generate a key pair; returns (PEM certificate, PEM private key)."""
    key = generate_private_key()
    return generate_certificate(key), private_key_to_pem(key)
def write_credentials(_args: argparse.Namespace) -> None:
    """Generate moonlight credentials and write them under ./credentials/.

    Creates `cacert.pem` (certificate) and `cakey.pem` (private key) in a
    `credentials` directory beneath the current working directory; the
    directory is created if missing. The argparse namespace is unused.
    """
    pem_certificate, pem_private_key = init_credentials()
    credentials_path = Path.cwd() / "credentials"
    # credentials_path is already a Path — no need to re-wrap it in Path().
    credentials_path.mkdir(parents=True, exist_ok=True)
    (credentials_path / "cacert.pem").write_bytes(pem_certificate)
    (credentials_path / "cakey.pem").write_bytes(pem_private_key)
    print("Finished writing moonlight credentials")
def register_initialization_parser(parser: argparse.ArgumentParser) -> None:
    """Bind the `init` sub-command to the write_credentials action."""
    parser.set_defaults(func=write_credentials)

View File

@@ -0,0 +1,15 @@
import argparse
from pathlib import Path
from .state import init_state
def init_config(args: argparse.Namespace) -> None:
    """Seed the moonlight state from the given certificate and key files."""
    cert_text = args.certificate.read_text()
    key_text = args.key.read_text()
    init_state(cert_text, key_text)
    print("Finished initializing moonlight state.")
def register_config_initialization_parser(parser: argparse.ArgumentParser) -> None:
    """Add --certificate/--key path options and bind the init-config action."""
    for option in ("--certificate", "--key"):
        parser.add_argument(option, type=Path)
    parser.set_defaults(func=init_config)

View File

@@ -0,0 +1,131 @@
import argparse
import base64
import json
import socket
from .run import MoonlightPairing
from .state import add_sunshine_host, gen_pin, get_moonlight_certificate
from .uri import parse_moonlight_uri
def send_join_request(host: str, port: int, cert: str) -> bool:
    """Pair with a sunshine host, preferring the API flow with a native fallback.

    The pin-based API pairing is brittle in practice, so it is attempted a few
    times; if every attempt fails we fall back to the native certificate
    exchange. Returns True on a successful join.
    """
    max_tries = 3
    for _ in range(max_tries):
        # Succeeds as soon as one API pairing attempt comes back truthy;
        # the previous dead `response = False` initializer is gone.
        if send_join_request_api(host, port):
            return True
    return bool(send_join_request_native(host, port, cert))
# This is the preferred join method, but sunshine's pin mechanism
# seems to be somewhat brittle in repeated testing: retry, then fall back to native.
def send_join_request_api(host: str, port: int) -> bool:
    """Attempt one pin-based API pairing with the sunshine host.

    Starts a local `moonlight pair` process with a generated pin, then posts
    the pin to the host over a raw IPv6 socket. Returns True on success,
    False if the socket exchange raised.
    """
    moonlight = MoonlightPairing()
    # is_paired = moonlight.check(host)
    # NOTE(review): the pre-pairing check above is disabled, so this branch
    # below is currently dead — confirm whether that is intentional.
    is_paired = False
    if is_paired:
        print(f"Moonlight is already paired with this host: {host}")
        return True
    pin = gen_pin()
    moonlight.init_pairing(host, pin)
    moonlight.wait_until_started()
    # IPv6-only socket (AF_INET6); hand-rolled HTTP request over it.
    with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
        s.connect((host, port))
        json_body = json.dumps({"type": "api", "pin": pin})
        request = (
            f"POST / HTTP/1.1\r\n"
            f"Content-Type: application/json\r\n"
            f"Content-Length: {len(json_body)}\r\n"
            f"Connection: close\r\n\r\n"
            f"{json_body}"
        )
        try:
            s.sendall(request.encode("utf-8"))
            # Single recv — assumes the whole response fits in 16 KiB.
            response = s.recv(16384).decode("utf-8")
            print(response)
            body = response.split("\n")[-1]
            print(body)
            # Pairing process is terminated on both success and failure paths.
            moonlight.terminate()
        except Exception as e:
            print(f"An error occurred: {e}")
            moonlight.terminate()
            return False
        else:
            return True
def send_join_request_native(host: str, port: int, cert: str) -> bool:
    """Fallback join: send our certificate to the host over a raw IPv6 socket.

    Posts a JSON payload {type, uuid, cert} via a hand-rolled HTTP request.
    Returns True when the exchange succeeds; False (after attempting to parse
    the response) otherwise.
    """
    # This is the hardcoded UUID for the moonlight client
    uuid = "123456789ABCD"
    with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
        s.connect((host, port))
        encoded_cert = base64.urlsafe_b64encode(cert.encode("utf-8")).decode("utf-8")
        json_body_str = json.dumps(
            {"type": "native", "uuid": uuid, "cert": encoded_cert}
        )
        request = (
            f"POST / HTTP/1.1\r\n"
            f"Content-Type: application/json\r\n"
            f"Content-Length: {len(json_body_str)}\r\n"
            f"Connection: close\r\n\r\n"
            f"{json_body_str}"
        )
        try:
            s.sendall(request.encode("utf-8"))
            response = s.recv(16384).decode("utf-8")
            print(response)
            lines = response.split("\n")
            body = "\n".join(lines[2:])[2:]
            print(body)
        except Exception as e:
            print(f"An error occurred: {e}")
        else:
            # Success path returns here; everything below only runs after a failure.
            return True
    # TODO: fix
    # NOTE(review): this tail is reached only when the try above raised. If
    # the exception happened before recv, `response` is unbound and the
    # print below raises NameError (not caught by the JSONDecodeError
    # handler). Also, `response` holds the raw HTTP response including
    # headers, so json.loads on it will usually fail — confirm intent.
    try:
        print(f"response: {response}")
        data = json.loads(response)
        print(f"Data: {data}")
        print(f"Host uuid: {data['uuid']}")
        print(f"Host certificate: {data['cert']}")
        print("Joining sunshine host")
        cert = data["cert"]
        cert = base64.urlsafe_b64decode(cert).decode("utf-8")
        uuid = data["uuid"]
        hostname = data["hostname"]
        add_sunshine_host(hostname, host, cert, uuid)
    except json.JSONDecodeError as e:
        print(f"Failed to decode JSON: {e}")
        pos = e.pos
        print(f"Failed to decode JSON: unexpected character {response[pos]}")
    return False
def join(args: argparse.Namespace) -> None:
    """Resolve host/port from a moonlight URI or flags, then pair with the host."""
    if args.url:
        host, port = parse_moonlight_uri(args.url)
        port = 48011 if port is None else port
    else:
        host, port = args.host, args.port
    print(f"Host: {host}, port: {port}")
    # TODO: If cert is not provided parse from config
    # cert = args.cert
    cert = get_moonlight_certificate()
    assert port is not None
    joined = send_join_request(host, port, cert)
    if joined:
        print(f"Successfully joined sunshine host: {host}")
    else:
        print(f"Failed to join sunshine host: {host}")
def register_join_parser(parser: argparse.ArgumentParser) -> None:
    """Declare the `join` sub-command's arguments and bind its action."""
    parser.add_argument("url", nargs="?")
    parser.add_argument("--port", type=int, default=48011)
    for flag in ("--host", "--cert"):
        parser.add_argument(flag)
    parser.set_defaults(func=join)

View File

@@ -0,0 +1,63 @@
import subprocess
import sys
import threading
class MoonlightPairing:
    """Drive the `moonlight pair` CLI in a background process.

    Spawns `moonlight pair`, streams its stdout on a thread, and signals via
    the `found` event once the expected server-version line appears.
    """

    def __init__(self) -> None:
        self.process: subprocess.Popen | None = None
        self.output = ""  # accumulated stdout of the moonlight process
        self.found = threading.Event()  # set once the marker line is seen

    def init_pairing(self, host: str, pin: str) -> bool:
        """Start `moonlight pair <host> --pin <pin>`; False if spawning failed."""
        args = ["moonlight", "pair", host, "--pin", pin]
        print("Trying to pair")
        try:
            print(f"Running command: {args}")
            self.process = subprocess.Popen(args, stdout=subprocess.PIPE)
            print("Pairing initiated")
            thread = threading.Thread(
                target=self.stream_output,
                args=('Latest supported GFE server: "99.99.99.99"',),
            )
            thread.start()
            print("Thread started")
        except Exception as e:
            print(
                "Error occurred while starting the process: ", str(e), file=sys.stderr
            )
            return False
        else:
            return True

    def check(self, host: str) -> bool:
        """Return True if `moonlight list localhost <host>` exits successfully."""
        try:
            result = subprocess.run(
                ["moonlight", "list", "localhost", host], check=True
            )
        except subprocess.CalledProcessError:
            return False
        else:
            return result.returncode == 0

    def terminate(self) -> None:
        """Stop the moonlight process (if any) and reap it."""
        if self.process:
            self.process.terminate()
            self.process.wait()

    def stream_output(self, target_string: str) -> None:
        """Accumulate process stdout; set `found` when target_string appears."""
        assert self.process is not None
        assert self.process.stdout is not None
        for line in iter(self.process.stdout.readline, b""):
            line = line.decode()
            self.output += line
            if target_string in line:
                self.found.set()
                break

    def wait_until_started(self, timeout: int = 10) -> None:
        """Block up to `timeout` seconds for the marker line to be seen."""
        if self.found.wait(timeout):
            print("Started up.")
        else:
            # Message fix: removed the duplicated "took" and the false claim
            # that the process was terminated — this method does not terminate
            # it; callers must call terminate() themselves.
            print("Starting up took too long.")

View File

@@ -0,0 +1,149 @@
import contextlib
import random
import string
from configparser import ConfigParser, DuplicateSectionError, NoOptionError
from pathlib import Path
def moonlight_config_dir() -> Path:
    """Moonlight's per-user configuration directory under ~/.config."""
    config_root = Path.home() / ".config"
    return config_root / "Moonlight Game Streaming Project"
def moonlight_state_file() -> Path:
    """Path of the Moonlight.conf state file inside the config directory."""
    return moonlight_config_dir() / "Moonlight.conf"
def load_state() -> ConfigParser | None:
    """Parse the Moonlight state file; returns None when it does not exist yet."""
    try:
        with moonlight_state_file().open() as file:
            config = ConfigParser()
            config.read_file(file)
            print(config.sections())
            return config
    except FileNotFoundError:
        # Message fix: this function reads the *Moonlight* state file, but the
        # old message wrongly claimed the Sunshine state file was missing.
        print("Moonlight state file not found.")
        return None
# Qt persists QByteArray settings as "@ByteArray(...)"; reproduce that framing
# ourselves, escaping newlines so the value stays on a single config line.
def convert_string_to_bytearray(data: str) -> str:
    """Encode `data` in Qt's @ByteArray settings notation."""
    escaped = data.replace("\n", "\\n")
    return f'"@ByteArray({escaped})"'
def convert_bytearray_to_string(byte_array: str) -> str:
    """Decode Qt's @ByteArray notation; non-matching input is returned untouched."""
    prefix, suffix = '"@ByteArray(', ')"'
    if byte_array.startswith(prefix) and byte_array.endswith(suffix):
        inner = byte_array[len(prefix):-len(suffix)]
        return inner.replace("\\n", "\n")
    return byte_array
# This must exist before moonlight's first run.
def init_state(certificate: str, key: str) -> None:
    """Create the moonlight config dir and write the bootstrap Moonlight.conf."""
    print("Initializing moonlight state.")
    moonlight_config_dir().mkdir(parents=True, exist_ok=True)
    print("Initialized moonlight config directory.")
    print("Writing moonlight state file.")
    # ConfigParser has no native byte-array support, so certificate and key
    # are pre-encoded into Qt's @ByteArray notation ourselves.
    config = ConfigParser()
    config.add_section("General")
    config.set("General", "certificate", convert_string_to_bytearray(certificate))
    config.set("General", "key", convert_string_to_bytearray(key))
    config.set("General", "latestsupportedversion-v1", "99.99.99.99")
    config.add_section("gcmapping")
    config.set("gcmapping", "size", "0")
    with moonlight_state_file().open("w") as state_file:
        config.write(state_file)
def write_state(data: ConfigParser) -> None:
    # Persist the in-memory parser back to Moonlight's settings file.
    target = moonlight_state_file()
    with target.open("w") as handle:
        data.write(handle)
def add_sunshine_host_to_parser(
    config: ConfigParser, hostname: str, manual_host: str, certificate: str, uuid: str
) -> bool:
    # Register a sunshine host in Moonlight's settings, mirroring the
    # flattened "<n>\key" scheme moonlight-qt writes after manual pairing.
    with contextlib.suppress(DuplicateSectionError):
        config.add_section("hosts")
    # "size" tracks how many host slots exist; claim the next one.
    try:
        host_count = int(config.get("hosts", "size"))
    except NoOptionError:
        host_count = 0
    slot = host_count + 1
    entries = [
        (f"{slot}\\srvcert", convert_string_to_bytearray(certificate)),
        ("size", str(slot)),
        (f"{slot}\\uuid", uuid),
        (f"{slot}\\hostname", hostname),
        (f"{slot}\\nvidiasv", "false"),
        (f"{slot}\\customname", "false"),
        (f"{slot}\\manualaddress", manual_host),
        (f"{slot}\\manualport", "47989"),
        (f"{slot}\\remoteport", "0"),
        (f"{slot}\\remoteaddress", ""),
        (f"{slot}\\localaddress", ""),
        (f"{slot}\\localport", "0"),
        (f"{slot}\\ipv6port", "0"),
        (f"{slot}\\ipv6address", ""),
        # presumably a placeholder MAC value -- TODO confirm
        (f"{slot}\\mac", convert_string_to_bytearray("\\xceop\\x8d\\xfc{")),
    ]
    for key, value in entries:
        config.set("hosts", key, value)
    # Default launchable apps for the new host.
    # TODO: discover these dynamically instead of hard-coding.
    add_app(config, "Desktop", slot, 1, 881448767)
    add_app(config, "Low Res Desktop", slot, 2, 303580669)
    add_app(config, "Steam Big Picture", slot, 3, 1093255277)
    print(config.items("hosts"))
    return True
# Attach one streamable app entry to a host slot in the "hosts" section.
# TODO: derive the app list dynamically instead of hard-coding callers.
def add_app(
    config: ConfigParser, name: str, host_id: int, app_id: int, app_no: int
) -> None:
    prefix = f"{host_id}\\apps\\{app_id}\\"
    settings = {
        "appcollector": "false",
        "directlaunch": "false",
        "hdr": "false",
        "hidden": "false",
        "id": f"{app_no}",
        "name": f"{name}",
    }
    for key, value in settings.items():
        config.set("hosts", prefix + key, value)
def get_moonlight_certificate() -> str:
    # Read the client certificate out of the local Moonlight state.
    # Raises FileNotFoundError when the state file does not exist yet.
    state = load_state()
    if state is None:
        msg = "Moonlight state file not found."
        raise FileNotFoundError(msg)
    raw = state.get("General", "certificate")
    return convert_bytearray_to_string(raw)
def gen_pin() -> str:
    # Four-digit random PIN used for the sunshine pairing handshake.
    return "".join(random.choices(string.digits, k=4))
def add_sunshine_host(
    hostname: str, manual_host: str, certificate: str, uuid: str
) -> bool:
    """Pair a sunshine host into the local Moonlight state file.

    Returns False when the Moonlight state file is missing; otherwise
    adds the host entry and writes the state back, returning True.
    """
    config = load_state()
    if config is None:
        return False
    # Bug fix: the hostname argument used to be clobbered with a
    # hard-coded "test" value (debug leftover); honor the caller's name.
    add_sunshine_host_to_parser(config, hostname, manual_host, certificate, uuid)
    write_state(config)
    return True

View File

@@ -0,0 +1,22 @@
from urllib.parse import urlparse
from moonlight_sunshine_accept.errors import Error
def parse_moonlight_uri(uri: str) -> tuple[str, int | None]:
print(uri)
if uri.startswith("moonlight:"):
# Fixes a bug where moonlight:// is not parsed correctly
uri = uri[10:]
uri = "moonlight://" + uri
print(uri)
parsed = urlparse(uri)
if parsed.scheme != "moonlight":
msg = f"Invalid moonlight URI: {uri}"
raise Error(msg)
hostname = parsed.hostname
if hostname is None:
msg = f"Invalid moonlight URI: {uri}"
raise Error(msg)
port = parsed.port
return (hostname, port)

View File

@@ -0,0 +1,63 @@
import argparse
from .init_certificates import register_initialization_parser
from .init_state import register_state_initialization_parser
from .listen import register_socket_listener
def register_parser(parser: argparse.ArgumentParser) -> None:
    """Wire the sunshine-side CLI subcommands onto the given parser."""
    commands = parser.add_subparsers(
        title="command",
        description="the command to run",
        help="the command to run",
        required=True,
    )
    # "generate" takes no options yet; TODO: add a timeout for the link.
    commands.add_parser(
        "generate",
        aliases=["gen"],
        description="Generate a shareable link",
        help="Generate a shareable link",
    )
    init_cmd = commands.add_parser(
        "init",
        aliases=["i"],
        description="Initialize the sunshine credentials",
        help="Initialize the sunshine credentials",
    )
    register_initialization_parser(init_cmd)
    init_state_cmd = commands.add_parser(
        "init-state",
        description="Initialize the sunshine state file",
        help="Initialize the sunshine state file",
    )
    register_state_initialization_parser(init_state_cmd)
    listen_cmd = commands.add_parser(
        "listen",
        description="Listen for incoming connections",
        help="Listen for incoming connections",
    )
    register_socket_listener(listen_cmd)
    # TODO: an "add" subcommand (--url, --cert, --uuid) could register a
    # moonlight machine directly -- useful when using dependent secrets.
View File

@@ -0,0 +1,63 @@
import base64
import http.client
import json
def get_context() -> http.client.ssl.SSLContext:  # type: ignore
    # Sunshine's web API serves a self-signed certificate, so
    # verification is deliberately disabled. A proper fix would load the
    # sunshine CA certificate into a default context instead.
    return http.client.ssl._create_unverified_context()  # type: ignore # noqa: SLF001
def pair(pin: str) -> str:
    """POST the pairing PIN to the local sunshine web API; return its reply."""
    connection = http.client.HTTPSConnection("localhost", 47990, context=get_context())
    # TODO: dynamic username and password
    credentials = base64.b64encode(b"sunshine:sunshine").decode("ascii")
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Basic {credentials}",
    }
    payload = json.dumps({"pin": f"{pin}"})
    connection.request("POST", "/api/pin", payload, headers)
    reply = connection.getresponse().read().decode("utf-8")
    print(reply)
    return reply
def restart() -> None:
    """Ask the local sunshine instance to restart itself via its web API.

    No meaningful response body is expected: sunshine restarts before
    replying, so the read below typically yields nothing.
    """
    conn = http.client.HTTPSConnection(
        "localhost",
        47990,
        # sunshine serves a self-signed cert; skip verification
        context=http.client.ssl._create_unverified_context(),  # type: ignore # noqa: SLF001
    )
    user_and_pass = base64.b64encode(b"sunshine:sunshine").decode("ascii")
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Basic {user_and_pass}",
    }
    # Bug fix: the body used to be a raw dict, which http.client treats
    # as an iterable and sends as an empty chunked body despite the JSON
    # Content-Type; send an actual JSON document instead.
    conn.request("POST", "/api/restart", json.dumps({}), headers)
    res = conn.getresponse()
    data = res.read()
    print(data.decode("utf-8"))

View File

@@ -0,0 +1,49 @@
import configparser
from dataclasses import dataclass
from pathlib import Path
# address_family = both
# channels = 5
# pkey = /var/lib/sunshine/sunshine.key
# cert = /var/lib/sunshine/sunshine.cert
# file_state = /var/lib/sunshine/state.json
# credentials_file = /var/lib/sunshine/credentials.json
PSEUDO_SECTION = "DEFAULT"
@dataclass
class Config:
config: configparser.ConfigParser
config_location: Path
_instance = None
def __new__(cls, config_location: Path | None = None) -> "Config":
if cls._instance is None:
cls._instance = super().__new__(cls)
cls._instance.config = configparser.ConfigParser()
config = config_location or cls._instance.default_sunshine_config_file()
cls._instance.config_location = config
with config.open() as f:
config_string = f"[{PSEUDO_SECTION}]\n" + f.read()
print(config_string)
cls._instance.config.read_string(config_string)
return cls._instance
def default_sunshine_config_dir(self) -> Path:
return Path.home() / ".config" / "sunshine"
def default_sunshine_config_file(self) -> Path:
return self.default_sunshine_config_dir() / "sunshine.conf"
def get_private_key(self) -> str:
return self.config.get(PSEUDO_SECTION, "pkey")
def get_certificate(self) -> str:
return self.config.get(PSEUDO_SECTION, "cert")
def get_state_file(self) -> str:
return self.config.get(PSEUDO_SECTION, "file_state")
def get_credentials_file(self) -> str:
return self.config.get(PSEUDO_SECTION, "credentials_file")

View File

@@ -0,0 +1,76 @@
import argparse
import datetime
import uuid
from pathlib import Path
from cryptography import hazmat, x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
def generate_private_key() -> rsa.RSAPrivateKey:
    # 2048-bit RSA key with the standard public exponent 65537.
    return rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048,
        backend=hazmat.backends.default_backend(),
    )
def generate_certificate(private_key: rsa.RSAPrivateKey) -> bytes:
    # Self-signed certificate for the given key, returned PEM-encoded.
    # Subject/issuer, serial and validity window are fixed values --
    # presumably matching what the sunshine/moonlight pairing expects
    # (pinned certificate), TODO confirm.
    name = x509.Name(
        [
            x509.NameAttribute(NameOID.COMMON_NAME, "Sunshine Gamestream Host"),
        ]
    )
    certificate = (
        x509.CertificateBuilder()
        .subject_name(name)
        .issuer_name(name)
        .public_key(private_key.public_key())
        .serial_number(61093384576940497812448570031200738505731293357)
        .not_valid_before(datetime.datetime(2024, 2, 27, tzinfo=datetime.UTC))
        .not_valid_after(datetime.datetime(2044, 2, 22, tzinfo=datetime.UTC))
        .add_extension(
            x509.SubjectAlternativeName([x509.DNSName("localhost")]),
            critical=False,
        )
        .sign(private_key, hashes.SHA256(), default_backend())
    )
    return certificate.public_bytes(serialization.Encoding.PEM)
def private_key_to_pem(private_key: rsa.RSAPrivateKey) -> bytes:
    # Serialize the key as unencrypted PKCS#8 PEM bytes.
    return private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption(),
    )
def init_credentials() -> tuple[bytes, bytes]:
    # Produce a fresh (certificate_pem, private_key_pem) pair.
    key = generate_private_key()
    return generate_certificate(key), private_key_to_pem(key)
def uniqueid() -> str:
    # Random UUID4 in the upper-case form sunshine stores.
    generated = uuid.uuid4()
    return str(generated).upper()
def write_credentials(_args: argparse.Namespace) -> None:
    # Generate the sunshine CA certificate/key pair plus a host UUID and
    # write them into the current working directory.
    print("Writing sunshine credentials")
    pem_certificate, pem_private_key = init_credentials()
    out_dir = Path("credentials")
    out_dir.mkdir(parents=True, exist_ok=True)
    (out_dir / "cacert.pem").write_bytes(pem_certificate)
    (out_dir / "cakey.pem").write_bytes(pem_private_key)
    print("Generating sunshine UUID")
    Path("uuid").write_text(uniqueid())
def register_initialization_parser(parser: argparse.ArgumentParser) -> None:
    # "init" subcommand: invoking it runs write_credentials.
    parser.set_defaults(func=write_credentials)

View File

@@ -0,0 +1,16 @@
import argparse
from .state import init_state
def init_state_file(args: argparse.Namespace) -> None:
    # CLI shim: unpack the parsed arguments and create the initial
    # sunshine state file.
    init_state(args.uuid, args.state_file)
    print("Finished initializing sunshine state file.")
def register_state_initialization_parser(parser: argparse.ArgumentParser) -> None:
    # "init-state" subcommand: --uuid and --state-file feed init_state_file.
    parser.add_argument("--uuid")
    parser.add_argument("--state-file")
    parser.set_defaults(func=init_state_file)

View File

@@ -0,0 +1,90 @@
import argparse
import base64
import json
import socket
import traceback
from .api import pair
from .state import default_sunshine_state_file
# listen on a specific port for information from the moonlight side
def listen(port: int, cert: str, uuid: str, state_file: str) -> bool:
    """Accept pairing requests from the moonlight side on `port`.

    Runs forever, handling one connection at a time. The JSON payload is
    taken from the last line of the raw request; two types are handled:
      - "api":    forward the contained PIN to sunshine's pairing API
      - "native": reply with this host's uuid, certificate and hostname

    NOTE(review): `state_file` is currently unused (the
    add_moonlight_client call is commented out) -- confirm before removing.
    """
    host = ""  # bind all interfaces
    # Dual-stack socket: IPv6 with IPV6_V6ONLY disabled also accepts IPv4.
    server_socket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
    server_socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
    server_socket.bind((host, port))
    # Listen for incoming connections (accept up to 5)
    server_socket.listen(5)
    while True:
        client_socket, addr = server_socket.accept()
        print(f"Connection accepted from {addr}")
        data = client_socket.recv(16384)
        try:
            request = data.decode("utf-8")
            # The JSON body is the final line of the HTTP request.
            raw_body = request.split("\n")[-1]
            print(raw_body)
            body = json.loads(raw_body)
            pair_type = body.get("type", "")
            if pair_type == "api":
                print("Api request")
                status = pair(body.get("pin", ""))
                status = json.dumps(status)
                # Bug fix: the header/body separator was "\r\" (a stray
                # backslash from "\r\\{...}"), producing a malformed HTTP
                # response; it must be "\r\n\r\n".
                response = f"HTTP/1.1 200 OK\r\nContent-Type:application/json\r\n\r\n{status}\r\n"
                client_socket.sendall(response.encode("utf-8"))
            if pair_type == "native":
                encoded_cert = base64.urlsafe_b64encode(cert.encode("utf-8")).decode(
                    "utf-8"
                )
                json_body = json.dumps(
                    {
                        "uuid": uuid,
                        "cert": encoded_cert,
                        "hostname": socket.gethostname(),
                    }
                )
                # Same separator bug fix as above.
                response = f"HTTP/1.1 200 OK\r\nContent-Type:application/json\r\n\r\n{json_body}\r\n"
                client_socket.sendall(response.encode("utf-8"))
                # add_moonlight_client(decoded_cert, state_file, rec_uuid)
        except UnicodeDecodeError:
            # NOTE(review): data[8] is an arbitrary index and may itself
            # raise IndexError on short payloads -- confirm intent.
            print(f"UnicodeDecodeError: Cannot decode byte {data[8]}")
            traceback.print_exc()
        client_socket.close()
def init_listener(args: argparse.Namespace) -> None:
    # CLI entry point: start the pairing listener with the parsed options.
    listen(args.port, args.cert, args.uuid, args.state)
def register_socket_listener(parser: argparse.ArgumentParser) -> None:
    # "listen" subcommand: flags for the pairing listener (see listen()).
    parser.add_argument("--port", default=48011, type=int)
    parser.add_argument("--cert")
    parser.add_argument("--uuid")
    # NOTE(review): the default is computed once at registration time and
    # is a Path, while listen() annotates state_file as str -- confirm.
    parser.add_argument("--state", default=default_sunshine_state_file())
    # TODO: auto accept
    # parser.add_argument("--auto-accept")
    parser.set_defaults(func=init_listener)

View File

@@ -0,0 +1,67 @@
import json
from pathlib import Path
from typing import Any
def default_sunshine_config_dir() -> Path:
    # Sunshine's per-user configuration directory.
    return Path.home().joinpath(".config", "sunshine")
def default_sunshine_state_file() -> Path:
    # ~/.config/sunshine/sunshine_state.json
    return default_sunshine_config_dir() / "sunshine_state.json"
def load_state(sunshine_state_path: Path) -> str | None:
sunshine_state_path = sunshine_state_path or default_sunshine_state_file()
print(f"Loading sunshine state from {sunshine_state_path}")
try:
return json.loads(sunshine_state_path.read_text())
except FileNotFoundError:
print("Sunshine state file not found.")
return None
# Seed the sunshine state file; this must exist before sunshine is
# first run.
def init_state(uuid: str, sunshine_state_path: Path) -> None:
    print("Initializing sunshine state.")
    # Host id plus an empty paired-device list.
    initial: dict[str, Any] = {
        "root": {
            "uniqueid": uuid,
            "devices": [],
        }
    }
    write_state(initial, sunshine_state_path)
def write_state(data: dict[str, Any], sunshine_state_path: Path) -> None:
    # Serialize the state dict as pretty-printed JSON, falling back to
    # the default state path when none is given.
    target = sunshine_state_path or default_sunshine_state_file()
    with target.open("w") as handle:
        json.dump(data, handle, indent=4)
# this is used by moonlight-qt
def pseudo_uuid() -> str:
    # Fixed placeholder device id; presumably moonlight-qt only needs a
    # stable value here -- TODO confirm.
    return "0123456789ABCDEF"
# TODO: finish this function
def add_moonlight_client(
    certificate: str, sunshine_state_path: Path, uuid: str
) -> None:
    # Register a moonlight client's certificate in sunshine's state so
    # the client is trusted without interactive pairing.
    # NOTE(review): the `uuid` parameter is unused; devices are stored
    # with pseudo_uuid() instead -- confirm whether the real uuid should
    # be persisted.
    print("Adding moonlight client to sunshine state.")
    raw_state = load_state(sunshine_state_path)
    if raw_state:
        state = json.loads(raw_state)
        if not state["root"]["devices"]:
            # First client ever: create the device entry holding this cert.
            state["root"]["devices"].append(
                {"uniqueid": pseudo_uuid(), "certs": [certificate]}
            )
            write_state(state, sunshine_state_path)
        # NOTE(review): after the append above this membership test is
        # False, so a freshly added certificate falls through to the
        # "already added" message below -- misleading but harmless.
        if certificate not in state["root"]["devices"][0]["certs"]:
            state["root"]["devices"][0]["certs"].append(certificate)
            state["root"]["devices"][0]["uniqueid"] = pseudo_uuid()
            write_state(state, sunshine_state_path)
        else:
            print("Moonlight certificate already added.")

View File

@@ -0,0 +1,9 @@
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[project]
name = "moonlight-sunshine-accept"
description = "Moonlight Sunshine Bridge"
dynamic = ["version"]
scripts = { moonlight-sunshine-accept = "moonlight_sunshine_accept:main" }