Compare commits
speed-up-v...update-tem
32 Commits

| SHA1 |
|---|
| c456ac1334 |
| f7c48d560d |
| bb9b535f20 |
| f0c5a8c84a |
| a27321a826 |
| d7dcb55001 |
| d3d337a51e |
| 2fd6426f28 |
| f70f6d6d80 |
| b9a386c881 |
| db3e8b9984 |
| 117224e6a4 |
| ea77b48d83 |
| 5d99d0e1e7 |
| 1ec67ecfaf |
| d5064ce465 |
| 9080e7c7f6 |
| 8e00363584 |
| 672db4a33f |
| 82c80a9a53 |
| 16116505ab |
| 80713f93af |
| b6f00ed1f6 |
| d6646ecc62 |
| fc1c64985f |
| 695574988e |
| dc6648520f |
| 454936336f |
| 0093836272 |
| e026ada443 |
| 38bb2dfb56 |
| d10fe7a8ee |
@@ -22,7 +22,6 @@ in
  imports = filter pathExists [
    ./backups/flake-module.nix
    ../nixosModules/clanCore/machine-id/tests/flake-module.nix
    ../nixosModules/clanCore/state-version/tests/flake-module.nix
    ./devshell/flake-module.nix
    ./flash/flake-module.nix
    ./impure/flake-module.nix
@@ -185,7 +185,6 @@ in
    ];

    clan.core.vars.generators.borgbackup = {

      files."borgbackup.ssh.pub".secret = false;
      files."borgbackup.ssh" = { };
      files."borgbackup.repokey" = { };
@@ -33,6 +33,7 @@ in
  root-password = ./root-password;
  single-disk = ./single-disk;
  sshd = ./sshd;
  state-version = ./state-version;
  static-hosts = ./static-hosts;
  sunshine = ./sunshine;
  syncthing = ./syncthing;
clanModules/state-version/README.md (new file, 18 lines)
@@ -0,0 +1,18 @@
---
description = "Automatically generate the state version of the nixos installation."
features = [ "inventory", "deprecated" ]
---

This module generates the `system.stateVersion` of the nixos installation automatically.

Options: [system.stateVersion](https://search.nixos.org/options?channel=unstable&show=system.stateVersion&from=0&size=50&sort=relevance&type=packages&query=stateVersion)

Migration:
If you are already setting `system.stateVersion`, import the module and then either let the automatic generation happen, or trigger the generation manually for the machine. The module will take the specified version if one is already supplied through the config.
To manually generate the version for a specified machine run:

```
clan vars generate [MACHINE]
```

If the setting was already set, you can then remove `system.stateVersion` from your machine configuration. For new machines, just import the module.
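For orientation, a minimal sketch of what "import the module" can look like, assuming `clan-core` is passed to the machine configuration as a module argument and the deprecated module is exposed as `clan-core.clanModules.state-version` (neither detail is shown in this diff):

```nix
# Hypothetical machine configuration importing the deprecated module.
{ clan-core, ... }:
{
  imports = [ clan-core.clanModules.state-version ];

  # Optional: a version set explicitly in the config still wins, because the
  # module only applies its generated value via lib.mkDefault.
  # system.stateVersion = "24.05";
}
```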
clanModules/state-version/default.nix (new file, 6 lines)
@@ -0,0 +1,6 @@
# Don't import this file
# It is only here for backwards compatibility.
# Don't author new modules with this file.
{
  imports = [ ./roles/default.nix ];
}
clanModules/state-version/roles/default.nix (new file, 25 lines)
@@ -0,0 +1,25 @@
{ config, lib, ... }:
let
  var = config.clan.core.vars.generators.state-version.files.version or { };
in
{

  warnings = [
    "The clan.state-version module is deprecated and will be removed on 2025-07-15.
    Please migrate to user-maintained configuration or the new equivalent clan services
    (https://docs.clan.lol/reference/clanServices)."
  ];

  system.stateVersion = lib.mkDefault (lib.removeSuffix "\n" var.value);

  clan.core.vars.generators.state-version = {
    files.version = {
      secret = false;
      value = lib.mkDefault config.system.nixos.release;
    };
    runtimeInputs = [ ];
    script = ''
      echo -n ${config.system.stateVersion} > "$out"/version
    '';
  };
}
clanServices/state-version/README.md (new file, 37 lines)
@@ -0,0 +1,37 @@
This service generates the `system.stateVersion` of the nixos installation automatically.

Possible values:
[system.stateVersion](https://search.nixos.org/options?channel=unstable&show=system.stateVersion&from=0&size=50&sort=relevance&type=packages&query=stateVersion)

## Usage

The following configuration will set `stateVersion` for all machines:

```
inventory.instances = {
  state-version = {
    module = {
      name = "state-version";
      input = "clan";
    };
    roles.default.tags.all = { };
  };
};
```

## Migration

If you are already setting `system.stateVersion`, either let the automatic generation happen, or trigger the generation manually for the machine. The service will take the specified version if one is already supplied through the config.

To manually generate the version for a specified machine run:

```
clan vars generate [MACHINE]
```

If the setting was already set, you can then remove `system.stateVersion` from your machine configuration. For new machines, just import the service as shown above.
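To make the last migration step concrete, a hedged before/after sketch of a machine configuration (file path, machine name, and version string are illustrative):

```nix
# Before: machines/server/configuration.nix pins the version by hand.
{
  system.stateVersion = "24.05";
}

# After: the line is removed. Once `clan vars generate server` has run (or the
# value has been generated automatically), the state-version service supplies
# system.stateVersion via lib.mkDefault from the generated vars file.
{
}
```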
clanServices/state-version/default.nix (new file, 39 lines)
@@ -0,0 +1,39 @@
{ ... }:
{
  _class = "clan.service";
  manifest.name = "clan-core/state-version";
  manifest.description = "Automatically generate the state version of the nixos installation.";
  manifest.categories = [ "System" ];

  roles.default = {

    perInstance =
      { ... }:
      {
        nixosModule =
          {
            config,
            lib,
            ...
          }:
          let
            var = config.clan.core.vars.generators.state-version.files.version or { };
          in
          {
            system.stateVersion = lib.mkDefault (lib.removeSuffix "\n" var.value);

            clan.core.vars.generators.state-version = {
              files.version = {
                secret = false;
                value = lib.mkDefault config.system.nixos.release;
              };
              runtimeInputs = [ ];
              script = ''
                echo -n ${config.system.stateVersion} > "$out"/version
              '';
            };
          };
      };
  };

}
clanServices/state-version/flake-module.nix (new file, 16 lines)
@@ -0,0 +1,16 @@
{ lib, ... }:
let
  module = lib.modules.importApply ./default.nix { };
in
{
  clan.modules.state-version = module;
  perSystem =
    { ... }:
    {
      clan.nixosTests.state-version = {
        imports = [ ./tests/vm/default.nix ];

        clan.modules."@clan/state-version" = module;
      };
    };
}
clanServices/state-version/tests/vm/default.nix (new file, 21 lines)
@@ -0,0 +1,21 @@
{
  name = "state-version";

  clan = {
    directory = ./.;
    inventory = {
      machines.server = { };
      instances.default = {
        module.name = "@clan/state-version";
        module.input = "self";
        roles.default.machines."server" = { };
      };
    };
  };

  nodes.server = { };

  testScript = ''
    start_all()
  '';
}
@@ -92,6 +92,7 @@ nav:
      - reference/clanServices/mycelium.md
      - reference/clanServices/packages.md
      - reference/clanServices/sshd.md
      - reference/clanServices/state-version.md
      - reference/clanServices/trusted-nix-caches.md
      - reference/clanServices/users.md
      - reference/clanServices/wifi.md
@@ -126,6 +127,7 @@ nav:
      - reference/clanModules/root-password.md
      - reference/clanModules/single-disk.md
      - reference/clanModules/sshd.md
      - reference/clanModules/state-version.md
      - reference/clanModules/static-hosts.md
      - reference/clanModules/sunshine.md
      - reference/clanModules/syncthing-static-peers.md
flake.lock (generated, 14 lines)
@@ -16,11 +16,11 @@
      ]
    },
    "locked": {
      "lastModified": 1751241706,
      "narHash": "sha256-T3hOK/yQexsrgTfkSceRVpWOtkMqbbKYWUCPwQnrUl0=",
      "rev": "97d8e88ec1d43b52f9886a722c013af2db15bb47",
      "lastModified": 1751413887,
      "narHash": "sha256-+ut7DrSwamExIvaCFdiTYD88NTSYJFG2CEOvCha59vI=",
      "rev": "246f0d66547d073af6249e4f7852466197e871ed",
      "type": "tarball",
      "url": "https://git.clan.lol/api/v1/repos/clan/data-mesher/archive/97d8e88ec1d43b52f9886a722c013af2db15bb47.tar.gz"
      "url": "https://git.clan.lol/api/v1/repos/clan/data-mesher/archive/246f0d66547d073af6249e4f7852466197e871ed.tar.gz"
    },
    "original": {
      "type": "tarball",
@@ -54,11 +54,11 @@
      ]
    },
    "locked": {
      "lastModified": 1749398372,
      "narHash": "sha256-tYBdgS56eXYaWVW3fsnPQ/nFlgWi/Z2Ymhyu21zVM98=",
      "lastModified": 1751413152,
      "narHash": "sha256-Tyw1RjYEsp5scoigs1384gIg6e0GoBVjms4aXFfRssQ=",
      "owner": "hercules-ci",
      "repo": "flake-parts",
      "rev": "9305fe4e5c2a6fcf5ba6a3ff155720fbe4076569",
      "rev": "77826244401ea9de6e3bac47c2db46005e1f30b5",
      "type": "github"
    },
    "original": {
@@ -67,6 +67,44 @@ in
    '';
  };

  # TODO: make this writable by moving the options from inventoryClass into clan.
  exports = lib.mkOption {
    readOnly = true;
    visible = false;
    internal = true;
  };

  exportsModule = lib.mkOption {
    internal = true;
    visible = false;
    type = types.deferredModule;
    default = { };
    description = ''
      A module that is used to define the module of flake level exports -

      such as 'exports.machines.<name>' and 'exports.instances.<name>'

      Example:

      ```nix
      {
        options.vars.generators = lib.mkOption {
          type = lib.types.attrsOf (
            lib.types.submoduleWith {
              modules = [
                {
                  options.script = lib.mkOption { type = lib.types.str; };
                }
              ];
            }
          );
          default = { };
        };
      }
      ```
    '';
  };

  specialArgs = lib.mkOption {
    type = types.attrsOf types.raw;
    default = { };
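As a usage sketch of the `exports`/`exportsModule` pair above: once a clan has been evaluated (here via `clanLib.clan`, mirroring the exports test added later in this diff), the merged flake-level exports can be read back under `machines.<name>` and `instances.<name>`. The machine, instance, and generator names below come from that test fixture and are illustrative:

```nix
# Hypothetical consumer of the merged exports.
let
  clan = clanLib.clan { /* self, directory, exportsModule, inventory, ... */ };
in
{
  keyScript = clan.config.exports.machines.jon.vars.generators.key.script;
  hostnameScript = clan.config.exports.instances."B-1".vars.generators.hostname.script;
}
```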
@@ -224,6 +224,8 @@ in
      inherit nixosConfigurations;
      inherit darwinConfigurations;

      exports = config.clanInternals.inventoryClass.distributedServices.servicesEval.config.exports;

      clanInternals = {
        inventoryClass =
          let
@@ -244,10 +246,13 @@ in
              inherit inventory directory;
            }
            (
              let
                clanConfig = config;
              in
              { config, ... }:
              {
                distributedServices = clanLib.inventory.mapInstances {
                  inherit (config) inventory;
                  inherit (clanConfig) inventory exportsModule;
                  inherit flakeInputs;
                  clanCoreModules = clan-core.clan.modules;
                  prefix = [ "distributedServices" ];
@@ -0,0 +1,75 @@
# Wraps all services in one fixed point module
{
  lib,
  config,
  specialArgs,
  _ctx,
  ...
}:
let
  inherit (lib) mkOption types;
  inherit (types) attrsWith submoduleWith;
in
{
  # TODO: merge these options into clan options
  options = {
    exportsModule = mkOption {
      type = types.deferredModule;
      readOnly = true;
    };
    mappedServices = mkOption {
      visible = false;
      type = attrsWith {
        placeholder = "mappedServiceName";
        elemType = submoduleWith {
          modules = [
            (
              { name, ... }:
              {
                _module.args._ctx = [ name ];
                _module.args.exports' = config.exports;
              }
            )
            ./service-module.nix
            # feature modules
            (lib.modules.importApply ./api-feature.nix {
              inherit (specialArgs) clanLib;
              prefix = _ctx;
            })
          ];
        };
      };
      default = { };
    };
    exports = mkOption {
      type = submoduleWith {
        modules = [
          {
            options = {
              instances = lib.mkOption {
                # instances.<instanceName>...
                type = types.attrsOf (submoduleWith {
                  modules = [
                    config.exportsModule
                  ];
                });
              };
              # machines.<machineName>...
              machines = lib.mkOption {
                type = types.attrsOf (submoduleWith {
                  modules = [
                    config.exportsModule
                  ];
                });
              };
            };
          }
        ] ++ lib.mapAttrsToList (_: service: service.exports) config.mappedServices;
      };
      default = { };
    };
    debug = mkOption {
      default = lib.mapAttrsToList (_: service: service.exports) config.mappedServices;
    };
  };
}
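To make the merge above concrete, a minimal service sketch (mirroring the `modules.A`/`modules.B` fixtures in `tests/exports.nix` further down, and assuming an `exportsModule` that declares `vars.generators` as in that test): whatever a service sets under `perMachine.exports` and `roles.<role>.perInstance.exports` ends up under `exports.machines.<name>` and `exports.instances.<name>` respectively.

```nix
# Minimal sketch of a clan.service module whose exports feed the option above.
{
  manifest.name = "example";

  perMachine =
    { machine, ... }:
    {
      # Lands under exports.machines.<machine.name>
      exports.vars.generators.key.script = machine.name;
    };

  roles.peer.perInstance =
    { instanceName, ... }:
    {
      # Lands under exports.instances.<instanceName>
      exports.vars.generators.hostname.script = instanceName;
    };
}
```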
@@ -26,6 +26,7 @@ in
|
||||
inventory,
|
||||
clanCoreModules,
|
||||
prefix ? [ ],
|
||||
exportsModule,
|
||||
}:
|
||||
let
|
||||
# machineHasTag = machineName: tagName: lib.elem tagName inventory.machines.${machineName}.tags;
|
||||
@@ -89,23 +90,6 @@ in
|
||||
}
|
||||
) inventory.instances or { };
|
||||
|
||||
# TODO: Eagerly check the _class of the resolved module
|
||||
importedModulesEvaluated = lib.mapAttrs (
|
||||
module_ident: instances:
|
||||
clanLib.evalService {
|
||||
prefix = prefix ++ [ module_ident ];
|
||||
modules =
|
||||
[
|
||||
# Import the resolved module.
|
||||
# i.e. clan.modules.admin
|
||||
(builtins.head instances).instance.resolvedModule
|
||||
] # Include all the instances that correlate to the resolved module
|
||||
++ (builtins.map (v: {
|
||||
instances.${v.instanceName}.roles = v.instance.instanceRoles;
|
||||
}) instances);
|
||||
}
|
||||
) grouped;
|
||||
|
||||
# Group the instances by the module they resolve to
|
||||
# This is necessary to evaluate the module in a single pass
|
||||
# :: { <module.input>_<module.name> :: [ { name, value } ] }
|
||||
@@ -126,16 +110,52 @@ in
|
||||
}
|
||||
) { } importedModuleWithInstances;
|
||||
|
||||
# servicesEval.config.mappedServices.self-A.result.final.jon.nixosModule
|
||||
allMachines = lib.mapAttrs (machineName: _: {
|
||||
# This is the list of nixosModules for each machine
|
||||
machineImports = lib.foldlAttrs (
|
||||
acc: _module_ident: eval:
|
||||
acc ++ [ eval.config.result.final.${machineName}.nixosModule or { } ]
|
||||
) [ ] importedModulesEvaluated;
|
||||
acc: _module_ident: serviceModule:
|
||||
acc ++ [ serviceModule.result.final.${machineName}.nixosModule or { } ]
|
||||
) [ ] servicesEval.config.mappedServices;
|
||||
}) inventory.machines or { };
|
||||
|
||||
evalServices =
|
||||
{ modules, prefix }:
|
||||
lib.evalModules {
|
||||
specialArgs = {
|
||||
inherit clanLib;
|
||||
_ctx = prefix;
|
||||
};
|
||||
modules = [
|
||||
./all-services-wrapper.nix
|
||||
] ++ modules;
|
||||
};
|
||||
|
||||
servicesEval = evalServices {
|
||||
inherit prefix;
|
||||
modules = [
|
||||
{
|
||||
inherit exportsModule;
|
||||
mappedServices = lib.mapAttrs (_module_ident: instances: {
|
||||
imports =
|
||||
[
|
||||
# Import the resolved module.
|
||||
# i.e. clan.modules.admin
|
||||
(builtins.head instances).instance.resolvedModule
|
||||
] # Include all the instances that correlate to the resolved module
|
||||
++ (builtins.map (v: {
|
||||
instances.${v.instanceName}.roles = v.instance.instanceRoles;
|
||||
}) instances);
|
||||
}) grouped;
|
||||
}
|
||||
];
|
||||
};
|
||||
importedModulesEvaluated = servicesEval.config.mappedServices;
|
||||
|
||||
in
|
||||
{
|
||||
inherit
|
||||
servicesEval
|
||||
importedModuleWithInstances
|
||||
grouped
|
||||
allMachines
|
||||
|
||||
@@ -104,6 +104,13 @@ let
|
||||
in
|
||||
{
|
||||
options = {
|
||||
# Option to disable some behavior during docs rendering
|
||||
_docs_rendering = mkOption {
|
||||
default = false;
|
||||
visible = false;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
instances = mkOption {
|
||||
visible = false;
|
||||
defaultText = "Throws: 'The service must define its instances' when not defined";
|
||||
@@ -384,6 +391,33 @@ in
|
||||
type = types.deferredModuleWith {
|
||||
staticModules = [
|
||||
({
|
||||
options.exports = mkOption {
|
||||
type = types.deferredModule;
|
||||
default = { };
|
||||
description = ''
|
||||
export modules defined in 'perInstance'
|
||||
mapped to their instance name
|
||||
|
||||
Example
|
||||
|
||||
with instances:
|
||||
|
||||
```nix
|
||||
instances.A = { ... };
|
||||
instances.B= { ... };
|
||||
|
||||
roles.peer.perInstance = { instanceName, machine, ... }:
|
||||
{
|
||||
exports.foo = 1;
|
||||
}
|
||||
|
||||
This yields all other services can access these exports
|
||||
=>
|
||||
exports.instances.A.foo = 1;
|
||||
exports.instances.B.foo = 1;
|
||||
```
|
||||
'';
|
||||
};
|
||||
options.nixosModule = mkOption {
|
||||
type = types.deferredModule;
|
||||
default = { };
|
||||
@@ -412,27 +446,6 @@ in
|
||||
```
|
||||
'';
|
||||
};
|
||||
options.services = mkOption {
|
||||
visible = false;
|
||||
type = attrsWith {
|
||||
placeholder = "serviceName";
|
||||
elemType = submoduleWith {
|
||||
modules = [
|
||||
{
|
||||
_module.args._ctx = _ctx ++ [
|
||||
config.manifest.name
|
||||
"roles"
|
||||
roleName
|
||||
"perInstance"
|
||||
"services"
|
||||
];
|
||||
}
|
||||
./service-module.nix
|
||||
];
|
||||
};
|
||||
};
|
||||
default = { };
|
||||
};
|
||||
})
|
||||
];
|
||||
};
|
||||
@@ -514,6 +527,32 @@ in
|
||||
type = types.deferredModuleWith {
|
||||
staticModules = [
|
||||
({
|
||||
options.exports = mkOption {
|
||||
type = types.deferredModule;
|
||||
default = { };
|
||||
description = ''
|
||||
export modules defined in 'perMachine'
|
||||
mapped to their machine name
|
||||
|
||||
Example
|
||||
|
||||
with machines:
|
||||
```nix
|
||||
instances.A = { roles.peer.machines.jon = ... };
|
||||
instances.B = { roles.peer.machines.jon = ... };
|
||||
|
||||
perMachine = { machine, ... }:
|
||||
{
|
||||
exports.foo = 1;
|
||||
}
|
||||
|
||||
This yields all other services can access these exports
|
||||
=>
|
||||
exports.machines.jon.foo = 1;
|
||||
exports.machines.sara.foo = 1;
|
||||
```
|
||||
'';
|
||||
};
|
||||
options.nixosModule = mkOption {
|
||||
type = types.deferredModule;
|
||||
default = { };
|
||||
@@ -537,25 +576,6 @@ in
|
||||
```
|
||||
'';
|
||||
};
|
||||
options.services = mkOption {
|
||||
visible = false;
|
||||
type = attrsWith {
|
||||
placeholder = "serviceName";
|
||||
elemType = submoduleWith {
|
||||
modules = [
|
||||
{
|
||||
_module.args._ctx = _ctx ++ [
|
||||
config.manifest.name
|
||||
"perMachine"
|
||||
"services"
|
||||
];
|
||||
}
|
||||
./service-module.nix
|
||||
];
|
||||
};
|
||||
};
|
||||
default = { };
|
||||
};
|
||||
})
|
||||
];
|
||||
};
|
||||
@@ -608,6 +628,96 @@ in
|
||||
modules = [ v ];
|
||||
}).config;
|
||||
};
|
||||
|
||||
exports = mkOption {
|
||||
description = ''
|
||||
This services exports.
|
||||
Gets merged with all other services exports
|
||||
|
||||
Final value (merged and evaluated with other services) available as `exports'` in the arguments of this module.
|
||||
|
||||
```nix
|
||||
{ exports', ... }: {
|
||||
_class = "clan.service";
|
||||
# ...
|
||||
}
|
||||
```
|
||||
'';
|
||||
default = { };
|
||||
type = types.submoduleWith {
|
||||
# Static modules
|
||||
modules = [
|
||||
{
|
||||
options.instances = mkOption {
|
||||
type = types.attrsOf types.deferredModule;
|
||||
description = ''
|
||||
export modules defined in 'perInstance'
|
||||
mapped to their instance name
|
||||
|
||||
Example
|
||||
|
||||
with instances:
|
||||
|
||||
```nix
|
||||
instances.A = { ... };
|
||||
instances.B= { ... };
|
||||
|
||||
roles.peer.perInstance = { instanceName, machine, ... }:
|
||||
{
|
||||
exports.foo = 1;
|
||||
}
|
||||
|
||||
This yields all other services can access these exports
|
||||
=>
|
||||
exports.instances.A.foo = 1;
|
||||
exports.instances.B.foo = 1;
|
||||
```
|
||||
'';
|
||||
};
|
||||
options.machines = mkOption {
|
||||
type = types.attrsOf types.deferredModule;
|
||||
description = ''
|
||||
export modules defined in 'perMachine'
|
||||
mapped to their machine name
|
||||
|
||||
Example
|
||||
|
||||
with machines:
|
||||
|
||||
```nix
|
||||
instances.A = { roles.peer.machines.jon = ... };
|
||||
instances.B = { roles.peer.machines.jon = ... };
|
||||
|
||||
perMachine = { machine, ... }:
|
||||
{
|
||||
exports.foo = 1;
|
||||
}
|
||||
|
||||
This yields all other services can access these exports
|
||||
=>
|
||||
exports.machines.jon.foo = 1;
|
||||
exports.machines.sara.foo = 1;
|
||||
```
|
||||
'';
|
||||
};
|
||||
# Lazy default via imports
|
||||
# should probably be moved to deferredModuleWith { staticModules = [ ]; }
|
||||
imports =
|
||||
if config._docs_rendering then
|
||||
[ ]
|
||||
else
|
||||
lib.mapAttrsToList (_roleName: role: {
|
||||
instances = lib.mapAttrs (_instanceName: instance: {
|
||||
imports = lib.mapAttrsToList (_machineName: v: v.exports) instance.allMachines;
|
||||
}) role.allInstances;
|
||||
}) config.result.allRoles
|
||||
++ lib.mapAttrsToList (machineName: machine: {
|
||||
machines.${machineName} = machine.exports;
|
||||
}) config.result.allMachines;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
# ---
|
||||
# Place the result in _module.result to mark them as "internal" and discourage usage/overrides
|
||||
#
|
||||
@@ -727,40 +837,18 @@ in
|
||||
instanceAcc: instanceName: instance:
|
||||
instanceAcc
|
||||
// {
|
||||
nixosModules =
|
||||
(
|
||||
(lib.mapAttrsToList (
|
||||
nestedServiceName: serviceModule:
|
||||
let
|
||||
unmatchedMachines = lib.attrNames (
|
||||
lib.removeAttrs serviceModule.result.final (lib.attrNames config.result.allMachines)
|
||||
);
|
||||
in
|
||||
if unmatchedMachines != [ ] then
|
||||
throw ''
|
||||
The following machines are not part of the parent service: ${builtins.toJSON unmatchedMachines}
|
||||
Either remove the machines, or include them into the parent via a role.
|
||||
(Added via roles.${roleName}.perInstance.services.${nestedServiceName})
|
||||
|
||||
${errorContext}
|
||||
''
|
||||
else
|
||||
serviceModule.result.final.${machineName}.nixosModule
|
||||
) instance.allMachines.${machineName}.services or { })
|
||||
|
||||
)
|
||||
++ (
|
||||
if instance.allMachines.${machineName}.nixosModule or { } != { } then
|
||||
instanceAcc.nixosModules
|
||||
++ [
|
||||
(lib.setDefaultModuleLocation
|
||||
"Via instances.${instanceName}.roles.${roleName}.machines.${machineName}"
|
||||
instance.allMachines.${machineName}.nixosModule
|
||||
)
|
||||
]
|
||||
else
|
||||
instanceAcc.nixosModules
|
||||
);
|
||||
nixosModules = (
|
||||
if instance.allMachines.${machineName}.nixosModule or { } != { } then
|
||||
instanceAcc.nixosModules
|
||||
++ [
|
||||
(lib.setDefaultModuleLocation
|
||||
"Via instances.${instanceName}.roles.${roleName}.machines.${machineName}"
|
||||
instance.allMachines.${machineName}.nixosModule
|
||||
)
|
||||
]
|
||||
else
|
||||
instanceAcc.nixosModules
|
||||
);
|
||||
}
|
||||
) roleAcc role.allInstances
|
||||
)
|
||||
@@ -773,38 +861,18 @@ in
|
||||
{
|
||||
inherit instanceResults machineResult;
|
||||
nixosModule = {
|
||||
imports =
|
||||
[
|
||||
# include service assertions:
|
||||
(
|
||||
let
|
||||
failedAssertions = (lib.filterAttrs (_: v: !v.assertion) config.result.assertions);
|
||||
in
|
||||
{
|
||||
assertions = lib.attrValues failedAssertions;
|
||||
}
|
||||
)
|
||||
(lib.setDefaultModuleLocation "Via ${config.manifest.name}.perMachine - machine='${machineName}';" machineResult.nixosModule)
|
||||
]
|
||||
++ (lib.mapAttrsToList (
|
||||
nestedServiceName: serviceModule:
|
||||
imports = [
|
||||
# include service assertions:
|
||||
(
|
||||
let
|
||||
unmatchedMachines = lib.attrNames (
|
||||
lib.removeAttrs serviceModule.result.final (lib.attrNames config.result.allMachines)
|
||||
);
|
||||
failedAssertions = (lib.filterAttrs (_: v: !v.assertion) config.result.assertions);
|
||||
in
|
||||
if unmatchedMachines != [ ] then
|
||||
throw ''
|
||||
The following machines are not part of the parent service: ${builtins.toJSON unmatchedMachines}
|
||||
Either remove the machines, or include them into the parent via a role.
|
||||
(Added via perMachine.services.${nestedServiceName})
|
||||
|
||||
${errorContext}
|
||||
''
|
||||
else
|
||||
serviceModule.result.final.${machineName}.nixosModule
|
||||
) machineResult.services)
|
||||
++ instanceResults.nixosModules;
|
||||
{
|
||||
assertions = lib.attrValues failedAssertions;
|
||||
}
|
||||
)
|
||||
(lib.setDefaultModuleLocation "Via ${config.manifest.name}.perMachine - machine='${machineName}';" machineResult.nixosModule)
|
||||
] ++ instanceResults.nixosModules;
|
||||
};
|
||||
}
|
||||
) config.result.allMachines;
|
||||
|
||||
@@ -48,9 +48,11 @@ let
|
||||
clanCoreModules = { };
|
||||
flakeInputs = flakeInputsFixture;
|
||||
inherit inventory;
|
||||
exportsModule = { };
|
||||
};
|
||||
in
|
||||
{
|
||||
exports = import ./exports.nix { inherit lib clanLib; };
|
||||
resolve_module_spec = import ./import_module_spec.nix { inherit lib callInventoryAdapter; };
|
||||
test_simple =
|
||||
let
|
||||
@@ -171,7 +173,7 @@ in
|
||||
{
|
||||
# Test that the module is mapped into the output
|
||||
# We might change the attribute name in the future
|
||||
expr = lib.attrNames res.importedModulesEvaluated.self-A.config.instances;
|
||||
expr = lib.attrNames res.importedModulesEvaluated.self-A.instances;
|
||||
expected = [
|
||||
"instance_bar"
|
||||
"instance_foo"
|
||||
@@ -227,7 +229,7 @@ in
|
||||
{
|
||||
# Test that the module is mapped into the output
|
||||
# We might change the attribute name in the future
|
||||
expr = lib.attrNames res.importedModulesEvaluated.self-A.config.result.allMachines;
|
||||
expr = lib.attrNames res.importedModulesEvaluated.self-A.result.allMachines;
|
||||
expected = [
|
||||
"jon"
|
||||
"sara"
|
||||
@@ -279,14 +281,14 @@ in
|
||||
{
|
||||
# Test that the module is mapped into the output
|
||||
# We might change the attribute name in the future
|
||||
expr = lib.attrNames res.importedModulesEvaluated.self-A.config.result.allMachines;
|
||||
expr = lib.attrNames res.importedModulesEvaluated.self-A.result.allMachines;
|
||||
expected = [
|
||||
"jon"
|
||||
"sara"
|
||||
];
|
||||
};
|
||||
|
||||
machine_imports = import ./machine_imports.nix { inherit lib clanLib; };
|
||||
per_machine_args = import ./per_machine_args.nix { inherit lib callInventoryAdapter; };
|
||||
per_instance_args = import ./per_instance_args.nix { inherit lib callInventoryAdapter; };
|
||||
nested = import ./nested_services { inherit lib clanLib; };
|
||||
}
|
||||
|
||||
170
lib/modules/inventory/distributed-service/tests/exports.nix
Normal file
170
lib/modules/inventory/distributed-service/tests/exports.nix
Normal file
@@ -0,0 +1,170 @@
|
||||
{ lib, clanLib }:
|
||||
let
|
||||
clan = clanLib.clan {
|
||||
self = { };
|
||||
directory = ./.;
|
||||
|
||||
exportsModule = {
|
||||
options.vars.generators = lib.mkOption {
|
||||
type = lib.types.attrsOf (
|
||||
lib.types.submoduleWith {
|
||||
# TODO: import the vars submodule here
|
||||
modules = [
|
||||
{
|
||||
options.script = lib.mkOption { type = lib.types.str; };
|
||||
}
|
||||
];
|
||||
}
|
||||
);
|
||||
default = { };
|
||||
};
|
||||
};
|
||||
|
||||
machines.jon = { };
|
||||
machines.sara = { };
|
||||
# A module that adds exports perMachine
|
||||
modules.A =
|
||||
{ exports', ... }:
|
||||
{
|
||||
manifest.name = "A";
|
||||
roles.peer.perInstance =
|
||||
{ machine, ... }:
|
||||
{
|
||||
# Cross reference a perMachine exports
|
||||
exports.vars.generators."${machine.name}-network-ip".script =
|
||||
"A:" + exports'.machines.${machine.name}.vars.generators.key.script;
|
||||
# Cross reference a perInstance exports from a different service
|
||||
exports.vars.generators."${machine.name}-full-hostname".script =
|
||||
"A:" + exports'.instances."B-1".vars.generators.hostname.script;
|
||||
};
|
||||
roles.server = { };
|
||||
perMachine =
|
||||
{ machine, ... }:
|
||||
{
|
||||
exports = {
|
||||
vars.generators.key.script = machine.name;
|
||||
};
|
||||
};
|
||||
};
|
||||
# A module that adds exports perInstance
|
||||
modules.B = {
|
||||
manifest.name = "B";
|
||||
roles.peer.perInstance =
|
||||
{ instanceName, ... }:
|
||||
{
|
||||
exports = {
|
||||
vars.generators.hostname.script = instanceName;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
inventory = {
|
||||
instances.B-1 = {
|
||||
module.name = "B";
|
||||
module.input = "self";
|
||||
roles.peer.tags.all = { };
|
||||
};
|
||||
instances.B-2 = {
|
||||
module.name = "B";
|
||||
module.input = "self";
|
||||
roles.peer.tags.all = { };
|
||||
};
|
||||
instances.A-1 = {
|
||||
module.name = "A";
|
||||
module.input = "self";
|
||||
roles.peer.tags.all = { };
|
||||
roles.server.tags.all = { };
|
||||
};
|
||||
instances.A-2 = {
|
||||
module.name = "A";
|
||||
module.input = "self";
|
||||
roles.peer.tags.all = { };
|
||||
roles.server.tags.all = { };
|
||||
};
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
test_1 = {
|
||||
inherit clan;
|
||||
expr = clan.config.exports;
|
||||
expected = {
|
||||
instances = {
|
||||
A-1 = {
|
||||
vars = {
|
||||
generators = {
|
||||
jon-full-hostname = {
|
||||
script = "A:B-1";
|
||||
};
|
||||
jon-network-ip = {
|
||||
script = "A:jon";
|
||||
};
|
||||
sara-full-hostname = {
|
||||
script = "A:B-1";
|
||||
};
|
||||
sara-network-ip = {
|
||||
script = "A:sara";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
A-2 = {
|
||||
vars = {
|
||||
generators = {
|
||||
jon-full-hostname = {
|
||||
script = "A:B-1";
|
||||
};
|
||||
jon-network-ip = {
|
||||
script = "A:jon";
|
||||
};
|
||||
sara-full-hostname = {
|
||||
script = "A:B-1";
|
||||
};
|
||||
sara-network-ip = {
|
||||
script = "A:sara";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
B-1 = {
|
||||
vars = {
|
||||
generators = {
|
||||
hostname = {
|
||||
script = "B-1";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
B-2 = {
|
||||
vars = {
|
||||
generators = {
|
||||
hostname = {
|
||||
script = "B-2";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
machines = {
|
||||
jon = {
|
||||
vars = {
|
||||
generators = {
|
||||
key = {
|
||||
script = "jon";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
sara = {
|
||||
vars = {
|
||||
generators = {
|
||||
key = {
|
||||
script = "sara";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -0,0 +1,49 @@
|
||||
{ lib, clanLib }:
|
||||
let
|
||||
clan = clanLib.clan {
|
||||
self = { };
|
||||
directory = ./.;
|
||||
|
||||
machines.jon = { };
|
||||
machines.sara = { };
|
||||
# A module that adds exports perMachine
|
||||
modules.A =
|
||||
{ ... }:
|
||||
{
|
||||
manifest.name = "A";
|
||||
roles.peer.perInstance =
|
||||
{ ... }:
|
||||
{
|
||||
nixosModule = {
|
||||
options.bar = lib.mkOption {
|
||||
default = 1;
|
||||
};
|
||||
};
|
||||
};
|
||||
roles.server = { };
|
||||
perMachine =
|
||||
{ ... }:
|
||||
{
|
||||
nixosModule = {
|
||||
options.foo = lib.mkOption {
|
||||
default = 1;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
inventory.instances.A = {
|
||||
module.input = "self";
|
||||
roles.peer.tags.all = { };
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
test_1 = {
|
||||
inherit clan;
|
||||
expr = { inherit (clan.config.clanInternals.machines.x86_64-linux.jon.config) bar foo; };
|
||||
expected = {
|
||||
foo = 1;
|
||||
bar = 1;
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
{ clanLib, lib, ... }:
|
||||
{
|
||||
test_simple = import ./simple.nix { inherit clanLib lib; };
|
||||
|
||||
test_multi_machine = import ./multi_machine.nix { inherit clanLib lib; };
|
||||
|
||||
test_multi_import_duplication = import ./multi_import_duplication.nix { inherit clanLib lib; };
|
||||
}
|
||||
@@ -1,125 +0,0 @@
|
||||
{ clanLib, lib, ... }:
|
||||
let
|
||||
# Potentially imported many times
|
||||
# To add the ssh key
|
||||
example-admin = (
|
||||
{ lib, ... }:
|
||||
{
|
||||
manifest.name = "example-admin";
|
||||
|
||||
roles.client.interface = {
|
||||
options.keys = lib.mkOption { };
|
||||
};
|
||||
|
||||
roles.client.perInstance =
|
||||
{ settings, ... }:
|
||||
{
|
||||
nixosModule = {
|
||||
inherit (settings) keys;
|
||||
};
|
||||
};
|
||||
}
|
||||
);
|
||||
|
||||
consumer-A =
|
||||
{ ... }:
|
||||
{
|
||||
manifest.name = "consumer-A";
|
||||
|
||||
instances.foo = {
|
||||
roles.server.machines."jon" = { };
|
||||
};
|
||||
instances.bar = {
|
||||
roles.server.machines."jon" = { };
|
||||
};
|
||||
|
||||
roles.server = {
|
||||
perInstance =
|
||||
{ machine, instanceName, ... }:
|
||||
{
|
||||
services."example-admin" = {
|
||||
imports = [
|
||||
example-admin
|
||||
];
|
||||
instances."${instanceName}" = {
|
||||
roles.client.machines.${machine.name} = {
|
||||
settings.keys = [ "pubkey-1" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
consumer-B =
|
||||
{ ... }:
|
||||
{
|
||||
manifest.name = "consumer-A";
|
||||
|
||||
instances.foo = {
|
||||
roles.server.machines."jon" = { };
|
||||
};
|
||||
instances.bar = {
|
||||
roles.server.machines."jon" = { };
|
||||
};
|
||||
|
||||
roles.server = {
|
||||
perInstance =
|
||||
{ machine, instanceName, ... }:
|
||||
{
|
||||
services."example-admin" = {
|
||||
imports = [
|
||||
example-admin
|
||||
];
|
||||
instances."${instanceName}" = {
|
||||
roles.client.machines.${machine.name} = {
|
||||
settings.keys = [
|
||||
"pubkey-1"
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
eval = clanLib.evalService {
|
||||
modules = [
|
||||
(consumer-A)
|
||||
];
|
||||
prefix = [ ];
|
||||
};
|
||||
eval2 = clanLib.evalService {
|
||||
modules = [
|
||||
(consumer-B)
|
||||
];
|
||||
prefix = [ ];
|
||||
};
|
||||
|
||||
evalNixos = lib.evalModules {
|
||||
modules = [
|
||||
{
|
||||
options.assertions = lib.mkOption { };
|
||||
# This is suboptimal
|
||||
options.keys = lib.mkOption { };
|
||||
}
|
||||
eval.config.result.final.jon.nixosModule
|
||||
eval2.config.result.final.jon.nixosModule
|
||||
];
|
||||
};
|
||||
in
|
||||
{
|
||||
# Check that the nixos system has the settings from the nested module, as well as those from the "perMachine" and "perInstance"
|
||||
inherit eval;
|
||||
expr = evalNixos.config;
|
||||
expected = {
|
||||
assertions = [ ];
|
||||
# TODO: Some deduplication mechanism is nice
|
||||
# Could add types.set or do 'apply = unique', or something else ?
|
||||
keys = [
|
||||
"pubkey-1"
|
||||
"pubkey-1"
|
||||
"pubkey-1"
|
||||
"pubkey-1"
|
||||
];
|
||||
};
|
||||
}
|
||||
@@ -1,108 +0,0 @@
|
||||
{ clanLib, lib, ... }:
|
||||
let
|
||||
service-B = (
|
||||
{ lib, ... }:
|
||||
{
|
||||
manifest.name = "service-B";
|
||||
|
||||
roles.client.interface = {
|
||||
options.user = lib.mkOption { };
|
||||
options.host = lib.mkOption { };
|
||||
};
|
||||
roles.client.perInstance =
|
||||
{ settings, instanceName, ... }:
|
||||
{
|
||||
nixosModule = {
|
||||
units.${instanceName} = {
|
||||
script = settings.user + "@" + settings.host;
|
||||
};
|
||||
};
|
||||
};
|
||||
perMachine =
|
||||
{ ... }:
|
||||
{
|
||||
nixosModule = {
|
||||
ssh.enable = true;
|
||||
};
|
||||
};
|
||||
}
|
||||
);
|
||||
service-A =
|
||||
{ ... }:
|
||||
{
|
||||
manifest.name = "service-A";
|
||||
|
||||
instances.foo = {
|
||||
roles.server.machines."jon" = { };
|
||||
roles.server.machines."sara" = { };
|
||||
};
|
||||
|
||||
roles.server = {
|
||||
perInstance =
|
||||
{ machine, instanceName, ... }:
|
||||
{
|
||||
services."B" = {
|
||||
imports = [
|
||||
service-B
|
||||
];
|
||||
instances."A-${instanceName}-B" = {
|
||||
roles.client.machines.${machine.name} = {
|
||||
settings.user = "johnny";
|
||||
settings.host = machine.name;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
eval = clanLib.evalService {
|
||||
modules = [
|
||||
(service-A)
|
||||
];
|
||||
prefix = [ ];
|
||||
};
|
||||
|
||||
evalNixos = lib.mapAttrs (
|
||||
_n: v:
|
||||
(lib.evalModules {
|
||||
modules = [
|
||||
{
|
||||
options.assertions = lib.mkOption { };
|
||||
options.units = lib.mkOption { };
|
||||
options.ssh = lib.mkOption { };
|
||||
}
|
||||
v.nixosModule
|
||||
];
|
||||
}).config
|
||||
) eval.config.result.final;
|
||||
in
|
||||
{
|
||||
# Check that the nixos system has the settings from the nested module, as well as those from the "perMachine" and "perInstance"
|
||||
inherit eval;
|
||||
expr = evalNixos;
|
||||
expected = {
|
||||
jon = {
|
||||
assertions = [ ];
|
||||
ssh = {
|
||||
enable = true;
|
||||
};
|
||||
units = {
|
||||
A-foo-B = {
|
||||
script = "johnny@jon";
|
||||
};
|
||||
};
|
||||
};
|
||||
sara = {
|
||||
assertions = [ ];
|
||||
ssh = {
|
||||
enable = true;
|
||||
};
|
||||
units = {
|
||||
A-foo-B = {
|
||||
script = "johnny@sara";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,117 +0,0 @@
|
||||
/*
|
||||
service-B :: Service
|
||||
exports a nixosModule which set "address" and "hostname"
|
||||
Note: How we use null together with mkIf to create optional values.
|
||||
This is a method, to create mergable modules
|
||||
|
||||
service-A :: Service
|
||||
|
||||
service-A.roles.server.perInstance.services."B"
|
||||
imports service-B
|
||||
configures a client with hostname = "johnny"
|
||||
|
||||
service-A.perMachine.services."B"
|
||||
imports service-B
|
||||
configures a client with address = "root"
|
||||
*/
|
||||
{ clanLib, lib, ... }:
|
||||
let
|
||||
service-B = (
|
||||
{ lib, ... }:
|
||||
{
|
||||
manifest.name = "service-B";
|
||||
|
||||
roles.client.interface = {
|
||||
options.hostname = lib.mkOption { default = null; };
|
||||
options.address = lib.mkOption { default = null; };
|
||||
};
|
||||
roles.client.perInstance =
|
||||
{ settings, ... }:
|
||||
{
|
||||
nixosModule = {
|
||||
imports = [
|
||||
# Only export the value that is actually set.
|
||||
(lib.mkIf (settings.hostname != null) {
|
||||
hostname = settings.hostname;
|
||||
})
|
||||
(lib.mkIf (settings.address != null) {
|
||||
address = settings.address;
|
||||
})
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
);
|
||||
service-A =
|
||||
{ ... }:
|
||||
{
|
||||
manifest.name = "service-A";
|
||||
|
||||
instances.foo = {
|
||||
roles.server.machines."jon" = { };
|
||||
};
|
||||
instances.bar = {
|
||||
roles.server.machines."jon" = { };
|
||||
};
|
||||
|
||||
roles.server = {
|
||||
perInstance =
|
||||
{ machine, instanceName, ... }:
|
||||
{
|
||||
services."B" = {
|
||||
imports = [
|
||||
service-B
|
||||
];
|
||||
instances."B-for-A" = {
|
||||
roles.client.machines.${machine.name} = {
|
||||
settings.hostname = instanceName + "+johnny";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
perMachine =
|
||||
{ machine, ... }:
|
||||
{
|
||||
services."B" = {
|
||||
imports = [
|
||||
service-B
|
||||
];
|
||||
instances."B-for-A" = {
|
||||
roles.client.machines.${machine.name} = {
|
||||
settings.address = "root";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
eval = clanLib.evalService {
|
||||
modules = [
|
||||
(service-A)
|
||||
];
|
||||
prefix = [ ];
|
||||
};
|
||||
|
||||
evalNixos = lib.evalModules {
|
||||
modules = [
|
||||
{
|
||||
options.assertions = lib.mkOption { };
|
||||
options.hostname = lib.mkOption { type = lib.types.separatedString " "; };
|
||||
options.address = lib.mkOption { type = lib.types.str; };
|
||||
}
|
||||
eval.config.result.final."jon".nixosModule
|
||||
];
|
||||
};
|
||||
in
|
||||
{
|
||||
# Check that the nixos system has the settings from the nested module, as well as those from the "perMachine" and "perInstance"
|
||||
inherit eval;
|
||||
expr = evalNixos.config;
|
||||
expected = {
|
||||
address = "root";
|
||||
assertions = [ ];
|
||||
# Concatenates hostnames from both instances
|
||||
hostname = "bar+johnny foo+johnny";
|
||||
};
|
||||
}
|
||||
@@ -106,7 +106,7 @@ in
|
||||
test_per_instance_arguments = {
|
||||
expr = {
|
||||
instanceName =
|
||||
res.importedModulesEvaluated.self-A.config.result.allRoles.peer.allInstances."instance_foo".allMachines.jon.passthru.instanceName;
|
||||
res.importedModulesEvaluated.self-A.result.allRoles.peer.allInstances."instance_foo".allMachines.jon.passthru.instanceName;
|
||||
|
||||
# settings are specific.
|
||||
# Below we access:
|
||||
@@ -114,11 +114,11 @@ in
|
||||
# roles = peer
|
||||
# machines = jon
|
||||
settings =
|
||||
res.importedModulesEvaluated.self-A.config.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.passthru.settings;
|
||||
res.importedModulesEvaluated.self-A.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.passthru.settings;
|
||||
machine =
|
||||
res.importedModulesEvaluated.self-A.config.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.passthru.machine;
|
||||
res.importedModulesEvaluated.self-A.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.passthru.machine;
|
||||
roles =
|
||||
res.importedModulesEvaluated.self-A.config.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.passthru.roles;
|
||||
res.importedModulesEvaluated.self-A.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.passthru.roles;
|
||||
};
|
||||
expected = {
|
||||
instanceName = "instance_foo";
|
||||
@@ -161,9 +161,9 @@ in
|
||||
|
||||
# TODO: Cannot be tested like this anymore
|
||||
test_per_instance_settings_vendoring = {
|
||||
x = res.importedModulesEvaluated.self-A.config;
|
||||
x = res.importedModulesEvaluated.self-A;
|
||||
expr =
|
||||
res.importedModulesEvaluated.self-A.config.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.passthru.vendoredSettings;
|
||||
res.importedModulesEvaluated.self-A.result.allRoles.peer.allInstances.instance_foo.allMachines.jon.passthru.vendoredSettings;
|
||||
expected = {
|
||||
timeout = "config.thing";
|
||||
};
|
||||
|
||||
@@ -81,7 +81,7 @@ in
|
||||
inherit res;
|
||||
expr = {
|
||||
hasMachineSettings =
|
||||
res.importedModulesEvaluated.self-A.config.result.allMachines.jon.passthru.instances.instance_foo.roles.peer.machines.jon
|
||||
res.importedModulesEvaluated.self-A.result.allMachines.jon.passthru.instances.instance_foo.roles.peer.machines.jon
|
||||
? settings;
|
||||
|
||||
# settings are specific.
|
||||
@@ -89,10 +89,10 @@ in
|
||||
# instance = instance_foo
|
||||
# roles = peer
|
||||
# machines = jon
|
||||
specificMachineSettings = filterInternals res.importedModulesEvaluated.self-A.config.result.allMachines.jon.passthru.instances.instance_foo.roles.peer.machines.jon.settings;
|
||||
specificMachineSettings = filterInternals res.importedModulesEvaluated.self-A.result.allMachines.jon.passthru.instances.instance_foo.roles.peer.machines.jon.settings;
|
||||
|
||||
hasRoleSettings =
|
||||
res.importedModulesEvaluated.self-A.config.result.allMachines.jon.passthru.instances.instance_foo.roles.peer
|
||||
res.importedModulesEvaluated.self-A.result.allMachines.jon.passthru.instances.instance_foo.roles.peer
|
||||
? settings;
|
||||
|
||||
# settings are specific.
|
||||
@@ -100,7 +100,7 @@ in
|
||||
# instance = instance_foo
|
||||
# roles = peer
|
||||
# machines = *
|
||||
specificRoleSettings = filterInternals res.importedModulesEvaluated.self-A.config.result.allMachines.jon.passthru.instances.instance_foo.roles.peer.settings;
|
||||
specificRoleSettings = filterInternals res.importedModulesEvaluated.self-A.result.allMachines.jon.passthru.instances.instance_foo.roles.peer.settings;
|
||||
};
|
||||
expected = {
|
||||
hasMachineSettings = true;
|
||||
|
||||
@@ -47,7 +47,7 @@ in
    (pkgs.nixosOptionsDoc {
      options =
        (self.clanLib.evalService {
          modules = [ ];
          modules = [ { _docs_rendering = true; } ];
          prefix = [ ];
        }).options;
      warningsAreErrors = true;
@@ -394,6 +394,7 @@ in
    options = {
      # ModuleSpec
      module = lib.mkOption {
        default = { };
        type = types.submodule {
          options.input = lib.mkOption {
            type = types.nullOr types.str;
@@ -19,7 +19,6 @@
    ./nixos-facter.nix
    ./vm.nix
    ./machine-id
    ./state-version
    ./wayland-proxy-virtwl.nix
    ./zerotier
    ./zfs.nix
@@ -11,8 +11,7 @@ in
  enable = lib.mkEnableOption "automatic state-version generation.

    The option will take the specified version, if one is already supplied through
    the config or generate one if not.
    ";
    the config or generate one if not";
  };

  config = lib.mkIf (config.clan.core.settings.state-version.enable) {
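A minimal sketch of enabling the legacy option whose description is edited above (the surrounding machine configuration is assumed and not shown in this hunk):

```nix
{
  clan.core.settings.state-version.enable = true;
  # A version already supplied through the config takes precedence over the generated one.
}
```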
@@ -1,7 +1,7 @@
# shellcheck shell=bash
source_up

watch_file flake-module.nix shell.nix webview-ui/flake-module.nix
watch_file .local.env flake-module.nix shell.nix webview-ui/flake-module.nix

# Because we depend on nixpkgs sources, uploading to builders takes a long time
use flake .#clan-app --builders ''
@@ -103,6 +103,18 @@ GTK_DEBUG=interactive ./bin/clan-app --debug

Appending the `--debug` flag enables debug logging printed to the console.

Debugging crashes in the `webview` library can be done by executing:

```bash
$ ./pygdb.sh ./bin/clan-app --content-uri http://localhost:3000/ --debug
```

I recommend creating the file `.local.env` with the content:

```bash
export WEBVIEW_LIB_DIR=$HOME/Projects/webview/build/core
```

where `WEBVIEW_LIB_DIR` points to a local checkout of the webview lib source that has been built by hand. The `.local.env` file will be automatically sourced if it exists and will be ignored by git.

### Profiling

To activate profiling you can run
@@ -111,51 +123,3 @@ To activate profiling you can run

```bash
CLAN_CLI_PERF=1 ./bin/clan-app
```

### Library Components

> Note:
>
> We have noticed bugs when starting some cli-commands through the integrated vs-code terminal.
> If you encounter issues, make sure to run commands in a regular os-shell.

lib-Adw has a demo application showing all widgets. You can run it by executing

```bash
adwaita-1-demo
```

GTK4 has a demo application showing all widgets. You can run it by executing

```bash
gtk4-widget-factory
```

To find available icons execute

```bash
gtk4-icon-browser
```

### Links

Here are some important documentation links related to the Clan App:

- [GTK4 PyGobject Reference](http://lazka.github.io/pgi-docs/index.html#Gtk-4.0): This link provides the PyGObject reference documentation for GTK4, the toolkit used for building the user interface of the clan app. It includes information about GTK4 widgets, signals, and other features.

- [Adw Widget Gallery](https://gnome.pages.gitlab.gnome.org/libadwaita/doc/main/widget-gallery.html): This link showcases a widget gallery for Adw, allowing you to see the available widgets and their visual appearance. It can be helpful for designing the user interface of the clan app.

- [GNOME Human Interface Guidelines](https://developer.gnome.org/hig/): This link provides the GNOME Human Interface Guidelines, which offer design and usability recommendations for creating GNOME applications. It covers topics such as layout, navigation, and interaction patterns.

## Error handling

> Error dialogs should be avoided where possible, since they are disruptive.
>
> For simple non-critical errors, toasts can be a good alternative.

[direnv]: https://direnv.net/
[process-compose]: https://f1bonacc1.github.io/process-compose/
[vite]: https://vite.dev/
[webview]: https://github.com/webview/webview
[Storybook]: https://storybook.js.org/
[webkit]: https://webkit.org/
@@ -8,7 +8,7 @@ from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
|
||||
import clan_lib.machines.actions # noqa: F401
|
||||
from clan_lib.api import API, ErrorDataClass, SuccessDataClass
|
||||
from clan_lib.api import API, tasks
|
||||
|
||||
# TODO: We have to manually import python files to make the API.register be triggered.
|
||||
# We NEED to fix this, as this is super unintuitive and error-prone.
|
||||
@@ -46,45 +46,16 @@ def app_run(app_opts: ClanAppOptions) -> int:
|
||||
webview = Webview(debug=app_opts.debug)
|
||||
webview.title = "Clan App"
|
||||
# This seems to call the gtk api correctly but and gtk also seems to our icon, but somehow the icon is not loaded.
|
||||
webview.icon = "clan-white"
|
||||
|
||||
# Init LogManager global in log_manager_api module
|
||||
log_manager_api.LOG_MANAGER_INSTANCE = LogManager(
|
||||
base_dir=user_data_dir() / "clan-app" / "logs"
|
||||
)
|
||||
|
||||
def cancel_task(
|
||||
task_id: str, *, op_key: str
|
||||
) -> SuccessDataClass[None] | ErrorDataClass:
|
||||
"""Cancel a task by its op_key."""
|
||||
log.debug(f"Cancelling task with op_key: {task_id}")
|
||||
future = webview.threads.get(task_id)
|
||||
if future:
|
||||
future.stop_event.set()
|
||||
log.debug(f"Task {task_id} cancelled.")
|
||||
else:
|
||||
log.warning(f"Task {task_id} not found.")
|
||||
return SuccessDataClass(
|
||||
op_key=op_key,
|
||||
data=None,
|
||||
status="success",
|
||||
)
|
||||
# Init BAKEND_THREADS in tasks module
|
||||
tasks.BAKEND_THREADS = webview.threads
|
||||
|
||||
def list_tasks(
|
||||
*,
|
||||
op_key: str,
|
||||
) -> SuccessDataClass[list[str]] | ErrorDataClass:
|
||||
"""List all tasks."""
|
||||
log.debug("Listing all tasks.")
|
||||
tasks = list(webview.threads.keys())
|
||||
return SuccessDataClass(
|
||||
op_key=op_key,
|
||||
data=tasks,
|
||||
status="success",
|
||||
)
|
||||
|
||||
API.overwrite_fn(list_tasks)
|
||||
API.overwrite_fn(open_file)
|
||||
API.overwrite_fn(cancel_task)
|
||||
webview.bind_jsonschema_api(API, log_manager=log_manager_api.LOG_MANAGER_INSTANCE)
|
||||
webview.size = Size(1280, 1024, SizeHint.NONE)
|
||||
webview.navigate(content_uri)
|
||||
|
||||
@@ -88,9 +88,6 @@ class _WebviewLibrary:
|
||||
self.webview_set_title = self.lib.webview_set_title
|
||||
self.webview_set_title.argtypes = [c_void_p, c_char_p]
|
||||
|
||||
self.webview_set_icon = self.lib.webview_set_icon
|
||||
self.webview_set_icon.argtypes = [c_void_p, c_char_p]
|
||||
|
||||
self.webview_set_size = self.lib.webview_set_size
|
||||
self.webview_set_size.argtypes = [c_void_p, c_int, c_int, c_int]
|
||||
|
||||
@@ -112,6 +109,8 @@ class _WebviewLibrary:
|
||||
self.webview_return = self.lib.webview_return
|
||||
self.webview_return.argtypes = [c_void_p, c_char_p, c_int, c_char_p]
|
||||
|
||||
self.binding_callback_t = CFUNCTYPE(None, c_char_p, c_char_p, c_void_p)
|
||||
|
||||
self.CFUNCTYPE = CFUNCTYPE
|
||||
|
||||
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
import ctypes
|
||||
import functools
|
||||
import io
|
||||
import json
|
||||
import logging
|
||||
import threading
|
||||
from collections.abc import Callable
|
||||
from dataclasses import dataclass
|
||||
from enum import IntEnum
|
||||
from typing import Any
|
||||
|
||||
@@ -16,6 +14,7 @@ from clan_lib.api import (
|
||||
dataclass_to_dict,
|
||||
from_dict,
|
||||
)
|
||||
from clan_lib.api.tasks import WebThread
|
||||
from clan_lib.async_run import AsyncContext, get_async_ctx, set_async_ctx
|
||||
from clan_lib.custom_logger import setup_logging
|
||||
from clan_lib.log_manager import LogManager
|
||||
@@ -44,12 +43,6 @@ class Size:
|
||||
self.hint = hint
|
||||
|
||||
|
||||
@dataclass
|
||||
class WebThread:
|
||||
thread: threading.Thread
|
||||
stop_event: threading.Event
|
||||
|
||||
|
||||
class Webview:
|
||||
def __init__(
|
||||
self, debug: bool = False, size: Size | None = None, window: int | None = None
|
||||
@@ -73,21 +66,26 @@ class Webview:
|
||||
) -> None:
|
||||
op_key = op_key_bytes.decode()
|
||||
args = json.loads(request_data.decode())
|
||||
log.debug(f"Calling {method_name}({args[0]})")
|
||||
log.debug(f"Calling {method_name}({args})")
|
||||
header: dict[str, Any]
|
||||
|
||||
try:
|
||||
# Initialize dataclasses from the payload
|
||||
reconciled_arguments = {}
|
||||
for k, v in args[0].items():
|
||||
# Some functions expect to be called with dataclass instances
|
||||
# But the js api returns dictionaries.
|
||||
# Introspect the function and create the expected dataclass from dict dynamically
|
||||
# Depending on the introspected argument_type
|
||||
arg_class = api.get_method_argtype(method_name, k)
|
||||
if len(args) > 1:
|
||||
header = args[1]
|
||||
for k, v in args[0].items():
|
||||
# Some functions expect to be called with dataclass instances
|
||||
# But the js api returns dictionaries.
# Introspect the function and create the expected dataclass from dict dynamically
# Depending on the introspected argument_type
arg_class = api.get_method_argtype(method_name, k)

# TODO: rename from_dict into something like construct_checked_value
# from_dict really takes Anything and returns an instance of the type/class
reconciled_arguments[k] = from_dict(arg_class, v)
# TODO: rename from_dict into something like construct_checked_value
# from_dict really takes Anything and returns an instance of the type/class
reconciled_arguments[k] = from_dict(arg_class, v)
elif len(args) == 1:
header = args[0]

reconciled_arguments["op_key"] = op_key
except Exception as e:
@@ -112,8 +110,16 @@ class Webview:
def thread_task(stop_event: threading.Event) -> None:
ctx: AsyncContext = get_async_ctx()
ctx.should_cancel = lambda: stop_event.is_set()
# If the API call has set log_group in metadata,
# create the log file under that group.
log_group = header.get("logging", {}).get("group", None)
if log_group is not None:
log.warning(
f"Using log group {log_group} for {method_name} with op_key {op_key}"
)

log_file = log_manager.create_log_file(
wrap_method, op_key=op_key
wrap_method, op_key=op_key, group=log_group
).get_file_path()

with log_file.open("ab") as log_f:
@@ -129,15 +135,15 @@ class Webview:
handler = setup_logging(
log.getEffectiveLevel(), log_file=handler_stream
)
log.info("Starting thread for webview API call")

try:
# Original logic: call the wrapped API method.
result = wrap_method(**reconciled_arguments)
wrapped_result = {"body": dataclass_to_dict(result), "header": {}}

# Serialize the result to JSON.
serialized = json.dumps(
dataclass_to_dict(result), indent=4, ensure_ascii=False
dataclass_to_dict(wrapped_result), indent=4, ensure_ascii=False
)

# This log message will now also be written to log_f
@@ -204,15 +210,6 @@ class Webview:
_webview_lib.webview_set_title(self._handle, _encode_c_string(value))
self._title = value

@property
def icon(self) -> str:
return self._icon

@icon.setter
def icon(self, value: str) -> None:
_webview_lib.webview_set_icon(self._handle, _encode_c_string(value))
self._icon = value

def destroy(self) -> None:
for name in list(self._callbacks.keys()):
self.unbind(name)
@@ -237,9 +234,7 @@ class Webview:
name,
method,
)
c_callback = _webview_lib.CFUNCTYPE(
None, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_void_p
)(wrapper)
c_callback = _webview_lib.binding_callback_t(wrapper)

if name in self._callbacks:
msg = f"Callback {name} already exists. Skipping binding."
@@ -261,9 +256,7 @@ class Webview:
success = False
self.return_(seq.decode(), 0 if success else 1, json.dumps(result))

c_callback = _webview_lib.CFUNCTYPE(
None, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_void_p
)(wrapper)
c_callback = _webview_lib.binding_callback_t(wrapper)
self._callbacks[name] = c_callback
_webview_lib.webview_bind(
self._handle, _encode_c_string(name), c_callback, None

5 pkgs/clan-app/pygdb.sh Executable file
@@ -0,0 +1,5 @@
#!/usr/bin/env bash


PYTHON_DIR=$(dirname "$(which python3)")/..
gdb --quiet -ex "source $PYTHON_DIR/share/gdb/libpython.py" --ex "sharedlib $WEBVIEW_LIB_DIR/libwebview.so" --ex "run" --args python "$@"
@@ -89,9 +89,10 @@ mkShell {
popd

# configure process-compose
if test -f "$GIT_ROOT/pkgs/clan-app/.local.env"; then
source "$GIT_ROOT/pkgs/clan-app/.local.env"
if test -f "$CLAN_CORE_PATH/pkgs/clan-app/.local.env"; then
source "$CLAN_CORE_PATH/pkgs/clan-app/.local.env"
fi

export PC_CONFIG_FILES="$CLAN_CORE_PATH/pkgs/clan-app/process-compose.yaml"

echo -e "${GREEN}To launch a qemu VM for testing, run:\n start-vm <number of VMs>${NC}"

@@ -1,84 +0,0 @@
|
||||
{
|
||||
"name": "@clan/ui",
|
||||
"version": "0.0.1",
|
||||
"description": "",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"start": "vite",
|
||||
"dev": "vite",
|
||||
"build": "npm run check && npm run test && vite build && npm run convert-html",
|
||||
"convert-html": "node gtk.webview.js",
|
||||
"serve": "vite preview",
|
||||
"check": "tsc --noEmit --skipLibCheck && eslint ./src --fix",
|
||||
"knip": "knip --fix",
|
||||
"test": "vitest run --project unit --typecheck",
|
||||
"storybook": "storybook",
|
||||
"storybook-build": "storybook build",
|
||||
"storybook-dev": "storybook dev -p 6006",
|
||||
"test-storybook": "vitest run --project storybook",
|
||||
"test-storybook-update-snapshots": "vitest run --project storybook --update",
|
||||
"test-storybook-static": "npm run storybook-build && concurrently -k -s first -n 'SB,TEST' -c 'magenta,blue' 'npx http-server storybook-static --port 6006 --silent' 'npx wait-on tcp:127.0.0.1:6006 && npm run test-storybook'"
|
||||
},
|
||||
"license": "MIT",
|
||||
"devDependencies": {
|
||||
"@eslint/js": "^9.3.0",
|
||||
"@kachurun/storybook-solid": "^9.0.11",
|
||||
"@kachurun/storybook-solid-vite": "^9.0.11",
|
||||
"@storybook/addon-a11y": "^9.0.8",
|
||||
"@storybook/addon-docs": "^9.0.8",
|
||||
"@storybook/addon-links": "^9.0.8",
|
||||
"@storybook/addon-onboarding": "^9.0.8",
|
||||
"@storybook/addon-vitest": "^9.0.8",
|
||||
"@tailwindcss/typography": "^0.5.13",
|
||||
"@types/json-schema": "^7.0.15",
|
||||
"@types/node": "^22.15.19",
|
||||
"@vitest/browser": "^3.2.3",
|
||||
"autoprefixer": "^10.4.19",
|
||||
"classnames": "^2.5.1",
|
||||
"concurrently": "^9.1.2",
|
||||
"eslint": "^9.27.0",
|
||||
"eslint-plugin-tailwindcss": "^3.17.0",
|
||||
"jsdom": "^26.1.0",
|
||||
"knip": "^5.61.2",
|
||||
"postcss": "^8.4.38",
|
||||
"prettier": "^3.2.5",
|
||||
"solid-devtools": "^0.34.0",
|
||||
"storybook": "^9.0.8",
|
||||
"tailwindcss": "^4.0.0",
|
||||
"typescript": "^5.4.5",
|
||||
"typescript-eslint": "^8.32.1",
|
||||
"vite": "^7.0.0",
|
||||
"vite-plugin-solid": "^2.8.2",
|
||||
"vite-plugin-solid-svg": "^0.8.1",
|
||||
"vitest": "^3.2.3"
|
||||
},
|
||||
"dependencies": {
|
||||
"@floating-ui/dom": "^1.6.8",
|
||||
"@kobalte/core": "^0.13.10",
|
||||
"@kobalte/tailwindcss": "^0.9.0",
|
||||
"@modular-forms/solid": "^0.25.1",
|
||||
"@solid-primitives/storage": "^4.3.2",
|
||||
"@solidjs/router": "^0.15.3",
|
||||
"@tanstack/eslint-plugin-query": "^5.51.12",
|
||||
"@tanstack/solid-query": "^5.76.0",
|
||||
"corvu": "^0.7.1",
|
||||
"nanoid": "^5.0.7",
|
||||
"solid-js": "^1.9.7",
|
||||
"solid-toast": "^0.5.0"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@esbuild/darwin-arm64": "^0.25.4",
|
||||
"@esbuild/darwin-x64": "^0.25.4",
|
||||
"@esbuild/linux-arm64": "^0.25.4",
|
||||
"@esbuild/linux-x64": "^0.25.4"
|
||||
},
|
||||
"overrides": {
|
||||
"vite": {
|
||||
"rollup": "npm:@rollup/wasm-node@^4.34.9"
|
||||
},
|
||||
"@rollup/rollup-darwin-x64": "npm:@rollup/wasm-node@^4.34.9",
|
||||
"@rollup/rollup-linux-x64": "npm:@rollup/wasm-node@^4.34.9",
|
||||
"@rollup/rollup-darwin-arm64": "npm:@rollup/wasm-node@^4.34.9",
|
||||
"@rollup/rollup-linux-arm64": "npm:@rollup/wasm-node@^4.34.9"
|
||||
}
|
||||
}
|
||||
1 pkgs/clan-app/ui-2d/package.json Symbolic link
@@ -0,0 +1 @@
../ui/package.json
@@ -19,7 +19,6 @@ import {
} from "@/src/components/inputBase";
import { FieldLayout } from "./layout";
import Icon from "@/src/components/icon";
import { useContext } from "corvu/dialog";

interface Option {
value: string;
@@ -51,9 +50,6 @@ interface SelectInputpProps {
}

export function SelectInput(props: SelectInputpProps) {
const dialogContext = (dialogContextId?: string) =>
useContext(dialogContextId);

const _id = createUniqueId();

const [reference, setReference] = createSignal<HTMLElement>();

@@ -23,37 +23,85 @@ export type SuccessQuery<T extends OperationNames> = Extract<
>;
export type SuccessData<T extends OperationNames> = SuccessQuery<T>["data"];

function isMachine(obj: unknown): obj is Machine {
return (
!!obj &&
typeof obj === "object" &&
// eslint-disable-next-line @typescript-eslint/no-explicit-any
typeof (obj as any).name === "string" &&
// eslint-disable-next-line @typescript-eslint/no-explicit-any
typeof (obj as any).flake === "object" &&
// eslint-disable-next-line @typescript-eslint/no-explicit-any
typeof (obj as any).flake.identifier === "string"
);
}

// Machine type with flake for API calls
interface Machine {
name: string;
flake: {
identifier: string;
};
}

interface BackendOpts {
logging?: { group: string | Machine };
}

interface BackendReturnType<K extends OperationNames> {
body: OperationResponse<K>;

// eslint-disable-next-line @typescript-eslint/no-explicit-any
header: Record<string, any>;
}

const _callApi = <K extends OperationNames>(
method: K,
args: OperationArgs<K>,
): { promise: Promise<OperationResponse<K>>; op_key: string } => {
backendOpts?: BackendOpts,
): { promise: Promise<BackendReturnType<K>>; op_key: string } => {
// if window[method] does not exist, throw an error
if (!(method in window)) {
console.error(`Method ${method} not found on window object`);
// return a rejected promise
return {
promise: Promise.resolve({
status: "error",
errors: [
{
message: `Method ${method} not found on window object`,
code: "method_not_found",
},
],
op_key: "noop",
body: {
status: "error",
errors: [
{
message: `Method ${method} not found on window object`,
code: "method_not_found",
},
],
op_key: "noop",
},
header: {},
}),
op_key: "noop",
};
}

let header: BackendOpts = {};
if (backendOpts != undefined) {
header = { ...backendOpts };
const group = backendOpts?.logging?.group;
if (group != undefined && isMachine(group)) {
header = {
logging: { group: group.flake.identifier + "#" + group.name },
};
}
}

const promise = (
window as unknown as Record<
OperationNames,
(
args: OperationArgs<OperationNames>,
) => Promise<OperationResponse<OperationNames>>
metadata: BackendOpts,
) => Promise<BackendReturnType<OperationNames>>
>
)[method](args) as Promise<OperationResponse<K>>;
)[method](args, header) as Promise<BackendReturnType<K>>;

// eslint-disable-next-line @typescript-eslint/no-explicit-any
const op_key = (promise as any)._webviewMessageId as string;
@@ -63,7 +111,7 @@ const _callApi = <K extends OperationNames>(

const handleCancel = async <K extends OperationNames>(
ops_key: string,
orig_task: Promise<OperationResponse<K>>,
orig_task: Promise<BackendReturnType<K>>,
) => {
console.log("Canceling operation: ", ops_key);
const { promise, op_key } = _callApi("cancel_task", { task_id: ops_key });
@@ -83,7 +131,7 @@ const handleCancel = async <K extends OperationNames>(
});
const resp = await promise;

if (resp.status === "error") {
if (resp.body.status === "error") {
toast.custom(
(t) => (
<ErrorToastComponent
@@ -105,10 +153,11 @@ const handleCancel = async <K extends OperationNames>(
export const callApi = <K extends OperationNames>(
method: K,
args: OperationArgs<K>,
backendOpts?: BackendOpts,
): { promise: Promise<OperationResponse<K>>; op_key: string } => {
console.log("Calling API", method, args);
console.log("Calling API", method, args, backendOpts);

const { promise, op_key } = _callApi(method, args);
const { promise, op_key } = _callApi(method, args, backendOpts);
promise.catch((error) => {
toast.custom(
(t) => (
@@ -146,13 +195,14 @@ export const callApi = <K extends OperationNames>(
console.log("Not printing toast because operation was cancelled");
}

if (response.status === "error" && !cancelled) {
const body = response.body;
if (body.status === "error" && !cancelled) {
toast.remove(toastId);
toast.custom(
(t) => (
<ErrorToastComponent
t={t}
message={"Error: " + response.errors[0].message}
message={"Error: " + body.errors[0].message}
/>
),
{
@@ -162,7 +212,8 @@ export const callApi = <K extends OperationNames>(
} else {
toast.remove(toastId);
}
return response;
return body;
});

return { promise: new_promise, op_key: op_key };
};

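The hunks above rework `_callApi`/`callApi` to take an optional `BackendOpts` argument and to unwrap the `{ body, header }` envelope returned by the backend. A minimal usage sketch, assuming the `get_host` endpoint and the `Machine` shape shown in this diff; the import path and surrounding function are illustrative, not taken from the source:

```ts
import { callApi } from "@/src/api"; // assumed path to the api.ts shown above

async function fetchTargetHost(name: string, identifier: string) {
  const machine = { name, flake: { identifier } };
  // The third argument is the new BackendOpts; passing a Machine as the log
  // group makes the backend log under "<flake-identifier>#<name>".
  const { promise, op_key } = callApi(
    "get_host",
    { name, flake: { identifier }, field: "targetHost" },
    { logging: { group: machine } },
  );
  // callApi now resolves to the unwrapped body (OperationResponse); the header
  // is only sent to the backend thread.
  const result = await promise;
  return { result, op_key };
}
```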
@@ -61,7 +61,7 @@ export const ApiTester = () => {
return await callApi(
values.endpoint as keyof API,
JSON.parse(values.payload || "{}"),
);
).promise;
},
staleTime: Infinity,
enabled: false,

@@ -27,5 +27,5 @@
}

.button--dark-active:active {
@apply active:border-secondary-900 active:shadow-button-primary-active;
@apply active:border-secondary-900;
}

@@ -7,5 +7,5 @@
}

.button--ghost-active:active {
@apply active:bg-secondary-200 active:text-secondary-900 active:shadow-button-primary-active;
@apply active:bg-secondary-200 active:text-secondary-900;
}

@@ -27,7 +27,7 @@
}

.button--light-active:active {
@apply active:bg-secondary-200 border-secondary-600 active:text-secondary-900 active:shadow-button-primary-active;
@apply active:bg-secondary-200 border-secondary-600 active:text-secondary-900;

box-shadow: inset 2px 2px theme(backgroundColor.secondary.300);

@@ -17,7 +17,7 @@ const defaultRemoteData: RemoteData = {
private_key: undefined,
password: "",
forward_agent: true,
host_key_check: 0,
host_key_check: "strict",
verbose_ssh: false,
ssh_options: {},
tor_socks: false,
@@ -32,7 +32,7 @@ const sampleRemoteData: RemoteData = {
private_key: undefined,
password: "",
forward_agent: true,
host_key_check: 1,
host_key_check: "ask",
verbose_ssh: false,
ssh_options: {
StrictHostKeyChecking: "no",
@@ -238,7 +238,7 @@ const advancedRemoteData: RemoteData = {
private_key: undefined,
password: "",
forward_agent: false,
host_key_check: 2,
host_key_check: "none",
verbose_ssh: true,
ssh_options: {
ConnectTimeout: "10",

@@ -11,13 +11,6 @@ import { Loader } from "@/src/components/v2/Loader/Loader";
|
||||
import { Button } from "@/src/components/v2/Button/Button";
|
||||
import Accordion from "@/src/components/accordion";
|
||||
|
||||
// Define the HostKeyCheck enum values with proper API mapping
|
||||
export enum HostKeyCheck {
|
||||
ASK = 0,
|
||||
TOFU = 1,
|
||||
IGNORE = 2,
|
||||
}
|
||||
|
||||
// Export the API types for use in other components
|
||||
export type { RemoteData, Machine, RemoteDataSource };
|
||||
|
||||
@@ -185,40 +178,6 @@ export function RemoteForm(props: RemoteFormProps) {
|
||||
const [formData, setFormData] = createSignal<RemoteData | null>(null);
|
||||
const [isSaving, setIsSaving] = createSignal(false);
|
||||
|
||||
const hostKeyCheckOptions = [
|
||||
{ value: "ASK", label: "Ask" },
|
||||
{ value: "TOFU", label: "TOFU (Trust On First Use)" },
|
||||
{ value: "IGNORE", label: "Ignore" },
|
||||
];
|
||||
|
||||
// Helper function to convert enum name to numeric value
|
||||
const getHostKeyCheckValue = (name: string): number => {
|
||||
switch (name) {
|
||||
case "ASK":
|
||||
return HostKeyCheck.ASK;
|
||||
case "TOFU":
|
||||
return HostKeyCheck.TOFU;
|
||||
case "IGNORE":
|
||||
return HostKeyCheck.IGNORE;
|
||||
default:
|
||||
return HostKeyCheck.ASK;
|
||||
}
|
||||
};
|
||||
|
||||
// Helper function to convert numeric value to enum name
|
||||
const getHostKeyCheckName = (value: number | undefined): string => {
|
||||
switch (value) {
|
||||
case HostKeyCheck.ASK:
|
||||
return "ASK";
|
||||
case HostKeyCheck.TOFU:
|
||||
return "TOFU";
|
||||
case HostKeyCheck.IGNORE:
|
||||
return "IGNORE";
|
||||
default:
|
||||
return "ASK";
|
||||
}
|
||||
};
|
||||
|
||||
// Query host data when machine is provided
|
||||
const hostQuery = useQuery(() => ({
|
||||
queryKey: [
|
||||
@@ -241,11 +200,19 @@ export function RemoteForm(props: RemoteFormProps) {
|
||||
});
|
||||
}
|
||||
|
||||
const result = await callApi("get_host", {
|
||||
name: props.machine.name,
|
||||
flake: props.machine.flake,
|
||||
field: props.field || "targetHost",
|
||||
}).promise;
|
||||
const result = await callApi(
|
||||
"get_host",
|
||||
{
|
||||
name: props.machine.name,
|
||||
flake: props.machine.flake,
|
||||
field: props.field || "targetHost",
|
||||
},
|
||||
{
|
||||
logging: {
|
||||
group: { name: props.machine.name, flake: props.machine.flake },
|
||||
},
|
||||
},
|
||||
).promise;
|
||||
|
||||
if (result.status === "error")
|
||||
throw new Error("Failed to fetch host data");
|
||||
@@ -372,16 +339,13 @@ export function RemoteForm(props: RemoteFormProps) {
|
||||
|
||||
<SelectInput
|
||||
label="Host Key Check"
|
||||
value={getHostKeyCheckName(formData()?.host_key_check)}
|
||||
options={hostKeyCheckOptions}
|
||||
selectProps={{
|
||||
onInput: (e) =>
|
||||
updateFormData({
|
||||
host_key_check: getHostKeyCheckValue(
|
||||
e.currentTarget.value,
|
||||
) as 0 | 1 | 2 | 3,
|
||||
}),
|
||||
}}
|
||||
value={formData()?.host_key_check || "ask"}
|
||||
options={[
|
||||
{ value: "ask", label: "Ask" },
|
||||
{ value: "none", label: "None" },
|
||||
{ value: "strict", label: "Strict" },
|
||||
{ value: "tofu", label: "Trust on First Use" },
|
||||
]}
|
||||
disabled={computedDisabled}
|
||||
helperText="How to handle host key verification"
|
||||
/>
|
||||
|
||||
39 pkgs/clan-app/ui-2d/src/components/SimpleModal.tsx Normal file
@@ -0,0 +1,39 @@
|
||||
import { JSX, Show } from "solid-js";
|
||||
|
||||
interface SimpleModalProps {
|
||||
open: boolean;
|
||||
onClose: () => void;
|
||||
title?: string;
|
||||
children: JSX.Element;
|
||||
}
|
||||
|
||||
export const SimpleModal = (props: SimpleModalProps) => {
|
||||
return (
|
||||
<Show when={props.open}>
|
||||
<div class="fixed inset-0 z-50 flex items-center justify-center">
|
||||
{/* Backdrop */}
|
||||
<div class="fixed inset-0 bg-black/50" onClick={props.onClose} />
|
||||
|
||||
{/* Modal Content */}
|
||||
<div class="relative mx-4 w-full max-w-md rounded-lg bg-white shadow-lg">
|
||||
{/* Header */}
|
||||
<Show when={props.title}>
|
||||
<div class="flex items-center justify-between border-b p-4">
|
||||
<h3 class="text-lg font-semibold">{props.title}</h3>
|
||||
<button
|
||||
type="button"
|
||||
class="text-gray-400 hover:text-gray-600"
|
||||
onClick={props.onClose}
|
||||
>
|
||||
×
|
||||
</button>
|
||||
</div>
|
||||
</Show>
|
||||
|
||||
{/* Body */}
|
||||
<div>{props.children}</div>
|
||||
</div>
|
||||
</div>
|
||||
</Show>
|
||||
);
|
||||
};
|
||||
@@ -125,7 +125,7 @@ export const InputLabel = (props: InputLabelProps) => {
|
||||
weight="bold"
|
||||
class="inline-flex gap-1 align-middle !fg-def-1"
|
||||
classList={{
|
||||
[cx("!fg-semantic-1")]: !!props.error,
|
||||
[cx("!text-red-600")]: !!props.error,
|
||||
}}
|
||||
aria-invalid={props.error}
|
||||
>
|
||||
@@ -185,7 +185,7 @@ export const InputError = (props: InputErrorProps) => {
|
||||
// @ts-expect-error: Dependent type is to complex to check how it is coupled to the override for now
|
||||
size="xxs"
|
||||
weight="medium"
|
||||
class={cx("col-span-full px-1 !fg-semantic-4", typoClasses)}
|
||||
class={cx("col-span-full px-1 !text-red-500", typoClasses)}
|
||||
{...rest}
|
||||
>
|
||||
{props.error}
|
||||
|
||||
@@ -47,11 +47,15 @@ export const MachineListItem = (props: MachineListItemProps) => {
|
||||
);
|
||||
return;
|
||||
}
|
||||
const target_host = await callApi("get_host", {
|
||||
field: "targetHost",
|
||||
flake: { identifier: active_clan },
|
||||
name: name,
|
||||
}).promise;
|
||||
const target_host = await callApi(
|
||||
"get_host",
|
||||
{
|
||||
field: "targetHost",
|
||||
flake: { identifier: active_clan },
|
||||
name: name,
|
||||
},
|
||||
{ logging: { group: { name, flake: { identifier: active_clan } } } },
|
||||
).promise;
|
||||
|
||||
if (target_host.status == "error") {
|
||||
console.error("No target host found for the machine");
|
||||
@@ -79,7 +83,6 @@ export const MachineListItem = (props: MachineListItemProps) => {
|
||||
},
|
||||
no_reboot: true,
|
||||
debug: true,
|
||||
nix_options: [],
|
||||
password: null,
|
||||
},
|
||||
target_host: target_host.data!.data,
|
||||
@@ -104,11 +107,17 @@ export const MachineListItem = (props: MachineListItemProps) => {
|
||||
}
|
||||
setUpdating(true);
|
||||
|
||||
const target_host = await callApi("get_host", {
|
||||
field: "targetHost",
|
||||
flake: { identifier: active_clan },
|
||||
name: name,
|
||||
}).promise;
|
||||
const target_host = await callApi(
|
||||
"get_host",
|
||||
{
|
||||
field: "targetHost",
|
||||
flake: { identifier: active_clan },
|
||||
name: name,
|
||||
},
|
||||
{
|
||||
logging: { group: { name, flake: { identifier: active_clan } } },
|
||||
},
|
||||
).promise;
|
||||
|
||||
if (target_host.status == "error") {
|
||||
console.error("No target host found for the machine");
|
||||
@@ -125,11 +134,15 @@ export const MachineListItem = (props: MachineListItemProps) => {
|
||||
return;
|
||||
}
|
||||
|
||||
const build_host = await callApi("get_host", {
|
||||
field: "buildHost",
|
||||
flake: { identifier: active_clan },
|
||||
name: name,
|
||||
}).promise;
|
||||
const build_host = await callApi(
|
||||
"get_host",
|
||||
{
|
||||
field: "buildHost",
|
||||
flake: { identifier: active_clan },
|
||||
name: name,
|
||||
},
|
||||
{ logging: { group: { name, flake: { identifier: active_clan } } } },
|
||||
).promise;
|
||||
|
||||
if (build_host.status == "error") {
|
||||
console.error("No target host found for the machine");
|
||||
@@ -141,16 +154,20 @@ export const MachineListItem = (props: MachineListItemProps) => {
|
||||
return;
|
||||
}
|
||||
|
||||
await callApi("deploy_machine", {
|
||||
machine: {
|
||||
name: name,
|
||||
flake: {
|
||||
identifier: active_clan,
|
||||
await callApi(
|
||||
"deploy_machine",
|
||||
{
|
||||
machine: {
|
||||
name: name,
|
||||
flake: {
|
||||
identifier: active_clan,
|
||||
},
|
||||
},
|
||||
target_host: target_host.data!.data,
|
||||
build_host: build_host.data?.data || null,
|
||||
},
|
||||
target_host: target_host.data!.data,
|
||||
build_host: build_host.data?.data || null,
|
||||
}).promise;
|
||||
{ logging: { group: { name, flake: { identifier: active_clan } } } },
|
||||
).promise;
|
||||
|
||||
setUpdating(false);
|
||||
};
|
||||
|
||||
@@ -1,134 +0,0 @@
|
||||
import Dialog from "corvu/dialog";
|
||||
import { createSignal, JSX } from "solid-js";
|
||||
import { Button } from "../Button/Button";
|
||||
import Icon from "../icon";
|
||||
import cx from "classnames";
|
||||
|
||||
interface ModalProps {
|
||||
open: boolean | undefined;
|
||||
handleClose: () => void;
|
||||
title: string;
|
||||
children: JSX.Element;
|
||||
class?: string;
|
||||
}
|
||||
export const Modal = (props: ModalProps) => {
|
||||
const [dragging, setDragging] = createSignal(false);
|
||||
const [startOffset, setStartOffset] = createSignal({ x: 0, y: 0 });
|
||||
|
||||
let dialogRef: HTMLDivElement;
|
||||
|
||||
const handleMouseDown = (e: MouseEvent) => {
|
||||
setDragging(true);
|
||||
const rect = dialogRef.getBoundingClientRect();
|
||||
setStartOffset({
|
||||
x: e.clientX - rect.left,
|
||||
y: e.clientY - rect.top,
|
||||
});
|
||||
};
|
||||
|
||||
const handleMouseMove = (e: MouseEvent) => {
|
||||
if (dragging()) {
|
||||
let newTop = e.clientY - startOffset().y;
|
||||
let newLeft = e.clientX - startOffset().x;
|
||||
|
||||
if (newTop < 0) {
|
||||
newTop = 0;
|
||||
}
|
||||
if (newLeft < 0) {
|
||||
newLeft = 0;
|
||||
}
|
||||
dialogRef.style.top = `${newTop}px`;
|
||||
dialogRef.style.left = `${newLeft}px`;
|
||||
|
||||
// dialogRef.style.maxHeight = `calc(100vh - ${newTop}px - 2rem)`;
|
||||
// dialogRef.style.maxHeight = `calc(100vh - ${newTop}px - 2rem)`;
|
||||
}
|
||||
};
|
||||
|
||||
const handleMouseUp = () => setDragging(false);
|
||||
|
||||
return (
|
||||
<Dialog open={props.open} trapFocus={true}>
|
||||
<Dialog.Portal>
|
||||
<Dialog.Overlay
|
||||
class="fixed inset-0 z-50 bg-black/50"
|
||||
onMouseMove={handleMouseMove}
|
||||
/>
|
||||
|
||||
<Dialog.Content
|
||||
class={cx(
|
||||
"overflow-hidden absolute left-1/3 top-1/3 z-50 min-w-[560px] rounded-md border border-def-4 focus-visible:outline-none",
|
||||
props.class,
|
||||
)}
|
||||
classList={{
|
||||
"!cursor-grabbing": dragging(),
|
||||
[cx("scale-[101%] transition-transform")]: dragging(),
|
||||
}}
|
||||
ref={(el) => {
|
||||
dialogRef = el;
|
||||
}}
|
||||
onMouseMove={handleMouseMove}
|
||||
onMouseUp={handleMouseUp}
|
||||
onMouseDown={(e: MouseEvent) => {
|
||||
e.stopPropagation(); // Prevent backdrop drag conflict
|
||||
}}
|
||||
onClick={(e: MouseEvent) => e.stopPropagation()} // Prevent backdrop click closing
|
||||
>
|
||||
<Dialog.Label
|
||||
as="div"
|
||||
class="flex w-full justify-center border-b-2 px-4 py-2 align-middle bg-def-3 border-def-4"
|
||||
onMouseDown={handleMouseDown}
|
||||
>
|
||||
<div
|
||||
class="flex w-full cursor-move flex-col gap-px py-1 "
|
||||
classList={{
|
||||
"!cursor-grabbing": dragging(),
|
||||
}}
|
||||
>
|
||||
<hr class="h-px w-full border-none bg-secondary-300" />
|
||||
<hr class="h-px w-full border-none bg-secondary-300" />
|
||||
<hr class="h-px w-full border-none bg-secondary-300" />
|
||||
<hr class="h-px w-full border-none bg-secondary-300" />
|
||||
<hr class="h-px w-full border-none bg-secondary-300" />
|
||||
<hr class="h-px w-full border-none bg-secondary-300" />
|
||||
</div>
|
||||
<span class="mx-2 select-none whitespace-nowrap">
|
||||
{props.title}
|
||||
</span>
|
||||
<div
|
||||
class="flex w-full cursor-move flex-col gap-px py-1 "
|
||||
classList={{
|
||||
"!cursor-grabbing": dragging(),
|
||||
}}
|
||||
>
|
||||
<hr class="h-px w-full border-none bg-secondary-300" />
|
||||
<hr class="h-px w-full border-none bg-secondary-300" />
|
||||
<hr class="h-px w-full border-none bg-secondary-300" />
|
||||
<hr class="h-px w-full border-none bg-secondary-300" />
|
||||
<hr class="h-px w-full border-none bg-secondary-300" />
|
||||
<hr class="h-px w-full border-none bg-secondary-300" />
|
||||
</div>
|
||||
|
||||
<div class="absolute right-1 top-2 pl-1 bg-def-3">
|
||||
<Button
|
||||
onMouseDown={(e) => e.stopPropagation()}
|
||||
tabIndex={-1}
|
||||
class="size-4"
|
||||
variant="ghost"
|
||||
onClick={() => props.handleClose()}
|
||||
size="s"
|
||||
startIcon={<Icon icon={"Close"} />}
|
||||
/>
|
||||
</div>
|
||||
</Dialog.Label>
|
||||
<Dialog.Description
|
||||
class="flex max-h-[90vh] flex-col overflow-y-hidden bg-def-1"
|
||||
as="div"
|
||||
>
|
||||
{props.children}
|
||||
</Dialog.Description>
|
||||
</Dialog.Content>
|
||||
</Dialog.Portal>
|
||||
</Dialog>
|
||||
);
|
||||
};
|
||||
@@ -1,4 +1,4 @@
|
||||
@import "material-icons/iconfont/filled.css";
|
||||
/* @import "material-icons/iconfont/filled.css"; */
|
||||
/* List of icons: https://marella.me/material-icons/demo/ */
|
||||
/* @import url(./components/Typography/css/typography.css); */
|
||||
|
||||
|
||||
@@ -19,10 +19,12 @@ import { createEffect, createSignal } from "solid-js"; // For, Show might not be
|
||||
import toast from "solid-toast";
|
||||
import { FieldLayout } from "@/src/Form/fields/layout";
|
||||
import { InputLabel } from "@/src/components/inputBase";
|
||||
import { Modal } from "@/src/components/modal";
|
||||
|
||||
import Fieldset from "@/src/Form/fieldset"; // Still used for other fieldsets
|
||||
import Accordion from "@/src/components/accordion";
|
||||
|
||||
import { SimpleModal } from "@/src/components/SimpleModal";
|
||||
|
||||
// Import the new generic component
|
||||
import {
|
||||
FileSelectorField,
|
||||
@@ -192,12 +194,11 @@ export const Flash = () => {
|
||||
return (
|
||||
<>
|
||||
<Header title="Flash installer" />
|
||||
<Modal
|
||||
<SimpleModal
|
||||
open={confirmOpen() || isFlashing()}
|
||||
handleClose={() => !isFlashing() && setConfirmOpen(false)}
|
||||
onClose={() => !isFlashing() && setConfirmOpen(false)}
|
||||
title="Confirm"
|
||||
>
|
||||
{/* ... Modal content as before ... */}
|
||||
<div class="flex flex-col gap-4 p-4">
|
||||
<div class="flex flex-col justify-between rounded-sm border p-4 align-middle text-red-900 border-def-2">
|
||||
<Typography
|
||||
@@ -230,7 +231,7 @@ export const Flash = () => {
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
</Modal>
|
||||
</SimpleModal>
|
||||
<div class="w-full self-stretch p-8">
|
||||
<Form
|
||||
onSubmit={handleSubmit}
|
||||
|
||||
@@ -125,7 +125,6 @@ export function InstallMachine(props: InstallMachineProps) {
|
||||
machine: {
|
||||
name: props.name,
|
||||
flake: { identifier: curr_uri },
|
||||
private_key: values.sshKey?.name,
|
||||
},
|
||||
},
|
||||
target_host: targetHostResponse.data.data,
|
||||
|
||||
@@ -77,10 +77,18 @@ export function MachineForm(props: MachineFormProps) {
|
||||
if (!machine_name || !base_dir) {
|
||||
return [];
|
||||
}
|
||||
const result = await callApi("get_generators_closure", {
|
||||
base_dir: base_dir,
|
||||
machine_name: machine_name,
|
||||
}).promise;
|
||||
const result = await callApi(
|
||||
"get_generators_closure",
|
||||
{
|
||||
base_dir: base_dir,
|
||||
machine_name: machine_name,
|
||||
},
|
||||
{
|
||||
logging: {
|
||||
group: { name: machine_name, flake: { identifier: base_dir } },
|
||||
},
|
||||
},
|
||||
).promise;
|
||||
if (result.status === "error") throw new Error("Failed to fetch data");
|
||||
return result.data;
|
||||
},
|
||||
@@ -112,13 +120,19 @@ export function MachineForm(props: MachineFormProps) {
|
||||
return;
|
||||
}
|
||||
|
||||
const target = await callApi("get_host", {
|
||||
field: "targetHost",
|
||||
name: machine,
|
||||
flake: {
|
||||
identifier: curr_uri,
|
||||
const target = await callApi(
|
||||
"get_host",
|
||||
{
|
||||
field: "targetHost",
|
||||
name: machine,
|
||||
flake: {
|
||||
identifier: curr_uri,
|
||||
},
|
||||
},
|
||||
}).promise;
|
||||
{
|
||||
logging: { group: { name: machine, flake: { identifier: curr_uri } } },
|
||||
},
|
||||
).promise;
|
||||
|
||||
if (target.status === "error") {
|
||||
toast.error("Failed to get target host");
|
||||
@@ -132,18 +146,24 @@ export function MachineForm(props: MachineFormProps) {
|
||||
const target_host = target.data.data;
|
||||
|
||||
setIsUpdating(true);
|
||||
const r = await callApi("deploy_machine", {
|
||||
machine: {
|
||||
name: machine,
|
||||
flake: {
|
||||
identifier: curr_uri,
|
||||
const r = await callApi(
|
||||
"deploy_machine",
|
||||
{
|
||||
machine: {
|
||||
name: machine,
|
||||
flake: {
|
||||
identifier: curr_uri,
|
||||
},
|
||||
},
|
||||
target_host: {
|
||||
...target_host,
|
||||
},
|
||||
build_host: null,
|
||||
},
|
||||
target_host: {
|
||||
...target_host,
|
||||
{
|
||||
logging: { group: { name: machine, flake: { identifier: curr_uri } } },
|
||||
},
|
||||
build_host: null,
|
||||
}).promise.finally(() => {
|
||||
).promise.finally(() => {
|
||||
setIsUpdating(false);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -44,7 +44,7 @@ export const HWStep = (props: StepProps<HardwareValues>) => {
|
||||
command_prefix: "sudo",
|
||||
port: 22,
|
||||
forward_agent: false,
|
||||
host_key_check: 1, // 0 = ASK
|
||||
host_key_check: "ask", // 0 = ASK
|
||||
verbose_ssh: false,
|
||||
ssh_options: {},
|
||||
tor_socks: false,
|
||||
|
||||
@@ -149,11 +149,19 @@ export const VarsStep = (props: VarsStepProps) => {
|
||||
const generatorsQuery = createQuery(() => ({
|
||||
queryKey: [props.dir, props.machine_id, "generators", props.fullClosure],
|
||||
queryFn: async () => {
|
||||
const result = await callApi("get_generators_closure", {
|
||||
base_dir: props.dir,
|
||||
machine_name: props.machine_id,
|
||||
full_closure: props.fullClosure,
|
||||
}).promise;
|
||||
const result = await callApi(
|
||||
"get_generators_closure",
|
||||
{
|
||||
base_dir: props.dir,
|
||||
machine_name: props.machine_id,
|
||||
full_closure: props.fullClosure,
|
||||
},
|
||||
{
|
||||
logging: {
|
||||
group: { name: props.machine_id, flake: { identifier: props.dir } },
|
||||
},
|
||||
},
|
||||
).promise;
|
||||
if (result.status === "error") throw new Error("Failed to fetch data");
|
||||
return result.data;
|
||||
},
|
||||
|
||||
@@ -8,6 +8,7 @@ import Icon from "@/src/components/icon";
|
||||
import { Header } from "@/src/layout/header";
|
||||
import { makePersisted } from "@solid-primitives/storage";
|
||||
import { useClanContext } from "@/src/contexts/clan";
|
||||
import { debug } from "console";
|
||||
|
||||
type MachinesModel = Extract<
|
||||
OperationResponse<"list_machines">,
|
||||
@@ -38,6 +39,7 @@ export const MachineListView: Component = () => {
|
||||
},
|
||||
}).promise;
|
||||
console.log("response", response);
|
||||
|
||||
if (response.status === "error") {
|
||||
console.error("Failed to fetch data");
|
||||
} else {
|
||||
|
||||
@@ -1,26 +1,19 @@
|
||||
import { API, Error as ApiError } from "@/api/API";
|
||||
import { API } from "@/api/API";
|
||||
import { Schema as Inventory } from "@/api/Inventory";
|
||||
import { toast } from "solid-toast";
|
||||
import {
|
||||
ErrorToastComponent,
|
||||
CancelToastComponent,
|
||||
} from "@/src/components/toast";
|
||||
|
||||
type OperationNames = keyof API;
|
||||
type OperationArgs<T extends OperationNames> = API[T]["arguments"];
|
||||
export type OperationResponse<T extends OperationNames> = API[T]["return"];
|
||||
|
||||
type ApiEnvelope<T> =
|
||||
| {
|
||||
status: "success";
|
||||
data: T;
|
||||
op_key: string;
|
||||
}
|
||||
| ApiError;
|
||||
|
||||
type Services = NonNullable<Inventory["services"]>;
|
||||
type ServiceNames = keyof Services;
|
||||
type ClanService<T extends ServiceNames> = Services[T];
|
||||
type ClanServiceInstance<T extends ServiceNames> = NonNullable<
|
||||
|
||||
export type OperationArgs<T extends OperationNames> = API[T]["arguments"];
|
||||
export type OperationResponse<T extends OperationNames> = API[T]["return"];
|
||||
|
||||
export type ClanServiceInstance<T extends ServiceNames> = NonNullable<
|
||||
Services[T]
|
||||
>[string];
|
||||
|
||||
@@ -28,51 +21,87 @@ export type SuccessQuery<T extends OperationNames> = Extract<
|
||||
OperationResponse<T>,
|
||||
{ status: "success" }
|
||||
>;
|
||||
type SuccessData<T extends OperationNames> = SuccessQuery<T>["data"];
|
||||
export type SuccessData<T extends OperationNames> = SuccessQuery<T>["data"];
|
||||
|
||||
type ErrorQuery<T extends OperationNames> = Extract<
|
||||
OperationResponse<T>,
|
||||
{ status: "error" }
|
||||
>;
|
||||
type ErrorData<T extends OperationNames> = ErrorQuery<T>["errors"];
|
||||
|
||||
type ClanOperations = Record<OperationNames, (str: string) => void>;
|
||||
|
||||
interface GtkResponse<T> {
|
||||
result: T;
|
||||
op_key: string;
|
||||
function isMachine(obj: unknown): obj is Machine {
|
||||
return (
|
||||
!!obj &&
|
||||
typeof obj === "object" &&
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
typeof (obj as any).name === "string" &&
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
typeof (obj as any).flake === "object" &&
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
typeof (obj as any).flake.identifier === "string"
|
||||
);
|
||||
}
|
||||
|
||||
// Machine type with flake for API calls
|
||||
interface Machine {
|
||||
name: string;
|
||||
flake: {
|
||||
identifier: string;
|
||||
};
|
||||
}
|
||||
|
||||
interface BackendOpts {
|
||||
logging?: { group: string | Machine };
|
||||
}
|
||||
|
||||
interface BackendReturnType<K extends OperationNames> {
|
||||
body: OperationResponse<K>;
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
header: Record<string, any>;
|
||||
}
|
||||
|
||||
const _callApi = <K extends OperationNames>(
|
||||
method: K,
|
||||
args: OperationArgs<K>,
|
||||
): { promise: Promise<OperationResponse<K>>; op_key: string } => {
|
||||
backendOpts?: BackendOpts,
|
||||
): { promise: Promise<BackendReturnType<K>>; op_key: string } => {
|
||||
// if window[method] does not exist, throw an error
|
||||
if (!(method in window)) {
|
||||
console.error(`Method ${method} not found on window object`);
|
||||
// return a rejected promise
|
||||
return {
|
||||
promise: Promise.resolve({
|
||||
status: "error",
|
||||
errors: [
|
||||
{
|
||||
message: `Method ${method} not found on window object`,
|
||||
code: "method_not_found",
|
||||
},
|
||||
],
|
||||
op_key: "noop",
|
||||
body: {
|
||||
status: "error",
|
||||
errors: [
|
||||
{
|
||||
message: `Method ${method} not found on window object`,
|
||||
code: "method_not_found",
|
||||
},
|
||||
],
|
||||
op_key: "noop",
|
||||
},
|
||||
header: {},
|
||||
}),
|
||||
op_key: "noop",
|
||||
};
|
||||
}
|
||||
|
||||
let header: BackendOpts = {};
|
||||
if (backendOpts != undefined) {
|
||||
header = { ...backendOpts };
|
||||
const group = backendOpts?.logging?.group;
|
||||
if (group != undefined && isMachine(group)) {
|
||||
header = {
|
||||
logging: { group: group.flake.identifier + "#" + group.name },
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
const promise = (
|
||||
window as unknown as Record<
|
||||
OperationNames,
|
||||
(
|
||||
args: OperationArgs<OperationNames>,
|
||||
) => Promise<OperationResponse<OperationNames>>
|
||||
metadata: BackendOpts,
|
||||
) => Promise<BackendReturnType<OperationNames>>
|
||||
>
|
||||
)[method](args) as Promise<OperationResponse<K>>;
|
||||
)[method](args, header) as Promise<BackendReturnType<K>>;
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const op_key = (promise as any)._webviewMessageId as string;
|
||||
@@ -82,7 +111,7 @@ const _callApi = <K extends OperationNames>(
|
||||
|
||||
const handleCancel = async <K extends OperationNames>(
|
||||
ops_key: string,
|
||||
orig_task: Promise<OperationResponse<K>>,
|
||||
orig_task: Promise<BackendReturnType<K>>,
|
||||
) => {
|
||||
console.log("Canceling operation: ", ops_key);
|
||||
const { promise, op_key } = _callApi("cancel_task", { task_id: ops_key });
|
||||
@@ -102,7 +131,7 @@ const handleCancel = async <K extends OperationNames>(
|
||||
});
|
||||
const resp = await promise;
|
||||
|
||||
if (resp.status === "error") {
|
||||
if (resp.body.status === "error") {
|
||||
toast.custom(
|
||||
(t) => (
|
||||
<ErrorToastComponent
|
||||
@@ -124,10 +153,11 @@ const handleCancel = async <K extends OperationNames>(
|
||||
export const callApi = <K extends OperationNames>(
|
||||
method: K,
|
||||
args: OperationArgs<K>,
|
||||
backendOpts?: BackendOpts,
|
||||
): { promise: Promise<OperationResponse<K>>; op_key: string } => {
|
||||
console.log("Calling API", method, args);
|
||||
console.log("Calling API", method, args, backendOpts);
|
||||
|
||||
const { promise, op_key } = _callApi(method, args);
|
||||
const { promise, op_key } = _callApi(method, args, backendOpts);
|
||||
promise.catch((error) => {
|
||||
toast.custom(
|
||||
(t) => (
|
||||
@@ -165,13 +195,14 @@ export const callApi = <K extends OperationNames>(
|
||||
console.log("Not printing toast because operation was cancelled");
|
||||
}
|
||||
|
||||
if (response.status === "error" && !cancelled) {
|
||||
const body = response.body;
|
||||
if (body.status === "error" && !cancelled) {
|
||||
toast.remove(toastId);
|
||||
toast.custom(
|
||||
(t) => (
|
||||
<ErrorToastComponent
|
||||
t={t}
|
||||
message={"Error: " + response.errors[0].message}
|
||||
message={"Error: " + body.errors[0].message}
|
||||
/>
|
||||
),
|
||||
{
|
||||
@@ -181,7 +212,8 @@ export const callApi = <K extends OperationNames>(
|
||||
} else {
|
||||
toast.remove(toastId);
|
||||
}
|
||||
return response;
|
||||
return body;
|
||||
});
|
||||
|
||||
return { promise: new_promise, op_key: op_key };
|
||||
};
|
||||
|
||||
@@ -8,13 +8,23 @@ pkgs.clangStdenv.mkDerivation {
# We disallow remote connections from the UI on Linux
# TODO: Disallow remote connections on MacOS

src = pkgs.fetchFromGitHub {
owner = "clan-lol";
src = pkgs.fetchFromGitea {
domain = "git.clan.lol";
owner = "clan";
repo = "webview";
rev = "7d24f0192765b7e08f2d712fae90c046d08f318e";
hash = "sha256-yokVI9tFiEEU5M/S2xAeJOghqqiCvTelLo8WLKQZsSY=";
rev = "ef481aca8e531f6677258ca911c61aaaf71d2214";
hash = "sha256-KF9ESpo40z6VXyYsZCLWJAIh0RFe1Zy/Qw4k7cTpoYU=";
};

# @Mic92: Where is this revision coming from? I can't see it in any of the branches.
# I removed the icon python code for now
# src = pkgs.fetchFromGitHub {
# owner = "clan-lol";
# repo = "webview";
# rev = "7d24f0192765b7e08f2d712fae90c046d08f318e";
# hash = "sha256-yokVI9tFiEEU5M/S2xAeJOghqqiCvTelLo8WLKQZsSY=";
# };

outputs = [
"out"
"dev"

2 pkgs/clan-cli/api.py Normal file → Executable file
@@ -1,3 +1,5 @@
#!/usr/bin/env python3

import importlib
import json
import pkgutil

@@ -42,7 +42,7 @@ class SshdConfig:
def sshd_config(test_root: Path) -> Iterator[SshdConfig]:
# FIXME, if any parent of the sshd directory is world-writable then sshd will refuse it.
# we use .direnv instead since it's already in .gitignore
with TemporaryDirectory(prefix="sshd-") as _dir:
with TemporaryDirectory(prefix="sshd-", ignore_cleanup_errors=True) as _dir:
tmpdir = Path(_dir)
host_key = test_root / "data" / "ssh_host_ed25519_key"
host_key.chmod(0o600)

@@ -91,13 +91,19 @@ def test_clan_core_templates(
clan_core_templates = nix_attrset["inputs"][InputName("clan-core")]["templates"][
"clan"
]
clan_core_template_keys = list(clan_core_templates.keys())
clan_core_template_keys = set(clan_core_templates.keys())

expected_templates = ["default", "flake-parts", "minimal", "minimal-flake-parts"]
expected_templates = set(
{
"default",
"minimal",
"flake-parts",
}
)
assert clan_core_template_keys == expected_templates

vlist_temps = list_templates("clan", clan_dir)
list_template_keys = list(vlist_temps.inputs[InputName("clan-core")].keys())
list_template_keys = set(vlist_temps.inputs[InputName("clan-core")].keys())
assert list_template_keys == expected_templates

default_template = get_template(
@@ -112,14 +118,11 @@ def test_clan_core_templates(
Path(default_template.src["path"]),
new_clan,
)
assert (new_clan / "flake.nix").exists()
assert (new_clan / "machines").is_dir()
assert (new_clan / "machines" / "jon").is_dir()
config_nix_p = new_clan / "machines" / "jon" / "configuration.nix"
assert (config_nix_p).is_file()
flake_file = new_clan / "flake.nix"

# Test if we can write to the configuration.nix file
with config_nix_p.open("r+") as f:
assert (flake_file).exists()
# Test if we can read + write files after the template was copied
with flake_file.open("r+") as f:
data = f.read()
f.write(data)

@@ -23,8 +23,7 @@ def test_create_flake(
|
||||
|
||||
cli.run(["flakes", "create", str(flake_dir), "--template=default", "--no-update"])
|
||||
|
||||
assert (flake_dir / ".clan-flake").exists()
|
||||
# Replace the inputs.clan.url in the template flake.nix
|
||||
# Replace the inputs.clan-core.url in the template flake.nix
|
||||
substitute(
|
||||
flake_dir / "flake.nix",
|
||||
clan_core,
|
||||
@@ -35,13 +34,6 @@ def test_create_flake(
|
||||
|
||||
cli.run(["machines", "create", "machine1"])
|
||||
|
||||
# create a hardware-configuration.nix that doesn't throw an eval error
|
||||
|
||||
for patch_machine in ["jon", "sara"]:
|
||||
(
|
||||
flake_dir / "machines" / f"{patch_machine}/hardware-configuration.nix"
|
||||
).write_text("{}")
|
||||
|
||||
with capture_output as output:
|
||||
cli.run(["machines", "list"])
|
||||
assert "machine1" in output.out
|
||||
@@ -68,8 +60,7 @@ def test_create_flake_existing_git(
|
||||
|
||||
cli.run(["flakes", "create", str(flake_dir), "--template=default", "--no-update"])
|
||||
|
||||
assert (flake_dir / ".clan-flake").exists()
|
||||
# Replace the inputs.clan.url in the template flake.nix
|
||||
# Replace the inputs.clan-core.url in the template flake.nix
|
||||
substitute(
|
||||
flake_dir / "flake.nix",
|
||||
clan_core,
|
||||
@@ -79,13 +70,6 @@ def test_create_flake_existing_git(
|
||||
monkeypatch.chdir(flake_dir)
|
||||
cli.run(["machines", "create", "machine1"])
|
||||
|
||||
# create a hardware-configuration.nix that doesn't throw an eval error
|
||||
|
||||
for patch_machine in ["jon", "sara"]:
|
||||
(
|
||||
flake_dir / "machines" / f"{patch_machine}/hardware-configuration.nix"
|
||||
).write_text("{}")
|
||||
|
||||
with capture_output as output:
|
||||
cli.run(["machines", "list"])
|
||||
assert "machine1" in output.out
|
||||
@@ -111,7 +95,7 @@ def test_ui_template(
|
||||
|
||||
cli.run(["flakes", "create", str(flake_dir), "--template=minimal", "--no-update"])
|
||||
|
||||
# Replace the inputs.clan.url in the template flake.nix
|
||||
# Replace the inputs.clan-core.url in the template flake.nix
|
||||
substitute(
|
||||
flake_dir / "flake.nix",
|
||||
clan_core,
|
||||
|
||||
@@ -1,15 +1,36 @@
import logging
import threading
from dataclasses import dataclass

from clan_lib.api import API

log = logging.getLogger(__name__)


@dataclass
class WebThread:
    thread: threading.Thread
    stop_event: threading.Event


BAKEND_THREADS: dict[str, WebThread] | None = None


@API.register_abstract
def cancel_task(task_id: str) -> None:
"""Cancel a task by its op_key."""
msg = "cancel_task() is not implemented"
raise NotImplementedError(msg)
assert BAKEND_THREADS is not None, "Backend threads not initialized"
future = BAKEND_THREADS.get(task_id)
if future:
future.stop_event.set()
log.debug(f"Task with id {task_id} has been cancelled.")
else:
msg = f"Task with id {task_id} not found."
raise ValueError(msg)


@API.register_abstract
@API.register
def list_tasks() -> list[str]:
"""List all tasks."""
msg = "list_tasks() is not implemented"
raise NotImplementedError(msg)
assert BAKEND_THREADS is not None, "Backend threads not initialized"
return list(BAKEND_THREADS.keys())

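With `cancel_task` now backed by `BAKEND_THREADS`, the frontend can stop a running call by its `op_key`. A small sketch of that flow, mirroring `handleCancel` in the api.ts hunks earlier in this diff (toast handling is omitted and the import path is assumed):

```ts
import { callApi } from "@/src/api"; // assumed path

async function cancelRunningTask(opKey: string) {
  // task_id is the op_key returned by the original callApi invocation;
  // the backend sets that task's stop_event.
  const { promise } = callApi("cancel_task", { task_id: opKey });
  const result = await promise;
  if (result.status === "error") {
    console.error("Failed to cancel task", opKey, result.errors);
  }
}
```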
@@ -310,10 +310,13 @@ class LogManager:
base_dir: Path

def create_log_file(
self, func: Callable, op_key: str, group: str = "default"
self, func: Callable, op_key: str, group: str | None = None
) -> LogFile:
now_utc = datetime.datetime.now(tz=datetime.UTC)

if group is None:
group = "default"

log_file = LogFile(
op_key=op_key,
date_day=now_utc.strftime("%Y-%m-%d"),

@@ -64,7 +64,17 @@ def create_base_inventory(ssh_keys_pairs: list[SSHKeyPair]) -> InventoryWrapper:
ssh_keys.append(InvSSHKeyEntry(f"user_{num}", ssh_key.public.read_text()))

"""Create the base inventory structure."""
legacy_services: dict[str, Any] = {}
legacy_services: dict[str, Any] = {
    "state-version": {
        "someid": {
            "roles": {
                "default": {
                    "tags": ["all"],
                }
            }
        }
    },
}

instances = InventoryInstancesType(
{

@@ -34,8 +34,9 @@ clan_lib = [
[tool.pytest.ini_options]
testpaths = ["tests", "clan_cli", "clan_lib"]
faulthandler_timeout = 240
log_level = "DEBUG"
log_format = "%(message)s"
log_cli = true
log_cli_level = "DEBUG"
log_cli_format = "%(message)s"
addopts = "--durations 5 --color=yes --new-first -W error -n auto" # Add --pdb for debugging
norecursedirs = ["clan_cli/tests/helpers", "clan_lib/nixpkgs"]
# All tests which evaluate any nix library code from clan-core need to use the

@@ -1,2 +1,2 @@
result
result*
.direnv/
77 templates/clan/default/flake.nix Normal file
@@ -0,0 +1,77 @@
|
||||
{
|
||||
inputs.clan-core.url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
|
||||
inputs.nixpkgs.follows = "clan-core/nixpkgs";
|
||||
|
||||
outputs =
|
||||
{ self, clan-core, ... }:
|
||||
let
|
||||
# Usage see: https://docs.clan.lol
|
||||
clan = clan-core.clanLib.buildClan {
|
||||
inherit self;
|
||||
|
||||
# Ensure this is unique among all clans you want to use.
|
||||
meta.name = "__CHANGE_ME__";
|
||||
|
||||
# Clan services to use. See https://docs.clan.lol/reference/clanServices
|
||||
inventory.instances = {
|
||||
admin = {
|
||||
roles.default.tags.all = { };
|
||||
roles.default.settings.allowedKeys = {
|
||||
# Insert the public key of all your admin machines
|
||||
# All these 'admin machines' will have ssh access to "tags.all" (all machines)
|
||||
# Alternatively set 'users.users.root.openssh.authorizedKeys.keys' in each machine
|
||||
"admin-machine-1" = "__YOUR_PUBLIC_KEY__";
|
||||
};
|
||||
};
|
||||
|
||||
zerotier = {
|
||||
# Replace with the name of your machine that you will use as zerotier-controller
|
||||
# See: https://docs.zerotier.com/controller/
|
||||
# Deploy this machine first to create the network secrets
|
||||
roles.controller.machines."__YOUR_CONTROLLER__" = { };
|
||||
# Peers of the network
|
||||
# tags.all means 'all machines' will joined
|
||||
roles.peer.tags.all = { };
|
||||
};
|
||||
};
|
||||
|
||||
# A mapping of machine names to their nixos configuration.
|
||||
# Allows specifying additional nixos configuration.
|
||||
machines = {
|
||||
somemachine =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.systemPackages = with pkgs; [ asciinema ];
|
||||
};
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
|
||||
# Expose clan structures as flake outputs. clanInternals is needed for
|
||||
# the clan-cli. Exposing nixosConfigurations allows using `nixos-rebuild` as before.
|
||||
inherit (clan)
|
||||
nixosConfigurations
|
||||
nixosModules
|
||||
clanInternals
|
||||
darwinConfigurations
|
||||
darwinModules
|
||||
;
|
||||
|
||||
# Add the Clan cli tool to the dev shell.
|
||||
# Use "nix develop" to enter the dev shell.
|
||||
devShells =
|
||||
clan-core.inputs.nixpkgs.lib.genAttrs
|
||||
[
|
||||
"x86_64-linux"
|
||||
"aarch64-linux"
|
||||
"aarch64-darwin"
|
||||
"x86_64-darwin"
|
||||
]
|
||||
(system: {
|
||||
default = clan-core.inputs.nixpkgs.legacyPackages.${system}.mkShell {
|
||||
packages = [ clan-core.packages.${system}.clan-cli ];
|
||||
};
|
||||
});
|
||||
};
|
||||
}
|
||||
2 templates/clan/flake-parts/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
result*
.direnv/
@@ -1,15 +1,11 @@
|
||||
{
|
||||
inputs.clan-core.url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
|
||||
inputs.nixpkgs.follows = "clan-core/nixpkgs";
|
||||
inputs.flake-parts.url = "github:hercules-ci/flake-parts";
|
||||
inputs.flake-parts.follows = "clan-core/flake-parts";
|
||||
inputs.flake-parts.inputs.nixpkgs-lib.follows = "clan-core/nixpkgs";
|
||||
|
||||
outputs =
|
||||
inputs@{
|
||||
self,
|
||||
flake-parts,
|
||||
...
|
||||
}:
|
||||
inputs@{ flake-parts, ... }:
|
||||
flake-parts.lib.mkFlake { inherit inputs; } {
|
||||
systems = [
|
||||
"x86_64-linux"
|
||||
@@ -18,84 +14,41 @@
|
||||
"aarch64-darwin"
|
||||
];
|
||||
imports = [ inputs.clan-core.flakeModules.default ];
|
||||
# https://docs.clan.lol/guides/getting-started/flake-parts/
|
||||
# https://docs.clan.lol/guides/flake-parts/
|
||||
clan = {
|
||||
meta.name = "__CHANGE_ME__"; # Ensure this is unique among all clans you want to use.
|
||||
# Ensure this is unique among all clans you want to use.
|
||||
meta.name = "__CHANGE_ME__";
|
||||
|
||||
inherit self;
|
||||
machines = {
|
||||
# "jon" will be the hostname of the machine
|
||||
jon =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
imports = [
|
||||
./modules/shared.nix
|
||||
./modules/disko.nix
|
||||
./machines/jon/configuration.nix
|
||||
];
|
||||
|
||||
nixpkgs.hostPlatform = "x86_64-linux";
|
||||
|
||||
# Set this for clan commands use ssh i.e. `clan machines update`
|
||||
# If you change the hostname, you need to update this line to root@<new-hostname>
|
||||
# This only works however if you have avahi running on your admin machine else use IP
|
||||
clan.core.networking.targetHost = pkgs.lib.mkDefault "root@jon";
|
||||
|
||||
# You can get your disk id by running the following command on the installer:
|
||||
# Replace <IP> with the IP of the installer printed on the screen or by running the `ip addr` command.
|
||||
# ssh root@<IP> lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
|
||||
disko.devices.disk.main = {
|
||||
device = "/dev/disk/by-id/__CHANGE_ME__";
|
||||
};
|
||||
|
||||
# IMPORTANT! Add your SSH key here
|
||||
# e.g. > cat ~/.ssh/id_ed25519.pub
|
||||
users.users.root.openssh.authorizedKeys.keys = throw ''
|
||||
Don't forget to add your SSH key here!
|
||||
users.users.root.openssh.authorizedKeys.keys = [ "<YOUR SSH_KEY>" ]
|
||||
'';
|
||||
|
||||
# Zerotier needs one controller to accept new nodes. Once accepted
|
||||
# the controller can be offline and routing still works.
|
||||
clan.core.networking.zerotier.controller.enable = true;
|
||||
# Clan services to use. See https://docs.clan.lol/reference/clanServices
|
||||
inventory.instances = {
|
||||
admin = {
|
||||
roles.default.tags.all = { };
|
||||
roles.default.settings.allowedKeys = {
|
||||
# Insert the public key of all your admin machines
|
||||
# All these 'admin machines' will have ssh access to "tags.all" (all machines)
|
||||
# Alternatively set 'users.users.root.openssh.authorizedKeys.keys' in each machine
|
||||
"admin-machine-1" = "__YOUR_PUBLIC_KEY__";
|
||||
};
|
||||
# "sara" will be the hostname of the machine
|
||||
sara =
|
||||
};
|
||||
|
||||
zerotier = {
|
||||
# Replace with the name of your machine that you will use as zerotier-controller
|
||||
# See: https://docs.zerotier.com/controller/
|
||||
# Deploy this machine first to create the network secrets
|
||||
roles.controller.machines."__YOUR_CONTROLLER__" = { };
|
||||
# Peers of the network
|
||||
# tags.all means 'all machines' will joined
|
||||
roles.peer.tags.all = { };
|
||||
};
|
||||
};
|
||||
|
||||
# A mapping of machine names to their nixos configuration.
|
||||
# Allows specifying additional nixos configuration.
|
||||
machines = {
|
||||
somemachine =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
imports = [
|
||||
./modules/shared.nix
|
||||
./modules/disko.nix
|
||||
./machines/sara/configuration.nix
|
||||
];
|
||||
|
||||
nixpkgs.hostPlatform = "x86_64-linux";
|
||||
|
||||
# Set this for clan commands use ssh i.e. `clan machines update`
|
||||
# If you change the hostname, you need to update this line to root@<new-hostname>
|
||||
# This only works however if you have avahi running on your admin machine else use IP
|
||||
clan.core.networking.targetHost = pkgs.lib.mkDefault "root@sara";
|
||||
|
||||
# You can get your disk id by running the following command on the installer:
|
||||
# Replace <IP> with the IP of the installer printed on the screen or by running the `ip addr` command.
|
||||
# ssh root@<IP> lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
|
||||
disko.devices.disk.main = {
|
||||
device = "/dev/disk/by-id/__CHANGE_ME__";
|
||||
};
|
||||
|
||||
# IMPORTANT! Add your SSH key here
|
||||
# e.g. > cat ~/.ssh/id_ed25519.pub
|
||||
users.users.root.openssh.authorizedKeys.keys = throw ''
|
||||
Don't forget to add your SSH key here!
|
||||
users.users.root.openssh.authorizedKeys.keys = [ "<YOUR SSH_KEY>" ]
|
||||
'';
|
||||
|
||||
/*
|
||||
After jon is deployed, uncomment the following line
|
||||
This will allow sara to share the VPN overlay network with jon
|
||||
The networkId is generated by the first deployment of jon
|
||||
*/
|
||||
# clan.core.networking.zerotier.networkId = builtins.readFile ../../vars/per-machine/jon/zerotier/zerotier-network-id/value;
|
||||
environment.systemPackages = with pkgs; [ asciinema ];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
@@ -1,38 +0,0 @@
{ config, ... }:
let
  username = config.networking.hostName;
in
{
  imports = [ ./hardware-configuration.nix ];

  # Local service discovery and mDNS
  services.avahi.enable = true;

  services.xserver.enable = true;
  services.xserver.desktopManager.gnome.enable = true;
  services.xserver.displayManager.gdm.enable = true;
  # Disable the default gnome apps to speed up deployment
  services.gnome.core-utilities.enable = false;

  # Enable automatic login for the user.
  services.displayManager.autoLogin = {
    enable = true;
    user = username;
  };

  users.users.${username} = {
    initialPassword = username;
    isNormalUser = true;
    extraGroups = [
      "wheel"
      "networkmanager"
      "video"
      "audio"
      "input"
      "dialout"
      "disk"
    ];
    uid = 1000;
    openssh.authorizedKeys.keys = config.users.users.root.openssh.authorizedKeys.keys;
  };
}
@@ -1,39 +0,0 @@
{ config, ... }:

let
  username = config.networking.hostName;
in
{
  imports = [ ./hardware-configuration.nix ];

  # Local service discovery and mDNS
  services.avahi.enable = true;

  services.xserver.enable = true;
  services.xserver.desktopManager.gnome.enable = true;
  services.xserver.displayManager.gdm.enable = true;
  # Disable the default gnome apps to speed up deployment
  services.gnome.core-utilities.enable = false;

  # Enable automatic login for the user.
  services.displayManager.autoLogin = {
    enable = true;
    user = username;
  };

  users.users.${username} = {
    initialPassword = username;
    isNormalUser = true;
    extraGroups = [
      "wheel"
      "networkmanager"
      "video"
      "audio"
      "input"
      "dialout"
      "disk"
    ];
    uid = 1000;
    openssh.authorizedKeys.keys = config.users.users.root.openssh.authorizedKeys.keys;
  };
}
@@ -1,51 +0,0 @@
{ lib, clan-core, config, ... }:

let
  suffix = config.clan.core.vars.generators.disk-id.files.diskId.value;
in
{
  imports = [
    clan-core.clanModules.disk-id
  ];

  boot.loader.grub.efiSupport = lib.mkDefault true;
  boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
  disko.devices = {
    disk = {
      "main" = {
        # suffix is to prevent disk name collisions
        name = "main-" + suffix;
        type = "disk";
        # Set the following in flake.nix for each machine:
        # device = <uuid>;
        content = {
          type = "gpt";
          partitions = {
            "boot" = {
              size = "1M";
              type = "EF02"; # for grub MBR
              priority = 1;
            };
            "ESP" = {
              size = "512M";
              type = "EF00";
              content = {
                type = "filesystem";
                format = "vfat";
                mountpoint = "/boot";
              };
            };
            "root" = {
              size = "100%";
              content = {
                type = "filesystem";
                format = "ext4";
                mountpoint = "/";
              };
            };
          };
        };
      };
    };
  };
}
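The comment "Set the following in flake.nix for each machine" could be satisfied with a per-machine override along these lines; this is only a sketch, and the machine name and disk id below are placeholders:

```nix
# Hypothetical per-machine override in flake.nix (machine name and id are made up).
clan = clan-core.lib.clan {
  inherit self;
  machines.jon = {
    disko.devices.disk.main.device = "/dev/disk/by-id/ata-Example_Disk_SER1AL";
  };
};
```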
@@ -1,14 +0,0 @@
{
  clan-core,
  # Optional, if you want to access other flakes:
  # self,
  ...
}:
{
  imports = [
    clan-core.clanModules.sshd
    clan-core.clanModules.root-password
    # You can access other flakes imported in your flake via `self` like this:
    # self.inputs.nix-index-database.nixosModules.nix-index
  ];
}
@@ -1,22 +0,0 @@
{ self, ... }:
{
  perSystem =
    {
      self',
      lib,
      system,
      ...
    }:
    {
      checks =
        let
          nixosMachines = lib.mapAttrs' (
            name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel
          ) ((lib.filterAttrs (_: config: config.pkgs.system == system)) self.nixosConfigurations);

          packages = lib.mapAttrs' (n: lib.nameValuePair "package-${n}") self'.packages;
          devShells = lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells;
        in
        nixosMachines // packages // devShells;
    };
}
@@ -1,13 +0,0 @@
{ self, inputs, ... }:
{
  imports = [
    inputs.clan.flakeModules.default
  ];
  clan = {
    meta.name = "__CHANGE_ME__";
    inherit self;
    specialArgs = {
      inherit inputs;
    };
  };
}
@@ -1,17 +0,0 @@
_: {
  perSystem =
    {
      pkgs,
      inputs',
      ...
    }:
    {
      devShells = {
        default = pkgs.mkShellNoCC {
          packages = [
            inputs'.clan.packages.default
          ];
        };
      };
    };
}
@@ -1,31 +0,0 @@
{

  inputs = {
    clan.url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
    nixpkgs.follows = "clan/nixpkgs";

    flake-parts.url = "github:hercules-ci/flake-parts";
    flake-parts.inputs.nixpkgs-lib.follows = "clan/nixpkgs";
  };

  outputs =
    inputs@{ flake-parts, ... }:
    flake-parts.lib.mkFlake { inherit inputs; } (
      { ... }:
      {
        systems = [
          "x86_64-linux"
          "aarch64-linux"
          "x86_64-darwin"
          "aarch64-darwin"
        ];

        imports = [
          ./checks.nix
          ./clan.nix
          ./devshells.nix
          ./formatter.nix
        ];
      }
    );
}
@@ -1,10 +0,0 @@
_: {
  perSystem =
    {
      pkgs,
      ...
    }:
    {
      formatter = pkgs.nixfmt;
    };
}
2 templates/clan/minimal/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
result*
.direnv/
@@ -1,12 +1,22 @@
{
  inputs.clan-core.url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
  inputs.nixpkgs.follows = "clan-core/nixpkgs";
  inputs = {
    clan-core.url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
    nixpkgs.follows = "clan-core/nixpkgs";
  };

  outputs =
    { self, clan-core, ... }:
    {
      self,
      clan-core,
      nixpkgs,
      ...
    }:
    let
      # Usage see: https://docs.clan.lol
      clan = clan-core.lib.clan { inherit self; };
      clan = clan-core.lib.clan {
        inherit self;
        meta.name = nixpkgs.lib.mkDefault "new-clan";
      };
    in
    {
      # all machines managed by Clan
@@ -1,5 +0,0 @@
{
  "meta": { "name": "__CHANGE_ME__" },
  "machines": {},
  "services": {}
}
@@ -1,2 +0,0 @@
# DO NOT DELETE
# This file is used by the clan cli to discover a clan flake
@@ -1,46 +0,0 @@
{
  inputs.clan-core.url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
  inputs.nixpkgs.follows = "clan-core/nixpkgs";

  outputs =
    { self, clan-core, ... }:
    let
      # Usage see: https://docs.clan.lol
      clan = clan-core.lib.clan {
        inherit self;
        # Ensure this is unique among all clans you want to use.
        meta.name = "__CHANGE_ME__";

        # All machines in ./machines will be imported.

        # Prerequisite: boot into the installer.
        # See: https://docs.clan.lol/guides/getting-started/installer
        # local> mkdir -p ./machines/machine1
        # local> Edit ./machines/<machine>/configuration.nix to your liking.
        machines = {
          # You can also specify additional machines here.
          # somemachine = {
          #   imports = [ ./some-machine/configuration.nix ];
          # }
        };
      };
    in
    {
      inherit (clan.config) nixosConfigurations nixosModules clanInternals;
      # Add the Clan cli tool to the dev shell.
      # Use "nix develop" to enter the dev shell.
      devShells =
        clan-core.inputs.nixpkgs.lib.genAttrs
          [
            "x86_64-linux"
            "aarch64-linux"
            "aarch64-darwin"
            "x86_64-darwin"
          ]
          (system: {
            default = clan-core.inputs.nixpkgs.legacyPackages.${system}.mkShell {
              packages = [ clan-core.packages.${system}.clan-cli ];
            };
          });
    };
}
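Expanding the commented `somemachine` example above, a concrete machine entry might be declared like this; the name `machine1` is just the placeholder used in the comments:

```nix
machines = {
  machine1 = {
    imports = [ ./machines/machine1/configuration.nix ];
  };
};
```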
@@ -1,35 +0,0 @@
{
  imports = [
    # contains your disk format and partitioning configuration.
    ../../modules/disko.nix
    # this file is shared among all machines
    ../../modules/shared.nix
    # enables GNOME desktop (optional)
    ../../modules/gnome.nix
  ];

  # This is your user login name.
  users.users.user.name = "<your-username>";

  # Set this so clan commands can use SSH, i.e. `clan machines update`.
  # If you change the hostname, you need to update this line to root@<new-hostname>.
  # However, this only works if you have avahi running on your admin machine; otherwise use the IP.
  clan.core.networking.targetHost = "root@<IP>";

  # You can get your disk id by running the following command on the installer:
  # Replace <IP> with the IP of the installer printed on the screen or by running the `ip addr` command.
  # ssh root@<IP> lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
  disko.devices.disk.main.device = "/dev/disk/by-id/__CHANGE_ME__";

  # IMPORTANT! Add your SSH key here
  # e.g. > cat ~/.ssh/id_ed25519.pub
  users.users.root.openssh.authorizedKeys.keys = [
    ''
      __YOUR_SSH_KEY__
    ''
  ];

  # Zerotier needs one controller to accept new nodes. Once accepted
  # the controller can be offline and routing still works.
  clan.core.networking.zerotier.controller.enable = true;
}
@@ -1,34 +0,0 @@
{
  imports = [
    ../../modules/disko.nix
    ../../modules/shared.nix
    # enables GNOME desktop (optional)
    ../../modules/gnome.nix
  ];
  # Put your username here for login
  users.users.user.name = "<your-username>";

  # Set this so clan commands can use SSH, i.e. `clan machines update`.
  # If you change the hostname, you need to update this line to root@<new-hostname>.
  # However, this only works if you have avahi running on your admin machine; otherwise use the IP.
  clan.core.networking.targetHost = "root@<IP>";

  # You can get your disk id by running the following command on the installer:
  # Replace <IP> with the IP of the installer printed on the screen or by running the `ip addr` command.
  # ssh root@<IP> lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
  disko.devices.disk.main.device = "/dev/disk/by-id/__CHANGE_ME__";

  # IMPORTANT! Add your SSH key here
  # e.g. > cat ~/.ssh/id_ed25519.pub
  users.users.root.openssh.authorizedKeys.keys = [
    ''
      __YOUR_SSH_KEY__
    ''
  ];
  /*
    After jon is deployed, uncomment the following line.
    This will allow sara to share the VPN overlay network with jon.
    The networkId is generated by the first deployment of jon.
  */
  # clan.core.networking.zerotier.networkId = builtins.readFile ../../vars/per-machine/jon/zerotier/zerotier-network-id/value;
}
@@ -1,61 +0,0 @@
{
  lib,
  clan-core,
  config,
  ...
}:

let
  suffix = config.clan.core.vars.generators.disk-id.files.diskId.value;
in
{
  imports = [
    clan-core.clanModules.disk-id
  ];

  # DO NOT EDIT THIS FILE AFTER INSTALLATION of a machine.
  # Otherwise your system might not boot because of missing partitions/filesystems.
  boot.loader.grub.efiSupport = lib.mkDefault true;
  boot.loader.grub.efiInstallAsRemovable = lib.mkDefault true;
  disko.devices = {
    disk = {
      "main" = {
        # suffix is to prevent disk name collisions
        name = "main-" + suffix;
        type = "disk";
        # Set the following in flake.nix for each machine:
        # device = <uuid>;
        content = {
          type = "gpt";
          partitions = {
            "boot" = {
              size = "1M";
              type = "EF02"; # for grub MBR
              priority = 1;
            };
            "ESP" = {
              size = "512M";
              type = "EF00";
              content = {
                type = "filesystem";
                format = "vfat";
                mountpoint = "/boot";
                mountOptions = [ "nofail" ];
              };
            };
            "root" = {
              size = "100%";
              content = {
                type = "filesystem";
                format = "ext4";
                # format = "btrfs";
                # format = "bcachefs";
                mountpoint = "/";
              };
            };
          };
        };
      };
    };
  };
}
@@ -1,5 +0,0 @@
{
  services.xserver.enable = true;
  services.xserver.desktopManager.gnome.enable = true;
  services.xserver.displayManager.gdm.enable = true;
}
@@ -1,37 +0,0 @@
{
  config,
  clan-core,
  # Optional, if you want to access other flakes:
  # self,
  ...
}:
{
  imports = [
    # Enables the OpenSSH server for remote access
    clan-core.clanModules.sshd
    # Set a root password
    clan-core.clanModules.root-password
    clan-core.clanModules.user-password

    # You can access other flakes imported in your flake via `self` like this:
    # self.inputs.nix-index-database.nixosModules.nix-index
  ];

  # Local service discovery and mDNS
  services.avahi.enable = true;

  # Generate a random password for the user below;
  # it can be read with the `clan secrets get <machine-name>-user-password` command.
  clan.user-password.user = "user";
  users.users.user = {
    isNormalUser = true;
    extraGroups = [
      "wheel"
      "networkmanager"
      "video"
      "input"
    ];
    uid = 1000;
    openssh.authorizedKeys.keys = config.users.users.root.openssh.authorizedKeys.keys;
  };
}
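To make the comment about the generated user password concrete: assuming a machine named `jon` (the name is only an example), it could be read with:

```
clan secrets get jon-user-password
```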
@@ -26,9 +26,11 @@
        EOF
      '';
      evaled = (import "${initialized}/flake.nix").outputs {
        flake-parts = inputs.flake-parts;
        self = evaled // {
          outPath = initialized;
        };
        nixpkgs = self.inputs.nixpkgs;
        clan-core = self;
      };
    in
@@ -24,20 +24,18 @@

    clan = {
      default = {
        description = "Initialize a new clan flake";
        path = ./clan/new-clan;
      };
      minimal = {
        description = "for clans managed via (G)UI";
        path = ./clan/minimal;
        description = "Initialize a new clan";
        path = ./clan/default;
      };

      flake-parts = {
        description = "Flake-parts";
        description = "Initialize a new clan (flake-parts)";
        path = ./clan/flake-parts;
      };
      minimal-flake-parts = {
        description = "Minimal flake-parts clan template";
        path = ./clan/minimal-flake-parts;

      minimal = {
        description = "Minimal Clan";
        path = ./clan/minimal;
      };
    };
  };