Compare commits

..

300 Commits

Author SHA1 Message Date
DavHau
3d4b7902e6 clana: init 2024-03-08 14:40:55 +07:00
clan-bot
e6b494a849 Merge pull request 'clan_vm_manager: Add CUMTIME to profiler output' (#904) from Qubasa-main into main 2024-03-04 09:02:49 +00:00
Qubasa
cde72f3710 clan_vm_manager: Add CUMTIME to profiler output 2024-03-04 16:00:04 +07:00
clan-bot
5047b6686d Merge pull request 'clan_vm_manager: Cache profiler obj for multi call functions' (#903) from Qubasa-main into main 2024-03-04 08:55:47 +00:00
Qubasa
b77ffac4d4 clan_vm_manager: Cache profiler obj for multi call functions 2024-03-04 15:52:57 +07:00
clan-bot
b2d3ff4431 Merge pull request 'clan_vm_manager: Extracted VMObject to own component' (#901) from Qubasa-main into main 2024-03-04 07:14:16 +00:00
Qubasa
f70879aa63 clan_vm_manager: Add profiler component 2024-03-04 14:01:30 +07:00
Qubasa
31190ed8e5 clan_vm_manager: Extracted VMObject to own component 2024-03-04 12:38:20 +07:00
clan-bot
36dbb8fafd Merge pull request 'clan_vm_manager: Renamed Views to ViewStack' (#899) from Qubasa-main into main 2024-03-03 09:54:26 +00:00
Qubasa
47ae5981f6 clan_vm_manager: Renamed Views to ViewStack 2024-03-03 16:51:34 +07:00
clan-bot
11c3b6f353 Merge pull request 'clan_vm_manager: Renamed VMs singleton to ClanStore. And VM to VMObject' (#898) from Qubasa-main into main 2024-03-03 09:50:32 +00:00
Qubasa
191562a84e clan_vm_manager: Renamed VMs singleton to ClanStore. And VM to VMObject 2024-03-03 16:47:38 +07:00
clan-bot
06a54c21c3 Merge pull request 'clan_vm_manager: New directory structure' (#897) from Qubasa-main into main 2024-03-03 09:41:16 +00:00
Qubasa
359ad22c90 clan_vm_manager: New directory structure 2024-03-03 16:38:25 +07:00
clan-bot
754e0ca9e8 Merge pull request 'clan_vm_manager: Added suffix task to glib task functions' (#894) from Qubasa-main into main 2024-03-03 09:18:42 +00:00
Qubasa
8290660f20 clan_vm_manager: Improved readability of GKVStore 2024-03-03 16:15:50 +07:00
Qubasa
78a50c5d74 clan_vm_manager: Added suffix task to glib task functions 2024-03-03 16:01:08 +07:00
clan-bot
496555b405 Merge pull request 'clan-vm-manager: Fix incorrect use of all Glib.idle_add uses' (#893) from Qubasa-main into main 2024-03-03 08:55:43 +00:00
Qubasa
216e5a53d4 clan_vm_manager: Remove superfluous argument to build_vm 2024-03-03 15:52:56 +07:00
clan-bot
a1af14db57 Merge pull request 'clan-vm-manager: Fix incorrect use of all Glib.idle_add uses' (#892) from Qubasa-main into main 2024-03-03 08:49:07 +00:00
Qubasa
976b4a2c3a clan_vm_manager: Fix incorrect signal behaviour in GKVStore setitem 2024-03-03 15:47:00 +07:00
Qubasa
c6a2db15a7 clan_vm_manager: Fix dynamic join 2024-03-03 15:47:00 +07:00
Qubasa
6f80cee971 clan_cli: history_add now returns newly added HistoryEntry. clan-vm-manager: Join now uses signals instead of callbacks. 2024-03-03 15:47:00 +07:00
Qubasa
f17cf41093 clan-vm-manager: Fix incorrect use of all Glib.idle_add uses 2024-03-03 15:47:00 +07:00
Qubasa
483e2c05ea clan_vm_manager: Fix incorrect signal behaviour in GKVStore setitem 2024-03-03 15:44:16 +07:00
clan-bot
da34bd7199 Merge pull request 'clan_cli secrets generate: fix type in error msg' (#890) from interactive_secrets into main 2024-03-03 07:43:48 +00:00
lassulus
3478dea8b2 clan_cli secrets generate: fix type in error msg 2024-03-03 08:40:22 +01:00
Qubasa
ce3fc6973b clan_vm_manager: Fix dynamic join 2024-03-03 13:50:49 +07:00
clan-bot
c228d72da2 Merge pull request 'interactive_secrets' (#885) from interactive_secrets into main 2024-03-03 06:15:44 +00:00
Qubasa
127009b303 clan_cli: history_add now returns newly added HistoryEntry. clan-vm-manager: Join now uses signals instead of callbacks. 2024-03-03 12:47:18 +07:00
lassulus
ed653fa8b9 fix pyproject syntax, ignore E731 2024-03-03 06:20:08 +01:00
lassulus
b8da149453 clan-cli sops: fix super class interface compliance 2024-03-03 06:20:08 +01:00
lassulus
a23c251b09 clan-cli secrets: actually check if only service needs regeneration 2024-03-03 06:20:08 +01:00
Qubasa
bf214011cf clan-vm-manager: Fix incorrect use of all Glib.idle_add uses 2024-03-03 11:21:12 +07:00
lassulus
a1dcddf9b4 clan-cli: add interactive secrets/fact generation 2024-03-03 04:06:18 +01:00
lassulus
f500aee786 clanCore secrets: rename toplevel secret to service 2024-03-02 11:43:20 +01:00
lassulus
4cfd580447 outputs: pass secretsData directly 2024-03-02 11:43:20 +01:00
lassulus
b1a4b4de96 clan-cli vms run: remove unused vm arg 2024-03-02 11:20:05 +01:00
lassulus
108a37b0a3 clan-cli machines: cache machines_func via store 2024-03-02 11:20:05 +01:00
clan-bot
8c7db195ab Merge pull request 'devshells: cleanup' (#888) from DavHau-dave into main 2024-03-02 04:58:32 +00:00
DavHau
f7bb5d7aaf devshells: cleanup 2024-03-02 11:55:15 +07:00
clan-bot
8e9053cf80 Merge pull request 'rename lol.clan to org.clan' (#887) from Mic92-main into main 2024-03-01 11:55:24 +00:00
Jörg Thalheim
9ec66195eb rename lol.clan to org.clan 2024-03-01 12:52:05 +01:00
clan-bot
93475ab4b3 Merge pull request 'devShells: one global python devshell + activation via command' (#886) from DavHau-dave into main 2024-03-01 11:22:09 +00:00
DavHau
d1e8b1ed96 devShells: one global python devshell + activation via command
- this adds devShells.{system}.python
- a 'select-shell' command to switch between devshells
2024-03-01 18:16:38 +07:00
clan-bot
3acc4b4d25 Merge pull request 'clan_vm_manager: Add GKVStore to combat O(n2) runtimes. Add pygdb to devshell' (#884) from Qubasa-main into main 2024-03-01 03:49:53 +00:00
Qubasa
7932517b4a clan_vm_manager: Fix gdb package incompatible with aarch darwin 2024-03-01 10:46:35 +07:00
Qubasa
5f1191148e clan_vm_manager: Fix GLib.idle_add rexecuting the VM push multiple times because of missing GLib.SOURCE_REMOVE 2024-03-01 01:58:03 +07:00
Qubasa
d079bc85a8 clan_vm_manager: Working GKVStore that emulates the ListStore Object 2024-03-01 01:26:45 +07:00
Qubasa
df6683a0bd clan_vm_manager: Add GKVStore to combat O(n2) runtimes. Add pygdb to devshell 2024-02-29 22:46:09 +07:00
clan-bot
4b3b573e8c Merge pull request 'writers: fix bug by typo' (#883) from DavHau-dave into main 2024-02-27 15:25:28 +00:00
DavHau
e930e14238 writers: fix bug by typo 2024-02-27 22:21:42 +07:00
clan-bot
2ccf32c36b Merge pull request 'merge-after-ci: rewrite according to #814' (#882) from DavHau-dave into main 2024-02-27 11:35:26 +00:00
DavHau
398a61acbc merge-after-ci: rewrite according to #814 2024-02-27 18:32:11 +07:00
DavHau
fdedf40e27 formatting: exclude script-writers.nix
Was copied from nixpkgs -> Keeping the diff low in order to upstream easier
2024-02-27 18:20:33 +07:00
DavHau
45fd64a930 script-writers: add wrapping support (makeWrapperArgs) 2024-02-27 18:20:33 +07:00
DavHau
31722d9dc0 script-writers: init (copied from nixpkgs) 2024-02-27 18:20:33 +07:00
clan-bot
d804c6059d Merge pull request 'clan-vm-manager: Moved switch from list view to VM object.' (#881) from Qubasa-main into main 2024-02-26 20:56:55 +00:00
Qubasa
4d1437b5cc clan-vm-manager: Moved switch from list view to VM object. 2024-02-27 03:53:19 +07:00
clan-bot
58bc8d162d Merge pull request 'Automatic flake update - 2024-02-26T00:00+00:00' (#880) from flake-update-2024-02-26 into main 2024-02-26 09:22:27 +00:00
DavHau
d12019d290 fix formatting 2024-02-26 16:19:30 +07:00
Clan Merge Bot
1918cfd707 update flake lock - 2024-02-26T00:00+00:00
Flake lock file updates:

• Updated input 'disko':
    'github:nix-community/disko/d07de570ba05cec2807d058daaa044f6955720c7' (2024-02-10)
  → 'github:nix-community/disko/23d308f0059955e3719efc81a34d1fc0369fbb74' (2024-02-22)
• Updated input 'nixos-generators':
    'github:nix-community/nixos-generators/843e2f04c716092797ffa4ce14c446adce2f09ef' (2024-02-08)
  → 'github:nix-community/nixos-generators/f4631dee1a0fd56c0db89860e83e3588a28c7631' (2024-02-22)
• Updated input 'nixos-generators/nixlib':
    'github:nix-community/nixpkgs.lib/f5af57d3ef9947a70ac86e42695231ac1ad00c25' (2023-09-03)
  → 'github:nix-community/nixpkgs.lib/e623008d8a46517470e6365505f1a3ce171fa46a' (2024-02-18)
• Updated input 'nixpkgs':
    'github:NixOS/nixpkgs/bdc57436da855500d44e9c1ce7450c0772e1cfa1' (2024-02-11)
  → 'github:NixOS/nixpkgs/2a34566b67bef34c551f204063faeecc444ae9da' (2024-02-25)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/2eb7c4ba3aa75e2660fd217eb1ab64d5b793608e' (2024-02-11)
  → 'github:Mic92/sops-nix/2874fbbe4a65bd2484b0ad757d27a16107f6bc17' (2024-02-25)
• Updated input 'treefmt-nix':
    'github:numtide/treefmt-nix/ac599dab59a66304eb511af07b3883114f061b9d' (2024-02-07)
  → 'github:numtide/treefmt-nix/e497a9ddecff769c2a7cbab51e1ed7a8501e7a3a' (2024-02-25)
2024-02-26 00:00:15 +00:00
clan-bot
067da45082 Merge pull request 'clan-vm-manager: Fix ui state desync on build error. Add build progress bar' (#879) from Qubasa-main into main 2024-02-25 19:03:44 +00:00
Qubasa
0a8b8713d9 clan-vm-manager: Fix ui state desync on build error. Add build progress bar 2024-02-26 01:59:45 +07:00
clan-bot
4993b98258 Merge pull request 'clan_vm_manager: Fix qmp shutdown command, bad socket error on retried shutdown' (#878) from Qubasa-main into main 2024-02-25 18:21:43 +00:00
Qubasa
183c1f4235 clan_vm_manager: Fix qmp shutdown command, bad socket error on retried shutdown 2024-02-26 01:18:13 +07:00
clan-bot
ea7b0c8b90 Merge pull request 'clan_vm_manager: Improve VM start and stop switch. Switch will be disabled while stopping vm' (#877) from Qubasa-main into main 2024-02-25 18:11:38 +00:00
Qubasa
27b9c8915b clan_vm_manager: Improve VM start and stop switch. Switch will be disabled while stopping vm 2024-02-26 01:04:09 +07:00
clan-bot
36771f3ecd Merge pull request 'Also commit files when adding machines/users or removing secrets' (#876) from Mic92-main into main 2024-02-22 15:15:21 +00:00
Jörg Thalheim
52fcc91479 Also commit files when adding machines/users or removing secrets 2024-02-22 16:12:11 +01:00
Jörg Thalheim
65d2a4e081 secrets: commit when renaming secrets 2024-02-22 15:59:12 +01:00
Mic92
9dc362437c Merge pull request 'borgbackup: drop comment from string' (#875) from Mic92-main into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/875
2024-02-22 14:45:07 +00:00
Jörg Thalheim
6eb8fe47c4 borgbackup: drop comment from string
Looks weird in the logs.
2024-02-22 14:45:07 +00:00
Jörg Thalheim
7208d63e78 borgbackup: drop comment from string
Looks weird in the logs.
2024-02-22 15:42:19 +01:00
clan-bot
01f1a6900a Merge pull request 'print backup archive ids instead of bare objects' (#874) from Mic92-main into main 2024-02-22 14:29:47 +00:00
Jörg Thalheim
12ce8238f1 print backup archive ids instead of bare objects 2024-02-22 15:26:20 +01:00
clan-bot
c5071bc212 Merge pull request 'encrypt backups by default' (#873) from Mic92-main into main 2024-02-22 14:06:07 +00:00
Jörg Thalheim
81fc60eef8 encrypt backups by default 2024-02-22 15:03:22 +01:00
clan-bot
bb25e136c3 Merge pull request 'secret cli: accept a pattern in secrets list' (#872) from Mic92-main into main 2024-02-22 13:25:07 +00:00
Jörg Thalheim
a1e2a4f64a secret cli: accept a pattern in secrets list 2024-02-22 14:21:53 +01:00
clan-bot
943c19939a Merge pull request 'borgbackup: use persistent timers' (#871) from Mic92-main into main 2024-02-22 12:50:25 +00:00
Jörg Thalheim
17d7eec0ae borgbackup: use persistent timers 2024-02-22 13:47:09 +01:00
clan-bot
7b4e76df29 Merge pull request 'add lychee link checker' (#870) from Mic92-main into main 2024-02-21 16:21:48 +00:00
Jörg Thalheim
1cb33a5c6c add lychee link checker 2024-02-21 17:18:28 +01:00
clan-bot
cd11f6ad10 Merge pull request 'Updating contribution documentation' (#869) from Mic92-main into main 2024-02-21 16:05:50 +00:00
Jörg Thalheim
67ceba6637 Updating contribution documentation 2024-02-21 17:02:13 +01:00
Jörg Thalheim
1330c60190 drop obsolete documentation 2024-02-21 17:02:13 +01:00
clan-bot
e8d4cd9936 Merge pull request 'drop obsolete documentation' (#868) from Mic92-main into main 2024-02-21 15:54:47 +00:00
Jörg Thalheim
537a1ae87f drop obsolete documentation 2024-02-21 16:51:27 +01:00
clan-bot
0aa876a06c Merge pull request 'clan-cli: remove unused flag' (#867) from Mic92-fix-cross-system into main 2024-02-21 10:19:26 +00:00
Jörg Thalheim
457e45d989 clan-cli: remove unused flag 2024-02-21 10:55:53 +01:00
Jörg Thalheim
1356ca9b8c fix cross-system deploy
This allows to be nixpkgs.pkgs and deploy systems of a different arch.
2024-02-21 10:55:53 +01:00
clan-bot
df8074100d Merge pull request 'README: fix links harder' (#866) from fix_links into main 2024-02-21 09:25:12 +00:00
clan-bot
d441f1d60c Merge pull request 'clan-vm-manager: Fix double instantiation of Singleton. clan_cli: Shorten filepath of logging messages' (#865) from Qubasa-main into main 2024-02-21 09:23:21 +00:00
lassulus
a0097dab66 README: fix links harder 2024-02-21 10:22:09 +01:00
Qubasa
6c17fa648f clan_cli: Add exception handling in logger 2024-02-21 16:20:01 +07:00
Qubasa
51b087f7ae clan-vm-manager: Fix double instantiation of Singleton. clan_cli: Shorten filepath of logging messages 2024-02-21 16:16:58 +07:00
clan-bot
c340831edd Merge pull request 'README: fix links' (#864) from fix_links into main 2024-02-21 09:15:49 +00:00
lassulus
c3dc315576 README: fix links 2024-02-21 10:12:41 +01:00
clan-bot
ff3a1dc928 Merge pull request 'secret_store: drop update_check and generate_hash as abstract methods' (#863) from Mic92-target_host into main 2024-02-20 18:01:51 +00:00
Jörg Thalheim
3695a5adf2 disable vgpu on non-nixos systems 2024-02-20 18:58:38 +01:00
Jörg Thalheim
4d404cfc50 secret_store: drop update_check and generate_hash as abstract methods
Only password implements those just now
2024-02-20 18:58:38 +01:00
clan-bot
7091b09fa7 Merge pull request 'secrets: add git support when updating secrets' (#862) from Mic92-target_host into main 2024-02-20 11:45:13 +00:00
Jörg Thalheim
77c84e7471 secrets: add git support when updating secrets 2024-02-20 12:41:52 +01:00
clan-bot
413e172cbd Merge pull request 'abstract_fixes' (#861) from abstract_fixes into main 2024-02-20 10:51:42 +00:00
lassulus
3b975ed993 clan-cli SecretStore: remove generate_hash from base class 2024-02-20 11:48:13 +01:00
lassulus
36baec8d48 clan-cli SecretStore: implement update_check in base class 2024-02-20 11:47:53 +01:00
clan-bot
eb8d5167e7 Merge pull request 'sops: unbreak edit flags' (#860) from Mic92-target_host into main 2024-02-20 10:18:17 +00:00
Jörg Thalheim
b358089488 sops: unbreak edit flags 2024-02-20 11:07:00 +01:00
clan-bot
36b20f18d4 Merge pull request 'add option to set defaultGroups for secrets' (#858) from Mic92-target_host into main 2024-02-16 16:29:28 +00:00
Jörg Thalheim
52c6ad548d improve error message if group does not exists 2024-02-16 17:26:20 +01:00
Jörg Thalheim
57e9b27ff8 add option to set defaultGroups for secrets 2024-02-16 17:26:20 +01:00
clan-bot
661004972b Merge pull request 'make secrets stores inherit from an interface' (#857) from Mic92-target_host into main 2024-02-16 14:00:20 +00:00
Jörg Thalheim
714f3b0378 upload_secrets: call update_check directly without introspection 2024-02-16 14:57:01 +01:00
Jörg Thalheim
87f301122e split of generate_secrets method into smaller functions 2024-02-16 14:48:46 +01:00
Jörg Thalheim
53d658a3c0 make facts stores inherit from an interface 2024-02-16 14:47:39 +01:00
Jörg Thalheim
9257f140ba make secrets stores inherit from an interface 2024-02-16 14:47:28 +01:00
clan-bot
b68e39e8fa Merge pull request 'demo script' (#856) from Qubasa-HEAD into main 2024-02-16 10:55:02 +00:00
Qubasa
c566872f05 Working demo script 2024-02-16 17:51:55 +07:00
Qubasa
446039b02b Working demo script 2024-02-16 17:47:34 +07:00
Qubasa
5a69bbe93e demo script 2024-02-16 17:47:05 +07:00
clan-bot
a715364338 Merge pull request 'clan-vm-manager: Added clan icon to trayicon' (#855) from Qubasa-main into main 2024-02-16 09:14:08 +00:00
Qubasa
280bee0861 clan-vm-manager: Fixing vm starting. 2024-02-16 16:10:49 +07:00
clan-bot
7bf1c0e42a Merge pull request 'waypipe: disable gpu for now' (#854) from Mic92-target_host into main 2024-02-16 08:55:35 +00:00
Jörg Thalheim
81545766a0 update comments about virtio-gpu 2024-02-16 09:52:13 +01:00
Jörg Thalheim
4e0ae54471 waypipe: disable gpu for now 2024-02-16 09:50:53 +01:00
Qubasa
4f7f34f9b4 clan-vm-manager: Added clan icon to trayicon 2024-02-16 12:25:06 +07:00
clan-bot
7fe38a9a80 Merge pull request 'add waypipe user to video group' (#853) from Mic92-target_host into main 2024-02-15 18:41:04 +00:00
Jörg Thalheim
95820905f9 waypipe: add fixed uid for user 2024-02-15 19:33:01 +01:00
Mic92
be77d365e7 Merge pull request 'add waypipe user to video group' (#852) from Mic92-target_host into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/852
2024-02-15 18:31:39 +00:00
Jörg Thalheim
897acfaf6b add waypipe user to video group 2024-02-15 18:31:39 +00:00
Jörg Thalheim
30cb4c0eed add waypipe user to video group 2024-02-15 19:30:51 +01:00
clan-bot
50c8c2070b Merge pull request 'vms: move qemu_command to its own module' (#851) from Mic92-target_host into main 2024-02-15 16:30:01 +00:00
Jörg Thalheim
0200016dd2 vms: move qemu_command to its own module 2024-02-15 17:25:24 +01:00
clan-bot
658758302a Merge pull request 'vms: smaller cleanups' (#850) from Mic92-target_host into main 2024-02-15 16:23:24 +00:00
Jörg Thalheim
8e780b2a8c vms: drop unused xchdir 2024-02-15 17:19:43 +01:00
Jörg Thalheim
a399cbb8d9 vms: move virtiofsd/waypipe into their own modules 2024-02-15 17:19:31 +01:00
clan-bot
eacda36bb3 Merge pull request 'fix fact store' (#849) from Mic92-target_host into main 2024-02-15 11:27:59 +00:00
Jörg Thalheim
4943c33307 add file prefix for nix 2.19 or newer 2024-02-15 12:24:18 +01:00
clan-bot
2e900d943d Merge pull request 'waypipe: add more environment variables' (#848) from a-kenji-wayland-module-env-add into main 2024-02-15 10:09:12 +00:00
a-kenji
d7d33f6c25 waypipe: add more environment variables 2024-02-15 11:03:27 +01:00
clan-bot
58073375e4 Merge pull request 'add factsStore modules' (#839) from fact_store into main 2024-02-15 09:46:01 +00:00
lassulus
6871b29d15 vms: use vm fact/secret-store 2024-02-15 10:41:25 +01:00
lassulus
98139ac48d add factsStore modules 2024-02-15 10:41:25 +01:00
clan-bot
f9f428b960 Merge pull request 'waypipe: add wayland environment' (#847) from a-kenji-wayland-module-env into main 2024-02-15 09:01:49 +00:00
a-kenji
99bda8e099 waypipe: add wayland environment 2024-02-15 09:58:56 +01:00
clan-bot
06721b0c5a Merge pull request 'optimize filesystem mount flags' (#846) from Mic92-target_host into main 2024-02-14 12:06:47 +00:00
Jörg Thalheim
7cab50c088 optimize filesystem mount flags
perf!
2024-02-14 13:01:47 +01:00
clan-bot
1cc6e74297 Merge pull request 'clan_vm_manager: --debug enables debug mode in clan_cli too' (#840) from Qubasa-add_spinner into main 2024-02-14 08:43:14 +00:00
Qubasa
30850eef22 clan_cli: Added longer timeout for test 2024-02-14 15:40:03 +07:00
clan-bot
de69d3139b Merge pull request 'waypipe: rename systemd unit' (#845) from a-kenji-waypipe/rename-system-service into main 2024-02-13 15:45:29 +00:00
a-kenji
5ef2257ed1 waypipe: rename systemd unit 2024-02-13 16:42:29 +01:00
clan-bot
436e4e3882 Merge pull request 'waypipe: improve default module' (#844) from a-kenji-improve/module/waypipe into main 2024-02-13 15:40:11 +00:00
a-kenji
3ba4954c8d waypipe: improve default module 2024-02-13 16:37:14 +01:00
clan-bot
82e5e57e20 Merge pull request 'Fix demo script' (#843) from a-kenji-demo/improve/script into main 2024-02-13 15:18:23 +00:00
a-kenji
91c36a33da Fix demo script
The `--wayland` cli option is now a module option.
2024-02-13 16:15:36 +01:00
clan-bot
2f567db623 Merge pull request 'waypipe: improve default module' (#842) from a-kenji-waypipe/improve into main 2024-02-13 15:08:03 +00:00
a-kenji
e46315cab8 waypipe: improve default module 2024-02-13 16:02:46 +01:00
clan-bot
2c90664456 Merge pull request 'vms: enable sysusers' (#841) from Mic92-target_host into main 2024-02-13 13:20:02 +00:00
Jörg Thalheim
7a3fcd3deb vms: enable sysusers 2024-02-13 14:14:54 +01:00
clan-bot
2744d5724b Merge pull request 'switch to sops-nix experimental branch' (#832) from Mic92-target_host into main 2024-02-13 13:01:01 +00:00
Jörg Thalheim
952f976ea7 fix virtiofsd in CIs 2024-02-13 13:57:52 +01:00
Jörg Thalheim
b15c21f800 drop check for /var/lib/nixos 2024-02-13 12:44:22 +01:00
Jörg Thalheim
7cfce69504 demo.sh: make relative path configureable 2024-02-13 11:45:42 +01:00
Jörg Thalheim
8f98f0e8b7 also use qcow2 for volatile state 2024-02-13 11:45:42 +01:00
Jörg Thalheim
3bf94ab0fc use virtio-console instead of serial for vm 2024-02-13 11:45:42 +01:00
Jörg Thalheim
11ac50c17b format rootfs in vm itself 2024-02-13 11:45:42 +01:00
Jörg Thalheim
07caab537f drop unused mounts 2024-02-13 11:45:42 +01:00
Jörg Thalheim
a56dc3bf8c simplify vmstate directory 2024-02-13 11:45:42 +01:00
Jörg Thalheim
7f8ba25a5b qemu: disable sea-bios and option rom 2024-02-13 11:45:42 +01:00
Jörg Thalheim
ef202a8150 switch to sops-nix experimental branch 2024-02-13 11:45:23 +01:00
Jörg Thalheim
d6b3e03d70 vms: integrate virtiofsd 2024-02-13 11:44:17 +01:00
Qubasa
03b9183e04 clan_cli: Added lazy qmp 2024-02-13 16:44:09 +07:00
Qubasa
92ec3fb9f9 test_vms_cli: Trying new way of testing 2024-02-13 16:44:09 +07:00
Qubasa
87dbc99cab clan_cli: Made qmp implementation lazy 2024-02-13 16:44:09 +07:00
Qubasa
ef6d7cee1a clan_vm_manager: started spinner. not fully working yet 2024-02-13 16:44:09 +07:00
Qubasa
4d1bde083a UI: Improve README 2024-02-13 16:44:09 +07:00
Qubasa
403b874522 clan_vm_manager: --debug enables debug mode in clan_cli too 2024-02-13 16:44:09 +07:00
clan-bot
0dadae9087 Merge pull request 'update flake lock' (#838) from Qubasa-main into main 2024-02-12 12:03:30 +00:00
lassulus
b39c860379 fix borgbackup check
we need to switch to the classical test environment again, because borg
was complaining otherwise
2024-02-12 12:59:32 +01:00
Qubasa
7d301b7e3c update flake lock 2024-02-12 14:52:01 +07:00
clan-bot
33787a6aab Merge pull request 'UI: Added tray icon' (#831) from Qubasa-main into main 2024-02-12 07:19:59 +00:00
Qubasa
0ce8bcd018 clan_vm_manager: Added VM shutdown timeout 2024-02-12 14:16:44 +07:00
Qubasa
7b48535a98 UI: Added tray icon 2024-02-12 13:43:54 +07:00
clan-bot
f166da1621 Merge pull request 'allow passing of extra_config into machines' (#834) from lassulus-extra_config into main 2024-02-11 07:43:52 +00:00
lassulus
eebd9d0b4a allow passing of extra_config into machines 2024-02-11 08:40:41 +01:00
lassulus
10cbe11e53 nixosModules clanCore: fix iso format 2024-02-10 13:27:51 +01:00
clan-bot
2530ba52ac Merge pull request 'waypipe: add more default settings' (#836) from a-kenji-waypipe/add into main 2024-02-10 12:24:55 +00:00
a-kenji
798bbe188c waypipe: add more default settings 2024-02-10 13:22:16 +01:00
clan-bot
237d7aee4a Merge pull request 'clanModules: add waypipe service' (#835) from a-kenji-init/waypipe into main 2024-02-10 12:03:49 +00:00
a-kenji
105209cfb9 clanModules: add waypipe service 2024-02-10 12:32:06 +01:00
clan-bot
cc8d6b281b Merge pull request 'vms: init graceful shutdown for GUI' (#833) from DavHau-dave into main 2024-02-09 12:58:52 +00:00
DavHau
02dd132e08 vms: init graceful shutdown for GUI
- add python modules for qemu protocols: QMP (hardware interactions) and QGA (guest service interaction)
- refactor state directory: remove name from path (already contains url)
- add impure vm test for basic qmp interaction
- simplify existing vm persistance test (factor out shared code)
- integrate graceful shutdown into GUI

the GUI integration still needs to be improved later:
- add fallback in case system doesn't react to powerdown button
- shutdown GUI switch fails if VM hasn't been started yet, and then remains in a wrong position
2024-02-09 19:55:18 +07:00
clan-bot
6af8423f1e Merge pull request 'UI: Fixed incorrect display of cLAN icon in window switcher' (#830) from Qubasa-main into main 2024-02-08 08:08:28 +00:00
Qubasa
8a9d3d3230 UI: Fixed incorrect display of cLAN icon in window switcher 2024-02-08 15:00:36 +07:00
clan-bot
13457eca0a Merge pull request 'Clan VM Manager: add dropdown to add more machines' (#827) from hsjobeki-main into main 2024-02-08 07:40:06 +00:00
Johannes Kirschbauer
0221e7176b Clan VM Manager: add dropdown to add more machines 2024-02-08 14:36:47 +07:00
clan-bot
7326862c1a Merge pull request 'UI: Improved Join card display' (#829) from Qubasa-main into main 2024-02-08 07:31:30 +00:00
Qubasa
0ee4dcd782 UI: Improved Join card display 2024-02-08 14:28:34 +07:00
clan-bot
e0ed00ef5c Merge pull request 'UI: Fixed style.css not working when installed' (#828) from Qubasa-main into main 2024-02-08 07:13:06 +00:00
Qubasa
a2ce341995 UI: Fixed style.css not working when installed 2024-02-08 14:10:17 +07:00
clan-bot
6ddb8dfe9d Merge pull request 'UI: Added joining multiple clans one after another over clan url' (#826) from Qubasa-main into main 2024-02-07 10:19:01 +00:00
Qubasa
10578e7611 UI: Added joining multiple clans one after another over clan url 2024-02-07 17:16:20 +07:00
clan-bot
96b98dcbed Merge pull request 'Clan VM Manager: detect if clan exists' (#825) from hsjobeki-main into main 2024-02-07 09:20:33 +00:00
Johannes Kirschbauer
030cbd24ce Clan VM Manager: detect if clan exists 2024-02-07 16:08:48 +07:00
clan-bot
045c5e608b Merge pull request 'Clan VM Manager: init per vm settings handler' (#824) from hsjobeki-main into main 2024-02-07 08:43:58 +00:00
Johannes Kirschbauer
d20902cef4 Clan VM Manager: init per vm settings handler 2024-02-07 15:41:18 +07:00
clan-bot
a1a433b654 Merge pull request 'clan_manager: UI is now a singelton.' (#822) from Qubasa-main into main 2024-02-07 05:06:11 +00:00
Qubasa
869c01bf95 clan_manager: UI is now a singelton. 2024-02-07 12:03:12 +07:00
clan-bot
68ac0cd3ec Merge pull request 'clan-cli: add simple flash command' (#821) from lassulus-flaash into main 2024-02-07 04:31:52 +00:00
lassulus
67d264263c nixosModules zerotier: remove unneeded default 2024-02-07 05:26:01 +01:00
lassulus
b780754621 clan-cli: add simple flash command 2024-02-07 05:26:01 +01:00
clan-bot
cd45bb3174 Merge pull request 'add requireExplicitUpdate option for mobile devices' (#820) from Mic92-target_host into main 2024-02-06 16:59:03 +00:00
Jörg Thalheim
6fe6229498 add requireExplicitUpdate option for mobile devices 2024-02-06 17:55:34 +01:00
clan-bot
7c598e6278 Merge pull request 'document build host option' (#819) from Mic92-target_host into main 2024-02-06 16:30:04 +00:00
Jörg Thalheim
531a899817 document build host option 2024-02-06 17:27:06 +01:00
clan-bot
e912b125c3 Merge pull request 'remove unused ssh.run method' (#818) from Mic92-target_host into main 2024-02-06 16:24:47 +00:00
Jörg Thalheim
614d1aecfd set nixpkgs.pkgs for secrets generation
This allows us to use the same nixpkgs instance for all machines.
2024-02-06 17:21:42 +01:00
Jörg Thalheim
be3a75bbd7 add support for build machines 2024-02-06 17:21:42 +01:00
Jörg Thalheim
2315dba2a9 rename machine.host to machine.target_host 2024-02-06 17:21:42 +01:00
Jörg Thalheim
6e57122da8 rename target_host to target_host_address 2024-02-06 17:21:42 +01:00
Jörg Thalheim
301a6b6a23 machines/update: get flake_attr from machine class 2024-02-06 17:21:42 +01:00
Jörg Thalheim
a2f0d077c8 remove unused ssh.run method 2024-02-06 15:47:32 +01:00
clan-bot
8234f127e5 Merge pull request 'machines: don't ignore errors when parsing secretsData json' (#817) from Mic92-target_host into main 2024-02-06 14:18:38 +00:00
Jörg Thalheim
c66c25aeb7 machines: don't ignore errors when parsing secretsData json 2024-02-06 15:15:21 +01:00
clan-bot
534ebb6094 Merge pull request 'skip machines without target_host when running clan machines update' (#816) from Mic92-target_host into main 2024-02-06 14:07:31 +00:00
Jörg Thalheim
91f26a4743 skip machines without target_host when running clan machines update 2024-02-06 15:04:19 +01:00
clan-bot
71d14eb178 Merge pull request 'move checks if targetHost/buildHost is set to cli' (#815) from Mic92-target_host into main 2024-02-06 13:54:50 +00:00
Jörg Thalheim
ad1a87fc14 move checks if targetHost/buildHost is set to cli 2024-02-06 14:51:44 +01:00
clan-bot
35bb076729 Merge pull request 'clan_manager: Implemented machine_icon, machine_description' (#813) from Qubasa-main into main 2024-02-06 13:16:31 +00:00
Qubasa
ab05cfde30 clan_manager: Implemented machine_icon, machine_description 2024-02-06 20:13:18 +07:00
clan-bot
4d18ce2366 Merge pull request 'cli,nix: Add machine_icon, machine_description to vm' (#812) from Qubasa-main into main 2024-02-06 12:29:32 +00:00
Qubasa
21443d0647 cli,nix: Add machine_icon, machine_description 2024-02-06 19:25:34 +07:00
clan-bot
868aba47b5 Merge pull request 'clanCore: fix deploymentAddress -> targetHost alias' (#811) from Mic92-target_host into main 2024-02-06 09:57:56 +00:00
Jörg Thalheim
923696c21c clanCore: fix deploymentAddress -> targetHost alias 2024-02-06 10:55:07 +01:00
clan-bot
99c432fcb8 Merge pull request 'Automatic flake update - 2024-02-05T00:00+00:00' (#804) from flake-update-2024-02-05 into main 2024-02-06 04:20:53 +00:00
Clan Merge Bot
3b5465d24d update flake lock - 2024-02-05T00:00+00:00
Flake lock file updates:

• Updated input 'flake-parts':
    'github:hercules-ci/flake-parts/07f6395285469419cf9d078f59b5b49993198c00' (2024-01-11)
  → 'github:hercules-ci/flake-parts/b253292d9c0a5ead9bc98c4e9a26c6312e27d69f' (2024-02-01)
• Updated input 'nixpkgs':
    'github:NixOS/nixpkgs/50071d87c75300c037e28439c5176c3933b9fce5' (2024-01-28)
  → 'github:NixOS/nixpkgs/5d75993fa5feaa333f3eadd83e0a08fc34432acc' (2024-02-04)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/73bf36912e31a6b21af6e0f39218e067283c67ef' (2024-01-28)
  → 'github:Mic92/sops-nix/23f61b897c00b66855074db471ba016e0cda20dd' (2024-02-04)
2024-02-06 04:17:06 +00:00
clan-bot
6a62018f1d Merge pull request 'build-clan: Removed unecessary arg, machineDescription and machineIcon' (#810) from Qubasa-main into main 2024-02-05 09:58:58 +00:00
Qubasa
4421797f30 build-clan: Removed unecessary arg, machineDescription and machineIcon 2024-02-05 16:56:13 +07:00
clan-bot
bfd525b042 Merge pull request 'clan-cli: add autocommits for facts/secrets' (#809) from lassulus-autocommits into main 2024-02-05 09:08:29 +00:00
lassulus
815527ec2b clan-cli secrets: commit facts/secrets after generating them 2024-02-05 10:03:10 +01:00
lassulus
e265537f69 clan-cli secrets: remove debug output 2024-02-05 10:03:10 +01:00
lassulus
8114cebaa4 clan-cli git: add commit_files function 2024-02-05 10:03:10 +01:00
clan-bot
0e076e52c8 Merge pull request 'vm_manager: Fixed uri join and then vm start not working' (#808) from Qubasa-main into main 2024-02-05 09:01:21 +00:00
Qubasa
fd44eeb384 vm_manager: Fixed uri join and then vm start not working 2024-02-05 15:57:14 +07:00
clan-bot
08c1e13dce Merge pull request 'async join' (#807) from hsjobeki-main into main 2024-02-05 08:07:51 +00:00
Johannes Kirschbauer
c52c83002c async join 2024-02-05 15:05:14 +07:00
clan-bot
1a8a6acfb1 Merge pull request 'Added machineIcon and machineDescription to buildClan' (#806) from Qubasa-main into main 2024-02-05 07:21:18 +00:00
Qubasa
b3815527a5 Added machineIcon and machineDescription to buildClan 2024-02-05 14:18:40 +07:00
clan-bot
fc50d8748a Merge pull request 'Starting to implement logs' (#799) from Qubasa-main into main 2024-02-05 06:40:26 +00:00
Qubasa
38cadd0ab2 Added --debug flag clan command in nix tests 2024-02-05 13:37:35 +07:00
Qubasa
33a10f76c0 UI: Fixed multiple connects to signal 2024-02-05 13:37:35 +07:00
clan-bot
2c00ccaea6 Merge pull request 'nixosModules zerotier: fix type of dns' (#805) from lassulus-zerotier-dns into main 2024-02-05 01:35:04 +00:00
lassulus
a2eb6f219d nixosModules zerotier: fix type of dns 2024-02-05 02:31:14 +01:00
clan-bot
ae256b666e Merge pull request 'clanCore zerotier: set default values in config for merging' (#803) from lassulus-zerotier-settings2 into main 2024-02-03 08:29:06 +00:00
lassulus
b39fda8d85 clanCore zerotier: set default values in config for merging 2024-02-03 09:26:04 +01:00
clan-bot
eaf2ac3c5c Merge pull request 'clan-cli update: upload only local paths from localhost' (#802) from lassulus-fast_flake_archive into main 2024-02-03 06:56:37 +00:00
lassulus
31188648f0 clan-cli update: remove legacy argument 2024-02-03 07:53:15 +01:00
lassulus
6a62065cdf clan-cli update: upload only local paths from localhost 2024-02-03 07:53:15 +01:00
clan-bot
20257b88ed Merge pull request 'clanCore zerotier: add settings option' (#801) from lassulus-zerotier-settings into main 2024-02-03 03:51:53 +00:00
lassulus
a52f1e3594 clanCore zerotier: add settings option 2024-02-03 04:48:54 +01:00
clan-bot
3bff29b9fe Merge pull request 'clan-cli: secrets check command' (#800) from lassulus-check_secrets into main 2024-02-02 16:43:25 +00:00
lassulus
315cdea6ce clan-cli machines: remove debug prints 2024-02-02 17:40:19 +01:00
lassulus
605b03bb91 clan-cli password-store: remove debug print 2024-02-02 17:40:19 +01:00
lassulus
bcdde990ff clan-cli secrets: add check command 2024-02-02 17:40:19 +01:00
clan-bot
841581bfc4 Merge pull request 'rename deployment address to target address' (#798) from Mic92-target_host into main 2024-02-02 09:42:29 +00:00
Jörg Thalheim
3538cf2e46 rename deployment address to target address
This prepares for having a build server for deployment
2024-02-02 16:39:29 +07:00
clan-bot
7daca31db7 Merge pull request 'Starting to implement logs' (#796) from Qubasa-main into main 2024-02-02 05:07:39 +00:00
Qubasa
16562946fe vm-manager: Added log console printing on vm start. Added python logging module 2024-02-02 12:04:30 +07:00
clan-bot
789f3132c5 Merge pull request 'multi join via cli' (#795) from hsjobeki-main into main 2024-02-02 04:01:49 +00:00
Johannes Kirschbauer
e57169cb29 multi join via cli 2024-02-02 10:58:28 +07:00
clan-bot
90cf41c365 Merge pull request 'halalify zerotierone' (#794) from lassulus-halalify into main 2024-02-01 14:46:53 +00:00
lassulus
b4c6092cc0 halalify zerotierone 2024-02-01 15:44:13 +01:00
clan-bot
79a8c40f40 Merge pull request 'zerotier generate: kill process group' (#793) from lassulus-zerotier-kill-pg into main 2024-02-01 09:14:17 +00:00
lassulus
86b248d457 zerotier generate: retry if port allocation fails 2024-02-01 10:11:30 +01:00
clan-bot
b43a29dadc Merge pull request 'zerotier generate: kill process group' (#792) from lassulus-zerotier-kill-pg into main 2024-02-01 09:06:22 +00:00
lassulus
93874705fe zerotier generate: kill process group 2024-02-01 10:01:28 +01:00
clan-bot
59feea9e8a Merge pull request 'qemu: init python modules for qmp and qga' (#790) from DavHau-dave into main 2024-02-01 05:40:06 +00:00
DavHau
56b6907740 qemu: init python modules for qmp and qga 2024-02-01 12:32:21 +07:00
Qubasa
14917b7d56 Starting to implement logs 2024-02-01 10:21:58 +07:00
clan-bot
cc21108c59 Merge pull request 'vms: rename wayland attrs to waypipe' (#789) from a-kenji-rename-wayland-to-waypipe into main 2024-02-01 03:17:12 +00:00
a-kenji
533012af7d vms: rename wayland attrs to waypipe
And remove the options from the cli interface.
2024-02-01 10:14:36 +07:00
clan-bot
cdeb409c53 Merge pull request 'vms: wayland attr specified in configuration' (#787) from a-kenji-allow/wayland-in-config into main 2024-02-01 02:06:52 +00:00
a-kenji
f89c9b00dd vms: wayland attr specified in configuration 2024-02-01 09:00:43 +07:00
clan-bot
110e790246 Merge pull request 'syncthing: remember auto accepted folders, if introduced' (#786) from a-kenji-syncthing-default-accept into main 2024-01-31 15:39:50 +00:00
a-kenji
c81e9857da syncthing: remember auto accepted folders, if introduced
Makes it more compatible with restoring state
2024-01-31 22:24:46 +07:00
clan-bot
b5edd7ca08 Merge pull request 'group clans by url' (#783) from hsjobeki-main into main 2024-01-31 04:23:15 +00:00
Johannes Kirschbauer
c1bc1c942a group clans by url 2024-01-31 11:20:35 +07:00
clan-bot
6107b01a3f Merge pull request 'vm-state: fix and improve testing' (#782) from DavHau-dave into main 2024-01-31 04:07:41 +00:00
DavHau
59fa63eba9 Reapply "vm-state: fix and improve testing"
This reverts commit 99092f6e76.

vm-state: revert sysusers, improve testing

zerotier: enable persistence

vm-state: cleanup tests
2024-01-31 11:02:16 +07:00
clan-bot
c69f68feee Merge pull request 'syncthing: make inotify tuning overrideable' (#781) from a-kenji-syncthing-inotify into main 2024-01-31 03:47:06 +00:00
a-kenji
dd460e9f4f syncthing: make inotify tuning overrideable 2024-01-31 10:44:31 +07:00
clan-bot
b99f569973 Merge pull request 'some minor secrets fixups' (#780) from lassulus-secrets-fixes into main 2024-01-30 11:13:35 +00:00
lassulus
961eb26335 secrets modules: pass secrets as bytes 2024-01-30 12:11:05 +01:00
lassulus
0dbfe52d62 secrets: add sandbox user 2024-01-30 12:11:05 +01:00
clan-bot
a0ebf882c5 Merge pull request 'Machine __str__ impl' (#779) from Qubasa-heads/origin/Qubasa-fix into main 2024-01-30 08:38:51 +00:00
Qubasa
649e345585 Machine __str__ impl 2024-01-30 15:32:35 +07:00
clan-bot
1f108f8913 Merge pull request 'Added demo.sh to prepare demo environment' (#777) from Qubasa-origin/Qubasa-fix into main 2024-01-30 08:04:49 +00:00
Qubasa
a3207f7011 UI: Fixed toggle button color on second time not changing 2024-01-30 15:02:05 +07:00
clan-bot
c9b2deb326 Merge pull request 'Demo version' (#776) from Qubasa-main into main 2024-01-30 07:05:55 +00:00
137 changed files with 6357 additions and 1942 deletions

1
.env Normal file
View File

@@ -0,0 +1 @@
export OPENAI_API_KEY=$(rbw get openai-api-key)

1
.env.template Normal file
View File

@@ -0,0 +1 @@
export OPENAI_API_KEY=$(rbw get openai-api-key)

12
.envrc
View File

@@ -2,4 +2,14 @@ if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4="
fi
use flake
watch_file .direnv/selected-shell
if [ -e .env ]; then
source .env
fi
if [ -e .direnv/selected-shell ]; then
use flake .#$(cat .direnv/selected-shell)
else
use flake
fi

View File

@@ -1,12 +0,0 @@
name: checks-impure
on:
pull_request:
push:
branches: main
jobs:
test:
if: ${{ github.actor != 'ui-asset-bot' }}
runs-on: nix
steps:
- uses: actions/checkout@v3
- run: nix run .#impure-checks

View File

@@ -2,11 +2,21 @@ name: checks
on:
pull_request:
push:
branches: main
branches:
- main
jobs:
test:
if: ${{ github.actor != 'ui-asset-bot' }}
checks:
runs-on: nix
steps:
- uses: actions/checkout@v3
- run: nix run --refresh github:Mic92/nix-fast-build -- --no-nom --eval-workers 20
check-links:
runs-on: nix
steps:
- uses: actions/checkout@v3
- run: nix run --refresh --inputs-from .# nixpkgs#lychee .
checks-impure:
runs-on: nix
steps:
- uses: actions/checkout@v3
- run: nix run .#impure-checks

View File

@@ -6,18 +6,23 @@ Welcome to the cLAN Core Repository, the heart of the [clan.lol](https://clan.lo
If you're new to cLAN and eager to dive in, start with our quickstart guide:
- **Quickstart Guide**: Check out [quickstart.md](docs/quickstart.md) to get up and running with cLAN in no time.
- **Quickstart Guide**: Check out [quickstart.md](docs/admins/quickstart.md) to get up and running with cLAN in no time.
## Managing Secrets
Security is paramount, and cLAN provides guidelines for handling secrets effectively:
- **Secrets Management**: Learn how to manage secrets securely by reading [secrets-management.md](docs/secrets-management.md).
- **Secrets Management**: Learn how to manage secrets securely by reading [secrets-management.md](docs/admins/secrets-management.md).
## Contributing to cLAN
We welcome contributions from the community, and we've prepared a comprehensive guide to help you get started:
- **Contribution Guidelines**: Find out how to contribute and make a meaningful impact on the cLAN project by reading [contributing.md](docs/contributing.md).
- **Contribution Guidelines**: Find out how to contribute and make a meaningful impact on the cLAN project by reading [contributing.md](docs/contributing/contributing.md).
Whether you're a newcomer or a seasoned developer, we look forward to your contributions and collaboration on the cLAN project. Let's build amazing things together!
### development environment
Set up `direnv` and `nix-direnv` and execute `direnv allow`.
To switch between different dev environments execute `select-shell`.

View File

@@ -5,6 +5,7 @@ let
directory = ../..;
machines = {
test_backup_client = {
clan.networking.targetHost = "client";
imports = [ self.nixosModules.test_backup_client ];
fileSystems."/".device = "/dev/null";
boot.loader.grub.device = "/dev/null";
@@ -14,7 +15,7 @@ let
in
{
flake.nixosConfigurations = { inherit (clan.nixosConfigurations) test_backup_client; };
flake.clanInternals.machines = clan.clanInternals.machines;
flake.clanInternals = clan.clanInternals;
flake.nixosModules = {
test_backup_server = { ... }: {
imports = [
@@ -45,6 +46,25 @@ in
users.users.root.openssh.authorizedKeys.keyFiles = [
../lib/ssh/pubkey
];
systemd.tmpfiles.settings."vmsecrets" = {
"/etc/secrets/borgbackup.ssh" = {
C.argument = "${../lib/ssh/privkey}";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/borgbackup.repokey" = {
C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
z = {
mode = "0400";
user = "root";
};
};
};
clanCore.secretStore = "vm";
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
environment.etc."install-closure".source = "${closureInfo}/store-paths";
nix.settings = {
@@ -57,10 +77,7 @@ in
clanCore.state.test-backups.folders = [ "/var/test-backups" ];
clan.borgbackup = {
enable = true;
destinations.test_backup_server = {
repo = "borg@server:.";
rsh = "ssh -i /root/.ssh/id_ed25519 -o StrictHostKeyChecking=no";
};
destinations.test_backup_server.repo = "borg@server:.";
};
};
};
@@ -109,16 +126,16 @@ in
client.succeed("echo testing > /var/test-backups/somefile")
# create
client.succeed("clan --flake ${../..} backups create test_backup_client")
client.succeed("clan --debug --flake ${../..} backups create test_backup_client")
client.wait_until_succeeds("! systemctl is-active borgbackup-job-test_backup_server")
# list
backup_id = json.loads(client.succeed("borg-job-test_backup_server list --json"))["archives"][0]["archive"]
assert(backup_id in client.succeed("clan --flake ${../..} backups list test_backup_client"))
assert(backup_id in client.succeed("clan --debug --flake ${../..} backups list test_backup_client"))
# restore
client.succeed("rm -f /var/test-backups/somefile")
client.succeed(f"clan --flake ${../..} backups restore test_backup_client borgbackup {backup_id}")
client.succeed(f"clan --debug --flake ${../..} backups restore test_backup_client borgbackup {backup_id}")
assert(client.succeed("cat /var/test-backups/somefile").strip() == "testing")
'';
}

View File

@@ -1,7 +1,7 @@
(import ../lib/container-test.nix) ({ ... }: {
(import ../lib/test-base.nix) ({ ... }: {
name = "borgbackup";
nodes.machine = { self, ... }: {
nodes.machine = { self, pkgs, ... }: {
imports = [
self.clanModules.borgbackup
self.nixosModules.clanCore
@@ -18,12 +18,27 @@
clanCore.clanDir = ./.;
clanCore.state.testState.folders = [ "/etc/state" ];
environment.etc.state.text = "hello world";
systemd.tmpfiles.settings."vmsecrets" = {
"/etc/secrets/borgbackup.ssh" = {
C.argument = "${../lib/ssh/privkey}";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/borgbackup.repokey" = {
C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
z = {
mode = "0400";
user = "root";
};
};
};
clanCore.secretStore = "vm";
clan.borgbackup = {
enable = true;
destinations.test = {
repo = "borg@localhost:.";
rsh = "ssh -i ${../lib/ssh/privkey} -o StrictHostKeyChecking=no";
};
destinations.test.repo = "borg@localhost:.";
};
}
];

View File

@@ -5,6 +5,7 @@ let
directory = ../..;
machines = {
test_install_machine = {
clan.networking.targetHost = "test_install_machine";
imports = [ self.nixosModules.test_install_machine ];
};
};
@@ -12,7 +13,7 @@ let
in
{
flake.nixosConfigurations = { inherit (clan.nixosConfigurations) test_install_machine; };
flake.clanInternals.machines = clan.clanInternals.machines;
flake.clanInternals = clan.clanInternals;
flake.nixosModules = {
test_install_machine = { lib, modulesPath, ... }: {
imports = [
@@ -106,7 +107,7 @@ in
client.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../lib/ssh/privkey} /root/.ssh/id_ed25519")
client.wait_until_succeeds("ssh -o StrictHostKeyChecking=accept-new -v root@target hostname")
client.succeed("clan --flake ${../..} machines install test_install_machine root@target >&2")
client.succeed("clan --debug --flake ${../..} machines install test_install_machine root@target >&2")
try:
target.shutdown()
except BrokenPipeError:

View File

@@ -19,8 +19,8 @@ test_driver = ["py.typed"]
target-version = "py311"
line-length = 88
select = [ "E", "F", "I", "U", "N", "RUF", "ANN", "A" ]
ignore = ["E501", "ANN101", "ANN401", "A003"]
lint.select = [ "E", "F", "I", "U", "N", "RUF", "ANN", "A" ]
lint.ignore = ["E501", "ANN101", "ANN401", "A003"]
[tool.mypy]
python_version = "3.11"

View File

@@ -34,18 +34,21 @@ in
services.borgbackup.jobs = lib.mapAttrs
(_: dest: {
paths = lib.flatten (map (state: state.folders) (lib.attrValues config.clanCore.state));
exclude = [
"*.pyc"
];
exclude = [ "*.pyc" ];
repo = dest.repo;
environment.BORG_RSH = dest.rsh;
encryption.mode = "none";
compression = "auto,zstd";
startAt = "*-*-* 01:00:00";
persistentTimer = true;
preHook = ''
set -x
'';
encryption = {
mode = "repokey";
passCommand = "cat ${config.clanCore.secrets.borgbackup.secrets."borgbackup.repokey".path}";
};
prune.keep = {
within = "1d"; # Keep all archives from the last day
daily = 7;
@@ -58,20 +61,21 @@ in
clanCore.secrets.borgbackup = {
facts."borgbackup.ssh.pub" = { };
secrets."borgbackup.ssh" = { };
generator.path = [ pkgs.openssh pkgs.coreutils ];
secrets."borgbackup.repokey" = { };
generator.path = [ pkgs.openssh pkgs.coreutils pkgs.xkcdpass ];
generator.script = ''
ssh-keygen -t ed25519 -N "" -f "$secrets"/borgbackup.ssh
mv "$secrets"/borgbackup.ssh.pub "$facts"/borgbackup.ssh.pub
xkcdpass -n 4 -d - > "$secrets"/borgbackup.repokey
'';
};
clanCore.backups.providers.borgbackup = {
# TODO list needs to run locally or on the remote machine
list = ''
${lib.concatMapStringsSep "\n" (dest: ''
# we need yes here to skip the changed url verification
yes y | borg-job-${dest.name} list --json | jq -r '. + {"job-name": "${dest.name}"}'
'') (lib.attrValues cfg.destinations)}
# we need yes here to skip the changed url verification
${lib.concatMapStringsSep "\n" (dest: ''yes y | borg-job-${dest.name} list --json | jq -r '. + {"job-name": "${dest.name}"}' '')
(lib.attrValues cfg.destinations)}
'';
create = ''
${lib.concatMapStringsSep "\n" (dest: ''

View File

@@ -14,5 +14,6 @@
xfce = ./xfce.nix;
zt-tcp-relay = ./zt-tcp-relay.nix;
localsend = ./localsend.nix;
waypipe = ./waypipe.nix;
};
}

View File

@@ -63,15 +63,18 @@
];
# Activates inotify compatibility on syncthing
boot.kernel.sysctl."fs.inotify.max_user_watches" = 524288;
boot.kernel.sysctl."fs.inotify.max_user_watches" = lib.mkDefault 524288;
services.syncthing = {
enable = true;
configDir = "/var/lib/syncthing";
overrideFolders = true;
overrideDevices = true;
overrideFolders = lib.mkDefault (
if (config.clan.syncthing.introducer == null) then true else false
);
overrideDevices = lib.mkDefault (
if (config.clan.syncthing.introducer == null) then true else false
);
dataDir = lib.mkDefault "/home/user/";
@@ -79,10 +82,10 @@
key =
lib.mkDefault
config.clanCore.secrets.syncthing.secrets."syncthing.key".path or null;
config.clan.secrets.syncthing.secrets."syncthing.key".path or null;
cert =
lib.mkDefault
config.clanCore.secrets.syncthing.secrets."syncthing.cert".path or null;
config.clan.secrets.syncthing.secrets."syncthing.cert".path or null;
settings = {
options = {

74
clanModules/waypipe.nix Normal file
View File

@@ -0,0 +1,74 @@
{ pkgs
, lib
, config
, ...
}:
{
options.clan.services.waypipe = {
enable = lib.mkEnableOption "waypipe";
user = lib.mkOption {
type = lib.types.str;
default = "user";
description = "User the program is run under";
};
flags = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [
"--vsock"
"-s"
"3049"
"server"
];
description = "Flags that will be passed to waypipe";
};
command = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ (lib.getExe pkgs.foot) ];
description = "Commands that waypipe should run";
};
};
config = lib.mkIf config.clan.services.waypipe.enable {
# Waypipe needs pipewire
services.pipewire = {
enable = lib.mkDefault true;
alsa.enable = lib.mkDefault true;
alsa.support32Bit = lib.mkDefault true;
pulse.enable = lib.mkDefault true;
};
# General default settings
fonts.enableDefaultPackages = lib.mkDefault true;
hardware.opengl.enable = lib.mkDefault true;
# Assume it is run inside a clan context
clan.virtualisation.waypipe = lib.mkDefault true;
# User account
services.getty.autologinUser = lib.mkDefault config.clan.services.waypipe.user;
security.sudo.wheelNeedsPassword = false;
users.users.user = lib.mkIf (config.clan.services.waypipe.user == "user") {
isNormalUser = true;
uid = 1000;
password = "";
extraGroups = [ "wheel" "video" ];
shell = "/run/current-system/sw/bin/bash";
};
systemd.user.services.waypipe = {
serviceConfig.PassEnvironment = "DISPLAY";
serviceConfig.Environment = ''
XDG_SESSION_TYPE=wayland \
NIXOS_OZONE_WL=1 \
GDK_BACKEND=wayland \
QT_QPA_PLATFORM=wayland \
CLUTTER_BACKEND = "wayland" \
SDL_VIDEODRIVER=wayland
'';
script = ''
${lib.getExe config.clanCore.clanPkgs.waypipe} \
${lib.escapeShellArgs config.clan.services.waypipe.flags} \
${lib.escapeShellArgs config.clan.services.waypipe.command}
'';
wantedBy = [ "default.target" ];
};
};
}

106
devShell-python.nix Normal file
View File

@@ -0,0 +1,106 @@
{
perSystem =
{ pkgs
, self'
, lib
, ...
}:
let
python3 = pkgs.python3;
pypkgs = python3.pkgs;
clan-cli = self'.packages.clan-cli;
clan-vm-manager = self'.packages.clan-vm-manager;
pythonWithDeps = python3.withPackages (
ps:
clan-cli.propagatedBuildInputs
++ clan-cli.devDependencies
++ [
ps.pip
# clan-vm-manager deps
ps.pygobject3
]
);
linuxOnlyPackages = lib.optionals pkgs.stdenv.isLinux [
pkgs.xdg-utils
];
in
{
devShells.python = pkgs.mkShell {
inputsFrom = [ self'.devShells.default ];
packages =
[
pythonWithDeps
pypkgs.mypy
pypkgs.ipdb
pkgs.desktop-file-utils
pkgs.gtk4.dev
pkgs.ruff
pkgs.libadwaita.devdoc # has the demo called 'adwaita-1-demo'
]
++ linuxOnlyPackages
++ clan-vm-manager.nativeBuildInputs
++ clan-vm-manager.buildInputs
++ clan-cli.nativeBuildInputs;
PYTHONBREAKPOINT = "ipdb.set_trace";
shellHook = ''
ln -sfT ${clan-cli.nixpkgs} ./pkgs/clan-cli/clan_cli/nixpkgs
## PYTHON
tmp_path=$(realpath ./.direnv)
repo_root=$(realpath .)
mkdir -p "$tmp_path/python/${pythonWithDeps.sitePackages}"
# local dependencies
localPackages=(
$repo_root/pkgs/clan-cli
$repo_root/pkgs/clan-vm-manager
)
# Install the package in editable mode
# This allows executing `clan` from within the dev-shell using the current
# version of the code and its dependencies.
# TODO: this is slow. get rid of pip or add better caching
echo "==== Installing local python packages in editable mode ===="
for package in "''${localPackages[@]}"; do
${pythonWithDeps}/bin/pip install \
--quiet \
--disable-pip-version-check \
--no-index \
--no-build-isolation \
--prefix "$tmp_path/python" \
--editable "$package"
done
export PATH="$tmp_path/python/bin:$PATH"
export PYTHONPATH="''${PYTHONPATH:+$PYTHONPATH:}$tmp_path/python/${pythonWithDeps.sitePackages}"
for package in "''${localPackages[@]}"; do
export PYTHONPATH="$package:$PYTHONPATH"
done
if ! command -v xdg-mime &> /dev/null; then
echo "Warning: 'xdg-mime' is not available. The desktop file cannot be installed."
fi
# install desktop file
set -eou pipefail
DESKTOP_FILE_NAME=org.clan.vm-manager.desktop
DESKTOP_DST=~/.local/share/applications/$DESKTOP_FILE_NAME
DESKTOP_SRC=${clan-vm-manager.desktop-file}/share/applications/$DESKTOP_FILE_NAME
UI_BIN="clan-vm-manager"
cp -f $DESKTOP_SRC $DESKTOP_DST
sleep 2
sed -i "s|Exec=.*clan-vm-manager|Exec=$UI_BIN|" $DESKTOP_DST
xdg-mime default $DESKTOP_FILE_NAME x-scheme-handler/clan
echo "==== Validating desktop file installation ===="
set -x
desktop-file-validate $DESKTOP_DST
set +xeou pipefail
'';
};
};
}

View File

@@ -4,9 +4,27 @@
, self'
, config
, ...
}: {
}:
let
writers = pkgs.callPackage ./pkgs/builders/script-writers.nix { };
ansiEscapes = {
reset = ''\033[0m'';
green = ''\033[32m'';
};
# A python program to switch between dev-shells
# usage: select-shell shell-name
# the currently enabled dev-shell gets stored in ./.direnv/selected-shell
select-shell = writers.writePython3Bin "select-shell"
{
flakeIgnore = [ "E501" ];
} ./pkgs/scripts/select-shell.py;
in
{
devShells.default = pkgs.mkShell {
packages = [
select-shell
pkgs.tea
self'.packages.tea-create-pr
self'.packages.merge-after-ci
@@ -17,6 +35,8 @@
shellHook = ''
# no longer used
rm -f "$(git rev-parse --show-toplevel)/.git/hooks/pre-commit"
echo -e "${ansiEscapes.green}switch to another dev-shell using: select-shell${ansiEscapes.reset}"
'';
};
};

View File

@@ -88,17 +88,18 @@ $ clan machines install my-machine <target_host>
## Update Your Machines
Clan CLI enables you to remotely update your machines over SSH. This requires setting up a deployment address for each target machine.
Clan CLI enables you to remotely update your machines over SSH. This requires setting up a target address for each target machine.
### Setting the Deployment Address
### Setting the Target Host
Replace `host_or_ip` with the actual hostname or IP address of your target machine:
```shellSession
$ clan config --machine my-machine clan.networking.deploymentAddress root@host_or_ip
$ clan config --machine my-machine clan.networking.targetHost root@host_or_ip
```
_Note: The use of `root@` in the deployment address implies SSH access as the root user. Ensure that the root login is secured and only used when necessary._
_Note: The use of `root@` in the target address implies SSH access as the root user.
Ensure that the root login is secured and only used when necessary._
### Updating Machine Configurations
@@ -113,3 +114,25 @@ You can also update all configured machines simultaneously by omitting the machi
```shellSession
$ clan machines update
```
### Setting a Build Host
If the machine does not have enough resources to run the NixOS evaluation or build itself,
it is also possible to specify a build host instead.
During an update, the cli will ssh into the build host and run `nixos-rebuild` from there.
```shellSession
$ clan config --machine my-machine clan.networking.buildHost root@host_or_ip
```
### Excluding a machine from `clan machine update`
To exclude machines from being updated when running `clan machines update` without any machines specified,
one can set the `clan.deployment.requireExplicitUpdate` option to true:
```shellSession
$ clan config --machine my-machine clan.deployment.requireExplicitUpdate true
```
This is useful for machines that are not always online or are not part of the regular update cycle.

View File

@@ -1,138 +0,0 @@
# API Guidelines
This issue serves to collect our common understanding how to design our API so that it is extensible and usable and understandable.
## Resource oriented
A resource-oriented API is generally modeled as a resource hierarchy, where each node is either a simple resource or a collection resource. For convenience, they are often called a resource and a collection, respectively.
Examples of Resource Nouns:
`machine`
`user`
`flake`
Often resources have sub-resources. Even if it is not foreseen, it is recommended to use plural (trailing `s`) on resources to allow them to be collections of sub-resources.
e.g,
`users`
->
`users/*/profile`
## Verbs
Verbs should not be part of the URL
Bad:
`/api/create-products`
Good:
`/api/products`
Only resources are part of the URL, verbs are described via the HTTP Method.
Exception:
If a different HTTP Method must be used for technical reasons it is okay to terminate the path with a (short) verb / action.
Okay ish:
`/api/products/create`
## Usually the following HTTP Methods exist to interact with a resource
- POST (create an order for a resource)
- GET (retrieve the information)
- PUT (update and replace information)
- PATCH (update and modify information) **(Not used yet)**
- DELETE (delete the item)
## Every resource should be CRUD compatible
All API resources MUST be designed in a way that allows the typical CRUD operations.
Where crud stands for:
C - Create
R - Read
U - Update
D - Delete
Resources should implement at least a "Read" operation.
## Body
Use JSON as an exchange format.
All responses MUST be JSON parseable.
Bad:
`bare string`
Better:
`"quoted string"`
Best: (Enveloped see next section)
`{ name: "quoted string"}`
Errors should have a consistent JSON format, such that it is clear in which field to look at for displaying error messages.
## Envelop all Data collections
Response data should be wrapped into an JSON Object `{}`
Lists `[]` should also contain Objects `{}`.
This allows everything, to be extensible, without breaking backwards compatibility. (Adding fields is trivial, since the schema doesn't change)
Example:
```
{
"users": [{
first_name: "John",
last_name: "Doe",
}, {
first_name: "Jane",
last_name: "Doe",
}
....
],
"skip": 0,
"limit": 20,
....
}
```
Bad Example of a breaking change:
`GET /api/flakes`
`old`
```
[
"dream2nix"
"disko"
]
```
`new`
```
[
{
name: "dream2nix",
url: "github/...."
},
{
name: "disko",
url: "github/...."
}
]
```
Those kind of breaking changes can be avoided by using an object from the beginning.
Even if the object only contains one key, it is extensible, without breaking.
## More will follow.
...maybe

View File

@@ -1,10 +1,6 @@
# Contributing
**Frontend**: Our frontend is powered by [React NextJS](https://nextjs.org/), a popular and versatile framework for building web applications.
**Backend**: For the backend, we use Python along with the [FastAPI framework](https://fastapi.tiangolo.com/). To ensure seamless communication between the frontend and backend, we generate an `openapi.json` file from the Python code, which defines the REST API. This file is then used with [Orval](https://orval.dev/) to generate TypeScript bindings for the REST API. We're committed to code correctness, so we use [mypy](https://mypy-lang.org/) to ensure that our Python code is statically typed correctly. For backend testing, we rely on [pytest](https://docs.pytest.org/en/7.4.x/).
**Continuous Integration (CI)**: We've set up a CI bot that rigorously checks your code using the quality assurance (QA) tools mentioned above. If any errors are detected, it will block pull requests until they're resolved.
**Continuous Integration (CI)**: Each pull request gets automatically tested by gitea. If any errors are detected, it will block pull requests until they're resolved.
**Dependency Management**: We use the [Nix package manager](https://nixos.org/) to manage dependencies and ensure reproducibility, making your development process more robust.
@@ -34,7 +30,7 @@ Let's get your development environment up and running:
3. **Add direnv to your shell**:
- Direnv needs to [hook into your shell](https://direnv.net/docs/hook.html) to work.
You can do this by executing following command:
You can do this by executing the following command. The example below will set up direnv for `zsh` and `bash`:
```bash
echo 'eval "$(direnv hook zsh)"' >> ~/.zshrc && echo 'eval "$(direnv hook bash)"' >> ~/.bashrc && eval "$SHELL"
@@ -52,39 +48,6 @@ Let's get your development environment up and running:
```
- Execute `direnv allow` to automatically execute the shell script `.envrc` when entering the directory.
6. **Build the Backend**:
- Go to the `pkgs/clan-cli` directory and execute:
```bash
direnv allow
```
- Wait for the backend to build.
7. **Start the Backend Server**:
- To start the backend server, execute:
```bash
clan webui --reload --no-open --log-level debug
```
- The server will automatically restart if any Python files change.
8. **Build the Frontend**:
- In a different shell, navigate to the `pkgs/ui` directory and execute:
```bash
direnv allow
```
- Wait for the frontend to build.
NOTE: If you have the error "@clan/colors.json" you executed `npm install`, please do not do that. `direnv reload` will handle dependency management. Please delete node_modules with `rm -rf node_modules`.
9. **Start the Frontend**:
- To start the frontend, execute:
```bash
npm run dev
```
- Access the website by going to [http://localhost:3000](http://localhost:3000).
# Setting Up Your Git Workflow
Let's set up your Git workflow to collaborate effectively:
@@ -96,7 +59,7 @@ Let's set up your Git workflow to collaborate effectively:
tea login add
```
- Fill out the prompt as follows:
- URL of Gitea instance: `https://gitea.gchq.icu`
- URL of Gitea instance: `https://git.clan.lol`
- Name of new Login [gitea.gchq.icu]: `gitea.gchq.icu:7171`
- Do you have an access token? No
- Username: YourUsername
@@ -125,7 +88,7 @@ Let's set up your Git workflow to collaborate effectively:
4. **Review Your Pull Request**:
- Visit https://gitea.gchq.icu and go to the project page. Check under "Pull Requests" for any issues with your pull request.
- Visit https://git.clan.lol and go to the project page. Check under "Pull Requests" for any issues with your pull request.
5. **Push Your Changes**:
- If there are issues, fix them and redo step 2. Afterward, execute:
@@ -136,21 +99,22 @@ Let's set up your Git workflow to collaborate effectively:
# Debugging
When working on the backend of your project, debugging is an essential part of the development process. Here are some methods for debugging and testing the backend of your application:
Here are some methods for debugging and testing the clan-cli:
## Test Backend Locally in Devshell with Breakpoints
## Test Locally in Devshell with Breakpoints
To test the backend locally in a development environment and set breakpoints for debugging, follow these steps:
To test the cli locally in a development environment and set breakpoints for debugging, follow these steps:
1. Run the following command to execute your tests and allow for debugging with breakpoints:
```bash
pytest -n0 -s --maxfail=1
cd ./pkgs/clan-cli
pytest -n0 -s --maxfail=1 ./tests/test_nameofthetest.py
```
You can place `breakpoint()` in your Python code where you want to trigger a breakpoint for debugging.
## Test Backend Locally in a Nix Sandbox
## Test Locally in a Nix Sandbox
To run your backend tests in a Nix sandbox, you have two options depending on whether your test functions have been marked as impure or not:
To run tests in a Nix sandbox, you have two options depending on whether your test functions have been marked as impure or not:
### Running Tests Marked as Impure
@@ -189,29 +153,3 @@ If you need to inspect the Nix sandbox while running tests, follow these steps:
cntr exec -w your_sandbox_name
psgrep -a -x your_python_process_name
```
These debugging and testing methods will help you identify and fix issues in your backend code efficiently, ensuring the reliability and robustness of your application.
For more information on testing read [property and contract based testing](testing.md)
# Using this Template
To make the most of this template:
1. Set up a new Gitea account named `ui-asset-bot`. Generate an access token with all access permissions and set it under `settings/actions/secrets` as a secret called `BOT_ACCESS_TOKEN`.
- Also, edit the file `.gitea/workflows/ui_assets.yaml` and change the `BOT_EMAIL` variable to match the email you set for that account. Gitea matches commits to accounts by their email address, so this step is essential.
2. Create a second Gitea account named `merge-bot`. Edit the file `pkgs/merge-after-ci/default.nix` if the name should be different. Under "Branches," set the main branch to be protected and add `merge-bot` to the whitelisted users for pushing. Set the unprotected file pattern to `**/ui-assets.nix`.
- Enable the status check for "build / test (pull_request)."
3. Add both `merge-bot` and `ui-asset-bot` as collaborators.
- Set the option to "Delete pull request branch after merge by default."
- Also, set the default merge style to "Rebase then create merge commit."
With this template, you're well-equipped to build and collaborate on high-quality websites efficiently. Happy coding!
# API guidelines
see [./api-guidelines](./api-guidelines)

View File

@@ -1,111 +0,0 @@
# Property vs Contract based testing
In this section, we'll explore the importance of testing the backend of your FastAPI application, specifically focusing on the advantages of using contract-based testing with property-based testing frameworks.
## Why Use Property-Based Testing?
Property-based testing is a powerful approach to test your APIs, offering several key benefits:
### 1. Scope
Instead of having to write numerous test cases for various input arguments, property-based testing enables you to test a range of arguments for each parameter using a single test. This approach significantly enhances the robustness of your test suite while reducing redundancy in your testing code. In short, your test code becomes cleaner, more DRY (Don't Repeat Yourself), and more efficient. It also becomes more effective as you can easily test numerous edge cases.
### 2. Reproducibility
Property-based testing tools retain test cases and their results, allowing you to reproduce and replay tests in case of failure. This feature is invaluable for debugging and ensuring the stability of your application over time.
## Frameworks for Property-Based Testing
To implement property-based testing in FastAPI, you can use the following framework:
- [Hypothesis: Property-Based Testing](https://hypothesis.readthedocs.io/en/latest/quickstart.html)
- [Schemathesis](https://schemathesis.readthedocs.io/en/stable/#id2)
## Example
Running schemathesis fuzzer on GET requests
```bash
nix run .#runSchemaTests
```
If you want to test more request types edit the file [flake-module.nix](../checks/impure/flake-module.nix)
After a run it will upload the results to `schemathesis.io` and give you a link to the report.
The credentials to the account are `Username: schemathesis@qube.email` and `Password:6tv4eP96WXsarF`
## Why Schemas Are Not Contracts
A schema is a description of the data structure of your API, whereas a contract defines not only the structure but also the expected behavior and constraints. The following resource explains why schemas are not contracts in more detail:
- [Why Schemas Are Not Contracts](https://pactflow.io/blog/schemas-are-not-contracts/)
In a nutshell, schemas may define the data structure but often fail to capture complex constraints and the expected interactions between different API endpoints. Contracts fill this gap by specifying both the structure and behavior of your API.
## Why Use Contract-Driven Testing?
Contract-driven testing combines the benefits of type annotations and property-based testing, providing a robust approach to ensuring the correctness of your APIs.
- Contracts become an integral part of the function signature and can be checked statically, ensuring that the API adheres to the defined contract.
- Contracts, like property-based tests, allow you to specify conditions and constraints, with the testing framework automatically generating test cases and verifying call results.
### Frameworks for Contract-Driven Testing
To implement contract-driven testing in FastAPI, consider the following framework and extension:
- [Deal: Contract Driven Development](https://deal.readthedocs.io/)
By adopting contract-driven testing, you can ensure that your FastAPI application not only has a well-defined structure but also behaves correctly, making it more robust and reliable.
- [Whitepaper: Python by contract](https://users.ece.utexas.edu/~gligoric/papers/ZhangETAL22PythonByContractDataset.pdf) This paper goes more into detail how it works
## Examples
You can annotate functions with `@deal.raises(ClanError)` to say that they can _only_ raise a ClanError Exception.
```python
import deal
@deal.raises(ClanError)
def get_task(uuid: UUID) -> BaseTask:
global POOL
return POOL[uuid]
```
To say that it can raise multiple exceptions just add after one another separated with a `,`
```python
import deal
@deal.raises(ClanError, IndexError, ZeroDivisionError)
def get_task(uuid: UUID) -> BaseTask:
global POOL
return POOL[uuid]
```
### Adding deal annotated functions to pytest
```python
from clan_cli.task_manager import get_task
import deal
@deal.cases(get_task) # <--- Add function get_task to testing corpus
def test_get_task(case: deal.TestCase) -> None:
case() # <--- Call testing framework with function
```
### Adding example input for deeper testing
You can combine hypothesis annotations with deal annotations to add example inputs to the function so that the verifier can reach deeper parts of the function.
```python
import deal
@deal.example(lambda: get_task(UUID("5c2061e0-4512-4b30-aa8e-7be4a75b8b45"))) # type: ignore
@deal.example(lambda: get_task(UUID("7c2061e6-4512-4b30-aa8e-7be4a75b8b45"))) # type: ignore
@deal.raises(ClanError)
def get_task(uuid: UUID) -> BaseTask:
global POOL
return POOL[uuid]
```
You can also add `pre` and `post` conditions. A `pre` condition must be true before the function is executed. A `post` condition must be true after the function was executed. For more information read the [Writing Contracts Section](https://deal.readthedocs.io/basic/values.html).
Or read the [API doc of Deal](https://deal.readthedocs.io/details/api.html)

42
flake.lock generated
View File

@@ -7,11 +7,11 @@
]
},
"locked": {
"lastModified": 1706491084,
"narHash": "sha256-eaEv+orTmr2arXpoE4aFZQMVPOYXCBEbLgK22kOtkhs=",
"lastModified": 1708564520,
"narHash": "sha256-juduDTYBhGN6jNfQ5RMDpbQF+MkO0pj3k7XGDSTjAbs=",
"owner": "nix-community",
"repo": "disko",
"rev": "f67ba6552845ea5d7f596a24d57c33a8a9dc8de9",
"rev": "23d308f0059955e3719efc81a34d1fc0369fbb74",
"type": "github"
},
"original": {
@@ -27,11 +27,11 @@
]
},
"locked": {
"lastModified": 1704982712,
"narHash": "sha256-2Ptt+9h8dczgle2Oo6z5ni5rt/uLMG47UFTR1ry/wgg=",
"lastModified": 1706830856,
"narHash": "sha256-a0NYyp+h9hlb7ddVz4LUn1vT/PLwqfrWYcHMvFB1xYg=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "07f6395285469419cf9d078f59b5b49993198c00",
"rev": "b253292d9c0a5ead9bc98c4e9a26c6312e27d69f",
"type": "github"
},
"original": {
@@ -42,11 +42,11 @@
},
"nixlib": {
"locked": {
"lastModified": 1693701915,
"narHash": "sha256-waHPLdDYUOHSEtMKKabcKIMhlUOHPOOPQ9UyFeEoovs=",
"lastModified": 1708217146,
"narHash": "sha256-nGfEv7k78slqIR5E0zzWSx214d/4/ZPKDkObLJqVLVw=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "f5af57d3ef9947a70ac86e42695231ac1ad00c25",
"rev": "e623008d8a46517470e6365505f1a3ce171fa46a",
"type": "github"
},
"original": {
@@ -63,11 +63,11 @@
]
},
"locked": {
"lastModified": 1706085261,
"narHash": "sha256-7PgpHRHyShINcqgevPP1fJ6N8kM5ZSOJnk3QZBrOCQ0=",
"lastModified": 1708563055,
"narHash": "sha256-FaojUZNu+YPFi3eCI7mL4kxPKQ51DoySa7mqmllUOuc=",
"owner": "nix-community",
"repo": "nixos-generators",
"rev": "896f6589db5b25023b812bbb6c1f5d3a499b1132",
"rev": "f4631dee1a0fd56c0db89860e83e3588a28c7631",
"type": "github"
},
"original": {
@@ -78,11 +78,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1706440623,
"narHash": "sha256-MzqsevUkrIVpAbbN7Wn3mGlYklkm2geaozGTFxtnYgA=",
"lastModified": 1708847675,
"narHash": "sha256-RUZ7KEs/a4EzRELYDGnRB6i7M1Izii3JD/LyzH0c6Tg=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "50071d87c75300c037e28439c5176c3933b9fce5",
"rev": "2a34566b67bef34c551f204063faeecc444ae9da",
"type": "github"
},
"original": {
@@ -110,11 +110,11 @@
"nixpkgs-stable": []
},
"locked": {
"lastModified": 1706410821,
"narHash": "sha256-iCfXspqUOPLwRobqQNAQeKzprEyVowLMn17QaRPQc+M=",
"lastModified": 1708830076,
"narHash": "sha256-Cjh2xdjxC6S6nW6Whr2dxSeh8vjodzhTmQdI4zPJ4RA=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "73bf36912e31a6b21af6e0f39218e067283c67ef",
"rev": "2874fbbe4a65bd2484b0ad757d27a16107f6bc17",
"type": "github"
},
"original": {
@@ -130,11 +130,11 @@
]
},
"locked": {
"lastModified": 1706462057,
"narHash": "sha256-7dG1D4iqqt0bEbBqUWk6lZiSqqwwAO0Hd1L5opVyhNM=",
"lastModified": 1708897213,
"narHash": "sha256-QECZB+Hgz/2F/8lWvHNk05N6NU/rD9bWzuNn6Cv8oUk=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "c6153c2a3ff4c38d231e3ae99af29b87f1df5901",
"rev": "e497a9ddecff769c2a7cbab51e1ed7a8501e7a3a",
"type": "github"
},
"original": {

View File

@@ -30,6 +30,7 @@
imports = [
./checks/flake-module.nix
./devShell.nix
./devShell-python.nix
./formatter.nix
./templates/flake-module.nix
./clanModules/flake-module.nix
@@ -49,6 +50,9 @@
machines = lib.mkOption {
type = lib.types.attrsOf (lib.types.attrsOf lib.types.unspecified);
};
machinesFunc = lib.mkOption {
type = lib.types.attrsOf (lib.types.attrsOf lib.types.unspecified);
};
};
};
};

View File

@@ -30,6 +30,10 @@
"--" # this argument is ignored by bash
];
includes = [ "*.nix" ];
excludes = [
# Was copied from nixpkgs. Keep diff minimal to simplify upstreaming.
"pkgs/builders/script-writers.nix"
];
};
treefmt.settings.formatter.python = {
command = "sh";

View File

@@ -3,7 +3,9 @@
, specialArgs ? { } # Extra arguments to pass to nixosSystem i.e. useful to make self available
, machines ? { } # allows to include machine-specific modules i.e. machines.${name} = { ... }
, clanName # Needs to be (globally) unique, as this determines the folder name where the flake gets downloaded to.
, clanIcon ? null # A path to an icon to be used for the clan
, clanIcon ? null # A path to an icon to be used for the clan, should be the same for all machines
, pkgsForSystem ? (_system: null) # A map from arch to pkgs, if specified this nixpkgs will be only imported once for each system.
# This improves performance, but all nipxkgs.* options will be ignored.
}:
let
machinesDirs = lib.optionalAttrs (builtins.pathExists "${directory}/machines") (builtins.readDir (directory + /machines));
@@ -30,7 +32,7 @@ let
(machineSettings.clanImports or [ ]);
# TODO: remove default system once we have a hardware-config mechanism
nixosConfiguration = { system ? "x86_64-linux", name, forceSystem ? false }: nixpkgs.lib.nixosSystem {
nixosConfiguration = { system ? "x86_64-linux", name, pkgs ? null, extraConfig ? { } }: nixpkgs.lib.nixosSystem {
modules =
let
settings = machineSettings name;
@@ -39,20 +41,23 @@ let
++ [
settings
clan-core.nixosModules.clanCore
extraConfig
(machines.${name} or { })
{
clanCore.machineName = name;
({
clanCore.clanName = clanName;
clanCore.clanIcon = clanIcon;
clanCore.clanDir = directory;
nixpkgs.hostPlatform = if forceSystem then lib.mkForce system else lib.mkDefault system;
clanCore.machineName = name;
nixpkgs.hostPlatform = lib.mkDefault system;
# speeds up nix commands by using the nixpkgs from the host system (especially useful in VMs)
nix.registry.nixpkgs.to = {
type = "path";
path = lib.mkDefault nixpkgs;
};
}
} // lib.optionalAttrs (pkgs != null) {
nixpkgs.pkgs = lib.mkForce pkgs;
})
];
inherit specialArgs;
};
@@ -75,7 +80,23 @@ let
configsPerSystem = builtins.listToAttrs
(builtins.map
(system: lib.nameValuePair system
(lib.mapAttrs (name: _: nixosConfiguration { inherit name system; forceSystem = true; }) allMachines))
(lib.mapAttrs
(name: _: nixosConfiguration {
inherit name system;
pkgs = pkgsForSystem system;
})
allMachines))
supportedSystems);
configsFuncPerSystem = builtins.listToAttrs
(builtins.map
(system: lib.nameValuePair system
(lib.mapAttrs
(name: _: args: nixosConfiguration (args // {
inherit name system;
pkgs = pkgsForSystem system;
}))
allMachines))
supportedSystems);
in
{
@@ -83,6 +104,7 @@ in
clanInternals = {
machines = configsPerSystem;
machinesFunc = configsFuncPerSystem;
all-machines-json = lib.mapAttrs
(system: configs: nixpkgs.legacyPackages.${system}.writers.writeJSON "machines.json" (lib.mapAttrs (_: m: m.config.system.clan.deployment.data) configs))
configsPerSystem;

View File

@@ -6,6 +6,20 @@
the name of the clan
'';
};
machineIcon = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
description = ''
the location of the machine icon
'';
};
machineDescription = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = ''
the description of the machine
'';
};
clanDir = lib.mkOption {
type = lib.types.either lib.types.path lib.types.str;
description = ''

View File

@@ -1,23 +1,56 @@
{ config, lib, ... }:
{
options.clan.networking = {
deploymentAddress = lib.mkOption {
description = ''
The target SSH node for deployment.
options.clan = {
networking = {
targetHost = lib.mkOption {
description = ''
The target SSH node for deployment.
By default, the node's attribute name will be used.
If set to null, only local deployment will be supported.
By default, the node's attribute name will be used.
If set to null, only local deployment will be supported.
format: user@host:port&SSH_OPTION=SSH_VALUE
examples:
- machine.example.com
- user@machine2.example.com
- root@example.com:2222&IdentityFile=/path/to/private/key
'';
type = lib.types.nullOr lib.types.str;
default = "root@${config.networking.hostName}";
format: user@host:port&SSH_OPTION=SSH_VALUE
examples:
- machine.example.com
- user@machine2.example.com
- root@example.com:2222&IdentityFile=/path/to/private/key
'';
default = null;
type = lib.types.nullOr lib.types.str;
};
buildHost = lib.mkOption {
description = ''
The build SSH node where nixos-rebuild will be executed.
If set to null, the targetHost will be used.
format: user@host:port&SSH_OPTION=SSH_VALUE
examples:
- machine.example.com
- user@machine2.example.com
- root@example.com:2222&IdentityFile=/path/to/private/key
'';
type = lib.types.nullOr lib.types.str;
default = null;
};
};
deployment = {
requireExplicitUpdate = lib.mkOption {
description = ''
Do not update this machine when running `clan machines update` without any machines specified.
This is useful for machines that are not always online or are not part of the regular update cycle.
'';
type = lib.types.bool;
default = false;
};
};
};
imports = [
(lib.mkRenamedOptionModule [ "clan" "networking" "deploymentAddress" ] [ "clan" "networking" "targetHost" ])
];
config = {
# conflicts with systemd-resolved
networking.useHostResolvConf = false;

View File

@@ -19,18 +19,38 @@
the location of the deployment.json file
'';
};
deploymentAddress = lib.mkOption {
type = lib.types.str;
deployment.buildHost = lib.mkOption {
type = lib.types.nullOr lib.types.str;
description = ''
the address of the deployment server
the hostname of the build host where nixos-rebuild is run
'';
};
deployment.targetHost = lib.mkOption {
type = lib.types.nullOr lib.types.str;
description = ''
the hostname of the target host to be deployed to
'';
};
deployment.requireExplicitUpdate = lib.mkOption {
type = lib.types.bool;
description = ''
if true, the deployment will not be updated automatically.
'';
default = false;
};
secretsUploadDirectory = lib.mkOption {
type = lib.types.path;
description = ''
the directory on the deployment server where secrets are uploaded
'';
};
factsModule = lib.mkOption {
type = lib.types.str;
description = ''
the python import path to the facts module
'';
default = "clan_cli.facts.modules.in_repo";
};
secretsModule = lib.mkOption {
type = lib.types.str;
description = ''
@@ -42,13 +62,7 @@
description = ''
secret data as json for the generator
'';
default = pkgs.writers.writeJSON "secrets.json" (lib.mapAttrs
(_name: secret: {
secrets = builtins.attrNames secret.secrets;
facts = lib.mapAttrs (_: secret: secret.path) secret.facts;
generator = secret.generator.finalScript;
})
config.clanCore.secrets);
default = pkgs.writers.writeJSON "secrets.json" config.clanCore.secrets;
};
vm.create = lib.mkOption {
type = lib.types.path;
@@ -56,6 +70,12 @@
json metadata about the vm
'';
};
iso = lib.mkOption {
type = lib.types.path;
description = ''
A generated iso of the machine for the flash command
'';
};
};
};
description = ''
@@ -65,11 +85,11 @@
# optimization for faster secret generate/upload and machines update
config = {
system.clan.deployment.data = {
inherit (config.system.clan) secretsModule secretsData;
inherit (config.clan.networking) deploymentAddress;
inherit (config.system.clan) factsModule secretsModule secretsData;
inherit (config.clan.networking) targetHost buildHost;
inherit (config.clan.deployment) requireExplicitUpdate;
inherit (config.clanCore) secretsUploadDirectory;
};
system.clan.deployment.file = pkgs.writeText "deployment.json" (builtins.toJSON config.system.clan.deployment.data);
};
}

View File

@@ -1,7 +1,7 @@
{ config, lib, ... }:
{ config, lib, pkgs, ... }:
{
options.clanCore.secretStore = lib.mkOption {
type = lib.types.enum [ "sops" "password-store" "custom" ];
type = lib.types.enum [ "sops" "password-store" "vm" "custom" ];
default = "sops";
description = ''
method to store secrets
@@ -35,13 +35,13 @@
options.clanCore.secrets = lib.mkOption {
default = { };
type = lib.types.attrsOf
(lib.types.submodule (secret: {
(lib.types.submodule (service: {
options = {
name = lib.mkOption {
type = lib.types.str;
default = secret.config._module.args.name;
default = service.config._module.args.name;
description = ''
Namespace of the secret
Namespace of the service
'';
};
generator = lib.mkOption {
@@ -54,6 +54,14 @@
Extra paths to add to the PATH environment variable when running the generator.
'';
};
prompt = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = ''
prompt text to ask for a value.
This value will be passed to the script as the environment variable $prompt_value.
'';
};
script = lib.mkOption {
type = lib.types.str;
description = ''
@@ -69,8 +77,18 @@
readOnly = true;
internal = true;
default = ''
export PATH="${lib.makeBinPath config.path}"
set -efu -o pipefail
set -eu -o pipefail
export PATH="${lib.makeBinPath config.path}:${pkgs.coreutils}/bin"
# prepare sandbox user
mkdir -p /etc
cp ${pkgs.runCommand "fake-etc" {} ''
export PATH="${pkgs.coreutils}/bin"
mkdir -p $out
cp /etc/* $out/
''}/* /etc/
${config.script}
'';
};
@@ -82,14 +100,14 @@
config' = config;
in
lib.mkOption {
type = lib.types.attrsOf (lib.types.submodule ({ config, ... }: {
type = lib.types.attrsOf (lib.types.submodule ({ config, name, ... }: {
options = {
name = lib.mkOption {
type = lib.types.str;
description = ''
name of the secret
'';
default = config._module.args.name;
default = name;
};
path = lib.mkOption {
type = lib.types.str;
@@ -98,6 +116,14 @@
'';
default = "${config'.clanCore.secretsDirectory}/${config'.clanCore.secretsPrefix}${config.name}";
};
} // lib.optionalAttrs (config'.clanCore.secretStore == "sops") {
groups = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = config'.clanCore.sops.defaultGroups;
description = ''
Groups to decrypt the secret for. By default we always use the user's key.
'';
};
};
}));
description = ''
@@ -140,5 +166,6 @@
imports = [
./sops.nix
./password-store.nix
./vm.nix
];
}

View File

@@ -22,6 +22,14 @@ let
secrets = filterDir containsMachineOrGroups secretsDir;
in
{
options = {
clanCore.sops.defaultGroups = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
example = [ "admins" ];
description = "The default groups to use for encryption when no groups are specified.";
};
};
config = lib.mkIf (config.clanCore.secretStore == "sops") {
clanCore.secretsDirectory = "/run/secrets";
clanCore.secretsPrefix = config.clanCore.machineName + "-";

View File

@@ -0,0 +1,10 @@
{ config, lib, ... }:
{
config = lib.mkIf (config.clanCore.secretStore == "vm") {
clanCore.secretsDirectory = "/etc/secrets";
clanCore.secretsUploadDirectory = "/etc/secrets";
system.clan.secretsModule = "clan_cli.secrets.modules.vm";
system.clan.factsModule = "clan_cli.facts.modules.vm";
};
}

View File

@@ -1,8 +1,9 @@
{ lib, ... }:
{
# defaults
# FIXME: currently broken, will be fixed soon
#config.clanCore.state.HOME.folders = [ "/home" ];
config.clanCore.state.HOME.folders = [
"/home"
];
# interface
options.clanCore.state = lib.mkOption {

View File

@@ -1,17 +1,5 @@
{ lib, config, pkgs, options, extendModules, modulesPath, ... }:
let
# Generates a fileSystems entry for bind mounting a given state folder path
# It binds directories from /var/clanstate/{some-path} to /{some-path}.
# As a result, all state paths will be persisted across reboots, because
# the state folder is mounted from the host system.
mkBindMount = path: {
name = path;
value = {
device = "/var/clanstate/${path}";
options = [ "bind" ];
};
};
# Flatten the list of state folders into a single list
stateFolders = lib.flatten (
lib.mapAttrsToList
@@ -19,33 +7,74 @@ let
config.clanCore.state
);
# A module setting up bind mounts for all state folders
stateMounts = {
virtualisation.fileSystems =
lib.listToAttrs
(map mkBindMount stateFolders);
};
vmModule = {
imports = [
(modulesPath + "/virtualisation/qemu-vm.nix")
./serial.nix
stateMounts
];
virtualisation.fileSystems = {
${config.clanCore.secretsUploadDirectory} = lib.mkForce {
# required for issuing shell commands via qga
services.qemuGuest.enable = true;
# required to react to system_powerdown qmp command
# Some desktop managers like xfce override the poweroff signal and therefore
# make it impossible to handle it via 'logind' diretly.
services.acpid.enable = true;
services.acpid.handlers.power.event = "button/power.*";
services.acpid.handlers.power.action = "poweroff";
boot.initrd.systemd.enable = true;
# currently needed for system.etc.overlay.enable
boot.kernelPackages = pkgs.linuxPackages_latest;
boot.initrd.systemd.storePaths = [ pkgs.util-linux pkgs.e2fsprogs ];
boot.initrd.systemd.emergencyAccess = true;
# sysusers is faster than nixos's perl scripts
# and doesn't require state.
systemd.sysusers.enable = true;
users.mutableUsers = false;
users.allowNoPasswordLogin = true;
boot.initrd.kernelModules = [ "virtiofs" ];
virtualisation.writableStore = false;
virtualisation.fileSystems = lib.mkForce ({
"/nix/store" = {
device = "nix-store";
options = [ "x-systemd.requires=systemd-modules-load.service" "ro" ];
fsType = "virtiofs";
};
"/" = {
device = "/dev/vda";
fsType = "ext4";
options = [ "defaults" "x-systemd.makefs" "nobarrier" "noatime" "nodiratime" "data=writeback" "discard" ];
};
"/vmstate" = {
device = "/dev/vdb";
options = [ "x-systemd.makefs" "noatime" "nodiratime" "discard" ];
noCheck = true;
fsType = "ext4";
};
${config.clanCore.secretsUploadDirectory} = {
device = "secrets";
fsType = "9p";
neededForBoot = true;
options = [ "trans=virtio" "version=9p2000.L" "cache=loose" ];
};
"/var/clanstate" = {
device = "state";
fsType = "9p";
options = [ "trans=virtio" "version=9p2000.L" "cache=loose" ];
};
};
boot.initrd.systemd.enable = true;
} // lib.listToAttrs (map
(folder:
lib.nameValuePair folder {
device = "/vmstate${folder}";
fsType = "none";
options = [ "bind" ];
})
stateFolders));
};
# We cannot simply merge the VM config into the current system config, because
@@ -53,7 +82,7 @@ let
# Instead we use extendModules to create a second instance of the current
# system configuration, and then merge the VM config into that.
vmConfig = extendModules {
modules = [ vmModule stateMounts ];
modules = [ vmModule ];
};
in
{
@@ -86,6 +115,14 @@ in
change the preferred console.
'';
};
waypipe = lib.mkOption {
type = lib.types.bool;
default = false;
description = lib.mdDoc ''
Whether to use waypipe for native wayland passthrough, or not.
'';
};
};
# All important VM config variables needed by the vm runner
# this is really just a remapping of values defined elsewhere
@@ -123,6 +160,38 @@ in
whether to enable graphics for the vm
'';
};
waypipe = lib.mkOption {
type = lib.types.bool;
internal = true;
readOnly = true;
description = ''
whether to enable native wayland window passthrough with waypipe for the vm
'';
};
machine_icon = lib.mkOption {
type = lib.types.nullOr lib.types.path;
internal = true;
readOnly = true;
description = ''
the location of the clan icon
'';
};
machine_name = lib.mkOption {
type = lib.types.str;
internal = true;
readOnly = true;
description = ''
the name of the vm
'';
};
machine_description = lib.mkOption {
type = lib.types.nullOr lib.types.str;
internal = true;
readOnly = true;
description = ''
the description of the vm
'';
};
};
};
@@ -130,8 +199,11 @@ in
# for clan vm inspect
clanCore.vm.inspect = {
clan_name = config.clanCore.clanName;
machine_icon = config.clanCore.machineIcon or config.clanCore.clanIcon;
machine_name = config.clanCore.machineName;
machine_description = config.clanCore.machineDescription;
memory_size = config.clan.virtualisation.memorySize;
inherit (config.clan.virtualisation) cores graphics;
inherit (config.clan.virtualisation) cores graphics waypipe;
};
# for clan vm create
system.clan.vm = {

View File

@@ -6,46 +6,6 @@ let
install -Dm755 ${./genmoon.py} $out/bin/genmoon
patchShebangs $out/bin/genmoon
'';
networkConfig = {
authTokens = [
null
];
authorizationEndpoint = "";
capabilities = [ ];
clientId = "";
dns = [ ];
enableBroadcast = true;
id = cfg.networkId;
ipAssignmentPools = [ ];
mtu = 2800;
multicastLimit = 32;
name = cfg.name;
uwid = cfg.networkId;
objtype = "network";
private = !cfg.controller.public;
remoteTraceLevel = 0;
remoteTraceTarget = null;
revision = 1;
routes = [ ];
rules = [
{
not = false;
or = false;
type = "ACTION_ACCEPT";
}
];
rulesSource = "";
ssoEnabled = false;
tags = [ ];
v4AssignMode = {
zt = false;
};
v6AssignMode = {
"6plane" = false;
rfc4193 = true;
zt = false;
};
};
in
{
options.clan.networking.zerotier = {
@@ -114,6 +74,12 @@ in
'';
};
};
settings = lib.mkOption {
description = lib.mdDoc "override the network config in /var/lib/zerotier/bla/$network.json";
type = lib.types.submodule {
freeformType = (pkgs.formats.json { }).type;
};
};
};
config = lib.mkMerge [
({
@@ -147,7 +113,7 @@ in
${lib.optionalString (cfg.controller.enable) ''
mkdir -p /var/lib/zerotier-one/controller.d/network
ln -sfT ${pkgs.writeText "net.json" (builtins.toJSON networkConfig)} /var/lib/zerotier-one/controller.d/network/${cfg.networkId}.json
ln -sfT ${pkgs.writeText "net.json" (builtins.toJSON cfg.settings)} /var/lib/zerotier-one/controller.d/network/${cfg.networkId}.json
''}
${lib.optionalString (cfg.moon.stableEndpoints != []) ''
if [[ ! -f /var/lib/zerotier-one/moon.json ]]; then
@@ -220,11 +186,11 @@ in
--network-id "$facts/zerotier-network-id"
'';
};
# clanCore.state.zerotier.folders = [ "/var/lib/zerotier-one" ];
clanCore.state.zerotier.folders = [ "/var/lib/zerotier-one" ];
environment.systemPackages = [ config.clanCore.clanPkgs.zerotier-members ];
})
(lib.mkIf (config.clanCore.secretsUploadDirectory != null && !cfg.controller.enable && cfg.networkId != null) {
(lib.mkIf (!cfg.controller.enable && cfg.networkId != null) {
clanCore.secrets.zerotier = {
facts.zerotier-ip = { };
facts.zerotier-meshname = { };
@@ -241,6 +207,46 @@ in
})
(lib.mkIf (cfg.controller.enable && (facts.zerotier-network-id.value or null) != null) {
clan.networking.zerotier.networkId = facts.zerotier-network-id.value;
clan.networking.zerotier.settings = {
authTokens = [
null
];
authorizationEndpoint = "";
capabilities = [ ];
clientId = "";
dns = { };
enableBroadcast = true;
id = cfg.networkId;
ipAssignmentPools = [ ];
mtu = 2800;
multicastLimit = 32;
name = cfg.name;
uwid = cfg.networkId;
objtype = "network";
private = !cfg.controller.public;
remoteTraceLevel = 0;
remoteTraceTarget = null;
revision = 1;
routes = [ ];
rules = [
{
not = false;
or = false;
type = "ACTION_ACCEPT";
}
];
rulesSource = "";
ssoEnabled = false;
tags = [ ];
v4AssignMode = {
zt = false;
};
v6AssignMode = {
"6plane" = false;
rfc4193 = true;
zt = false;
};
};
environment.etc."zerotier/network-id".text = facts.zerotier-network-id.value;
systemd.services.zerotierone.serviceConfig.ExecStartPost = [
"+${pkgs.writeShellScript "whitelist-controller" ''

View File

@@ -3,8 +3,11 @@ import base64
import contextlib
import ipaddress
import json
import os
import signal
import socket
import subprocess
import sys
import time
import urllib.request
from collections.abc import Iterator
@@ -115,7 +118,11 @@ def zerotier_controller() -> Iterator[ZerotierController]:
f"-p{controller_port}",
str(home),
]
with subprocess.Popen(cmd) as p:
with subprocess.Popen(
cmd,
preexec_fn=os.setsid,
) as p:
process_group = os.getpgid(p.pid)
try:
print(
f"wait for controller to be started on 127.0.0.1:{controller_port}...",
@@ -131,8 +138,7 @@ def zerotier_controller() -> Iterator[ZerotierController]:
yield ZerotierController(controller_port, home)
finally:
p.terminate()
p.wait()
os.killpg(process_group, signal.SIGKILL)
@dataclass
@@ -143,9 +149,15 @@ class NetworkController:
# TODO: allow merging more network configuration here
def create_network_controller() -> NetworkController:
with zerotier_controller() as controller:
network = controller.create_network()
return NetworkController(network["nwid"], controller.identity)
e = ClanError("Bug, should never happen")
for _ in range(10):
try:
with zerotier_controller() as controller:
network = controller.create_network()
return NetworkController(network["nwid"], controller.identity)
except ClanError: # probably failed to allocate port, so retry
print("failed to create network, retrying..., probabl", file=sys.stderr)
raise e
def create_identity() -> Identity:

View File

@@ -5,6 +5,7 @@
clanCore.imports = [
inputs.sops-nix.nixosModules.sops
./clanCore
./iso
({ pkgs, lib, ... }: {
clanCore.clanPkgs = lib.mkDefault self.packages.${pkgs.hostPlatform.system};
})

View File

@@ -33,7 +33,7 @@
systemd.services.hidden-ssh-announce = {
description = "announce hidden ssh";
after = [ "tor.service" "network-online.target" ];
wants = [ "tor.service" ];
wants = [ "tor.service" "network-online.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
# ${pkgs.tor}/bin/torify

View File

@@ -0,0 +1,90 @@
{ config, extendModules, lib, pkgs, ... }:
let
# Generates a fileSystems entry for bind mounting a given state folder path
# It binds directories from /var/clanstate/{some-path} to /{some-path}.
# As a result, all state paths will be persisted across reboots, because
# the state folder is mounted from the host system.
mkBindMount = path: {
name = path;
value = {
device = "/var/clanstate/${path}";
options = [ "bind" ];
};
};
# Flatten the list of state folders into a single list
stateFolders = lib.flatten (
lib.mapAttrsToList
(_item: attrs: attrs.folders)
config.clanCore.state
);
# A module setting up bind mounts for all state folders
stateMounts = {
fileSystems =
lib.listToAttrs
(map mkBindMount stateFolders);
};
isoModule = { config, ... }: {
imports = [
stateMounts
];
options.clan.iso.disko = lib.mkOption {
type = lib.types.submodule {
freeformType = (pkgs.formats.json { }).type;
};
default = {
disk = {
iso = {
type = "disk";
imageSize = "10G"; # TODO add auto image size in disko
content = {
type = "gpt";
partitions = {
boot = {
size = "1M";
type = "EF02"; # for grub MBR
};
ESP = {
size = "100M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};
};
};
};
};
config = {
disko.devices = lib.mkOverride 51 config.clan.iso.disko;
boot.loader.grub.enable = true;
boot.loader.grub.efiSupport = true;
boot.loader.grub.device = lib.mkForce "/dev/vda";
boot.loader.grub.efiInstallAsRemovable = true;
};
};
isoConfig = extendModules {
modules = [ isoModule ];
};
in
{
config = {
# for clan vm create
system.clan.iso = isoConfig.config.system.build.diskoImages;
};
}

View File

@@ -0,0 +1,497 @@
{
buildPackages,
gixy,
lib,
libiconv,
makeWrapper,
mkNugetDeps,
mkNugetSource,
pkgs,
stdenv,
}:
let
inherit (lib)
concatMapStringsSep
elem
escapeShellArg
last
optionalString
strings
types
;
in
rec {
# Base implementation for non-compiled executables.
# Takes an interpreter, for example `${lib.getExe pkgs.bash}`
#
# Examples:
# writeBash = makeScriptWriter { interpreter = "${pkgs.bash}/bin/bash"; }
# makeScriptWriter { interpreter = "${pkgs.dash}/bin/dash"; } "hello" "echo hello world"
# Build a derivation containing a single executable script with a shebang
# pointing at `interpreter`.  `nameOrPath` is either a bare name or a path
# like "/bin/foo" (in which case the result is a directory and the script is
# wrapped with wrapProgram); `content` is the script text or a path to it.
# `check`, when non-empty, is a command run against the script at build time.
makeScriptWriter = { interpreter, check ? "", makeWrapperArgs ? [], }: nameOrPath: content:
  # Accept a path, or a name matching [0-9A-Za-z._][0-9A-Za-z._-]* (no leading dash).
  assert lib.or (types.path.check nameOrPath) (builtins.match "([0-9A-Za-z._])[0-9A-Za-z._-]*" nameOrPath != null);
  assert lib.or (types.path.check content) (types.str.check content);
  let
    # Final path component is the executable's basename.
    name = last (builtins.split "/" nameOrPath);
  in
  pkgs.runCommandLocal name (
    {
      inherit makeWrapperArgs;
      nativeBuildInputs = [
        makeWrapper
      ];
    }
    # A script installed exactly at /bin/<name> becomes the package's main program.
    // lib.optionalAttrs (nameOrPath == "/bin/${name}") {
      meta.mainProgram = name;
    }
    // (
      # String content is passed via passAsFile so $contentPath works in both cases.
      if (types.str.check content) then {
        inherit content interpreter;
        passAsFile = [ "content" ];
      } else {
        inherit interpreter;
        contentPath = content;
      }
    )
  )
  ''
    # On darwin a script cannot be used as an interpreter in a shebang but
    # there doesn't seem to be a limit to the size of shebang and multiple
    # arguments to the interpreter are allowed.
    if [[ -n "${toString pkgs.stdenvNoCC.isDarwin}" ]] && isScript $interpreter
    then
      wrapperInterpreterLine=$(head -1 "$interpreter" | tail -c+3)
      # Get first word from the line (note: xargs echo remove leading spaces)
      wrapperInterpreter=$(echo "$wrapperInterpreterLine" | xargs echo | cut -d " " -f1)
      if isScript $wrapperInterpreter
      then
        echo "error: passed interpreter ($interpreter) is a script which has another script ($wrapperInterpreter) as an interpreter, which is not supported."
        exit 1
      fi
      # This should work as long as wrapperInterpreter is a shell, which is
      # the case for programs wrapped with makeWrapper, like
      # python3.withPackages etc.
      interpreterLine="$wrapperInterpreterLine $interpreter"
    else
      interpreterLine=$interpreter
    fi
    echo "#! $interpreterLine" > $out
    cat "$contentPath" >> $out
    ${optionalString (check != "") ''
      ${check} $out
    ''}
    chmod +x $out
    # Relocate executable if path was specified instead of name.
    # Only in this case wrapProgram is applied, as it wouldn't work with a
    # single executable file under $out.
    ${optionalString (types.path.check nameOrPath) ''
      mv $out tmp
      mkdir -p $out/$(dirname "${nameOrPath}")
      mv tmp $out/${nameOrPath}
      wrapProgram $out/${nameOrPath} ''${makeWrapperArgs[@]}
    ''}
  '';
# Base implementation for compiled executables.
# Takes a compile script, which in turn takes the name as an argument.
#
# Examples:
# writeSimpleC = makeBinWriter { compileScript = name: "gcc -o $out $contentPath"; }
# Build a derivation containing a single compiled executable.
# `compileScript` is shell that reads $contentPath and must produce $out;
# `strip` controls whether symbols are stripped from the result.
makeBinWriter = { compileScript, strip ? true }: nameOrPath: content:
  # Same name/content validation as makeScriptWriter.
  assert lib.or (types.path.check nameOrPath) (builtins.match "([0-9A-Za-z._])[0-9A-Za-z._-]*" nameOrPath != null);
  assert lib.or (types.path.check content) (types.str.check content);
  let
    name = last (builtins.split "/" nameOrPath);
  in
  pkgs.runCommand name ((if (types.str.check content) then {
    inherit content;
    passAsFile = [ "content" ];
  } else {
    contentPath = content;
  }) // lib.optionalAttrs (nameOrPath == "/bin/${name}") {
    meta.mainProgram = name;
  }) ''
    ${compileScript}
    ${lib.optionalString strip
      "${lib.getBin buildPackages.bintools-unwrapped}/bin/${buildPackages.bintools-unwrapped.targetPrefix}strip -S $out"}
    # Sometimes binaries produced for darwin (e. g. by GHC) won't be valid
    # mach-o executables from the get-go, but need to be corrected somehow
    # which is done by fixupPhase.
    ${lib.optionalString pkgs.stdenvNoCC.hostPlatform.isDarwin "fixupPhase"}
    ${optionalString (types.path.check nameOrPath) ''
      mv $out tmp
      mkdir -p $out/$(dirname "${nameOrPath}")
      mv tmp $out/${nameOrPath}
    ''}
  '';
# Like writeScript but the first line is a shebang to bash
#
# Example:
#   writeBash "example" ''
#     echo hello world
#   ''
writeBash = makeScriptWriter {
  interpreter = "${lib.getExe pkgs.bash}";
};
# Like writeScriptBin but the first line is a shebang to bash
writeBashBin = name:
  writeBash "/bin/${name}";
# Like writeScript but the first line is a shebang to dash
#
# Example:
#   writeDash "example" ''
#     echo hello world
#   ''
writeDash = makeScriptWriter {
  interpreter = "${lib.getExe pkgs.dash}";
};
# Like writeScriptBin but the first line is a shebang to dash
writeDashBin = name:
  writeDash "/bin/${name}";
# Like writeScript but the first line is a shebang to fish
#
# Example:
#   writeFish "example" ''
#     echo hello world
#   ''
writeFish = makeScriptWriter {
  interpreter = "${lib.getExe pkgs.fish} --no-config";
  check = "${lib.getExe pkgs.fish} --no-config --no-execute"; # syntax check only
};
# Like writeScriptBin but the first line is a shebang to fish
writeFishBin = name:
  writeFish "/bin/${name}";
# writeHaskell takes a name, an attrset with libraries and haskell version (both optional)
# and some haskell source code and returns an executable.
#
# Example:
#   writeHaskell "missiles" { libraries = [ pkgs.haskellPackages.acme-missiles ]; } ''
#     import Acme.Missiles
#
#     main = launchMissiles
#   '';
writeHaskell = name: {
  libraries ? [],
  ghc ? pkgs.ghc,
  ghcArgs ? [],
  threadedRuntime ? true,
  strip ? true
}:
  let
    # Add the element only if it is not already present (keeps user args intact).
    appendIfNotSet = el: list: if elem el list then list else list ++ [ el ];
    ghcArgs' = if threadedRuntime then appendIfNotSet "-threaded" ghcArgs else ghcArgs;
  in makeBinWriter {
    # GHC names the binary after the source file ("tmp"), hence the final mv.
    compileScript = ''
      cp $contentPath tmp.hs
      ${(ghc.withPackages (_: libraries ))}/bin/ghc ${lib.escapeShellArgs ghcArgs'} tmp.hs
      mv tmp $out
    '';
    inherit strip;
  } name;
# writeHaskellBin takes the same arguments as writeHaskell but outputs a directory (like writeScriptBin)
writeHaskellBin = name:
  writeHaskell "/bin/${name}";
# Like writeScript but the first line is a shebang to nu
#
# Example:
#   writeNu "example" ''
#     echo hello world
#   ''
writeNu = makeScriptWriter {
  interpreter = "${lib.getExe pkgs.nushell} --no-config-file";
};
# Like writeScriptBin but the first line is a shebang to nu
writeNuBin = name:
  writeNu "/bin/${name}";
# makeRubyWriter takes ruby and compatible rubyPackages and produces a ruby script writer.
# If any libraries are specified, ruby.withPackages is used as interpreter, otherwise the "bare" ruby is used.
makeRubyWriter = ruby: rubyPackages: buildRubyPackages: name: { libraries ? [], ... } @ args:
  makeScriptWriter (
    # Forward all other makeScriptWriter options (check, makeWrapperArgs, ...).
    (builtins.removeAttrs args ["libraries"])
    // {
      interpreter =
        if libraries == []
        then "${ruby}/bin/ruby"
        else "${(ruby.withPackages (ps: libraries))}/bin/ruby";
      # Rubocop doesn't seem to like running in this fashion.
      #check = (writeDash "rubocop.sh" ''
      #  exec ${lib.getExe buildRubyPackages.rubocop} "$1"
      #'');
    }
  ) name;
# Like writeScript but the first line is a shebang to ruby
#
# Example:
#   writeRuby "example" ''
#     puts "hello world"
#   ''
writeRuby = makeRubyWriter pkgs.ruby pkgs.rubyPackages buildPackages.rubyPackages;
# Like writeScriptBin but outputs a directory with the script under /bin.
writeRubyBin = name:
  writeRuby "/bin/${name}";
# makeLuaWriter takes lua and compatible luaPackages and produces lua script writer,
# which validates the script with luacheck at build time. If any libraries are specified,
# lua.withPackages is used as interpreter, otherwise the "bare" lua is used.
makeLuaWriter = lua: luaPackages: buildLuaPackages: name: { libraries ? [], ... } @ args:
  makeScriptWriter (
    (builtins.removeAttrs args ["libraries"])
    // {
      interpreter = lua.interpreter;
      # NOTE: `libraries` is currently ignored here; withPackages is disabled:
      # if libraries == []
      # then lua.interpreter
      # else (lua.withPackages (ps: libraries)).interpreter
      # This should support packages! I just can't figure out why some dependency collision happens whenever I try to run this.
      check = (writeDash "luacheck.sh" ''
        exec ${buildLuaPackages.luacheck}/bin/luacheck "$1"
      '');
    }
  ) name;
# writeLua takes a name, an attrset with libraries, and some lua source code and
# returns an executable (should also work with luajit)
#
# Example:
#   writeLua "test_lua" { libraries = [ pkgs.luaPackages.say ]; } ''
#     s = require("say")
#     s:set_namespace("en")
#
#     s:set('money', 'I have %s dollars')
#     s:set('wow', 'So much money!')
#
#     print(s('money', {1000})) -- I have 1000 dollars
#
#     s:set_namespace("fr") -- switch to french!
#     s:set('wow', "Tant d'argent!")
#
#     print(s('wow')) -- Tant d'argent!
#     s:set_namespace("en") -- switch back to english!
#     print(s('wow')) -- So much money!
#   ''
writeLua = makeLuaWriter pkgs.lua pkgs.luaPackages buildPackages.luaPackages;
# Like writeScriptBin but outputs a directory with the script under /bin.
writeLuaBin = name:
  writeLua "/bin/${name}";
# writeRust takes a name, an attrset (rustc, rustcArgs, strip — all optional)
# and rust source code, and returns a compiled executable.
writeRust = name: {
  rustc ? pkgs.rustc,
  rustcArgs ? [],
  strip ? true
}:
  let
    # libiconv is needed at link time on darwin.
    darwinArgs = lib.optionals stdenv.isDarwin [ "-L${lib.getLib libiconv}/lib" ];
  in
  makeBinWriter {
    compileScript = ''
      cp "$contentPath" tmp.rs
      PATH=${lib.makeBinPath [pkgs.gcc]} ${rustc}/bin/rustc ${lib.escapeShellArgs rustcArgs} ${lib.escapeShellArgs darwinArgs} -o "$out" tmp.rs
    '';
    inherit strip;
  } name;
# Like writeRust but outputs a directory with the binary under /bin.
writeRustBin = name:
  writeRust "/bin/${name}";
# writeJS takes a name, an attrset with libraries, and some JavaScript source code and
# returns an executable
#
# Example:
#   writeJS "example" { libraries = [ pkgs.nodePackages.uglify-js ]; } ''
#     var UglifyJS = require("uglify-js");
#     var code = "function add(first, second) { return first + second; }";
#     var result = UglifyJS.minify(code);
#     console.log(result.code);
#   ''
writeJS = name: { libraries ? [] }: content:
  let
    # Combine all node libraries into one NODE_PATH-compatible env.
    node-env = pkgs.buildEnv {
      name = "node";
      paths = libraries;
      pathsToLink = [
        "/lib/node_modules"
      ];
    };
  in writeDash name ''
    export NODE_PATH=${node-env}/lib/node_modules
    exec ${lib.getExe pkgs.nodejs} ${pkgs.writeText "js" content} "$@"
  '';
# writeJSBin takes the same arguments as writeJS but outputs a directory (like writeScriptBin)
writeJSBin = name:
  writeJS "/bin/${name}";
# awk program that re-indents nginx config by brace depth.
# NOTE(review): the stray leading "awk -f" line is part of the awk source; as
# an awk pattern it evaluates to 0 (no-op), so it appears harmless — verify.
awkFormatNginx = builtins.toFile "awkFormat-nginx.awk" ''
  awk -f
  {sub(/^[ \t]+/,"");idx=0}
  /\{/{ctx++;idx=1}
  /\}/{ctx--}
  {id="";for(i=idx;i<ctx;i++)id=sprintf("%s%s", id, "\t");printf "%s%s\n", id, $0}
'';
# Format an nginx config fragment and validate it with gixy at build time.
writeNginxConfig = name: text: pkgs.runCommandLocal name {
  inherit text;
  passAsFile = [ "text" ];
  nativeBuildInputs = [ gixy ];
} /* sh */ ''
  # nginx-config-formatter has an error - https://github.com/1connect/nginx-config-formatter/issues/16
  awk -f ${awkFormatNginx} "$textPath" | sed '/^\s*$/d' > $out
  gixy $out
'';
# writePerl takes a name, an attrset with libraries, and some perl source code and
# returns an executable
#
# Example:
#   writePerl "example" { libraries = [ pkgs.perlPackages.boolean ]; } ''
#     use boolean;
#     print "Howdy!\n" if true;
#   ''
writePerl = name: { libraries ? [], ... } @ args:
  makeScriptWriter (
    (builtins.removeAttrs args ["libraries"])
    // {
      interpreter = "${lib.getExe (pkgs.perl.withPackages (p: libraries))}";
    }
  ) name;
# writePerlBin takes the same arguments as writePerl but outputs a directory (like writeScriptBin)
writePerlBin = name:
  writePerl "/bin/${name}";
# makePythonWriter takes python and compatible pythonPackages and produces python script writer,
# which validates the script with flake8 at build time. If any libraries are specified,
# python.withPackages is used as interpreter, otherwise the "bare" python is used.
# Produce a python script writer for the given interpreter/package sets.
# Scripts are checked with flake8 at build time (python3 only); libraries,
# when given, are provided via python.withPackages.
makePythonWriter = python: pythonPackages: buildPythonPackages: name: { libraries ? [], flakeIgnore ? [], ... } @ args:
  let
    ignoreAttribute = optionalString (flakeIgnore != []) "--ignore ${concatMapStringsSep "," escapeShellArg flakeIgnore}";
  in
  makeScriptWriter
    (
      (builtins.removeAttrs args ["libraries" "flakeIgnore"])
      // {
        interpreter =
          # BUG FIX: the original condition used `||`, which is a tautology
          # (pythonPackages always differs from at least one of the two pypy
          # sets), so the bare-interpreter pypy branch below was unreachable.
          # With `&&`, pypy interpreters skip withPackages as intended.
          if pythonPackages != pkgs.pypy2Packages && pythonPackages != pkgs.pypy3Packages then
            if libraries == []
            then python.interpreter
            else (python.withPackages (ps: libraries)).interpreter
          else python.interpreter
          ;
        check = optionalString python.isPy3k (writeDash "pythoncheck.sh" ''
          exec ${buildPythonPackages.flake8}/bin/flake8 --show-source ${ignoreAttribute} "$1"
        '');
      }
    )
    name;
# writePyPy2 takes a name, an attrset with libraries, and some pypy2 source code and
# returns an executable
#
# Example:
#   writePyPy2 "test_pypy2" { libraries = [ pkgs.pypy2Packages.enum ]; } ''
#     from enum import Enum
#
#     class Test(Enum):
#         a = "success"
#
#     print Test.a
#   ''
writePyPy2 = makePythonWriter pkgs.pypy2 pkgs.pypy2Packages buildPackages.pypy2Packages;
# writePyPy2Bin takes the same arguments as writePyPy2 but outputs a directory (like writeScriptBin)
writePyPy2Bin = name:
  writePyPy2 "/bin/${name}";
# writePython3 takes a name, an attrset with libraries, and some python3 source code and
# returns an executable
#
# Example:
#   writePython3 "test_python3" { libraries = [ pkgs.python3Packages.pyyaml ]; } ''
#     import yaml
#
#     y = yaml.load("""
#       - test: success
#     """)
#     print(y[0]['test'])
#   ''
writePython3 = makePythonWriter pkgs.python3 pkgs.python3Packages buildPackages.python3Packages;
# writePython3Bin takes the same arguments as writePython3 but outputs a directory (like writeScriptBin)
writePython3Bin = name:
  writePython3 "/bin/${name}";
# writePyPy3 takes a name, an attrset with libraries, and some pypy3 source code and
# returns an executable
#
# Example:
#   writePyPy3 "test_pypy3" { libraries = [ pkgs.pypy3Packages.pyyaml ]; } ''
#     import yaml
#
#     y = yaml.load("""
#       - test: success
#     """)
#     print(y[0]['test'])
#   ''
writePyPy3 = makePythonWriter pkgs.pypy3 pkgs.pypy3Packages buildPackages.pypy3Packages;
# writePyPy3Bin takes the same arguments as writePyPy3 but outputs a directory (like writeScriptBin)
writePyPy3Bin = name:
  writePyPy3 "/bin/${name}";
# Produce an F# script writer that runs the script through `dotnet fsi`.
# `libraries` is a nuget-deps function consumed by mkNugetDeps; the resulting
# nuget source is referenced from the script via an `#i` directive.
makeFSharpWriter = { dotnet-sdk ? pkgs.dotnet-sdk, fsi-flags ? "", libraries ? _: [], ... } @ args: nameOrPath:
  let
    fname = last (builtins.split "/" nameOrPath);
    # fsi requires the .fsx extension; append it if missing.
    path = if strings.hasSuffix ".fsx" nameOrPath then nameOrPath else "${nameOrPath}.fsx";
    _nugetDeps = mkNugetDeps { name = "${fname}-nuget-deps"; nugetDeps = libraries; };
    nuget-source = mkNugetSource {
      name = "${fname}-nuget-source";
      description = "A Nuget source with the dependencies for ${fname}";
      deps = [ _nugetDeps ];
    };
    # Wrapper that gives dotnet a writable HOME and disables telemetry/noise.
    fsi = writeBash "fsi" ''
      export HOME=$NIX_BUILD_TOP/.home
      export DOTNET_SKIP_FIRST_TIME_EXPERIENCE=1
      export DOTNET_CLI_TELEMETRY_OPTOUT=1
      export DOTNET_NOLOGO=1
      script="$1"; shift
      ${lib.getExe dotnet-sdk} fsi --quiet --nologo --readline- ${fsi-flags} "$@" < "$script"
    '';
  in content: makeScriptWriter (
    (builtins.removeAttrs args ["dotnet-sdk" "fsi-flags" "libraries"])
    // {
      interpreter = fsi;
    }
  ) path
  ''
    #i "nuget: ${nuget-source}/lib"
    ${ content }
    exit 0
  '';
writeFSharp =
  makeFSharpWriter {};
# Like writeFSharp but outputs a directory with the script under /bin.
writeFSharpBin = name:
  writeFSharp "/bin/${name}";
}

View File

@@ -6,7 +6,9 @@ from pathlib import Path
from types import ModuleType
from typing import Any
from . import backups, config, flakes, history, machines, secrets, vms
from clan_cli import clana
from . import backups, config, facts, flakes, flash, history, machines, secrets, vms
from .custom_logger import setup_logging
from .dirs import get_clan_flake_toplevel
from .errors import ClanCmdError, ClanError
@@ -91,6 +93,9 @@ def create_parser(prog: str | None = None) -> argparse.ArgumentParser:
parser_secrets = subparsers.add_parser("secrets", help="manage secrets")
secrets.register_parser(parser_secrets)
parser_facts = subparsers.add_parser("facts", help="manage facts")
facts.register_parser(parser_facts)
parser_machine = subparsers.add_parser(
"machines", help="Manage machines and their configuration"
)
@@ -102,6 +107,16 @@ def create_parser(prog: str | None = None) -> argparse.ArgumentParser:
parser_history = subparsers.add_parser("history", help="manage history")
history.register_parser(parser_history)
parser_flash = subparsers.add_parser(
"flash", help="flash machines to usb sticks or into isos"
)
flash.register_parser(parser_flash)
parser_clana = subparsers.add_parser(
"clana", help="Describe a VM with natural language and launch it"
)
clana.register_parser(parser_clana)
if argcomplete:
argcomplete.autocomplete(parser)
@@ -117,10 +132,10 @@ def main() -> None:
parser.print_help()
if args.debug:
setup_logging(logging.DEBUG)
setup_logging(logging.DEBUG, root_log_name=__name__.split(".")[0])
log.debug("Debug log activated")
else:
setup_logging(logging.INFO)
setup_logging(logging.INFO, root_log_name=__name__.split(".")[0])
if not hasattr(args, "func"):
return

View File

@@ -13,7 +13,7 @@ def create_backup(machine: Machine, provider: str | None = None) -> None:
backup_scripts = json.loads(machine.eval_nix("config.clanCore.backups"))
if provider is None:
for provider in backup_scripts["providers"]:
proc = machine.host.run(
proc = machine.target_host.run(
["bash", "-c", backup_scripts["providers"][provider]["create"]],
)
if proc.returncode != 0:
@@ -23,7 +23,7 @@ def create_backup(machine: Machine, provider: str | None = None) -> None:
else:
if provider not in backup_scripts["providers"]:
raise ClanError(f"provider {provider} not found")
proc = machine.host.run(
proc = machine.target_host.run(
["bash", "-c", backup_scripts["providers"][provider]["create"]],
)
if proc.returncode != 0:

View File

@@ -19,7 +19,7 @@ class Backup:
def list_provider(machine: Machine, provider: str) -> list[Backup]:
results = []
backup_metadata = json.loads(machine.eval_nix("config.clanCore.backups"))
proc = machine.host.run(
proc = machine.target_host.run(
["bash", "-c", backup_metadata["providers"][provider]["list"]],
stdout=subprocess.PIPE,
check=False,
@@ -58,7 +58,8 @@ def list_backups(machine: Machine, provider: str | None = None) -> list[Backup]:
def list_command(args: argparse.Namespace) -> None:
machine = Machine(name=args.machine, flake=args.flake)
backups = list_backups(machine=machine, provider=args.provider)
print(backups)
for backup in backups:
print(backup.archive_id)
def register_list_parser(parser: argparse.ArgumentParser) -> None:

View File

@@ -20,7 +20,7 @@ def restore_service(
env["JOB"] = backup.job_name
env["FOLDERS"] = ":".join(folders)
proc = machine.host.run(
proc = machine.target_host.run(
[
"bash",
"-c",
@@ -34,7 +34,7 @@ def restore_service(
f"failed to run preRestoreScript: {backup_folders[service]['preRestoreScript']}, error was: {proc.stdout}"
)
proc = machine.host.run(
proc = machine.target_host.run(
[
"bash",
"-c",
@@ -48,7 +48,7 @@ def restore_service(
f"failed to restore backup: {backup_metadata['providers'][provider]['restore']}"
)
proc = machine.host.run(
proc = machine.target_host.run(
[
"bash",
"-c",

View File

@@ -1,33 +0,0 @@
import json
from pathlib import Path
from clan_cli.nix import nix_eval
from .cmd import run
def get_clan_module_names(
    flake_dir: Path,
) -> list[str]:
    """
    Get the list of clan modules from the clan-core flake input
    """
    # --impure lets builtins.getFlake read an arbitrary on-disk path;
    # --show-trace aids debugging of evaluation failures.
    # NOTE(review): flake_dir is interpolated unquoted into the Nix
    # expression (becomes a path literal) — it must be an absolute path
    # without characters that are special to Nix; verify at call sites.
    proc = run(
        nix_eval(
            [
                "--impure",
                "--show-trace",
                "--expr",
                f"""
                let
                  flake = builtins.getFlake (toString {flake_dir});
                in
                  builtins.attrNames flake.inputs.clan-core.clanModules
                """,
            ],
        ),
        cwd=flake_dir,
    )
    # `nix eval` emits the attribute names as a JSON list on stdout.
    module_names = json.loads(proc.stdout)
    return module_names

View File

@@ -0,0 +1,51 @@
import json
import os
import urllib.request
from typing import Any
# Your OpenAI API key, read from the environment.
# BUG FIX: the original used os.environ["OPENAI_API_KEY"], which raised
# KeyError at *import time* whenever the variable was unset — breaking every
# CLI invocation, including ones that never call the API.  With .get(), a
# missing key now fails at request time (HTTP 401) instead.
api_key: str = os.environ.get("OPENAI_API_KEY", "")

# The URL to which the request is sent
url: str = "https://api.openai.com/v1/chat/completions"

# The header includes the content type and the authorization with your API key
headers: dict[str, str] = {
    "Content-Type": "application/json",
    "Authorization": f"Bearer {api_key}",
}
def complete(
    messages: list[dict[str, Any]],
    model: str = "gpt-3.5-turbo",
    temperature: float = 1.0,
) -> str:
    """Send a chat-completion request to OpenAI and return the reply text.

    *messages* uses the OpenAI chat format: a list of dicts with "role" and
    "content" keys.  Returns the content of the first choice.
    """
    payload = json.dumps(
        {
            "model": model,
            "messages": messages,
            "temperature": temperature,
        }
    ).encode("utf-8")
    request = urllib.request.Request(url, payload, headers)
    # Blocking HTTP call; urllib raises HTTPError on non-2xx responses.
    with urllib.request.urlopen(request) as response:
        parsed = json.loads(response.read())
    return parsed["choices"][0]["message"]["content"]
def complete_prompt(
    prompt: str,
    system: str = "",
    model: str = "gpt-3.5-turbo",
    temperature: float = 1.0,
) -> str:
    """Convenience wrapper around complete() for one system + one user turn."""
    chat = [
        {"role": "system", "content": system},
        {"role": "user", "content": prompt},
    ]
    return complete(chat, model, temperature)

View File

@@ -116,10 +116,8 @@ class ClanURI:
def get_full_uri(self) -> str:
return self._full_uri
# TODO(@Qubasa): return a comparable id e.g. f"{url}#{attr}"
# This should be our standard.
def get_id(self) -> str:
return f"{self._components.path}#{self._components.fragment}"
return f"{self.get_internal()}#{self.params.flake_attr}"
@classmethod
def from_path(

View File

@@ -0,0 +1,114 @@
# !/usr/bin/env python3
# A subcommand that interfaces with openai to generate nixos configurations and launches VMs with them.
# The `clan clana` command allows the user to enter a prompt with the wishes for the VM and then generates a nixos configuration and launches a VM with it.
# for now this POC should be stateless. A configuration.nix should be generated ina temporary directory and directly launched.
# there should be no additional arguments.
# THe prompt is read from stdin
import argparse
import os
from pathlib import Path
from clan_cli import clan_openai
from clan_cli.errors import ClanCmdError
from clan_cli.vms.run import run_command
# The base NixOS config shipped next to this module; it is inlined into the
# system prompt so the model knows which options are already configured.
base_config = Path(__file__).parent.joinpath("base-config.nix").read_text()

# System prompt steering the model to emit only the raw contents of a
# configuration.nix file (no prose, no markdown fences).
system_msg = f"""
Your name is clana, an assistant for creating NixOS configurations.
Your task is to generate a NixOS configuration.nix file.
Do not output any explanations or comments, not even when the user asks a question or provides feedback.
Always provide only the content of the configuration.nix file.
Don't use any nixos options for which you are not sure about their syntax.
Generate a configuration.nix which has a very high probability of just working.
The user who provides the prompt might have technical expertise, or none at all.
Even a grandmother who has no idea about computers should be able to use this.
Translate the users requirements to a working configuration.nix file.
Don't set any options under `nix.`.
The user should not have a password and log in automatically.
Take care specifically about:
- specify every option only once within the same file. Otherwise it will lead to an error like this: error: attribute 'environment.systemPackages' already defined at [...]/configuration.nix:X:X
- don't set a password for the user. it's already set in the base config
Assume the following base config is already imported. Any option set in there is already configured and doesn't need to be specified anymore:
```nix
{base_config}
```
The base config will be imported by the system. No need to import it anymore.
"""
# takes a (sub)parser and configures it
def register_parser(parser: argparse.ArgumentParser) -> None:
    """Attach clana's flags and entry point to the given (sub)parser."""
    parser.set_defaults(func=clana_command)
    parser.add_argument("--show", action="store_true", help="show the configuration")
def clana_command(args: argparse.Namespace) -> None:
    """Ask the user to describe a machine, have the model generate a
    configuration.nix, and boot a VM from it.

    On build failure the stderr is fed back to the model and a new config is
    generated, looping until the VM launches successfully.
    BUG FIX: removed the leftover debug line that read
    ".direnv/configuration.nix" — it raised FileNotFoundError for anyone
    without that local dev artifact, and its result was never used.
    """
    print("Please enter your wishes for the new computer: ")
    prompt = input()
    print("Thank you. Generating your computer...")
    messages = [
        {"role": "system", "content": system_msg},
        {"role": "user", "content": prompt},
    ]
    # Stateless POC: rebuild the flake skeleton in a fixed scratch directory.
    conf_dir = Path("/tmp/clana")
    conf_dir.mkdir(exist_ok=True)
    for f in conf_dir.iterdir():
        f.unlink()
    (conf_dir / "flake.nix").write_bytes(
        Path(__file__).parent.joinpath("flake.nix.template").read_bytes()
    )
    with open(conf_dir / "base-config.nix", "w") as f:
        f.write(base_config)
    # Empty hardware config; the VM runner supplies the actual hardware.
    with open(conf_dir / "hardware-configuration.nix", "w") as f:
        f.write("{}")
    # Entry point that just stitches the base config and the AI output together.
    with open(conf_dir / "configuration.nix", "w") as f:
        f.write(
            """
{
  imports = [
    ./base-config.nix
    ./ai-config.nix
  ];
}
"""
        )
    while True:
        config_orig = clan_openai.complete(
            messages, model="gpt-4-turbo-preview"
        ).strip()
        # Strip a surrounding markdown code fence if the model added one anyway.
        lines = config_orig.split("\n")
        if lines[0].startswith("```"):
            lines = lines[1:-1]
        config = "\n".join(lines)
        if args.show:
            print("Configuration generated:")
            print(config)
        print("Configuration generated. Launching...")
        with open(conf_dir / "ai-config.nix", "w") as f:
            f.write(config)
        # Generated configs may reference unfree packages (see base-config).
        os.environ["NIXPKGS_ALLOW_UNFREE"] = "1"
        try:
            run_command(
                machine="clana-machine", flake=conf_dir, nix_options=["--impure"]
            )
            break
        except ClanCmdError as e:
            # Feed the build failure back to the model and retry.
            messages += [
                {"role": "assistant", "content": config_orig},
                {
                    "role": "system",
                    "content": f"There was a problem that needs to be fixed:\n{e.cmd.stderr}",
                },
            ]
View File

@@ -0,0 +1,60 @@
# Base NixOS configuration for clana-generated machines: KDE desktop with
# auto-login, so every generated VM boots straight to a usable session.
{ config, ... }:
{
  imports =
    [
      # Include the results of the hardware scan.
      ./hardware-configuration.nix
    ];
  # Ensure that software properties (e.g., being unfree) are respected.
  nixpkgs.config = {
    allowUnfree = true;
  };
  # Use the systemd-boot EFI boot loader.
  boot.loader.systemd-boot.enable = true;
  boot.loader.efi.canTouchEfiVariables = true;
  networking.hostName = "clana"; # Define your hostname.
  networking.networkmanager.enable = true;
  # Enable the X11 windowing system.
  services.xserver.enable = true;
  services.xserver.layout = "us";
  services.xserver.xkbOptions = "eurosign:e";
  # Enable touchpad support.
  services.xserver.libinput.enable = true;
  # Enable the KDE Desktop Environment.
  services.xserver.displayManager.sddm.enable = true;
  services.xserver.desktopManager.plasma5.enable = true;
  # Enable sound.
  sound.enable = true;
  hardware.pulseaudio.enable = true;
  # Autologin settings.
  services.xserver.displayManager.autoLogin.enable = true;
  services.xserver.displayManager.autoLogin.user = "user";
  # User settings.  NOTE: the AI prompt relies on this password being set
  # here, so generated configs must not define one.
  users.users.user = {
    isNormalUser = true;
    extraGroups = [ "wheel" ]; # Enable sudo for the user.
    uid = 1000;
    password = "hello";
    openssh.authorizedKeys.keys = [ ];
  };
  # Enable firewall.
  networking.firewall.enable = true;
  networking.firewall.allowedTCPPorts = [ 80 443 ]; # HTTP and HTTPS
  # Set time zone.
  time.timeZone = "UTC";
  # System-wide settings.
  system.stateVersion = "22.05"; # Edit this to your NixOS release version.
}

View File

@@ -0,0 +1,30 @@
# Flake template written into the scratch directory by `clan clana`;
# it wraps the generated configuration.nix in a single-machine clan.
{
  description = "<Put your description here>";
  inputs.clan-core.url = "git+https://git.clan.lol/clan/clan-core";
  outputs = { self, clan-core, ... }:
    let
      system = "x86_64-linux";
      pkgs = clan-core.inputs.nixpkgs.legacyPackages.${system};
      clan = clan-core.lib.buildClan {
        directory = self;
        clanName = "clana-clan";
        # The one machine this flake defines; its name must match the
        # machine targeted by the clana CLI ("clana-machine").
        machines.clana-machine = {
          imports = [
            ./configuration.nix
          ];
        };
      };
    in
    {
      # all machines managed by cLAN
      inherit (clan) nixosConfigurations clanInternals;
      # add the cLAN cli tool to the dev shell
      devShells.${system}.default = pkgs.mkShell {
        packages = [
          clan-core.packages.${system}.clan-cli
        ];
      };
    };
}

View File

@@ -17,12 +17,18 @@ def get_formatter(color: str) -> Callable[[logging.LogRecord, bool], logging.For
record: logging.LogRecord, with_location: bool
) -> logging.Formatter:
reset = "\x1b[0m"
filepath = Path(record.pathname).resolve()
try:
filepath = Path(record.pathname).resolve()
filepath = Path("~", filepath.relative_to(Path.home()))
except Exception:
filepath = Path(record.pathname)
if not with_location:
return logging.Formatter(f"{color}%(levelname)s{reset}: %(message)s")
return logging.Formatter(
f"{color}%(levelname)s{reset}: %(message)s\n {filepath}:%(lineno)d::%(funcName)s\n"
f"{color}%(levelname)s{reset}: %(message)s\nLocation: {filepath}:%(lineno)d::%(funcName)s\n"
)
return myformatter
@@ -62,13 +68,20 @@ def get_caller() -> str:
if caller_frame is None:
return "unknown"
frame_info = inspect.getframeinfo(caller_frame)
ret = f"{frame_info.filename}:{frame_info.lineno}::{frame_info.function}"
try:
filepath = Path(frame_info.filename).resolve()
filepath = Path("~", filepath.relative_to(Path.home()))
except Exception:
filepath = Path(frame_info.filename)
ret = f"{filepath}:{frame_info.lineno}::{frame_info.function}"
return ret
def setup_logging(level: Any) -> None:
def setup_logging(level: Any, root_log_name: str = __name__.split(".")[0]) -> None:
# Get the root logger and set its level
main_logger = logging.getLogger("clan_cli")
main_logger = logging.getLogger(root_log_name)
main_logger.setLevel(level)
# Create and add the default handler
@@ -76,7 +89,7 @@ def setup_logging(level: Any) -> None:
# Create and add your custom handler
default_handler.setLevel(level)
default_handler.setFormatter(CustomFormatter(level == logging.DEBUG))
default_handler.setFormatter(CustomFormatter(str(level) == str(logging.DEBUG)))
main_logger.addHandler(default_handler)
# Set logging level for other modules used by this module

View File

@@ -15,9 +15,12 @@ def find_git_repo_root() -> Path | None:
return find_toplevel([".git"])
def clan_key_safe(clan_name: str, flake_url: str) -> str:
def clan_key_safe(flake_url: str) -> str:
"""
only embed the url in the path, not the clan name, as it would involve eval.
"""
quoted_url = urllib.parse.quote_plus(flake_url)
return f"{clan_name}-{quoted_url}"
return f"{quoted_url}"
def find_toplevel(top_level_files: list[str]) -> Path | None:
@@ -41,16 +44,38 @@ def user_config_dir() -> Path:
return Path(os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config")))
def user_data_dir() -> Path:
    """Return the per-user data directory for the current platform.

    Honors LOCALAPPDATA on Windows and XDG_DATA_HOME elsewhere (except
    macOS, which uses ~/Library/Application Support).
    """
    if sys.platform == "win32":
        fallback = os.path.expanduser("~\\AppData\\Local\\")
        return Path(os.getenv("LOCALAPPDATA", fallback))
    if sys.platform == "darwin":
        return Path(os.path.expanduser("~/Library/Application Support/"))
    return Path(os.getenv("XDG_DATA_HOME", os.path.expanduser("~/.local/share")))
def user_cache_dir() -> Path:
    """Return the per-user cache directory for the current platform.

    Honors LOCALAPPDATA on Windows and XDG_CACHE_HOME elsewhere (except
    macOS, which uses ~/Library/Caches).
    """
    if sys.platform == "win32":
        fallback = os.path.expanduser("~\\AppData\\Local\\")
        return Path(os.getenv("LOCALAPPDATA", fallback))
    if sys.platform == "darwin":
        return Path(os.path.expanduser("~/Library/Caches/"))
    return Path(os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache")))
def user_gcroot_dir() -> Path:
p = user_config_dir() / "clan" / "gcroots"
p.mkdir(parents=True, exist_ok=True)
return p
def machine_gcroot(*, clan_name: str, flake_url: str) -> Path:
def machine_gcroot(flake_url: str) -> Path:
# Always build icon so that we can symlink it to the gcroot
gcroot_dir = user_gcroot_dir()
clan_gcroot = gcroot_dir / clan_key_safe(clan_name, flake_url)
clan_gcroot = gcroot_dir / clan_key_safe(flake_url)
clan_gcroot.mkdir(parents=True, exist_ok=True)
return clan_gcroot
@@ -59,9 +84,9 @@ def user_history_file() -> Path:
return user_config_dir() / "clan" / "history"
def vm_state_dir(clan_name: str, flake_url: str, vm_name: str) -> Path:
clan_key = clan_key_safe(clan_name, flake_url)
return user_config_dir() / "clan" / "vmstate" / clan_key / vm_name
def vm_state_dir(flake_url: str, vm_name: str) -> Path:
clan_key = clan_key_safe(flake_url)
return user_data_dir() / "clan" / "vmstate" / clan_key / vm_name
def machines_dir(flake_dir: Path) -> Path:

View File

@@ -0,0 +1,21 @@
# !/usr/bin/env python3
import argparse
from .check import register_check_parser
from .list import register_list_parser
# takes a (sub)parser and configures it
def register_parser(parser: argparse.ArgumentParser) -> None:
    """Wire the `facts` subcommands (check, list) onto the given parser."""
    subparser = parser.add_subparsers(
        title="command",
        description="the command to run",
        help="the command to run",
        required=True,
    )
    register_check_parser(
        subparser.add_parser("check", help="check if facts are up to date")
    )
    register_list_parser(subparser.add_parser("list", help="list all facts"))

View File

@@ -0,0 +1,38 @@
import argparse
import importlib
import logging
from ..machines.machines import Machine
log = logging.getLogger(__name__)
def check_facts(machine: Machine) -> bool:
    """Return True when every fact declared by the machine's services is
    present in its configured fact store; log each missing fact."""
    facts_module = importlib.import_module(machine.facts_module)
    fact_store = facts_module.FactStore(machine=machine)
    existing_facts = fact_store.get_all()

    all_present = True
    for service, service_data in machine.secrets_data.items():
        for fact in service_data["facts"]:
            if fact not in existing_facts.get(service, {}):
                log.info(f"Fact {fact} for service {service} is missing")
                all_present = False
    return all_present
def check_command(args: argparse.Namespace) -> None:
    """CLI entry point: report whether all facts for a machine are present.

    BUG FIX: the original printed nothing and exited 0 when facts were
    missing, so scripts could not detect the failure.
    """
    machine = Machine(name=args.machine, flake=args.flake)
    if check_facts(machine):
        print("All facts are present")
    else:
        # The individual missing facts were already logged by check_facts();
        # surface the failure on stdout and via the exit status.
        print("Some facts are missing")
        raise SystemExit(1)
def register_check_parser(parser: argparse.ArgumentParser) -> None:
    """Configure the `facts check` subparser."""
    parser.set_defaults(func=check_command)
    parser.add_argument(
        "machine",
        help="The machine to check facts for",
    )
View File

@@ -0,0 +1,36 @@
import argparse
import importlib
import json
import logging
from ..machines.machines import Machine
log = logging.getLogger(__name__)
def get_all_facts(machine: Machine) -> dict:
    """Return every stored fact for *machine*, keyed by service then fact name.

    The machine's configured facts backend (machine.facts_module) decides
    where facts live; this simply delegates to that backend's FactStore.
    Values are raw bytes (see FactStoreBase.get_all).

    Cleanup: removed the stale commented-out per-fact fetching loop; the
    backend's get_all() already returns the complete mapping.
    """
    facts_module = importlib.import_module(machine.facts_module)
    fact_store = facts_module.FactStore(machine=machine)
    return fact_store.get_all()
def get_command(args: argparse.Namespace) -> None:
    """CLI entry point: print all facts of a machine as pretty-printed JSON.

    BUG FIX: fact values are bytes (see FactStoreBase.get_all), and
    json.dumps raises TypeError on bytes — decode values for display first.
    """
    machine = Machine(name=args.machine, flake=args.flake)
    facts = get_all_facts(machine)
    printable = {
        service: {
            # Tolerate stores that already return str values.
            name: (value.decode(errors="replace") if isinstance(value, bytes) else value)
            for name, value in entries.items()
        }
        for service, entries in facts.items()
    }
    print(json.dumps(printable, indent=4))
def register_list_parser(parser: argparse.ArgumentParser) -> None:
    """Wire up arguments and the handler for the `facts list` subcommand."""
    parser.add_argument("machine", help="The machine to print facts for")
    parser.set_defaults(func=get_command)

View File

@@ -0,0 +1,28 @@
from abc import ABC, abstractmethod
from pathlib import Path
from clan_cli.machines.machines import Machine
class FactStoreBase(ABC):
    """Abstract interface that every fact-storage backend must implement."""

    @abstractmethod
    def __init__(self, machine: Machine) -> None:
        """Bind the store to a specific machine."""

    @abstractmethod
    def exists(self, service: str, name: str) -> bool:
        """Return True when fact *name* of *service* is already stored."""

    @abstractmethod
    def set(self, service: str, name: str, value: bytes) -> Path | None:
        """Store *value*; return the path it was written to, if any."""

    @abstractmethod
    def get(self, service: str, name: str) -> bytes:
        """Return a single fact."""

    @abstractmethod
    def get_all(self) -> dict[str, dict[str, bytes]]:
        """Return all facts, keyed by service and then by fact name."""

View File

@@ -0,0 +1,49 @@
from pathlib import Path
from clan_cli.errors import ClanError
from clan_cli.machines.machines import Machine
from . import FactStoreBase
class FactStore(FactStoreBase):
    """Fact storage that keeps facts as plain files inside the flake itself.

    Facts are written below ``<flake>/machines/<machine>/facts`` and are
    therefore version-controlled together with the configuration.
    """

    def __init__(self, machine: Machine) -> None:
        self.machine = machine
        # facts live in the local flake checkout, so remote operation is unsupported
        self.works_remotely = False

    def set(self, service: str, name: str, value: bytes) -> Path | None:
        """Write *value* to the fact file and return its path.

        :raises ClanError: if the flake is not a local path.
        """
        # guard clause: only local flakes have a writable directory
        if not isinstance(self.machine.flake, Path):
            raise ClanError(
                f"in_flake fact storage is only supported for local flakes: {self.machine.flake}"
            )
        fact_path = (
            self.machine.flake / "machines" / self.machine.name / "facts" / name
        )
        fact_path.parent.mkdir(parents=True, exist_ok=True)
        # write_bytes() creates the file itself, so the former touch() was redundant
        fact_path.write_bytes(value)
        return fact_path

    def exists(self, service: str, name: str) -> bool:
        """Return True when the fact file already exists."""
        fact_path = (
            self.machine.flake_dir / "machines" / self.machine.name / "facts" / name
        )
        return fact_path.exists()

    # get a single fact
    def get(self, service: str, name: str) -> bytes:
        """Return the raw contents of a single fact file."""
        fact_path = (
            self.machine.flake_dir / "machines" / self.machine.name / "facts" / name
        )
        return fact_path.read_bytes()

    # get all facts
    def get_all(self) -> dict[str, dict[str, bytes]]:
        """Return all facts of the machine.

        NOTE(review): facts are not grouped by service on disk yet, so
        everything is returned under the placeholder key "TODO".
        """
        facts_folder = self.machine.flake_dir / "machines" / self.machine.name / "facts"
        facts: dict[str, dict[str, bytes]] = {"TODO": {}}
        if facts_folder.exists():
            for fact_path in facts_folder.iterdir():
                facts["TODO"][fact_path.name] = fact_path.read_bytes()
        return facts

View File

@@ -0,0 +1,46 @@
import logging
from pathlib import Path
from clan_cli.dirs import vm_state_dir
from clan_cli.errors import ClanError
from clan_cli.machines.machines import Machine
from . import FactStoreBase
log = logging.getLogger(__name__)
class FactStore(FactStoreBase):
    """Fact storage backed by the per-VM state directory.

    On-disk layout: ``<state_dir>/facts/<service>/<fact>``.
    """

    def __init__(self, machine: Machine) -> None:
        self.machine = machine
        self.works_remotely = False
        self.dir = vm_state_dir(str(machine.flake), machine.name) / "facts"
        log.debug(f"FactStore initialized with dir {self.dir}")

    def _path(self, service: str, name: str) -> Path:
        """Location of a single fact file."""
        return self.dir / service / name

    def exists(self, service: str, name: str) -> bool:
        """Return True when the fact has been stored before."""
        return self._path(service, name).exists()

    def set(self, service: str, name: str, value: bytes) -> Path | None:
        """Persist *value*; parent directories are created on demand."""
        target = self._path(service, name)
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_bytes(value)
        return None

    # get a single fact
    def get(self, service: str, name: str) -> bytes:
        """Return a fact, raising ClanError when it is unknown."""
        target = self._path(service, name)
        if not target.exists():
            raise ClanError(f"Fact {name} for service {service} not found")
        return target.read_bytes()

    # get all facts
    def get_all(self) -> dict[str, dict[str, bytes]]:
        """Return every fact, grouped by service directory."""
        if not self.dir.exists():
            return {}
        return {
            service.name: {
                fact.name: fact.read_bytes() for fact in service.iterdir()
            }
            for service in self.dir.iterdir()
        }

View File

@@ -7,7 +7,7 @@ from ..dirs import machine_gcroot
from ..errors import ClanError
from ..machines.list import list_machines
from ..machines.machines import Machine
from ..nix import nix_build, nix_config, nix_eval, nix_metadata
from ..nix import nix_add_to_gcroots, nix_build, nix_config, nix_eval, nix_metadata
from ..vms.inspect import VmConfig, inspect_vm
@@ -24,6 +24,10 @@ class FlakeConfig:
revision: str | None
vm: VmConfig
def __post_init__(self) -> None:
if isinstance(self.vm, dict):
self.vm = VmConfig(**self.vm)
def run_cmd(cmd: list[str]) -> str:
proc = run(cmd)
@@ -44,6 +48,11 @@ def inspect_flake(flake_url: str | Path, machine_name: str) -> FlakeConfig:
machine = Machine(machine_name, flake_url)
vm = inspect_vm(machine)
# Make symlink to gcroots from vm.machine_icon
if vm.machine_icon:
gcroot_icon: Path = machine_gcroot(flake_url=str(flake_url)) / vm.machine_name
nix_add_to_gcroots(vm.machine_icon, gcroot_icon)
# Get the cLAN name
cmd = nix_eval(
[
@@ -71,7 +80,7 @@ def inspect_flake(flake_url: str | Path, machine_name: str) -> FlakeConfig:
[
f'{flake_url}#clanInternals.machines."{system}"."{machine_name}".config.clanCore.clanIcon'
],
machine_gcroot(clan_name=clan_name, flake_url=str(flake_url)) / "clanIcon",
machine_gcroot(flake_url=str(flake_url)) / "clanIcon",
)
run_cmd(cmd)

View File

@@ -0,0 +1,62 @@
import argparse
import importlib
import logging
from dataclasses import dataclass
from pathlib import Path
from tempfile import TemporaryDirectory
from .machines.machines import Machine
from .secrets.generate import generate_secrets
log = logging.getLogger(__name__)
def flash_machine(machine: Machine, device: str | None = None) -> None:
    """Build the flashable image for *machine* and print its store path.

    Secrets are generated first and staged into the image's upload directory.

    :param machine: the machine to build the image for
    :param device: target block device. Writing to a device is not implemented
        yet; previously the argument was silently ignored, now a warning is
        emitted so the user is not misled.
    """
    secrets_module = importlib.import_module(machine.secrets_module)
    secret_store = secrets_module.SecretStore(machine=machine)
    generate_secrets(machine)

    if device is not None:
        # FIXME: actually write the image to the device once supported
        log.warning(f"Flashing to device {device} is not implemented yet")

    with TemporaryDirectory() as tmpdir_:
        tmpdir = Path(tmpdir_)
        # secrets_upload_directory is an absolute path on the target system;
        # strip the leading "/" so it nests inside the temporary staging dir
        upload_dir_ = machine.secrets_upload_directory
        if upload_dir_.startswith("/"):
            upload_dir_ = upload_dir_[1:]
        upload_dir = tmpdir / upload_dir_
        upload_dir.mkdir(parents=True)
        secret_store.upload(upload_dir)

        fs_image = machine.build_nix("config.system.clan.iso")
        print(fs_image)
@dataclass
class FlashOptions:
    """Bundle of the CLI options consumed by the flash command."""

    flake: Path  # path to the clan flake containing the machine
    machine: str  # name of the machine to flash
    device: str | None  # target block device, if one was given
def flash_command(args: argparse.Namespace) -> None:
    """CLI entry point: build and (eventually) flash a machine image."""
    opts = FlashOptions(
        flake=args.flake,
        machine=args.machine,
        device=args.device,
    )
    target = Machine(opts.machine, flake=opts.flake)
    flash_machine(target, device=opts.device)
def register_parser(parser: argparse.ArgumentParser) -> None:
    """Attach the flash command's arguments and handler to *parser*."""
    parser.add_argument("machine", type=str, help="machine to install")
    parser.add_argument("--device", type=str, help="device to flash the system to")
    parser.set_defaults(func=flash_command)

View File

@@ -7,29 +7,47 @@ from clan_cli.nix import nix_shell
from .cmd import Log, run
# generic vcs agnostic commit function
def commit_file(
file_path: Path,
repo_dir: Path,
commit_message: str | None = None,
) -> None:
# check that the file is in the git repository and exists
if not Path(file_path).resolve().is_relative_to(repo_dir.resolve()):
raise ClanError(f"File {file_path} is not in the git repository {repo_dir}")
if not file_path.exists():
raise ClanError(f"File {file_path} does not exist")
"""Commit a file to a git repository.
:param file_path: The path to the file to commit.
:param repo_dir: The path to the git repository.
:param commit_message: The commit message.
:raises ClanError: If the file is not in the git repository.
"""
commit_files([file_path], repo_dir, commit_message)
# generic vcs agnostic commit function
def commit_files(
file_paths: list[Path],
repo_dir: Path,
commit_message: str | None = None,
) -> None:
# check that the file is in the git repository
for file_path in file_paths:
if not Path(file_path).resolve().is_relative_to(repo_dir.resolve()):
raise ClanError(f"File {file_path} is not in the git repository {repo_dir}")
# generate commit message if not provided
if commit_message is None:
# ensure that mentioned file path is relative to repo
commit_message = f"Add {file_path.relative_to(repo_dir)}"
commit_message = ""
for file_path in file_paths:
# ensure that mentioned file path is relative to repo
commit_message += f"Add {file_path.relative_to(repo_dir)}"
# check if the repo is a git repo and commit
if (repo_dir / ".git").exists():
_commit_file_to_git(repo_dir, file_path, commit_message)
_commit_file_to_git(repo_dir, file_paths, commit_message)
else:
return
def _commit_file_to_git(repo_dir: Path, file_path: Path, commit_message: str) -> None:
def _commit_file_to_git(
repo_dir: Path, file_paths: list[Path], commit_message: str
) -> None:
"""Commit a file to a git repository.
:param repo_dir: The path to the git repository.
@@ -37,18 +55,20 @@ def _commit_file_to_git(repo_dir: Path, file_path: Path, commit_message: str) ->
:param commit_message: The commit message.
:raises ClanError: If the file is not in the git repository.
"""
cmd = nix_shell(
["nixpkgs#git"],
["git", "-C", str(repo_dir), "add", str(file_path)],
)
# add the file to the git index
for file_path in file_paths:
cmd = nix_shell(
["nixpkgs#git"],
["git", "-C", str(repo_dir), "add", str(file_path)],
)
# add the file to the git index
run(cmd, log=Log.BOTH, error_msg=f"Failed to add {file_path} file to git index")
run(cmd, log=Log.BOTH, error_msg=f"Failed to add {file_path} file to git index")
# check if there is a diff
cmd = nix_shell(
["nixpkgs#git"],
["git", "-C", str(repo_dir), "diff", "--cached", "--exit-code", str(file_path)],
["git", "-C", str(repo_dir), "diff", "--cached", "--exit-code"]
+ [str(file_path) for file_path in file_paths],
)
result = run(cmd, check=False, cwd=repo_dir)
# if there is no diff, return
@@ -65,8 +85,8 @@ def _commit_file_to_git(repo_dir: Path, file_path: Path, commit_message: str) ->
"commit",
"-m",
commit_message,
str(file_path.relative_to(repo_dir)),
],
]
+ [str(file_path) for file_path in file_paths],
)
run(cmd, error_msg=f"Failed to commit {file_path} to git repository {repo_dir}")
run(cmd, error_msg=f"Failed to commit {file_paths} to git repository {repo_dir}")

View File

@@ -7,6 +7,7 @@ import logging
from typing import Any
from clan_cli.flakes.inspect import FlakeConfig, inspect_flake
from clan_cli.machines.list import list_machines
from ..clan_uri import ClanURI
from ..dirs import user_history_file
@@ -34,14 +35,14 @@ class HistoryEntry:
self.flake = FlakeConfig(**self.flake)
def merge_dicts(d1: dict, d2: dict) -> dict:
def _merge_dicts(d1: dict, d2: dict) -> dict:
# create a new dictionary that copies d1
merged = dict(d1)
# iterate over the keys and values of d2
for key, value in d2.items():
# if the key is in d1 and both values are dictionaries, merge them recursively
if key in d1 and isinstance(d1[key], dict) and isinstance(value, dict):
merged[key] = merge_dicts(d1[key], value)
merged[key] = _merge_dicts(d1[key], value)
# otherwise, update the value of the key in the merged dictionary
else:
merged[key] = value
@@ -58,7 +59,7 @@ def list_history() -> list[HistoryEntry]:
parsed = read_history_file()
for i, p in enumerate(parsed.copy()):
# Everything from the settings dict is merged into the flake dict, and can override existing values
parsed[i] = merge_dicts(p, p.get("settings", {}))
parsed[i] = _merge_dicts(p, p.get("settings", {}))
logs = [HistoryEntry(**p) for p in parsed]
except (json.JSONDecodeError, TypeError) as ex:
raise ClanError(f"History file at {user_history_file()} is corrupted") from ex
@@ -66,8 +67,8 @@ def list_history() -> list[HistoryEntry]:
return logs
def new_history_entry(uri: ClanURI) -> HistoryEntry:
flake = inspect_flake(uri.get_internal(), uri.params.flake_attr)
def new_history_entry(url: str, machine: str) -> HistoryEntry:
flake = inspect_flake(url, machine)
flake.flake_url = str(flake.flake_url)
return HistoryEntry(
flake=flake,
@@ -75,32 +76,47 @@ def new_history_entry(uri: ClanURI) -> HistoryEntry:
)
def add_history(uri: ClanURI) -> list[HistoryEntry]:
def add_all_to_history(uri: ClanURI) -> list[HistoryEntry]:
history = list_history()
new_entries: list[HistoryEntry] = []
for machine in list_machines(uri.get_internal()):
new_entry = _add_maschine_to_history_list(uri.get_internal(), machine, history)
new_entries.append(new_entry)
write_history_file(history)
return new_entries
def add_history(uri: ClanURI) -> HistoryEntry:
user_history_file().parent.mkdir(parents=True, exist_ok=True)
logs = list_history()
found = False
uri_path = uri.get_internal()
uri_machine = uri.params.flake_attr
history = list_history()
new_entry = _add_maschine_to_history_list(
uri.get_internal(), uri.params.flake_attr, history
)
write_history_file(history)
return new_entry
for entry in logs:
def _add_maschine_to_history_list(
uri_path: str, uri_machine: str, entries: list[HistoryEntry]
) -> HistoryEntry:
for new_entry in entries:
if (
entry.flake.flake_url == str(uri_path)
and entry.flake.flake_attr == uri_machine
new_entry.flake.flake_url == str(uri_path)
and new_entry.flake.flake_attr == uri_machine
):
found = True
entry.last_used = datetime.datetime.now().isoformat()
new_entry.last_used = datetime.datetime.now().isoformat()
return new_entry
if not found:
history = new_history_entry(uri)
logs.append(history)
write_history_file(logs)
return logs
new_entry = new_history_entry(uri_path, uri_machine)
entries.append(new_entry)
return new_entry
def add_history_command(args: argparse.Namespace) -> None:
add_history(args.uri)
if args.all:
add_all_to_history(args.uri)
else:
add_history(args.uri)
# takes a (sub)parser and configures it
@@ -108,4 +124,7 @@ def register_add_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"uri", type=ClanURI.from_str, help="Path to the flake", default="."
)
parser.add_argument(
"--all", help="Add all machines", default=False, action="store_true"
)
parser.set_defaults(func=add_history_command)

View File

@@ -1,11 +1,14 @@
# !/usr/bin/env python3
import argparse
import datetime
from clan_cli.flakes.inspect import inspect_flake
from ..clan_uri import ClanParameters, ClanURI
from ..errors import ClanCmdError
from ..locked_open import write_history_file
from ..nix import nix_metadata
from .add import HistoryEntry, list_history, new_history_entry
from .add import HistoryEntry, list_history
def update_history() -> list[HistoryEntry]:
@@ -27,7 +30,11 @@ def update_history() -> list[HistoryEntry]:
url=str(entry.flake.flake_url),
params=ClanParameters(entry.flake.flake_attr),
)
entry = new_history_entry(uri)
flake = inspect_flake(uri.get_internal(), uri.params.flake_attr)
flake.flake_url = str(flake.flake_url)
entry = HistoryEntry(
flake=flake, last_used=datetime.datetime.now().isoformat()
)
write_history_file(logs)
return logs

View File

@@ -14,19 +14,15 @@ log = logging.getLogger(__name__)
def install_nixos(machine: Machine, kexec: str | None = None) -> None:
log.info(f"deployment address1: {machine.deployment_info['deploymentAddress']}")
secrets_module = importlib.import_module(machine.secrets_module)
log.info(f"installing {machine.name}")
log.info(f"using secret store: {secrets_module.SecretStore}")
secret_store = secrets_module.SecretStore(machine=machine)
h = machine.host
log.info(f"deployment address2: {machine.deployment_info['deploymentAddress']}")
h = machine.target_host
target_host = f"{h.user or 'root'}@{h.host}"
log.info(f"target host: {target_host}")
flake_attr = h.meta.get("flake_attr", "")
generate_secrets(machine)
with TemporaryDirectory() as tmpdir_:
@@ -42,7 +38,7 @@ def install_nixos(machine: Machine, kexec: str | None = None) -> None:
cmd = [
"nixos-anywhere",
"-f",
f"{machine.flake}#{flake_attr}",
f"{machine.flake}#{machine.name}",
"-t",
"--no-reboot",
"--extra-files",
@@ -77,10 +73,7 @@ def install_command(args: argparse.Namespace) -> None:
kexec=args.kexec,
)
machine = Machine(opts.machine, flake=opts.flake)
machine.get_deployment_info()
machine.deployment_info["deploymentAddress"] = opts.target_host
log.info(f"target host: {opts.target_host}")
log.info(f"deployment address: {machine.deployment_info['deploymentAddress']}")
machine.target_host_address = opts.target_host
install_nixos(machine, kexec=opts.kexec)

View File

@@ -1,14 +1,44 @@
import json
import logging
from collections.abc import Generator
from contextlib import contextmanager
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Any
from clan_cli.dirs import vm_state_dir
from qemu.qmp import QEMUMonitorProtocol
from ..cmd import run
from ..errors import ClanError
from ..nix import nix_build, nix_config, nix_eval, nix_metadata
from ..ssh import Host, parse_deployment_address
log = logging.getLogger(__name__)
class VMAttr:
def __init__(self, state_dir: Path) -> None:
# These sockets here are just symlinks to the real sockets which
# are created by the run.py file. The reason being that we run into
# file path length issues on Linux. If no qemu process is running
# the symlink will be dangling.
self._qmp_socket: Path = state_dir / "qmp.sock"
self._qga_socket: Path = state_dir / "qga.sock"
@contextmanager
def qmp_ctx(self) -> Generator[QEMUMonitorProtocol, None, None]:
rpath = self._qmp_socket.resolve()
if not rpath.exists():
raise ClanError(f"qmp socket {rpath} does not exist. Is the VM running?")
qmp = QEMUMonitorProtocol(str(rpath))
qmp.connect()
try:
yield qmp
finally:
qmp.close()
class Machine:
def __init__(
self,
@@ -28,46 +58,63 @@ class Machine:
self.eval_cache: dict[str, str] = {}
self.build_cache: dict[str, Path] = {}
if deployment_info is not None:
self.deployment_info = deployment_info
self._deployment_info: None | dict[str, str] = deployment_info
def get_deployment_info(self) -> None:
self.deployment_info = json.loads(
self.build_nix("config.system.clan.deployment.file").read_text()
)
print(f"self_deployment_info: {self.deployment_info}")
state_dir = vm_state_dir(flake_url=str(self.flake), vm_name=self.name)
self.vm: VMAttr = VMAttr(state_dir)
def __str__(self) -> str:
return f"Machine(name={self.name}, flake={self.flake})"
def __repr__(self) -> str:
return str(self)
@property
def deployment_address(self) -> str:
if not hasattr(self, "deployment_info"):
self.get_deployment_info()
return self.deployment_info["deploymentAddress"]
def deployment_info(self) -> dict[str, str]:
if self._deployment_info is not None:
return self._deployment_info
self._deployment_info = json.loads(
self.build_nix("config.system.clan.deployment.file").read_text()
)
return self._deployment_info
@property
def target_host_address(self) -> str:
# deploymentAddress is deprecated.
val = self.deployment_info.get("targetHost") or self.deployment_info.get(
"deploymentAddress"
)
if val is None:
msg = f"the 'clan.networking.targetHost' nixos option is not set for machine '{self.name}'"
raise ClanError(msg)
return val
@target_host_address.setter
def target_host_address(self, value: str) -> None:
self.deployment_info["targetHost"] = value
@property
def secrets_module(self) -> str:
if not hasattr(self, "deployment_info"):
self.get_deployment_info()
print(f"self_deployment_info2: {self.deployment_info}")
return self.deployment_info["secretsModule"]
@property
def secrets_data(self) -> dict:
if not hasattr(self, "deployment_info"):
self.get_deployment_info()
def facts_module(self) -> str:
return self.deployment_info["factsModule"]
@property
def secrets_data(self) -> dict[str, dict[str, Any]]:
if self.deployment_info["secretsData"]:
try:
return json.loads(Path(self.deployment_info["secretsData"]).read_text())
except json.JSONDecodeError:
log.error(
except json.JSONDecodeError as e:
raise ClanError(
f"Failed to parse secretsData for machine {self.name} as json"
)
return {}
) from e
return {}
@property
def secrets_upload_directory(self) -> str:
if not hasattr(self, "deployment_info"):
self.get_deployment_info()
return self.deployment_info["secretsUploadDirectory"]
@property
@@ -78,64 +125,157 @@ class Machine:
if hasattr(self, "flake_path"):
return Path(self.flake_path)
self.flake_path = nix_metadata(self.flake)["path"]
self.flake_path: str = nix_metadata(self.flake)["path"]
return Path(self.flake_path)
@property
def host(self) -> Host:
def target_host(self) -> Host:
return parse_deployment_address(
self.name, self.deployment_address, meta={"machine": self}
self.name, self.target_host_address, meta={"machine": self}
)
def eval_nix(self, attr: str, refresh: bool = False) -> str:
@property
def build_host(self) -> Host:
"""
eval a nix attribute of the machine
@attr: the attribute to get
The host where the machine is built and deployed from.
Can be the same as the target host.
"""
build_host = self.deployment_info.get("buildHost")
if build_host is None:
return self.target_host
# enable ssh agent forwarding to allow the build host to access the target host
return parse_deployment_address(
self.name,
build_host,
forward_agent=True,
meta={"machine": self, "target_host": self.target_host},
)
def nix(
self,
method: str,
attr: str,
extra_config: None | dict = None,
impure: bool = False,
nix_options: list[str] = [],
) -> str | Path:
"""
Build the machine and return the path to the result
accepts a secret store and a facts store # TODO
"""
config = nix_config()
system = config["system"]
attr = f'clanInternals.machines."{system}".{self.name}.{attr}'
print(f"attr: {attr}")
file_info = dict()
with NamedTemporaryFile(mode="w") as config_json:
if extra_config is not None:
json.dump(extra_config, config_json, indent=2)
else:
json.dump({}, config_json)
config_json.flush()
if attr in self.eval_cache and not refresh:
file_info = json.loads(
run(
nix_eval(
[
"--impure",
"--expr",
f'let x = (builtins.fetchTree {{ type = "file"; url = "file://{config_json.name}"; }}); in {{ narHash = x.narHash; path = x.outPath; }}',
]
)
).stdout.strip()
)
args = []
# get git commit from flake
if extra_config is not None:
metadata = nix_metadata(self.flake_dir)
url = metadata["url"]
if "dirtyRevision" in metadata:
# if not impure:
# raise ClanError(
# "The machine has a dirty revision, and impure mode is not allowed"
# )
# else:
# args += ["--impure"]
args += ["--impure"]
args += [
"--expr",
f"""
((builtins.getFlake "{url}").clanInternals.machinesFunc."{system}"."{self.name}" {{
extraConfig = builtins.fromJSON (builtins.readFile (builtins.fetchTree {{
type = "file";
url = if (builtins.compareVersions builtins.nixVersion "2.19") == -1 then "{file_info["path"]}" else "file:{file_info["path"]}";
narHash = "{file_info["narHash"]}";
}}));
}}).{attr}
""",
]
else:
if isinstance(self.flake, Path):
if (self.flake / ".git").exists():
flake = f"git+file://{self.flake}"
else:
flake = f"path:{self.flake}"
else:
flake = self.flake
args += [
f'{flake}#clanInternals.machines."{system}".{self.name}.{attr}',
*nix_options,
]
if method == "eval":
output = run(nix_eval(args)).stdout.strip()
return output
elif method == "build":
outpath = run(nix_build(args)).stdout.strip()
return Path(outpath)
else:
raise ValueError(f"Unknown method {method}")
def eval_nix(
self,
attr: str,
refresh: bool = False,
extra_config: None | dict = None,
impure: bool = False,
nix_options: list[str] = [],
) -> str:
"""
eval a nix attribute of the machine
@attr: the attribute to get
"""
if attr in self.eval_cache and not refresh and extra_config is None:
return self.eval_cache[attr]
if isinstance(self.flake, Path):
if (self.flake / ".git").exists():
flake = f"git+file://{self.flake}"
else:
flake = f"path:{self.flake}"
output = self.nix("eval", attr, extra_config, impure, nix_options)
if isinstance(output, str):
self.eval_cache[attr] = output
return output
else:
flake = self.flake
raise ClanError("eval_nix returned not a string")
print(f"evaluating {flake}#{attr}")
cmd = nix_eval([f"{flake}#{attr}"])
print(f"cmd: {cmd}")
output = run(cmd).stdout.strip()
self.eval_cache[attr] = output
return output
def build_nix(self, attr: str, refresh: bool = False) -> Path:
def build_nix(
self,
attr: str,
refresh: bool = False,
extra_config: None | dict = None,
impure: bool = False,
nix_options: list[str] = [],
) -> Path:
"""
build a nix attribute of the machine
@attr: the attribute to get
"""
config = nix_config()
system = config["system"]
attr = f'clanInternals.machines."{system}".{self.name}.{attr}'
if attr in self.build_cache and not refresh:
if attr in self.build_cache and not refresh and extra_config is None:
return self.build_cache[attr]
if isinstance(self.flake, Path):
flake = f"path:{self.flake}"
output = self.nix("build", attr, extra_config, impure, nix_options)
if isinstance(output, Path):
self.build_cache[attr] = output
return output
else:
flake = self.flake
log.info(f"building {flake}#{attr}")
outpath = run(nix_build([f"{flake}#{attr}"])).stdout.strip()
self.build_cache[attr] = Path(outpath)
return Path(outpath)
raise ClanError("build_nix returned not a Path")

View File

@@ -1,19 +1,92 @@
import argparse
import json
import logging
import os
import shlex
import subprocess
import sys
from pathlib import Path
from ..cmd import run
from ..errors import ClanError
from ..machines.machines import Machine
from ..nix import nix_build, nix_command, nix_config
from ..nix import nix_build, nix_command, nix_config, nix_metadata
from ..secrets.generate import generate_secrets
from ..secrets.upload import upload_secrets
from ..ssh import Host, HostGroup, HostKeyCheck, parse_deployment_address
log = logging.getLogger(__name__)
def deploy_nixos(hosts: HostGroup, clan_dir: Path) -> None:
def is_path_input(node: dict[str, dict[str, str]]) -> bool:
locked = node.get("locked")
if not locked:
return False
return locked["type"] == "path" or locked.get("url", "").startswith("file://")
def upload_sources(
flake_url: str, remote_url: str, always_upload_source: bool = False
) -> str:
if not always_upload_source:
flake_data = nix_metadata(flake_url)
url = flake_data["resolvedUrl"]
has_path_inputs = any(
is_path_input(node) for node in flake_data["locks"]["nodes"].values()
)
if not has_path_inputs and not is_path_input(flake_data):
# No need to upload sources, we can just build the flake url directly
# FIXME: this might fail for private repositories?
return url
if not has_path_inputs:
# Just copy the flake to the remote machine, we can substitute other inputs there.
path = flake_data["path"]
env = os.environ.copy()
# env["NIX_SSHOPTS"] = " ".join(opts.remote_ssh_options)
assert remote_url
cmd = nix_command(
[
"copy",
"--to",
f"ssh://{remote_url}",
"--no-check-sigs",
path,
]
)
proc = subprocess.run(cmd, stdout=subprocess.PIPE, env=env, check=False)
if proc.returncode != 0:
raise ClanError(
f"failed to upload sources: {shlex.join(cmd)} failed with {proc.returncode}"
)
return path
# Slow path: we need to upload all sources to the remote machine
assert remote_url
cmd = nix_command(
[
"flake",
"archive",
"--to",
f"ssh://{remote_url}",
"--json",
flake_url,
]
)
log.info("run %s", shlex.join(cmd))
proc = subprocess.run(cmd, stdout=subprocess.PIPE, check=False)
if proc.returncode != 0:
raise ClanError(
f"failed to upload sources: {shlex.join(cmd)} failed with {proc.returncode}"
)
try:
return json.loads(proc.stdout)["path"]
except (json.JSONDecodeError, OSError) as e:
raise ClanError(
f"failed to parse output of {shlex.join(cmd)}: {e}\nGot: {proc.stdout.decode('utf-8', 'replace')}"
)
def deploy_nixos(hosts: HostGroup) -> None:
"""
Deploy to all hosts in parallel
"""
@@ -23,14 +96,7 @@ def deploy_nixos(hosts: HostGroup, clan_dir: Path) -> None:
ssh_arg = f"-p {h.port}" if h.port else ""
env = os.environ.copy()
env["NIX_SSHOPTS"] = ssh_arg
res = h.run_local(
nix_command(["flake", "archive", "--to", f"ssh://{target}", "--json"]),
check=True,
stdout=subprocess.PIPE,
extra_env=env,
)
data = json.loads(res.stdout)
path = data["path"]
path = upload_sources(".", target)
if h.host_key_check != HostKeyCheck.STRICT:
ssh_arg += " -o StrictHostKeyChecking=no"
@@ -39,16 +105,11 @@ def deploy_nixos(hosts: HostGroup, clan_dir: Path) -> None:
ssh_arg += " -i " + h.key if h.key else ""
flake_attr = h.meta.get("flake_attr", "")
machine: Machine = h.meta["machine"]
generate_secrets(h.meta["machine"])
upload_secrets(h.meta["machine"])
generate_secrets(machine)
upload_secrets(machine)
target_host = h.meta.get("target_host")
if target_host:
target_user = h.meta.get("target_user")
if target_user:
target_host = f"{target_user}@{target_host}"
extra_args = h.meta.get("extra_args", [])
cmd = [
"nixos-rebuild",
@@ -64,9 +125,10 @@ def deploy_nixos(hosts: HostGroup, clan_dir: Path) -> None:
"--build-host",
"",
"--flake",
f"{path}#{flake_attr}",
f"{path}#{machine.name}",
]
if target_host:
if target_host := h.meta.get("target_host"):
target_host = f"{target_host.user or 'root'}@{target_host.host}"
cmd.extend(["--target-host", target_host])
ret = h.run(cmd, check=False)
# re-retry switch if the first time fails
@@ -87,18 +149,25 @@ def get_all_machines(clan_dir: Path) -> HostGroup:
machines = json.loads(Path(machines_json.rstrip()).read_text())
hosts = []
ignored_machines = []
for name, machine_data in machines.items():
# very hacky. would be better to do a MachinesGroup instead
host = parse_deployment_address(
name,
machine_data["deploymentAddress"],
meta={
"machine": Machine(
name=name, flake=clan_dir, deployment_info=machine_data
)
},
if machine_data.get("requireExplicitUpdate", False):
continue
machine = Machine(name=name, flake=clan_dir, deployment_info=machine_data)
try:
hosts.append(machine.build_host)
except ClanError:
ignored_machines.append(name)
continue
if not hosts and ignored_machines != []:
print(
"WARNING: No machines to update. The following defined machines were ignored because they do not have `clan.networking.targetHost` nixos option set:",
file=sys.stderr,
)
hosts.append(host)
for machine in ignored_machines:
print(machine, file=sys.stderr)
# very hacky. would be better to do a MachinesGroup instead
return HostGroup(hosts)
@@ -106,7 +175,7 @@ def get_selected_machines(machine_names: list[str], flake_dir: Path) -> HostGrou
hosts = []
for name in machine_names:
machine = Machine(name=name, flake=flake_dir)
hosts.append(machine.host)
hosts.append(machine.build_host)
return HostGroup(hosts)
@@ -116,7 +185,7 @@ def update(args: argparse.Namespace) -> None:
raise ClanError("Could not find clan flake toplevel directory")
if len(args.machines) == 1 and args.target_host is not None:
machine = Machine(name=args.machines[0], flake=args.flake)
machine.deployment_info["deploymentAddress"] = args.target_host
machine.target_host_address = args.target_host
host = parse_deployment_address(
args.machines[0],
args.target_host,
@@ -133,7 +202,7 @@ def update(args: argparse.Namespace) -> None:
else:
machines = get_selected_machines(args.machines, args.flake)
deploy_nixos(machines, args.flake)
deploy_nixos(machines)
def register_update_parser(parser: argparse.ArgumentParser) -> None:

View File

@@ -53,6 +53,11 @@ def nix_build(flags: list[str], gcroot: Path | None = None) -> list[str]:
)
def nix_add_to_gcroots(nix_path: Path, dest: Path) -> None:
    """Realise *nix_path* and register *dest* as a GC root for it."""
    run(["nix-store", "--realise", str(nix_path), "--add-root", str(dest)])
def nix_config() -> dict[str, Any]:
cmd = nix_command(["show-config", "--json"])
proc = run(cmd)

View File

@@ -1,6 +1,7 @@
# !/usr/bin/env python3
import argparse
from .check import register_check_parser
from .generate import register_generate_parser
from .groups import register_groups_parser
from .import_sops import register_import_sops_parser
@@ -32,6 +33,9 @@ def register_parser(parser: argparse.ArgumentParser) -> None:
import_sops_parser = subparser.add_parser("import-sops", help="import a sops file")
register_import_sops_parser(import_sops_parser)
check_parser = subparser.add_parser("check", help="check if secrets are up to date")
register_check_parser(check_parser)
parser_generate = subparser.add_parser(
"generate", help="generate secrets for machines if they don't exist yet"
)

View File

@@ -0,0 +1,61 @@
import argparse
import importlib
import logging
from ..machines.machines import Machine
log = logging.getLogger(__name__)
def check_secrets(machine: Machine, service: None | str = None) -> bool:
    """Return True when all secrets and facts of *machine* are present.

    :param service: restrict the check to a single service; by default every
        service declared in ``machine.secrets_data`` is inspected.
    """
    secret_store = importlib.import_module(machine.secrets_module).SecretStore(
        machine=machine
    )
    fact_store = importlib.import_module(machine.facts_module).FactStore(
        machine=machine
    )

    services = [service] if service else list(machine.secrets_data.keys())

    missing_secrets = []
    missing_facts = []
    # "svc" instead of re-binding the "service" parameter inside the loop
    for svc in services:
        for secret in machine.secrets_data[svc]["secrets"]:
            # entries are either a bare name or a {"name": ...} mapping
            secret_name = secret if isinstance(secret, str) else secret["name"]
            if not secret_store.exists(svc, secret_name):
                log.info(f"Secret {secret} for service {svc} is missing")
                missing_secrets.append((svc, secret_name))
        for fact in machine.secrets_data[svc]["facts"]:
            if not fact_store.exists(svc, fact):
                log.info(f"Fact {fact} for service {svc} is missing")
                missing_facts.append((svc, fact))

    log.debug(f"missing_secrets: {missing_secrets}")
    log.debug(f"missing_facts: {missing_facts}")
    return not (missing_secrets or missing_facts)
def check_command(args: argparse.Namespace) -> None:
    """CLI handler for `secrets check`: verify a machine's secrets/facts."""
    target = Machine(name=args.machine, flake=args.flake)
    check_secrets(target, service=args.service)


def register_check_parser(parser: argparse.ArgumentParser) -> None:
    """Attach the `check` sub-command's arguments and handler to *parser*."""
    parser.add_argument("machine", help="The machine to check secrets for")
    parser.add_argument("--service", help="the service to check")
    parser.set_defaults(func=check_command)

View File

@@ -2,87 +2,145 @@ import argparse
import importlib
import logging
import os
import shutil
from collections.abc import Callable
from pathlib import Path
from tempfile import TemporaryDirectory
from clan_cli.cmd import run
from ..errors import ClanError
from ..facts.modules import FactStoreBase
from ..git import commit_files
from ..machines.machines import Machine
from ..nix import nix_shell
from .check import check_secrets
from .modules import SecretStoreBase
log = logging.getLogger(__name__)
def generate_secrets(machine: Machine) -> None:
def generate_service_secrets(
machine: Machine,
service: str,
secret_store: SecretStoreBase,
fact_store: FactStoreBase,
tmpdir: Path,
prompt: Callable[[str], str],
) -> None:
service_dir = tmpdir / service
# check if all secrets exist and generate them if at least one is missing
needs_regeneration = not check_secrets(machine, service=service)
log.debug(f"{service} needs_regeneration: {needs_regeneration}")
if needs_regeneration:
if not isinstance(machine.flake, Path):
msg = f"flake is not a Path: {machine.flake}"
msg += "fact/secret generation is only supported for local flakes"
env = os.environ.copy()
facts_dir = service_dir / "facts"
facts_dir.mkdir(parents=True)
env["facts"] = str(facts_dir)
secrets_dir = service_dir / "secrets"
secrets_dir.mkdir(parents=True)
env["secrets"] = str(secrets_dir)
# compatibility for old outputs.nix users
if isinstance(machine.secrets_data[service]["generator"], str):
generator = machine.secrets_data[service]["generator"]
else:
generator = machine.secrets_data[service]["generator"]["finalScript"]
if machine.secrets_data[service]["generator"]["prompt"]:
prompt_value = prompt(
machine.secrets_data[service]["generator"]["prompt"]
)
env["prompt_value"] = prompt_value
# fmt: off
cmd = nix_shell(
[
"nixpkgs#bash",
"nixpkgs#bubblewrap",
],
[
"bwrap",
"--ro-bind", "/nix/store", "/nix/store",
"--tmpfs", "/usr/lib/systemd",
"--dev", "/dev",
"--bind", str(facts_dir), str(facts_dir),
"--bind", str(secrets_dir), str(secrets_dir),
"--unshare-all",
"--unshare-user",
"--uid", "1000",
"--",
"bash", "-c", generator
],
)
# fmt: on
run(
cmd,
env=env,
)
files_to_commit = []
# store secrets
for secret in machine.secrets_data[service]["secrets"]:
if isinstance(secret, str):
# TODO: This is the old NixOS module, can be dropped everyone has updated.
secret_name = secret
groups = []
else:
secret_name = secret["name"]
groups = secret.get("groups", [])
secret_file = secrets_dir / secret_name
if not secret_file.is_file():
msg = f"did not generate a file for '{secret_name}' when running the following command:\n"
msg += generator
raise ClanError(msg)
secret_path = secret_store.set(
service, secret_name, secret_file.read_bytes(), groups
)
if secret_path:
files_to_commit.append(secret_path)
# store facts
for name in machine.secrets_data[service]["facts"]:
fact_file = facts_dir / name
if not fact_file.is_file():
msg = f"did not generate a file for '{name}' when running the following command:\n"
msg += machine.secrets_data[service]["generator"]
raise ClanError(msg)
fact_file = fact_store.set(service, name, fact_file.read_bytes())
if fact_file:
files_to_commit.append(fact_file)
commit_files(
files_to_commit,
machine.flake_dir,
f"Update facts/secrets for service {service} in machine {machine.name}",
)
def generate_secrets(
machine: Machine,
prompt: None | Callable[[str], str] = None,
) -> None:
secrets_module = importlib.import_module(machine.secrets_module)
secret_store = secrets_module.SecretStore(machine=machine)
with TemporaryDirectory() as d:
facts_module = importlib.import_module(machine.facts_module)
fact_store = facts_module.FactStore(machine=machine)
if prompt is None:
prompt = lambda text: input(f"{text}: ")
with TemporaryDirectory() as tmp:
tmpdir = Path(tmp)
for service in machine.secrets_data:
print(service)
tmpdir = Path(d) / service
# check if all secrets exist and generate them if at least one is missing
needs_regeneration = any(
not secret_store.exists(service, secret)
for secret in machine.secrets_data[service]["secrets"]
) or any(
not (machine.flake / fact).exists()
for fact in machine.secrets_data[service]["facts"].values()
generate_service_secrets(
machine=machine,
service=service,
secret_store=secret_store,
fact_store=fact_store,
tmpdir=tmpdir,
prompt=prompt,
)
for fact in machine.secrets_data[service]["facts"].values():
if not (machine.flake / fact).exists():
print(f"fact {fact} is missing")
if needs_regeneration:
env = os.environ.copy()
facts_dir = tmpdir / "facts"
facts_dir.mkdir(parents=True)
env["facts"] = str(facts_dir)
secrets_dir = tmpdir / "secrets"
secrets_dir.mkdir(parents=True)
env["secrets"] = str(secrets_dir)
# fmt: off
cmd = nix_shell(
[
"nixpkgs#bash",
"nixpkgs#bubblewrap",
],
[
"bwrap",
"--ro-bind", "/nix/store", "/nix/store",
"--tmpfs", "/usr/lib/systemd",
"--dev", "/dev",
"--bind", str(facts_dir), str(facts_dir),
"--bind", str(secrets_dir), str(secrets_dir),
"--unshare-all",
"--",
"bash", "-c", machine.secrets_data[service]["generator"]
],
)
# fmt: on
run(
cmd,
env=env,
)
# store secrets
for secret in machine.secrets_data[service]["secrets"]:
secret_file = secrets_dir / secret
if not secret_file.is_file():
msg = f"did not generate a file for '{secret}' when running the following command:\n"
msg += machine.secrets_data[service]["generator"]
raise ClanError(msg)
secret_store.set(service, secret, secret_file.read_text())
# store facts
for name, fact_path in machine.secrets_data[service]["facts"].items():
fact_file = facts_dir / name
if not fact_file.is_file():
msg = f"did not generate a file for '{name}' when running the following command:\n"
msg += machine.secrets_data[service]["generator"]
raise ClanError(msg)
fact_path = machine.flake / fact_path
fact_path.parent.mkdir(parents=True, exist_ok=True)
shutil.copyfile(fact_file, fact_path)
print("successfully generated secrets")

View File

@@ -2,6 +2,7 @@ import argparse
from pathlib import Path
from ..errors import ClanError
from ..git import commit_files
from ..machines.types import machine_name_type, validate_hostname
from . import secrets
from .folders import list_objects, remove_object, sops_machines_folder
@@ -10,7 +11,13 @@ from .types import public_or_private_age_key_type, secret_name_type
def add_machine(flake_dir: Path, name: str, key: str, force: bool) -> None:
write_key(sops_machines_folder(flake_dir) / name, key, force)
path = sops_machines_folder(flake_dir) / name
write_key(path, key, force)
commit_files(
[path],
flake_dir,
f"Add machine {name} to secrets",
)
def remove_machine(flake_dir: Path, name: str) -> None:
@@ -35,11 +42,16 @@ def list_machines(flake_dir: Path) -> list[str]:
def add_secret(flake_dir: Path, machine: str, secret: str) -> None:
secrets.allow_member(
path = secrets.allow_member(
secrets.machines_folder(flake_dir, secret),
sops_machines_folder(flake_dir),
machine,
)
commit_files(
[path],
flake_dir,
f"Add {machine} to secret",
)
def remove_secret(flake_dir: Path, machine: str, secret: str) -> None:

View File

@@ -0,0 +1,31 @@
from abc import ABC, abstractmethod
from pathlib import Path
from clan_cli.machines.machines import Machine
class SecretStoreBase(ABC):
    """Interface that every secret-store backend must implement."""

    @abstractmethod
    def __init__(self, machine: Machine) -> None:
        """Bind the store to a machine."""

    @abstractmethod
    def set(
        self, service: str, name: str, value: bytes, groups: list[str]
    ) -> Path | None:
        """Store a secret; return a path to commit to git, or None if the
        backend manages files outside the repository."""

    @abstractmethod
    def get(self, service: str, name: str) -> bytes:
        """Return the raw secret value."""

    @abstractmethod
    def exists(self, service: str, name: str) -> bool:
        """Return True when the secret is already stored."""

    def update_check(self) -> bool:
        # Backends may override this to report "already up to date";
        # by default no such shortcut is available.
        return False

    @abstractmethod
    def upload(self, output_dir: Path) -> None:
        """Write all of the machine's secrets into *output_dir*."""

View File

@@ -5,20 +5,25 @@ from pathlib import Path
from clan_cli.machines.machines import Machine
from clan_cli.nix import nix_shell
from . import SecretStoreBase
class SecretStore:
class SecretStore(SecretStoreBase):
def __init__(self, machine: Machine) -> None:
self.machine = machine
def set(self, service: str, name: str, value: str) -> None:
def set(
self, service: str, name: str, value: bytes, groups: list[str]
) -> Path | None:
subprocess.run(
nix_shell(
["nixpkgs#pass"],
["pass", "insert", "-m", f"machines/{self.machine.name}/{name}"],
),
input=value.encode("utf-8"),
input=value,
check=True,
)
return None # we manage the files outside of the git repo
def get(self, service: str, name: str) -> bytes:
return subprocess.run(
@@ -35,7 +40,6 @@ class SecretStore:
"PASSWORD_STORE_DIR", f"{os.environ['HOME']}/.password-store"
)
secret_path = Path(password_store) / f"machines/{self.machine.name}/{name}.gpg"
print(f"checking {secret_path}")
return secret_path.exists()
def generate_hash(self) -> bytes:
@@ -84,9 +88,11 @@ class SecretStore:
hashes.sort()
return b"\n".join(hashes)
# FIXME: add this when we switch to python3.12
# @override
def update_check(self) -> bool:
local_hash = self.generate_hash()
remote_hash = self.machine.host.run(
remote_hash = self.machine.target_host.run(
# TODO get the path to the secrets from the machine
["cat", f"{self.machine.secrets_upload_directory}/.pass_info"],
check=False,
@@ -102,5 +108,10 @@ class SecretStore:
def upload(self, output_dir: Path) -> None:
for service in self.machine.secrets_data:
for secret in self.machine.secrets_data[service]["secrets"]:
(output_dir / secret).write_bytes(self.get(service, secret))
if isinstance(secret, dict):
secret_name = secret["name"]
else:
# TODO: drop old format soon
secret_name = secret
(output_dir / secret_name).write_bytes(self.get(service, secret_name))
(output_dir / ".pass_info").write_bytes(self.generate_hash())

View File

@@ -6,8 +6,10 @@ from clan_cli.secrets.machines import add_machine, has_machine
from clan_cli.secrets.secrets import decrypt_secret, encrypt_secret, has_secret
from clan_cli.secrets.sops import generate_private_key
from . import SecretStoreBase
class SecretStore:
class SecretStore(SecretStoreBase):
def __init__(self, machine: Machine) -> None:
self.machine = machine
@@ -28,18 +30,25 @@ class SecretStore:
)
add_machine(self.machine.flake_dir, self.machine.name, pub_key, False)
def set(self, _service: str, name: str, value: str) -> None:
def set(
self, service: str, name: str, value: bytes, groups: list[str]
) -> Path | None:
path = (
sops_secrets_folder(self.machine.flake_dir) / f"{self.machine.name}-{name}"
)
encrypt_secret(
self.machine.flake_dir,
sops_secrets_folder(self.machine.flake_dir) / f"{self.machine.name}-{name}",
value,
path,
value.decode(),
add_machines=[self.machine.name],
add_groups=groups,
)
return path
def get(self, _service: str, _name: str) -> bytes:
def get(self, service: str, name: str) -> bytes:
raise NotImplementedError()
def exists(self, _service: str, name: str) -> bool:
def exists(self, service: str, name: str) -> bool:
return has_secret(
self.machine.flake_dir,
f"{self.machine.name}-{name}",

View File

@@ -0,0 +1,35 @@
import os
import shutil
from pathlib import Path
from clan_cli.dirs import vm_state_dir
from clan_cli.machines.machines import Machine
from . import SecretStoreBase
class SecretStore(SecretStoreBase):
    """VM secret store: keeps secrets as plain files in the VM's state
    directory rather than inside the git repository."""

    def __init__(self, machine: Machine) -> None:
        self.machine = machine
        self.dir = vm_state_dir(str(machine.flake), machine.name) / "secrets"
        self.dir.mkdir(parents=True, exist_ok=True)

    def _entry(self, service: str, name: str) -> Path:
        # Location of a single secret: <state>/secrets/<service>/<name>
        return self.dir / service / name

    def set(
        self, service: str, name: str, value: bytes, groups: list[str]
    ) -> Path | None:
        target = self._entry(service, name)
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_bytes(value)
        # Files live outside the git repo, so there is nothing to commit.
        return None

    def get(self, service: str, name: str) -> bytes:
        return self._entry(service, name).read_bytes()

    def exists(self, service: str, name: str) -> bool:
        return self._entry(service, name).exists()

    def upload(self, output_dir: Path) -> None:
        # Replace the output directory wholesale with the current store.
        if os.path.exists(output_dir):
            shutil.rmtree(output_dir)
        shutil.copytree(self.dir, output_dir)

View File

@@ -3,11 +3,13 @@ import getpass
import os
import shutil
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import IO
from .. import tty
from ..errors import ClanError
from ..git import commit_files
from .folders import (
list_objects,
sops_groups_folder,
@@ -63,42 +65,58 @@ def encrypt_secret(
key = ensure_sops_key(flake_dir)
keys = set([])
files_to_commit = []
for user in add_users:
allow_member(
users_folder(flake_dir, secret.name),
sops_users_folder(flake_dir),
user,
False,
files_to_commit.append(
allow_member(
users_folder(flake_dir, secret.name),
sops_users_folder(flake_dir),
user,
False,
)
)
for machine in add_machines:
allow_member(
machines_folder(flake_dir, secret.name),
sops_machines_folder(flake_dir),
machine,
False,
files_to_commit.append(
allow_member(
machines_folder(flake_dir, secret.name),
sops_machines_folder(flake_dir),
machine,
False,
)
)
for group in add_groups:
allow_member(
groups_folder(flake_dir, secret.name),
sops_groups_folder(flake_dir),
group,
False,
files_to_commit.append(
allow_member(
groups_folder(flake_dir, secret.name),
sops_groups_folder(flake_dir),
group,
False,
)
)
keys = collect_keys_for_path(secret)
if key.pubkey not in keys:
keys.add(key.pubkey)
allow_member(
users_folder(flake_dir, secret.name),
sops_users_folder(flake_dir),
key.username,
False,
files_to_commit.append(
allow_member(
users_folder(flake_dir, secret.name),
sops_users_folder(flake_dir),
key.username,
False,
)
)
encrypt_file(secret / "secret", value, list(sorted(keys)))
secret_path = secret / "secret"
encrypt_file(secret_path, value, list(sorted(keys)))
files_to_commit.append(secret_path)
commit_files(
files_to_commit,
flake_dir,
f"Update secret {secret.name}",
)
def remove_secret(flake_dir: Path, secret: str) -> None:
@@ -106,6 +124,11 @@ def remove_secret(flake_dir: Path, secret: str) -> None:
if not path.exists():
raise ClanError(f"Secret '{secret}' does not exist")
shutil.rmtree(path)
commit_files(
[path],
flake_dir,
f"Remove secret {secret}",
)
def remove_command(args: argparse.Namespace) -> None:
@@ -139,10 +162,10 @@ def list_directory(directory: Path) -> str:
def allow_member(
group_folder: Path, source_folder: Path, name: str, do_update_keys: bool = True
) -> None:
) -> Path:
source = source_folder / name
if not source.exists():
msg = f"{name} does not exist in {source_folder}: "
msg = f"Cannot encrypt {group_folder.parent.name} for '{name}' group. '{name}' group does not exist in {source_folder}: "
msg += list_directory(source_folder)
raise ClanError(msg)
group_folder.mkdir(parents=True, exist_ok=True)
@@ -150,7 +173,7 @@ def allow_member(
if user_target.exists():
if not user_target.is_symlink():
raise ClanError(
f"Cannot add user {name}. {user_target} exists but is not a symlink"
f"Cannot add user '{name}' to {group_folder.parent.name} secret. {user_target} exists but is not a symlink"
)
os.remove(user_target)
@@ -160,6 +183,7 @@ def allow_member(
group_folder.parent,
list(sorted(collect_keys_for_path(group_folder.parent))),
)
return user_target
def disallow_member(group_folder: Path, name: str) -> None:
@@ -192,17 +216,31 @@ def has_secret(flake_dir: Path, secret: str) -> bool:
return (sops_secrets_folder(flake_dir) / secret / "secret").exists()
def list_secrets(flake_dir: Path) -> list[str]:
def list_secrets(flake_dir: Path, pattern: str | None = None) -> list[str]:
path = sops_secrets_folder(flake_dir)
def validate(name: str) -> bool:
return VALID_SECRET_NAME.match(name) is not None and has_secret(flake_dir, name)
return (
VALID_SECRET_NAME.match(name) is not None
and has_secret(flake_dir, name)
and (pattern is None or pattern in name)
)
return list_objects(path, validate)
@dataclass
class ListSecretsOptions:
    """Parsed options for the `secrets list` command."""

    flake: Path  # clan flake directory whose secrets are listed
    pattern: str | None  # substring filter for secret names; None lists all
def list_command(args: argparse.Namespace) -> None:
lst = list_secrets(Path(args.flake))
options = ListSecretsOptions(
flake=args.flake,
pattern=args.pattern,
)
lst = list_secrets(options.flake, options.pattern)
if len(lst) > 0:
print("\n".join(lst))
@@ -239,17 +277,28 @@ def set_command(args: argparse.Namespace) -> None:
def rename_command(args: argparse.Namespace) -> None:
old_path = sops_secrets_folder(Path(args.flake)) / args.secret
new_path = sops_secrets_folder(Path(args.flake)) / args.new_name
flake_dir = Path(args.flake)
old_path = sops_secrets_folder(flake_dir) / args.secret
new_path = sops_secrets_folder(flake_dir) / args.new_name
if not old_path.exists():
raise ClanError(f"Secret '{args.secret}' does not exist")
if new_path.exists():
raise ClanError(f"Secret '{args.new_name}' already exists")
os.rename(old_path, new_path)
commit_files(
[old_path, new_path],
flake_dir,
f"Rename secret {args.secret} to {args.new_name}",
)
def register_secrets_parser(subparser: argparse._SubParsersAction) -> None:
parser_list = subparser.add_parser("list", help="list secrets")
parser_list.add_argument(
"pattern",
nargs="?",
help="a pattern to filter the secrets. All secrets containing the pattern will be listed.",
)
parser_list.set_defaults(func=list_command)
parser_get = subparser.add_parser("get", help="get a secret")

View File

@@ -144,7 +144,9 @@ def encrypt_file(
args = ["sops", "--config", str(manifest)]
args.extend([str(secret_path)])
cmd = nix_shell(["nixpkgs#sops"], args)
p = run(cmd, log=Log.BOTH, check=False)
# Don't use our `run` here, because it breaks editor integration.
# We never need this in our UI.
p = subprocess.run(cmd, check=False)
# returns 200 if the file is changed
if p.returncode != 0 and p.returncode != 200:
raise ClanError(

View File

@@ -15,14 +15,12 @@ def upload_secrets(machine: Machine) -> None:
secrets_module = importlib.import_module(machine.secrets_module)
secret_store = secrets_module.SecretStore(machine=machine)
update_check = getattr(secret_store, "update_check", None)
if callable(update_check):
if update_check():
log.info("Secrets already up to date")
return
if secret_store.update_check():
log.info("Secrets already up to date")
return
with TemporaryDirectory() as tempdir:
secret_store.upload(Path(tempdir))
host = machine.host
host = machine.target_host
ssh_cmd = host.ssh_cmd()
run(

View File

@@ -2,6 +2,7 @@ import argparse
from pathlib import Path
from ..errors import ClanError
from ..git import commit_files
from . import secrets
from .folders import list_objects, remove_object, sops_users_folder
from .sops import read_key, write_key
@@ -14,7 +15,13 @@ from .types import (
def add_user(flake_dir: Path, name: str, key: str, force: bool) -> None:
write_key(sops_users_folder(flake_dir) / name, key, force)
path = sops_users_folder(flake_dir) / name
write_key(path, key, force)
commit_files(
[path],
flake_dir,
f"Add user {name} to secrets",
)
def remove_user(flake_dir: Path, name: str) -> None:

View File

@@ -16,14 +16,7 @@ from enum import Enum
from pathlib import Path
from shlex import quote
from threading import Thread
from typing import (
IO,
Any,
Generic,
Literal,
TypeVar,
overload,
)
from typing import IO, Any, Generic, TypeVar
# https://no-color.org
DISABLE_COLOR = not sys.stderr.isatty() or os.environ.get("NO_COLOR", "") != ""
@@ -755,7 +748,7 @@ class HostGroup:
def parse_deployment_address(
machine_name: str, host: str, meta: dict[str, Any] = {}
machine_name: str, host: str, forward_agent: bool = True, meta: dict[str, Any] = {}
) -> Host:
parts = host.split("@")
user: str | None = None
@@ -777,83 +770,12 @@ def parse_deployment_address(
hostname = result.hostname
port = result.port
meta = meta.copy()
meta["flake_attr"] = machine_name
return Host(
hostname,
user=user,
port=port,
command_prefix=machine_name,
forward_agent=forward_agent,
meta=meta,
ssh_options=options,
)
# Typed overloads for run(): the `text` flag selects whether the completed
# process carries str or bytes streams.
@overload
def run(
    cmd: list[str] | str,
    text: Literal[True] = ...,
    stdout: FILE = ...,
    stderr: FILE = ...,
    extra_env: dict[str, str] = ...,
    cwd: None | str | Path = ...,
    check: bool = ...,
) -> subprocess.CompletedProcess[str]:
    ...


@overload
def run(
    cmd: list[str] | str,
    text: Literal[False],
    stdout: FILE = ...,
    stderr: FILE = ...,
    extra_env: dict[str, str] = ...,
    cwd: None | str | Path = ...,
    check: bool = ...,
) -> subprocess.CompletedProcess[bytes]:
    ...
def run(
    cmd: list[str] | str,
    text: bool = True,
    stdout: FILE = None,
    stderr: FILE = None,
    extra_env: dict[str, str] | None = None,
    cwd: None | str | Path = None,
    check: bool = True,
) -> subprocess.CompletedProcess[Any]:
    """
    Run command locally

    @cmd if this parameter is a string the command is interpreted as a shell command,
         otherwise if it is a list, then the first list element is the command
         and the remaining list elements are passed as arguments to the
         command.
    @text when true, file objects for stdout and stderr are opened in text mode.
    @stdout if not None stdout of the command will be redirected to this file i.e. stdout=subprocess.PIPE
    @stderr if not None stderr of the command will be redirected to this file i.e. stderr=subprocess.PIPE
    @extra_env environment variables to override when running the command
    @cwd current working directory to run the process in
    @check If check is true, and the process exits with a non-zero exit code, a
           CalledProcessError exception will be raised. Attributes of that exception
           hold the arguments, the exit code, and stdout and stderr if they were
           captured.
    """
    # Echo the command so the user can see what is being executed.
    if isinstance(cmd, list):
        info("$ " + " ".join(cmd))
    else:
        info(f"$ {cmd}")
    env = os.environ.copy()
    # `extra_env` defaults to None (not a mutable `{}` default, which would be
    # shared across calls); treat None as "no overrides".
    env.update(extra_env or {})
    return subprocess.run(
        cmd,
        stdout=stdout,
        stderr=stderr,
        env=env,
        cwd=cwd,
        check=check,
        # A string command is run through the shell, a list is exec'd directly.
        shell=not isinstance(cmd, list),
        text=text,
    )

View File

@@ -9,18 +9,20 @@ from ..machines.machines import Machine
@dataclass
class VmConfig:
machine_name: str
machine_icon: Path
machine_description: str
flake_url: str | Path
clan_name: str
cores: int
memory_size: int
graphics: bool
wayland: bool = False
waypipe: bool = False
def inspect_vm(machine: Machine) -> VmConfig:
data = json.loads(machine.eval_nix("config.clanCore.vm.inspect"))
return VmConfig(machine_name=machine.name, flake_url=machine.flake, **data)
return VmConfig(flake_url=machine.flake, **data)
@dataclass

View File

@@ -0,0 +1,147 @@
import os
import random
from dataclasses import dataclass
from pathlib import Path
from .inspect import VmConfig
@dataclass
class GraphicOptions:
args: list[str]
vsock_cid: int | None = None
def graphics_options(vm: VmConfig) -> GraphicOptions:
common = [
"-audio",
"driver=pa,model=virtio",
]
if vm.waypipe:
# FIXME: check for collisions
cid = random.randint(1, 2**32)
# fmt: off
return GraphicOptions([
*common,
"-nographic",
"-device", f"vhost-vsock-pci,id=vhost-vsock-pci0,guest-cid={cid}",
"-vga", "none",
#"-display", "egl-headless,gl=core",
# this would make the gpu part of the hypervisor
#"-device", "virtio-vga-gl,blob=true",
# This is for an external gpu process
#"-device", "virtio-serial-pci",
#"-device", "vhost-user-vga,chardev=vgpu",
#"-chardev", "socket,id=vgpu,path=/tmp/vgpu.sock",
], cid)
# fmt: on
else:
if not os.path.exists("/run/opengl-driver"):
display_options = [
"-vga",
"none",
"-display",
"gtk,gl=on",
"-device",
"virtio-gpu-gl",
"-display",
"spice-app,gl=on",
]
else:
display_options = ["-display", "spice-app"]
# fmt: off
return GraphicOptions([
*common,
*display_options,
"-device", "virtio-serial-pci",
"-chardev", "spicevmc,id=vdagent0,name=vdagent",
"-device", "virtserialport,chardev=vdagent0,name=com.redhat.spice.0",
"-device", "qemu-xhci,id=spicepass",
"-chardev", "spicevmc,id=usbredirchardev1,name=usbredir",
"-device", "usb-redir,chardev=usbredirchardev1,id=usbredirdev1",
"-chardev", "spicevmc,id=usbredirchardev2,name=usbredir",
"-device", "usb-redir,chardev=usbredirchardev2,id=usbredirdev2",
"-chardev", "spicevmc,id=usbredirchardev3,name=usbredir",
"-device", "usb-redir,chardev=usbredirchardev3,id=usbredirdev3",
"-device", "pci-ohci,id=smartpass",
"-device", "usb-ccid",
"-chardev", "spicevmc,id=ccid,name=smartcard",
], None)
# fmt: on
@dataclass
class QemuCommand:
    # Fully assembled qemu argv plus, when graphics use a vsock display,
    # the guest CID chosen by graphics_options().
    args: list[str]
    vsock_cid: int | None = None


def qemu_command(
    vm: VmConfig,
    nixos_config: dict[str, str],
    secrets_dir: Path,
    rootfs_img: Path,
    state_img: Path,
    virtiofsd_socket: Path,
    qmp_socket_file: Path,
    qga_socket_file: Path,
) -> QemuCommand:
    """Assemble the qemu-kvm command line for running *vm*.

    nixos_config supplies store paths/values from the built NixOS system
    ("toplevel", "regInfo", "memorySize", "cores", "initrd"); secrets_dir is
    shared into the guest via 9p; rootfs_img/state_img are qcow2 disks;
    virtiofsd_socket backs the /nix/store mount; qmp/qga sockets expose the
    qemu machine protocol and guest agent.
    """
    # Kernel command line: the system's own kernel-params plus stage-2 init,
    # registration info and a virtconsole for the serial console.
    kernel_cmdline = [
        (Path(nixos_config["toplevel"]) / "kernel-params").read_text(),
        f'init={nixos_config["toplevel"]}/init',
        f'regInfo={nixos_config["regInfo"]}/registration',
        "console=hvc0",
    ]
    # Only add a tty console when not running headless under waypipe.
    if not vm.waypipe:
        kernel_cmdline.append("console=tty0")
    # fmt: off
    command = [
        "qemu-kvm",
        "-name", vm.machine_name,
        "-m", f'{nixos_config["memorySize"]}M',
        "-object", f"memory-backend-memfd,id=mem,size={nixos_config['memorySize']}M",
        "-machine", "pc,memory-backend=mem,accel=kvm",
        "-smp", str(nixos_config["cores"]),
        "-cpu", "max",
        "-enable-kvm",
        # speed-up boot by not waiting for the boot menu
        "-boot", "menu=off,strict=on",
        "-device", "virtio-rng-pci",
        "-netdev", "user,id=user.0",
        "-device", "virtio-net-pci,netdev=user.0,romfile=",
        # /nix/store is shared from the host through virtiofsd.
        "-chardev", f"socket,id=char1,path={virtiofsd_socket}",
        "-device", "vhost-user-fs-pci,chardev=char1,tag=nix-store",
        "-virtfs", f"local,path={secrets_dir},security_model=none,mount_tag=secrets",
        # Root and persistent-state disks.
        "-drive", f"cache=writeback,file={rootfs_img},format=qcow2,id=drive1,if=none,index=1,werror=report",
        "-device", "virtio-blk-pci,bootindex=1,drive=drive1,serial=root",
        "-drive", f"cache=writeback,file={state_img},format=qcow2,id=state,if=none,index=2,werror=report",
        "-device", "virtio-blk-pci,drive=state",
        "-device", "virtio-keyboard",
        "-usb", "-device", "usb-tablet,bus=usb-bus.0",
        "-kernel", f'{nixos_config["toplevel"]}/kernel',
        "-initrd", nixos_config["initrd"],
        "-append", " ".join(kernel_cmdline),
        # qmp & qga setup
        "-qmp", f"unix:{qmp_socket_file},server,wait=off",
        "-chardev", f"socket,path={qga_socket_file},server=on,wait=off,id=qga0",
        "-device", "virtio-serial",
        "-device", "virtserialport,chardev=qga0,name=org.qemu.guest_agent.0",
        "-serial", "null",
        "-chardev", "stdio,mux=on,id=char0,signal=off",
        "-mon", "chardev=char0,mode=readline",
        "-device", "virtconsole,chardev=char0,nr=0",
    ] # fmt: on
    # Append display/audio options (or run headless) depending on vm.graphics.
    vsock_cid = None
    if vm.graphics:
        opts = graphics_options(vm)
        vsock_cid = opts.vsock_cid
        command.extend(opts.args)
    else:
        command.append("-nographic")
    return QemuCommand(command, vsock_cid=vsock_cid)

View File

@@ -1,164 +1,59 @@
import argparse
import contextlib
import importlib
import json
import logging
import os
import random
import socket
import subprocess
import tempfile
import time
from collections.abc import Iterator
from dataclasses import dataclass, field
from pathlib import Path
from typing import IO
from tempfile import TemporaryDirectory
from ..cmd import Log, run
from ..dirs import machine_gcroot, module_root, vm_state_dir
from ..dirs import module_root, user_cache_dir, vm_state_dir
from ..errors import ClanError
from ..machines.machines import Machine
from ..nix import nix_build, nix_config, nix_shell
from ..nix import nix_shell
from ..secrets.generate import generate_secrets
from .inspect import VmConfig, inspect_vm
from .qemu import qemu_command
from .virtiofsd import start_virtiofsd
from .waypipe import start_waypipe
log = logging.getLogger(__name__)
@dataclass
class GraphicOptions:
args: list[str]
vsock_cid: int | None = None
def graphics_options(vm: VmConfig) -> GraphicOptions:
common = [
"-audio",
"driver=pa,model=virtio",
]
if vm.wayland:
# FIXME: check for collisions
cid = random.randint(1, 2**32)
# fmt: off
return GraphicOptions([
*common,
"-nographic",
"-vga", "none",
"-device", f"vhost-vsock-pci,id=vhost-vsock-pci0,guest-cid={cid}",
# TODO: vgpu
#"-display", "egl-headless,gl=core",
#"-device", "virtio-vga,blob=true",
#"-device", "virtio-serial-pci",
#"-device", "vhost-user-vga,chardev=vgpu",
#"-chardev", "socket,id=vgpu,path=/tmp/vgpu.sock",
], cid)
# fmt: on
else:
# fmt: off
return GraphicOptions([
*common,
"-vga", "none",
"-display", "gtk,gl=on",
"-device", "virtio-gpu-gl",
"-display", "spice-app,gl=on",
"-device", "virtio-serial-pci",
"-chardev", "spicevmc,id=vdagent0,name=vdagent",
"-device", "virtserialport,chardev=vdagent0,name=com.redhat.spice.0",
"-device", "qemu-xhci,id=spicepass",
"-chardev", "spicevmc,id=usbredirchardev1,name=usbredir",
"-device", "usb-redir,chardev=usbredirchardev1,id=usbredirdev1",
"-chardev", "spicevmc,id=usbredirchardev2,name=usbredir",
"-device", "usb-redir,chardev=usbredirchardev2,id=usbredirdev2",
"-chardev", "spicevmc,id=usbredirchardev3,name=usbredir",
"-device", "usb-redir,chardev=usbredirchardev3,id=usbredirdev3",
"-device", "pci-ohci,id=smartpass",
"-device", "usb-ccid",
"-chardev", "spicevmc,id=ccid,name=smartcard",
], None)
# fmt: on
@dataclass
class QemuCommand:
args: list[str]
vsock_cid: int | None = None
def qemu_command(
vm: VmConfig,
nixos_config: dict[str, str],
xchg_dir: Path,
secrets_dir: Path,
state_dir: Path,
disk_img: Path,
) -> QemuCommand:
kernel_cmdline = [
(Path(nixos_config["toplevel"]) / "kernel-params").read_text(),
f'init={nixos_config["toplevel"]}/init',
f'regInfo={nixos_config["regInfo"]}/registration',
"console=ttyS0,115200n8",
]
if not vm.wayland:
kernel_cmdline.append("console=tty0")
# fmt: off
command = [
"qemu-kvm",
"-name", vm.machine_name,
"-m", f'{nixos_config["memorySize"]}M',
"-object", f"memory-backend-memfd,id=mem,size={nixos_config['memorySize']}M",
"-machine", "pc,memory-backend=mem,accel=kvm",
"-smp", str(nixos_config["cores"]),
"-cpu", "max",
"-enable-kvm",
"-device", "virtio-rng-pci",
"-net", "nic,netdev=user.0,model=virtio",
"-netdev", "user,id=user.0",
"-virtfs", "local,path=/nix/store,security_model=none,mount_tag=nix-store",
"-virtfs", f"local,path={xchg_dir},security_model=none,mount_tag=shared",
"-virtfs", f"local,path={xchg_dir},security_model=none,mount_tag=xchg",
"-virtfs", f"local,path={secrets_dir},security_model=none,mount_tag=secrets",
"-virtfs", f"local,path={state_dir},security_model=none,mount_tag=state",
"-drive", f"cache=writeback,file={disk_img},format=raw,id=drive1,if=none,index=1,werror=report",
"-device", "virtio-blk-pci,bootindex=1,drive=drive1,serial=root",
"-device", "virtio-keyboard",
"-usb", "-device", "usb-tablet,bus=usb-bus.0",
"-kernel", f'{nixos_config["toplevel"]}/kernel',
"-initrd", nixos_config["initrd"],
"-append", " ".join(kernel_cmdline),
] # fmt: on
vsock_cid = None
if vm.graphics:
opts = graphics_options(vm)
vsock_cid = opts.vsock_cid
command.extend(opts.args)
else:
command.append("-nographic")
return QemuCommand(command, vsock_cid=vsock_cid)
def facts_to_nixos_config(facts: dict[str, dict[str, bytes]]) -> dict:
    """Convert a facts mapping into a clanCore extra-config structure.

    *facts* maps service name -> fact name -> raw value.  Each value is
    UTF-8 decoded and nested under
    ``clanCore.secrets.<service>.facts.<fact>.value``.
    """
    secrets = {
        service: {
            "facts": {
                fact: {"value": raw.decode()}
                for fact, raw in service_facts.items()
            }
        }
        for service, service_facts in facts.items()
    }
    return {"clanCore": {"secrets": secrets}}
# TODO move this to the Machines class
def get_vm_create_info(
machine: Machine, vm: VmConfig, nix_options: list[str]
def build_vm(
machine: Machine, tmpdir: Path, nix_options: list[str] = []
) -> dict[str, str]:
config = nix_config()
system = config["system"]
# TODO pass prompt here for the GTK gui
secrets_dir = get_secrets(machine, tmpdir)
clan_dir = machine.flake
cmd = nix_build(
[
f'{clan_dir}#clanInternals.machines."{system}"."{machine.name}".config.system.clan.vm.create',
*nix_options,
],
machine_gcroot(clan_name=vm.clan_name, flake_url=str(vm.flake_url))
/ f"vm-{machine.name}",
)
proc = run(
cmd, log=Log.BOTH, error_msg=f"Could not build vm config for {machine.name}"
facts_module = importlib.import_module(machine.facts_module)
fact_store = facts_module.FactStore(machine=machine)
facts = fact_store.get_all()
nixos_config_file = machine.build_nix(
"config.system.clan.vm.create",
extra_config=facts_to_nixos_config(facts),
nix_options=nix_options,
)
try:
return json.loads(Path(proc.stdout.strip()).read_text())
vm_data = json.loads(Path(nixos_config_file).read_text())
vm_data["secrets_dir"] = str(secrets_dir)
return vm_data
except json.JSONDecodeError as e:
raise ClanError(f"Failed to parse vm config: {e}")
@@ -168,32 +63,32 @@ def get_secrets(
tmpdir: Path,
) -> Path:
secrets_dir = tmpdir / "secrets"
secrets_dir.mkdir(exist_ok=True)
secrets_dir.mkdir(parents=True, exist_ok=True)
secrets_module = importlib.import_module(machine.secrets_module)
secret_store = secrets_module.SecretStore(machine=machine)
# Only generate secrets for local clans
if isinstance(machine.flake, Path) and machine.flake.is_dir():
generate_secrets(machine)
else:
log.warning("won't generate secrets for non local clan")
generate_secrets(machine)
secret_store.upload(secrets_dir)
return secrets_dir
def prepare_disk(tmpdir: Path, log_fd: IO[str] | None) -> Path:
disk_img = tmpdir / "disk.img"
def prepare_disk(
directory: Path,
size: str = "1024M",
file_name: str = "disk.img",
) -> Path:
disk_img = directory / file_name
cmd = nix_shell(
["nixpkgs#qemu"],
[
"qemu-img",
"create",
"-f",
"raw",
"qcow2",
str(disk_img),
"1024M",
size,
],
)
run(
@@ -202,107 +97,77 @@ def prepare_disk(tmpdir: Path, log_fd: IO[str] | None) -> Path:
error_msg=f"Could not create disk image at {disk_img}",
)
cmd = nix_shell(
["nixpkgs#e2fsprogs"],
[
"mkfs.ext4",
"-L",
"nixos",
str(disk_img),
],
)
run(
cmd,
log=Log.BOTH,
error_msg=f"Could not create ext4 filesystem at {disk_img}",
)
return disk_img
VMADDR_CID_HYPERVISOR = 2
def test_vsock_port(port: int) -> bool:
try:
s = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
s.connect((VMADDR_CID_HYPERVISOR, port))
s.close()
return True
except OSError:
return False
@contextlib.contextmanager
def start_waypipe(cid: int | None, title_prefix: str) -> Iterator[None]:
if cid is None:
yield
return
waypipe = nix_shell(
["git+https://git.clan.lol/clan/clan-core#waypipe"],
[
"waypipe",
"--vsock",
"--socket",
f"s{cid}:3049",
"--title-prefix",
title_prefix,
"client",
],
)
with subprocess.Popen(waypipe) as proc:
try:
while not test_vsock_port(3049):
time.sleep(0.1)
yield
finally:
proc.kill()
def run_vm(
vm: VmConfig,
nix_options: list[str] = [],
log_fd: IO[str] | None = None,
) -> None:
"""
log_fd can be used to stream the output of all commands to a UI
"""
def run_vm(vm: VmConfig, nix_options: list[str] = []) -> None:
machine = Machine(vm.machine_name, vm.flake_url)
log.debug(f"Creating VM for {machine}")
# TODO: We should get this from the vm argument
nixos_config = get_vm_create_info(machine, vm, nix_options)
# store the temporary rootfs inside XDG_CACHE_HOME on the host
# otherwise, when using /tmp, we risk running out of memory
cache = user_cache_dir() / "clan"
cache.mkdir(exist_ok=True)
with TemporaryDirectory(dir=cache) as cachedir, TemporaryDirectory() as sockets:
tmpdir = Path(cachedir)
with tempfile.TemporaryDirectory() as tmpdir_:
tmpdir = Path(tmpdir_)
xchg_dir = tmpdir / "xchg"
xchg_dir.mkdir(exist_ok=True)
# TODO: We should get this from the vm argument
nixos_config = build_vm(machine, tmpdir, nix_options)
secrets_dir = get_secrets(machine, tmpdir)
disk_img = prepare_disk(tmpdir, log_fd)
state_dir = vm_state_dir(vm.clan_name, str(machine.flake), machine.name)
state_dir = vm_state_dir(str(vm.flake_url), machine.name)
state_dir.mkdir(parents=True, exist_ok=True)
# specify socket files for qmp and qga
qmp_socket_file = Path(sockets) / "qmp.sock"
qga_socket_file = Path(sockets) / "qga.sock"
# Create symlinks to the qmp/qga sockets to be able to find them later.
# This indirection is needed because we cannot put the sockets directly
# in the state_dir.
# The reason is, qemu has a length limit of 108 bytes for the qmp socket
# path which is violated easily.
qmp_link = state_dir / "qmp.sock"
if os.path.lexists(qmp_link):
qmp_link.unlink()
qmp_link.symlink_to(qmp_socket_file)
qga_link = state_dir / "qga.sock"
if os.path.lexists(qga_link):
qga_link.unlink()
qga_link.symlink_to(qga_socket_file)
rootfs_img = prepare_disk(tmpdir)
state_img = state_dir / "state.qcow2"
if not state_img.exists():
state_img = prepare_disk(
directory=state_dir,
file_name="state.qcow2",
size="50G",
)
virtiofsd_socket = Path(sockets) / "virtiofsd.sock"
qemu_cmd = qemu_command(
vm,
nixos_config,
xchg_dir=xchg_dir,
secrets_dir=secrets_dir,
state_dir=state_dir,
disk_img=disk_img,
secrets_dir=Path(nixos_config["secrets_dir"]),
rootfs_img=rootfs_img,
state_img=state_img,
virtiofsd_socket=virtiofsd_socket,
qmp_socket_file=qmp_socket_file,
qga_socket_file=qga_socket_file,
)
packages = ["nixpkgs#qemu"]
env = os.environ.copy()
if vm.graphics and not vm.wayland:
if vm.graphics and not vm.waypipe:
packages.append("nixpkgs#virt-viewer")
remote_viewer_mimetypes = module_root() / "vms" / "mimetypes"
env[
"XDG_DATA_DIRS"
] = f"{remote_viewer_mimetypes}:{env.get('XDG_DATA_DIRS', '')}"
with start_waypipe(qemu_cmd.vsock_cid, f"[{vm.machine_name}] "):
with start_waypipe(
qemu_cmd.vsock_cid, f"[{vm.machine_name}] "
), start_virtiofsd(virtiofsd_socket):
run(
nix_shell(packages, qemu_cmd.args),
env=env,
@@ -311,33 +176,20 @@ def run_vm(
)
@dataclass
class RunOptions:
machine: str
flake: Path
nix_options: list[str] = field(default_factory=list)
wayland: bool = False
def run_command(
machine: str, flake: Path, option: list[str] = [], **args: argparse.Namespace
) -> None:
machine_obj: Machine = Machine(machine, flake)
vm: VmConfig = inspect_vm(machine=machine_obj)
run_vm(vm, option)
def run_command(args: argparse.Namespace) -> None:
run_options = RunOptions(
machine=args.machine,
flake=args.flake,
nix_options=args.option,
wayland=args.wayland,
)
machine = Machine(run_options.machine, run_options.flake)
vm = inspect_vm(machine=machine)
# TODO: allow to set this in the config
vm.wayland = run_options.wayland
run_vm(vm, run_options.nix_options)
def _run_command(args: argparse.Namespace) -> None:
run_command(**args.vars())
def register_run_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument("machine", type=str, help="machine in the flake to run")
parser.add_argument("--flake-url", type=str, help="flake url")
parser.add_argument("--wayland", action="store_true", help="use wayland")
parser.set_defaults(func=run_command)
parser.set_defaults(func=_run_command)

View File

@@ -0,0 +1,41 @@
import contextlib
import shutil
import subprocess
import time
from collections.abc import Iterator
from pathlib import Path
from ..errors import ClanError
from ..nix import nix_shell
@contextlib.contextmanager
def start_virtiofsd(socket_path: Path) -> Iterator[None]:
    """Run virtiofsd sharing /nix/store on *socket_path* while the context is open.

    Raises ClanError if the daemon dies before its socket appears; the
    daemon is killed when the context exits.
    """
    # Without newuidmap, user-namespace sandboxing is unavailable — fall back.
    sandbox_mode = "none" if shutil.which("newuidmap") is None else "namespace"
    cmd = nix_shell(
        ["nixpkgs#virtiofsd"],
        [
            "virtiofsd",
            "--socket-path",
            str(socket_path),
            "--cache",
            "always",
            "--sandbox",
            sandbox_mode,
            "--shared-dir",
            "/nix/store",
        ],
    )
    proc = subprocess.Popen(cmd)
    try:
        # Wait for the daemon to create its listening socket; abort if it died.
        while not socket_path.exists():
            returncode = proc.poll()
            if returncode is not None:
                msg = f"virtiofsd exited unexpectedly with code {returncode}"
                raise ClanError(msg)
            time.sleep(0.1)
        yield
    finally:
        proc.kill()
        proc.wait()

View File

@@ -0,0 +1,50 @@
import contextlib
import socket
import subprocess
import time
from collections.abc import Iterator
from ..errors import ClanError
from ..nix import nix_shell
VMADDR_CID_HYPERVISOR = 2
def test_vsock_port(port: int) -> bool:
try:
s = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
s.connect((VMADDR_CID_HYPERVISOR, port))
s.close()
return True
except OSError:
return False
@contextlib.contextmanager
def start_waypipe(cid: int | None, title_prefix: str) -> Iterator[None]:
    """Run a waypipe client for the VM reachable at vsock *cid*.

    A None cid means the VM has no wayland forwarding; the context is then a
    no-op. Otherwise waypipe is started, we wait until its vsock port answers,
    and the process is killed when the context exits.
    """
    if cid is None:
        yield
        return
    cmd = nix_shell(
        ["git+https://git.clan.lol/clan/clan-core#waypipe"],
        [
            "waypipe",
            "--vsock",
            "--socket",
            f"s{cid}:3049",
            "--title-prefix",
            title_prefix,
            "client",
        ],
    )
    proc = subprocess.Popen(cmd)
    try:
        # Poll until the guest side is reachable; abort if waypipe died first.
        while not test_vsock_port(3049):
            returncode = proc.poll()
            if returncode is not None:
                msg = f"waypipe exited unexpectedly with code {returncode}"
                raise ClanError(msg)
            time.sleep(0.1)
        yield
    finally:
        proc.kill()
        proc.wait()

View File

@@ -9,7 +9,7 @@ dynamic = ["version"]
scripts = { clan = "clan_cli:main" }
[tool.setuptools.packages.find]
exclude = ["clan_cli.nixpkgs*"]
exclude = ["clan_cli.nixpkgs*", "result"]
[tool.setuptools.package-data]
clan_cli = ["config/jsonschema/*", "webui/assets/**/*", "vms/mimetypes/**/*"]
@@ -55,5 +55,5 @@ ignore_missing_imports = true
[tool.ruff]
target-version = "py311"
line-length = 88
select = [ "E", "F", "I", "U", "N", "RUF", "ANN", "A" ]
ignore = ["E501", "E402", "ANN101", "ANN401", "A003"]
lint.select = [ "E", "F", "I", "U", "N", "RUF", "ANN", "A" ]
lint.ignore = ["E501", "E402", "E731", "ANN101", "ANN401", "A003"]

77
pkgs/clan-cli/qemu/qga.py Normal file
View File

@@ -0,0 +1,77 @@
import base64
import json
import socket
from pathlib import Path
from time import sleep
# qga is almost like qmp, but not quite, because:
# - server doesn't send initial message
# - no need to initialize by asking for capabilities
# - results need to be base64 decoded
class QgaSession:
    """Minimal client for the QEMU guest agent (QGA) over a unix socket.

    Unlike QMP, the QGA server sends no initial greeting, needs no
    capabilities negotiation, and base64-encodes command output.
    """

    def __init__(self, socket_file: Path | str) -> None:
        """Connect to the QGA unix socket, retrying while the guest boots."""
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        # try to reconnect a couple of times if connection refused
        for _ in range(100):
            try:
                self.sock.connect(str(socket_file))
                return
            except ConnectionRefusedError:
                sleep(0.1)
        # Final attempt outside the retry loop so its error propagates to the caller.
        self.sock.connect(str(socket_file))

    def get_response(self) -> dict:
        """Read one JSON reply from the agent and decode it.

        NOTE(review): assumes a whole reply arrives in a single recv() —
        confirm this holds for large outputs.
        """
        result = self.sock.recv(9999999)
        return json.loads(result)

    # only execute, don't wait for response
    def exec_cmd(self, cmd: str) -> None:
        """Fire a guest-exec of *cmd* via /bin/sh -l -c, capturing output."""
        self.sock.send(
            json.dumps(
                {
                    "execute": "guest-exec",
                    "arguments": {
                        "path": "/bin/sh",
                        "arg": ["-l", "-c", cmd],
                        "capture-output": True,
                    },
                }
            ).encode("utf-8")
        )

    # run, wait for result, return exitcode and output
    def run(self, cmd: str) -> tuple[int, str, str]:
        """Run *cmd* in the guest and return (exitcode, stdout, stderr)."""
        self.exec_cmd(cmd)
        result_pid = self.get_response()
        pid = result_pid["return"]["pid"]
        # loop until exited=true
        status_payload = json.dumps(
            {
                "execute": "guest-exec-status",
                "arguments": {
                    "pid": pid,
                },
            }
        ).encode("utf-8")
        while True:
            self.sock.send(status_payload)
            result = self.get_response()
            if "error" in result and result["error"]["desc"].startswith("PID"):
                raise Exception("PID could not be found")
            if result["return"]["exited"]:
                break
            sleep(0.1)
        exitcode = result["return"]["exitcode"]
        # out-data/err-data are base64-encoded and absent when there was no output.
        stdout = (
            ""
            if "out-data" not in result["return"]
            else base64.b64decode(result["return"]["out-data"]).decode("utf-8")
        )
        stderr = (
            ""
            if "err-data" not in result["return"]
            else base64.b64decode(result["return"]["err-data"]).decode("utf-8")
        )
        return exitcode, stdout, stderr

317
pkgs/clan-cli/qemu/qmp.py Normal file
View File

@@ -0,0 +1,317 @@
# mypy: ignore-errors
""" QEMU Monitor Protocol Python class """
# Copyright (C) 2009, 2010 Red Hat Inc.
#
# Authors:
# Luiz Capitulino <lcapitulino@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
import errno
import json
import logging
import socket
from typing import Any
class QMPError(Exception):
    """
    Base class for all QMP errors in this module.
    """


class QMPConnectError(QMPError):
    """
    Raised when the QMP connection or greeting fails, or a read errors out.
    """


class QMPCapabilitiesError(QMPError):
    """
    Raised when qmp_capabilities negotiation with the server fails.
    """


class QMPTimeoutError(QMPError):
    """
    Raised when waiting for a QMP event exceeds the given timeout.
    """
class QEMUMonitorProtocol:
    """
    Provide an API to connect to QEMU via QEMU Monitor Protocol (QMP) and then
    allow to handle commands and events.

    NOTE: vendored QMP client (see copyright header above); kept close to
    upstream on purpose — avoid restyling.
    """

    #: Logger object for debugging messages
    logger: logging.Logger = logging.getLogger("QMP")

    def __init__(
        self,
        address: str | tuple[str, int],
        server: bool = False,
        nickname: str | None = None,
    ) -> None:
        """
        Create a QEMUMonitorProtocol class.

        @param address: QEMU address, can be either a unix socket path (string)
                        or a tuple in the form ( address, port ) for a TCP
                        connection
        @param server: server mode listens on the socket (bool)
        @raise OSError on socket connection errors
        @note No connection is established, this is done by the connect() or
              accept() methods
        """
        self.__events: list[dict[str, Any]] = []
        self.__address: str | tuple[str, int] = address
        self.__sock: socket.socket = self.__get_sock()
        self.__sockfile: socket.SocketIO | None = None
        self._nickname: str | None = nickname
        if self._nickname:
            # Per-connection child logger so concurrent monitors are distinguishable.
            self.logger = logging.getLogger("QMP").getChild(self._nickname)
        if server:
            self.__sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.__sock.bind(self.__address)
            self.__sock.listen(1)

    def __get_sock(self) -> socket.socket:
        # A tuple address means TCP (host, port); a plain string is a unix socket path.
        if isinstance(self.__address, tuple):
            family = socket.AF_INET
        else:
            family = socket.AF_UNIX
        return socket.socket(family, socket.SOCK_STREAM)

    def __negotiate_capabilities(self) -> dict[str, Any]:
        # The server speaks first with a greeting containing the "QMP" key.
        greeting = self.__json_read()
        if greeting is None or "QMP" not in greeting:
            raise QMPConnectError
        # Greeting seems ok, negotiate capabilities
        resp = self.cmd("qmp_capabilities")
        if resp and "return" in resp:
            return greeting
        raise QMPCapabilitiesError

    def __json_read(self, only_event: bool = False) -> dict[str, Any] | None:
        # Read lines until a command response (or, with only_event, any event)
        # arrives; asynchronous events are cached in __events along the way.
        while True:
            data = self.__sockfile.readline()
            if not data:
                return None
            resp = json.loads(data)
            if "event" in resp:
                self.logger.debug("<<< %s", resp)
                self.__events.append(resp)
                if not only_event:
                    continue
            return resp

    def __get_events(self, wait: bool | float = False) -> None:
        """
        Check for new events in the stream and cache them in __events.

        @param wait (bool): block until an event is available.
        @param wait (float): If wait is a float, treat it as a timeout value.

        @raise QMPTimeoutError: If a timeout float is provided and the timeout
            period elapses.
        @raise QMPConnectError: If wait is True but no events could be
            retrieved or if some other error occurred.
        """
        # Check for new events regardless and pull them into the cache:
        # (non-blocking read; EAGAIN simply means nothing is pending)
        self.__sock.setblocking(0)
        try:
            self.__json_read()
        except OSError as err:
            if err.errno == errno.EAGAIN:
                # No data available
                pass
        self.__sock.setblocking(1)

        # Wait for new events, if needed.
        # if wait is 0.0, this means "no wait" and is also implicitly false.
        if not self.__events and wait:
            if isinstance(wait, float):
                self.__sock.settimeout(wait)
            try:
                ret = self.__json_read(only_event=True)
            except TimeoutError:
                raise QMPTimeoutError("Timeout waiting for event")
            except Exception:
                raise QMPConnectError("Error while reading from socket")
            if ret is None:
                raise QMPConnectError("Error while reading from socket")
            self.__sock.settimeout(None)

    def __enter__(self) -> "QEMUMonitorProtocol":
        # Implement context manager enter function.
        return self

    def __exit__(self, exc_type: Any, exc_value: Any, exc_traceback: Any) -> bool:
        # Implement context manager exit function.
        self.close()
        return False

    def connect(self, negotiate: bool = True) -> dict[str, Any] | None:
        """
        Connect to the QMP Monitor and perform capabilities negotiation.

        @return QMP greeting dict, or None if negotiate is false
        @raise OSError on socket connection errors
        @raise QMPConnectError if the greeting is not received
        @raise QMPCapabilitiesError if fails to negotiate capabilities
        """
        self.__sock.connect(self.__address)
        self.__sockfile = self.__sock.makefile()
        if negotiate:
            return self.__negotiate_capabilities()
        return None

    def accept(self, timeout: float | None = 15.0) -> dict[str, Any]:
        """
        Await connection from QMP Monitor and perform capabilities negotiation.

        @param timeout: timeout in seconds (nonnegative float number, or
                        None). The value passed will set the behavior of the
                        underneath QMP socket as described in [1]. Default value
                        is set to 15.0.
        @return QMP greeting dict
        @raise OSError on socket connection errors
        @raise QMPConnectError if the greeting is not received
        @raise QMPCapabilitiesError if fails to negotiate capabilities

        [1]
        https://docs.python.org/3/library/socket.html#socket.socket.settimeout
        """
        self.__sock.settimeout(timeout)
        # Replace the listening socket with the accepted connection.
        self.__sock, _ = self.__sock.accept()
        self.__sockfile = self.__sock.makefile()
        return self.__negotiate_capabilities()

    def cmd_obj(self, qmp_cmd: dict[str, Any]) -> dict[str, Any] | None:
        """
        Send a QMP command to the QMP Monitor.

        @param qmp_cmd: QMP command to be sent as a Python dict
        @return QMP response as a Python dict or None if the connection has
                been closed
        """
        self.logger.debug(">>> %s", qmp_cmd)
        try:
            self.__sock.sendall(json.dumps(qmp_cmd).encode("utf-8"))
        except OSError as err:
            # EPIPE means the peer closed the connection; report as None.
            if err.errno == errno.EPIPE:
                return None
            raise err
        resp = self.__json_read()
        self.logger.debug("<<< %s", resp)
        return resp

    def cmd(
        self,
        name: str,
        args: dict[str, Any] | None = None,
        cmd_id: dict[str, Any] | list[Any] | str | int | None = None,
    ) -> dict[str, Any] | None:
        """
        Build a QMP command and send it to the QMP Monitor.

        @param name: command name (string)
        @param args: command arguments (dict)
        @param cmd_id: command id (dict, list, string or int)
        """
        qmp_cmd: dict[str, Any] = {"execute": name}
        if args:
            qmp_cmd["arguments"] = args
        if cmd_id:
            qmp_cmd["id"] = cmd_id
        return self.cmd_obj(qmp_cmd)

    def command(self, cmd: str, **kwds: Any) -> Any:
        """
        Build and send a QMP command to the monitor, report errors if any
        """
        ret = self.cmd(cmd, kwds)
        if "error" in ret:
            raise Exception(ret["error"]["desc"])
        return ret["return"]

    def pull_event(self, wait: bool | float = False) -> dict[str, Any] | None:
        """
        Pulls a single event.

        @param wait (bool): block until an event is available.
        @param wait (float): If wait is a float, treat it as a timeout value.

        @raise QMPTimeoutError: If a timeout float is provided and the timeout
            period elapses.
        @raise QMPConnectError: If wait is True but no events could be
            retrieved or if some other error occurred.

        @return The first available QMP event, or None.
        """
        self.__get_events(wait)
        if self.__events:
            return self.__events.pop(0)
        return None

    def get_events(self, wait: bool | float = False) -> list[dict[str, Any]]:
        """
        Get a list of available QMP events.

        @param wait (bool): block until an event is available.
        @param wait (float): If wait is a float, treat it as a timeout value.

        @raise QMPTimeoutError: If a timeout float is provided and the timeout
            period elapses.
        @raise QMPConnectError: If wait is True but no events could be
            retrieved or if some other error occurred.

        @return The list of available QMP events.
        """
        self.__get_events(wait)
        return self.__events

    def clear_events(self) -> None:
        """
        Clear current list of pending events.
        """
        self.__events = []

    def close(self) -> None:
        """
        Close the socket and socket file.
        """
        if self.__sock:
            self.__sock.close()
        if self.__sockfile:
            self.__sockfile.close()

    def settimeout(self, timeout: float | None) -> None:
        """
        Set the socket timeout.

        @param timeout (float): timeout in seconds, or None.
        @note This is a wrap around socket.settimeout
        """
        self.__sock.settimeout(timeout)

    def get_sock_fd(self) -> int:
        """
        Get the socket file descriptor.

        @return The file descriptor number.
        """
        return self.__sock.fileno()

    def is_scm_available(self) -> bool:
        """
        Check if the socket allows for SCM_RIGHTS.

        @return True if SCM_RIGHTS is available, otherwise False.
        """
        return self.__sock.family == socket.AF_UNIX

View File

@@ -41,7 +41,10 @@ class FlakeForTest(NamedTuple):
def generate_flake(
temporary_home: Path,
flake_template: Path,
substitutions: dict[str, str] = {},
substitutions: dict[str, str] = {
"__CHANGE_ME__": "_test_vm_persistence",
"git+https://git.clan.lol/clan/clan-core": "path://" + str(CLAN_CORE),
},
# define the machines directly including their config
machine_configs: dict[str, dict] = {},
) -> FlakeForTest:

View File

@@ -1,5 +1,5 @@
{ lib, ... }: {
clan.networking.deploymentAddress = "__CLAN_DEPLOYMENT_ADDRESS__";
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__";
clanCore.secretsUploadDirectory = "__CLAN_SOPS_KEY_DIR__";

View File

@@ -1,5 +1,5 @@
{ lib, ... }: {
clan.networking.deploymentAddress = "__CLAN_DEPLOYMENT_ADDRESS__";
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__";
clanCore.secretsUploadDirectory = "__CLAN_SOPS_KEY_DIR__";

View File

@@ -1,5 +1,5 @@
{ lib, ... }: {
clan.networking.deploymentAddress = "__CLAN_DEPLOYMENT_ADDRESS__";
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
clan.virtualisation.graphics = false;

View File

@@ -20,16 +20,16 @@ from clan_cli.dirs import clan_key_safe, vm_state_dir
def test_clan_key_safe() -> None:
assert clan_key_safe("clan1", "/foo/bar") == "clan1-%2Ffoo%2Fbar"
assert clan_key_safe("/foo/bar") == "%2Ffoo%2Fbar"
def test_vm_state_dir_identity() -> None:
dir1 = vm_state_dir("clan1", "https://some.clan", "vm1")
dir2 = vm_state_dir("clan1", "https://some.clan", "vm1")
dir1 = vm_state_dir("https://some.clan", "vm1")
dir2 = vm_state_dir("https://some.clan", "vm1")
assert str(dir1) == str(dir2)
def test_vm_state_dir_no_collision() -> None:
dir1 = vm_state_dir("clan1", "/foo/bar", "vm1")
dir2 = vm_state_dir("clan1", "https://some.clan", "vm1")
dir1 = vm_state_dir("/foo/bar", "vm1")
dir2 = vm_state_dir("https://some.clan", "vm1")
assert str(dir1) != str(dir2)

View File

@@ -12,10 +12,11 @@
clanName = "test_flake_with_core";
machines = {
vm1 = { lib, ... }: {
clan.networking.deploymentAddress = "__CLAN_DEPLOYMENT_ADDRESS__";
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__";
clanCore.secretsUploadDirectory = "__CLAN_SOPS_KEY_DIR__";
clanCore.sops.defaultGroups = [ "admins" ];
clan.virtualisation.graphics = false;
clan.networking.zerotier.controller.enable = true;
@@ -32,7 +33,7 @@
};
};
vm2 = { lib, ... }: {
clan.networking.deploymentAddress = "__CLAN_DEPLOYMENT_ADDRESS__";
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__";
clanCore.secretsUploadDirectory = "__CLAN_SOPS_KEY_DIR__";

View File

@@ -12,7 +12,7 @@
clanName = "test_flake_with_core_and_pass";
machines = {
vm1 = { lib, ... }: {
clan.networking.deploymentAddress = "__CLAN_DEPLOYMENT_ADDRESS__";
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
clanCore.secretStore = "password-store";
clanCore.secretsUploadDirectory = lib.mkForce "__CLAN_SOPS_KEY_DIR__/secrets";

View File

@@ -26,7 +26,6 @@ def test_history_add(
"add",
str(uri),
]
cli.run(cmd)
history_file = user_history_file()

View File

@@ -275,6 +275,14 @@ def test_secrets(
cli.run(["--flake", str(test_flake.path), "secrets", "list"])
assert capsys.readouterr().out == "key\n"
capsys.readouterr() # empty the buffer
cli.run(["--flake", str(test_flake.path), "secrets", "list", "nonexisting"])
assert capsys.readouterr().out == ""
capsys.readouterr() # empty the buffer
cli.run(["--flake", str(test_flake.path), "secrets", "list", "key"])
assert capsys.readouterr().out == "key\n"
cli.run(
[
"--flake",

View File

@@ -33,6 +33,17 @@ def test_generate_secret(
age_keys[0].pubkey,
]
)
cli.run(
[
"--flake",
str(test_flake_with_core.path),
"secrets",
"groups",
"add-user",
"admins",
"user1",
]
)
cmd = ["--flake", str(test_flake_with_core.path), "secrets", "generate", "vm1"]
cli.run(cmd)
has_secret(test_flake_with_core.path, "vm1-age.key")

View File

@@ -60,7 +60,7 @@ def test_upload_secret(
flake = test_flake_with_core_and_pass.path.joinpath("flake.nix")
host = host_group.hosts[0]
addr = f"{host.user}@{host.host}:{host.port}?StrictHostKeyChecking=no&UserKnownHostsFile=/dev/null&IdentityFile={host.key}"
new_text = flake.read_text().replace("__CLAN_DEPLOYMENT_ADDRESS__", addr)
new_text = flake.read_text().replace("__CLAN_TARGET_ADDRESS__", addr)
flake.write_text(new_text)
cli.run(["secrets", "upload", "vm1"])
zerotier_identity_secret = (

View File

@@ -52,7 +52,7 @@ def test_secrets_upload(
flake = test_flake_with_core.path.joinpath("flake.nix")
host = host_group.hosts[0]
addr = f"{host.user}@{host.host}:{host.port}?StrictHostKeyChecking=no&UserKnownHostsFile=/dev/null&IdentityFile={host.key}"
new_text = flake.read_text().replace("__CLAN_DEPLOYMENT_ADDRESS__", addr)
new_text = flake.read_text().replace("__CLAN_TARGET_ADDRESS__", addr)
flake.write_text(new_text)
cli.run(["--flake", str(test_flake_with_core.path), "secrets", "upload", "vm1"])

View File

@@ -1,32 +1,11 @@
import subprocess
from clan_cli.ssh import Host, HostGroup, run
def test_run() -> None:
p = run("echo hello")
assert p.stdout is None
def test_run_failure() -> None:
p = run("exit 1", check=False)
assert p.returncode == 1
try:
p = run("exit 1")
except Exception:
pass
else:
assert False, "Command should have raised an error"
from clan_cli.ssh import Host, HostGroup
hosts = HostGroup([Host("some_host")])
def test_run_environment() -> None:
p1 = run("echo $env_var", stdout=subprocess.PIPE, extra_env=dict(env_var="true"))
assert p1.stdout == "true\n"
p2 = hosts.run_local(
"echo $env_var", extra_env=dict(env_var="true"), stdout=subprocess.PIPE
)
@@ -38,17 +17,6 @@ def test_run_environment() -> None:
assert "env_var=true" in p3[0].result.stdout
def test_run_non_shell() -> None:
p = run(["echo", "$hello"], stdout=subprocess.PIPE)
assert p.stdout == "$hello\n"
def test_run_stderr_stdout() -> None:
p = run("echo 1; echo 2 >&2", stdout=subprocess.PIPE, stderr=subprocess.PIPE)
assert p.stdout == "1\n"
assert p.stderr == "2\n"
def test_run_local() -> None:
hosts.run_local("echo hello")

Some files were not shown because too many files have changed in this diff Show More