Compare commits


178 Commits

Author SHA1 Message Date
Qubasa
446039b02b Working demo script 2024-02-16 17:47:34 +07:00
Qubasa
5a69bbe93e demo script 2024-02-16 17:47:05 +07:00
Qubasa
280bee0861 clan-vm-manager: Fixing vm starting. 2024-02-16 16:10:49 +07:00
Qubasa
4f7f34f9b4 clan-vm-manager: Added clan icon to trayicon 2024-02-16 12:25:06 +07:00
clan-bot
7fe38a9a80 Merge pull request 'add waypipe user to video group' (#853) from Mic92-target_host into main 2024-02-15 18:41:04 +00:00
Jörg Thalheim
95820905f9 waypipe: add fixed uid for user 2024-02-15 19:33:01 +01:00
Mic92
be77d365e7 Merge pull request 'add waypipe user to video group' (#852) from Mic92-target_host into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/852
2024-02-15 18:31:39 +00:00
Jörg Thalheim
897acfaf6b add waypipe user to video group 2024-02-15 18:31:39 +00:00
Jörg Thalheim
30cb4c0eed add waypipe user to video group 2024-02-15 19:30:51 +01:00
clan-bot
50c8c2070b Merge pull request 'vms: move qemu_command to its own module' (#851) from Mic92-target_host into main 2024-02-15 16:30:01 +00:00
Jörg Thalheim
0200016dd2 vms: move qemu_command to its own module 2024-02-15 17:25:24 +01:00
clan-bot
658758302a Merge pull request 'vms: smaller cleanups' (#850) from Mic92-target_host into main 2024-02-15 16:23:24 +00:00
Jörg Thalheim
8e780b2a8c vms: drop unused xchdir 2024-02-15 17:19:43 +01:00
Jörg Thalheim
a399cbb8d9 vms: move virtiofsd/waypipe into their own modules 2024-02-15 17:19:31 +01:00
clan-bot
eacda36bb3 Merge pull request 'fix fact store' (#849) from Mic92-target_host into main 2024-02-15 11:27:59 +00:00
Jörg Thalheim
4943c33307 add file prefix for nix 2.19 or newer 2024-02-15 12:24:18 +01:00
clan-bot
2e900d943d Merge pull request 'waypipe: add more environment variables' (#848) from a-kenji-wayland-module-env-add into main 2024-02-15 10:09:12 +00:00
a-kenji
d7d33f6c25 waypipe: add more environment variables 2024-02-15 11:03:27 +01:00
clan-bot
58073375e4 Merge pull request 'add factsStore modules' (#839) from fact_store into main 2024-02-15 09:46:01 +00:00
lassulus
6871b29d15 vms: use vm fact/secret-store 2024-02-15 10:41:25 +01:00
lassulus
98139ac48d add factsStore modules 2024-02-15 10:41:25 +01:00
clan-bot
f9f428b960 Merge pull request 'waypipe: add wayland environment' (#847) from a-kenji-wayland-module-env into main 2024-02-15 09:01:49 +00:00
a-kenji
99bda8e099 waypipe: add wayland environment 2024-02-15 09:58:56 +01:00
clan-bot
06721b0c5a Merge pull request 'optimize filesystem mount flags' (#846) from Mic92-target_host into main 2024-02-14 12:06:47 +00:00
Jörg Thalheim
7cab50c088 optimize filesystem mount flags
perf!
2024-02-14 13:01:47 +01:00
clan-bot
1cc6e74297 Merge pull request 'clan_vm_manager: --debug enables debug mode in clan_cli too' (#840) from Qubasa-add_spinner into main 2024-02-14 08:43:14 +00:00
Qubasa
30850eef22 clan_cli: Added longer timeout for test 2024-02-14 15:40:03 +07:00
clan-bot
de69d3139b Merge pull request 'waypipe: rename systemd unit' (#845) from a-kenji-waypipe/rename-system-service into main 2024-02-13 15:45:29 +00:00
a-kenji
5ef2257ed1 waypipe: rename systemd unit 2024-02-13 16:42:29 +01:00
clan-bot
436e4e3882 Merge pull request 'waypipe: improve default module' (#844) from a-kenji-improve/module/waypipe into main 2024-02-13 15:40:11 +00:00
a-kenji
3ba4954c8d waypipe: improve default module 2024-02-13 16:37:14 +01:00
clan-bot
82e5e57e20 Merge pull request 'Fix demo script' (#843) from a-kenji-demo/improve/script into main 2024-02-13 15:18:23 +00:00
a-kenji
91c36a33da Fix demo script
The `--wayland` cli option is now a module option.
2024-02-13 16:15:36 +01:00
clan-bot
2f567db623 Merge pull request 'waypipe: improve default module' (#842) from a-kenji-waypipe/improve into main 2024-02-13 15:08:03 +00:00
a-kenji
e46315cab8 waypipe: improve default module 2024-02-13 16:02:46 +01:00
clan-bot
2c90664456 Merge pull request 'vms: enable sysusers' (#841) from Mic92-target_host into main 2024-02-13 13:20:02 +00:00
Jörg Thalheim
7a3fcd3deb vms: enable sysusers 2024-02-13 14:14:54 +01:00
clan-bot
2744d5724b Merge pull request 'switch to sops-nix experimental branch' (#832) from Mic92-target_host into main 2024-02-13 13:01:01 +00:00
Jörg Thalheim
952f976ea7 fix virtiofsd in CIs 2024-02-13 13:57:52 +01:00
Jörg Thalheim
b15c21f800 drop check for /var/lib/nixos 2024-02-13 12:44:22 +01:00
Jörg Thalheim
7cfce69504 demo.sh: make relative path configurable 2024-02-13 11:45:42 +01:00
Jörg Thalheim
8f98f0e8b7 also use qcow2 for volatile state 2024-02-13 11:45:42 +01:00
Jörg Thalheim
3bf94ab0fc use virtio-console instead of serial for vm 2024-02-13 11:45:42 +01:00
Jörg Thalheim
11ac50c17b format rootfs in vm itself 2024-02-13 11:45:42 +01:00
Jörg Thalheim
07caab537f drop unused mounts 2024-02-13 11:45:42 +01:00
Jörg Thalheim
a56dc3bf8c simplify vmstate directory 2024-02-13 11:45:42 +01:00
Jörg Thalheim
7f8ba25a5b qemu: disable sea-bios and option rom 2024-02-13 11:45:42 +01:00
Jörg Thalheim
ef202a8150 switch to sops-nix experimental branch 2024-02-13 11:45:23 +01:00
Jörg Thalheim
d6b3e03d70 vms: integrate virtiofsd 2024-02-13 11:44:17 +01:00
Qubasa
03b9183e04 clan_cli: Added lazy qmp 2024-02-13 16:44:09 +07:00
Qubasa
92ec3fb9f9 test_vms_cli: Trying new way of testing 2024-02-13 16:44:09 +07:00
Qubasa
87dbc99cab clan_cli: Made qmp implementation lazy 2024-02-13 16:44:09 +07:00
Qubasa
ef6d7cee1a clan_vm_manager: started spinner. not fully working yet 2024-02-13 16:44:09 +07:00
Qubasa
4d1bde083a UI: Improve README 2024-02-13 16:44:09 +07:00
Qubasa
403b874522 clan_vm_manager: --debug enables debug mode in clan_cli too 2024-02-13 16:44:09 +07:00
clan-bot
0dadae9087 Merge pull request 'update flake lock' (#838) from Qubasa-main into main 2024-02-12 12:03:30 +00:00
lassulus
b39c860379 fix borgbackup check
we need to switch to the classical test environment again, because borg
was complaining otherwise
2024-02-12 12:59:32 +01:00
Qubasa
7d301b7e3c update flake lock 2024-02-12 14:52:01 +07:00
clan-bot
33787a6aab Merge pull request 'UI: Added tray icon' (#831) from Qubasa-main into main 2024-02-12 07:19:59 +00:00
Qubasa
0ce8bcd018 clan_vm_manager: Added VM shutdown timeout 2024-02-12 14:16:44 +07:00
Qubasa
7b48535a98 UI: Added tray icon 2024-02-12 13:43:54 +07:00
clan-bot
f166da1621 Merge pull request 'allow passing of extra_config into machines' (#834) from lassulus-extra_config into main 2024-02-11 07:43:52 +00:00
lassulus
eebd9d0b4a allow passing of extra_config into machines 2024-02-11 08:40:41 +01:00
lassulus
10cbe11e53 nixosModules clanCore: fix iso format 2024-02-10 13:27:51 +01:00
clan-bot
2530ba52ac Merge pull request 'waypipe: add more default settings' (#836) from a-kenji-waypipe/add into main 2024-02-10 12:24:55 +00:00
a-kenji
798bbe188c waypipe: add more default settings 2024-02-10 13:22:16 +01:00
clan-bot
237d7aee4a Merge pull request 'clanModules: add waypipe service' (#835) from a-kenji-init/waypipe into main 2024-02-10 12:03:49 +00:00
a-kenji
105209cfb9 clanModules: add waypipe service 2024-02-10 12:32:06 +01:00
clan-bot
cc8d6b281b Merge pull request 'vms: init graceful shutdown for GUI' (#833) from DavHau-dave into main 2024-02-09 12:58:52 +00:00
DavHau
02dd132e08 vms: init graceful shutdown for GUI
- add python modules for qemu protocols: QMP (hardware interactions) and QGA (guest service interaction)
- refactor state directory: remove name from path (already contains url)
- add impure vm test for basic qmp interaction
- simplify existing vm persistance test (factor out shared code)
- integrate graceful shutdown into GUI

the GUI integration still needs to be improved later:
- add fallback in case system doesn't react to powerdown button
- shutdown GUI switch fails if VM hasn't been started yet, and then remains in a wrong position
2024-02-09 19:55:18 +07:00
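
For reference, a minimal sketch of the QMP side of this graceful shutdown — not clan_cli's actual wrapper (the lazy QMP implementation from the commits above), and the socket path is hypothetical:

```python
import json
import socket

def qmp_system_powerdown(socket_path: str) -> None:
    """Ask the guest to power down gracefully (like pressing the power button)."""
    with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
        sock.connect(socket_path)
        chan = sock.makefile("rw", encoding="utf-8")
        json.loads(chan.readline())  # QMP greeting banner
        # QMP requires capability negotiation before any other command.
        chan.write(json.dumps({"execute": "qmp_capabilities"}) + "\n")
        chan.flush()
        json.loads(chan.readline())  # {"return": {}}
        # The guest's acpid reacts to this power-button event (see the acpid
        # handlers added to the VM module further down in this diff).
        chan.write(json.dumps({"execute": "system_powerdown"}) + "\n")
        chan.flush()
        json.loads(chan.readline())

# qmp_system_powerdown("/tmp/clan-vm/qmp.sock")  # hypothetical socket path
```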
clan-bot
6af8423f1e Merge pull request 'UI: Fixed incorrect display of cLAN icon in window switcher' (#830) from Qubasa-main into main 2024-02-08 08:08:28 +00:00
Qubasa
8a9d3d3230 UI: Fixed incorrect display of cLAN icon in window switcher 2024-02-08 15:00:36 +07:00
clan-bot
13457eca0a Merge pull request 'Clan VM Manager: add dropdown to add more machines' (#827) from hsjobeki-main into main 2024-02-08 07:40:06 +00:00
Johannes Kirschbauer
0221e7176b Clan VM Manager: add dropdown to add more machines 2024-02-08 14:36:47 +07:00
clan-bot
7326862c1a Merge pull request 'UI: Improved Join card display' (#829) from Qubasa-main into main 2024-02-08 07:31:30 +00:00
Qubasa
0ee4dcd782 UI: Improved Join card display 2024-02-08 14:28:34 +07:00
clan-bot
e0ed00ef5c Merge pull request 'UI: Fixed style.css not working when installed' (#828) from Qubasa-main into main 2024-02-08 07:13:06 +00:00
Qubasa
a2ce341995 UI: Fixed style.css not working when installed 2024-02-08 14:10:17 +07:00
clan-bot
6ddb8dfe9d Merge pull request 'UI: Added joining multiple clans one after another over clan url' (#826) from Qubasa-main into main 2024-02-07 10:19:01 +00:00
Qubasa
10578e7611 UI: Added joining multiple clans one after another over clan url 2024-02-07 17:16:20 +07:00
clan-bot
96b98dcbed Merge pull request 'Clan VM Manager: detect if clan exists' (#825) from hsjobeki-main into main 2024-02-07 09:20:33 +00:00
Johannes Kirschbauer
030cbd24ce Clan VM Manager: detect if clan exists 2024-02-07 16:08:48 +07:00
clan-bot
045c5e608b Merge pull request 'Clan VM Manager: init per vm settings handler' (#824) from hsjobeki-main into main 2024-02-07 08:43:58 +00:00
Johannes Kirschbauer
d20902cef4 Clan VM Manager: init per vm settings handler 2024-02-07 15:41:18 +07:00
clan-bot
a1a433b654 Merge pull request 'clan_manager: UI is now a singleton.' (#822) from Qubasa-main into main 2024-02-07 05:06:11 +00:00
Qubasa
869c01bf95 clan_manager: UI is now a singleton. 2024-02-07 12:03:12 +07:00
clan-bot
68ac0cd3ec Merge pull request 'clan-cli: add simple flash command' (#821) from lassulus-flaash into main 2024-02-07 04:31:52 +00:00
lassulus
67d264263c nixosModules zerotier: remove unneeded default 2024-02-07 05:26:01 +01:00
lassulus
b780754621 clan-cli: add simple flash command 2024-02-07 05:26:01 +01:00
clan-bot
cd45bb3174 Merge pull request 'add requireExplicitUpdate option for mobile devices' (#820) from Mic92-target_host into main 2024-02-06 16:59:03 +00:00
Jörg Thalheim
6fe6229498 add requireExplicitUpdate option for mobile devices 2024-02-06 17:55:34 +01:00
clan-bot
7c598e6278 Merge pull request 'document build host option' (#819) from Mic92-target_host into main 2024-02-06 16:30:04 +00:00
Jörg Thalheim
531a899817 document build host option 2024-02-06 17:27:06 +01:00
clan-bot
e912b125c3 Merge pull request 'remove unused ssh.run method' (#818) from Mic92-target_host into main 2024-02-06 16:24:47 +00:00
Jörg Thalheim
614d1aecfd set nixpkgs.pkgs for secrets generation
This allows us to use the same nixpkgs instance for all machines.
2024-02-06 17:21:42 +01:00
Jörg Thalheim
be3a75bbd7 add support for build machines 2024-02-06 17:21:42 +01:00
Jörg Thalheim
2315dba2a9 rename machine.host to machine.target_host 2024-02-06 17:21:42 +01:00
Jörg Thalheim
6e57122da8 rename target_host to target_host_address 2024-02-06 17:21:42 +01:00
Jörg Thalheim
301a6b6a23 machines/update: get flake_attr from machine class 2024-02-06 17:21:42 +01:00
Jörg Thalheim
a2f0d077c8 remove unused ssh.run method 2024-02-06 15:47:32 +01:00
clan-bot
8234f127e5 Merge pull request 'machines: don't ignore errors when parsing secretsData json' (#817) from Mic92-target_host into main 2024-02-06 14:18:38 +00:00
Jörg Thalheim
c66c25aeb7 machines: don't ignore errors when parsing secretsData json 2024-02-06 15:15:21 +01:00
clan-bot
534ebb6094 Merge pull request 'skip machines without target_host when running clan machines update' (#816) from Mic92-target_host into main 2024-02-06 14:07:31 +00:00
Jörg Thalheim
91f26a4743 skip machines without target_host when running clan machines update 2024-02-06 15:04:19 +01:00
clan-bot
71d14eb178 Merge pull request 'move checks if targetHost/buildHost is set to cli' (#815) from Mic92-target_host into main 2024-02-06 13:54:50 +00:00
Jörg Thalheim
ad1a87fc14 move checks if targetHost/buildHost is set to cli 2024-02-06 14:51:44 +01:00
clan-bot
35bb076729 Merge pull request 'clan_manager: Implemented machine_icon, machine_description' (#813) from Qubasa-main into main 2024-02-06 13:16:31 +00:00
Qubasa
ab05cfde30 clan_manager: Implemented machine_icon, machine_description 2024-02-06 20:13:18 +07:00
clan-bot
4d18ce2366 Merge pull request 'cli,nix: Add machine_icon, machine_description to vm' (#812) from Qubasa-main into main 2024-02-06 12:29:32 +00:00
Qubasa
21443d0647 cli,nix: Add machine_icon, machine_description 2024-02-06 19:25:34 +07:00
clan-bot
868aba47b5 Merge pull request 'clanCore: fix deploymentAddress -> targetHost alias' (#811) from Mic92-target_host into main 2024-02-06 09:57:56 +00:00
Jörg Thalheim
923696c21c clanCore: fix deploymentAddress -> targetHost alias 2024-02-06 10:55:07 +01:00
clan-bot
99c432fcb8 Merge pull request 'Automatic flake update - 2024-02-05T00:00+00:00' (#804) from flake-update-2024-02-05 into main 2024-02-06 04:20:53 +00:00
Clan Merge Bot
3b5465d24d update flake lock - 2024-02-05T00:00+00:00
Flake lock file updates:

• Updated input 'flake-parts':
    'github:hercules-ci/flake-parts/07f6395285469419cf9d078f59b5b49993198c00' (2024-01-11)
  → 'github:hercules-ci/flake-parts/b253292d9c0a5ead9bc98c4e9a26c6312e27d69f' (2024-02-01)
• Updated input 'nixpkgs':
    'github:NixOS/nixpkgs/50071d87c75300c037e28439c5176c3933b9fce5' (2024-01-28)
  → 'github:NixOS/nixpkgs/5d75993fa5feaa333f3eadd83e0a08fc34432acc' (2024-02-04)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/73bf36912e31a6b21af6e0f39218e067283c67ef' (2024-01-28)
  → 'github:Mic92/sops-nix/23f61b897c00b66855074db471ba016e0cda20dd' (2024-02-04)
2024-02-06 04:17:06 +00:00
clan-bot
6a62018f1d Merge pull request 'build-clan: Removed unnecessary arg, machineDescription and machineIcon' (#810) from Qubasa-main into main 2024-02-05 09:58:58 +00:00
Qubasa
4421797f30 build-clan: Removed unnecessary arg, machineDescription and machineIcon 2024-02-05 16:56:13 +07:00
clan-bot
bfd525b042 Merge pull request 'clan-cli: add autocommits for facts/secrets' (#809) from lassulus-autocommits into main 2024-02-05 09:08:29 +00:00
lassulus
815527ec2b clan-cli secrets: commit facts/secrets after generating them 2024-02-05 10:03:10 +01:00
lassulus
e265537f69 clan-cli secrets: remove debug output 2024-02-05 10:03:10 +01:00
lassulus
8114cebaa4 clan-cli git: add commit_files function 2024-02-05 10:03:10 +01:00
clan-bot
0e076e52c8 Merge pull request 'vm_manager: Fixed uri join and then vm start not working' (#808) from Qubasa-main into main 2024-02-05 09:01:21 +00:00
Qubasa
fd44eeb384 vm_manager: Fixed uri join and then vm start not working 2024-02-05 15:57:14 +07:00
clan-bot
08c1e13dce Merge pull request 'async join' (#807) from hsjobeki-main into main 2024-02-05 08:07:51 +00:00
Johannes Kirschbauer
c52c83002c async join 2024-02-05 15:05:14 +07:00
clan-bot
1a8a6acfb1 Merge pull request 'Added machineIcon and machineDescription to buildClan' (#806) from Qubasa-main into main 2024-02-05 07:21:18 +00:00
Qubasa
b3815527a5 Added machineIcon and machineDescription to buildClan 2024-02-05 14:18:40 +07:00
clan-bot
fc50d8748a Merge pull request 'Starting to implement logs' (#799) from Qubasa-main into main 2024-02-05 06:40:26 +00:00
Qubasa
38cadd0ab2 Added --debug flag clan command in nix tests 2024-02-05 13:37:35 +07:00
Qubasa
33a10f76c0 UI: Fixed multiple connects to signal 2024-02-05 13:37:35 +07:00
clan-bot
2c00ccaea6 Merge pull request 'nixosModules zerotier: fix type of dns' (#805) from lassulus-zerotier-dns into main 2024-02-05 01:35:04 +00:00
lassulus
a2eb6f219d nixosModules zerotier: fix type of dns 2024-02-05 02:31:14 +01:00
clan-bot
ae256b666e Merge pull request 'clanCore zerotier: set default values in config for merging' (#803) from lassulus-zerotier-settings2 into main 2024-02-03 08:29:06 +00:00
lassulus
b39fda8d85 clanCore zerotier: set default values in config for merging 2024-02-03 09:26:04 +01:00
clan-bot
eaf2ac3c5c Merge pull request 'clan-cli update: upload only local paths from localhost' (#802) from lassulus-fast_flake_archive into main 2024-02-03 06:56:37 +00:00
lassulus
31188648f0 clan-cli update: remove legacy argument 2024-02-03 07:53:15 +01:00
lassulus
6a62065cdf clan-cli update: upload only local paths from localhost 2024-02-03 07:53:15 +01:00
clan-bot
20257b88ed Merge pull request 'clanCore zerotier: add settings option' (#801) from lassulus-zerotier-settings into main 2024-02-03 03:51:53 +00:00
lassulus
a52f1e3594 clanCore zerotier: add settings option 2024-02-03 04:48:54 +01:00
clan-bot
3bff29b9fe Merge pull request 'clan-cli: secrets check command' (#800) from lassulus-check_secrets into main 2024-02-02 16:43:25 +00:00
lassulus
315cdea6ce clan-cli machines: remove debug prints 2024-02-02 17:40:19 +01:00
lassulus
605b03bb91 clan-cli password-store: remove debug print 2024-02-02 17:40:19 +01:00
lassulus
bcdde990ff clan-cli secrets: add check command 2024-02-02 17:40:19 +01:00
clan-bot
841581bfc4 Merge pull request 'rename deployment address to target address' (#798) from Mic92-target_host into main 2024-02-02 09:42:29 +00:00
Jörg Thalheim
3538cf2e46 rename deployment address to target address
This prepares for having a build server for deployment.
2024-02-02 16:39:29 +07:00
clan-bot
7daca31db7 Merge pull request 'Starting to implement logs' (#796) from Qubasa-main into main 2024-02-02 05:07:39 +00:00
Qubasa
16562946fe vm-manager: Added log console printing on vm start. Added python logging module 2024-02-02 12:04:30 +07:00
clan-bot
789f3132c5 Merge pull request 'multi join via cli' (#795) from hsjobeki-main into main 2024-02-02 04:01:49 +00:00
Johannes Kirschbauer
e57169cb29 multi join via cli 2024-02-02 10:58:28 +07:00
clan-bot
90cf41c365 Merge pull request 'halalify zerotierone' (#794) from lassulus-halalify into main 2024-02-01 14:46:53 +00:00
lassulus
b4c6092cc0 halalify zerotierone 2024-02-01 15:44:13 +01:00
clan-bot
79a8c40f40 Merge pull request 'zerotier generate: kill process group' (#793) from lassulus-zerotier-kill-pg into main 2024-02-01 09:14:17 +00:00
lassulus
86b248d457 zerotier generate: retry if port allocation fails 2024-02-01 10:11:30 +01:00
clan-bot
b43a29dadc Merge pull request 'zerotier generate: kill process group' (#792) from lassulus-zerotier-kill-pg into main 2024-02-01 09:06:22 +00:00
lassulus
93874705fe zerotier generate: kill process group 2024-02-01 10:01:28 +01:00
clan-bot
59feea9e8a Merge pull request 'qemu: init python modules for qmp and qga' (#790) from DavHau-dave into main 2024-02-01 05:40:06 +00:00
DavHau
56b6907740 qemu: init python modules for qmp and qga 2024-02-01 12:32:21 +07:00
Qubasa
14917b7d56 Starting to implement logs 2024-02-01 10:21:58 +07:00
clan-bot
cc21108c59 Merge pull request 'vms: rename wayland attrs to waypipe' (#789) from a-kenji-rename-wayland-to-waypipe into main 2024-02-01 03:17:12 +00:00
a-kenji
533012af7d vms: rename wayland attrs to waypipe
And remove the options from the cli interface.
2024-02-01 10:14:36 +07:00
clan-bot
cdeb409c53 Merge pull request 'vms: wayland attr specified in configuration' (#787) from a-kenji-allow/wayland-in-config into main 2024-02-01 02:06:52 +00:00
a-kenji
f89c9b00dd vms: wayland attr specified in configuration 2024-02-01 09:00:43 +07:00
clan-bot
110e790246 Merge pull request 'syncthing: remember auto accepted folders, if introduced' (#786) from a-kenji-syncthing-default-accept into main 2024-01-31 15:39:50 +00:00
a-kenji
c81e9857da syncthing: remember auto accepted folders, if introduced
Makes it more compatible with restoring state
2024-01-31 22:24:46 +07:00
clan-bot
b5edd7ca08 Merge pull request 'group clans by url' (#783) from hsjobeki-main into main 2024-01-31 04:23:15 +00:00
Johannes Kirschbauer
c1bc1c942a group clans by url 2024-01-31 11:20:35 +07:00
clan-bot
6107b01a3f Merge pull request 'vm-state: fix and improve testing' (#782) from DavHau-dave into main 2024-01-31 04:07:41 +00:00
DavHau
59fa63eba9 Reapply "vm-state: fix and improve testing"
This reverts commit 99092f6e76.

vm-state: revert sysusers, improve testing

zerotier: enable persistence

vm-state: cleanup tests
2024-01-31 11:02:16 +07:00
clan-bot
c69f68feee Merge pull request 'syncthing: make inotify tuning overrideable' (#781) from a-kenji-syncthing-inotify into main 2024-01-31 03:47:06 +00:00
a-kenji
dd460e9f4f syncthing: make inotify tuning overrideable 2024-01-31 10:44:31 +07:00
clan-bot
b99f569973 Merge pull request 'some minor secrets fixups' (#780) from lassulus-secrets-fixes into main 2024-01-30 11:13:35 +00:00
lassulus
961eb26335 secrets modules: pass secrets as bytes 2024-01-30 12:11:05 +01:00
lassulus
0dbfe52d62 secrets: add sandbox user 2024-01-30 12:11:05 +01:00
clan-bot
a0ebf882c5 Merge pull request 'Machine __str__ impl' (#779) from Qubasa-heads/origin/Qubasa-fix into main 2024-01-30 08:38:51 +00:00
Qubasa
649e345585 Machine __str__ impl 2024-01-30 15:32:35 +07:00
clan-bot
1f108f8913 Merge pull request 'Added demo.sh to prepare demo environment' (#777) from Qubasa-origin/Qubasa-fix into main 2024-01-30 08:04:49 +00:00
Qubasa
a3207f7011 UI: Fixed toggle button color on second time not changing 2024-01-30 15:02:05 +07:00
Qubasa
45e8917679 Added demo.sh to prepare demo environment 2024-01-30 14:42:22 +07:00
clan-bot
c9b2deb326 Merge pull request 'Demo version' (#776) from Qubasa-main into main 2024-01-30 07:05:55 +00:00
95 changed files with 4168 additions and 1063 deletions


@@ -5,6 +5,7 @@ let
directory = ../..;
machines = {
test_backup_client = {
clan.networking.targetHost = "client";
imports = [ self.nixosModules.test_backup_client ];
fileSystems."/".device = "/dev/null";
boot.loader.grub.device = "/dev/null";
@@ -14,7 +15,7 @@ let
in
{
flake.nixosConfigurations = { inherit (clan.nixosConfigurations) test_backup_client; };
flake.clanInternals.machines = clan.clanInternals.machines;
flake.clanInternals = clan.clanInternals;
flake.nixosModules = {
test_backup_server = { ... }: {
imports = [
@@ -109,16 +110,16 @@ in
client.succeed("echo testing > /var/test-backups/somefile")
# create
client.succeed("clan --flake ${../..} backups create test_backup_client")
client.succeed("clan --debug --flake ${../..} backups create test_backup_client")
client.wait_until_succeeds("! systemctl is-active borgbackup-job-test_backup_server")
# list
backup_id = json.loads(client.succeed("borg-job-test_backup_server list --json"))["archives"][0]["archive"]
assert(backup_id in client.succeed("clan --flake ${../..} backups list test_backup_client"))
assert(backup_id in client.succeed("clan --debug --flake ${../..} backups list test_backup_client"))
# restore
client.succeed("rm -f /var/test-backups/somefile")
client.succeed(f"clan --flake ${../..} backups restore test_backup_client borgbackup {backup_id}")
client.succeed(f"clan --debug --flake ${../..} backups restore test_backup_client borgbackup {backup_id}")
assert(client.succeed("cat /var/test-backups/somefile").strip() == "testing")
'';
}


@@ -1,4 +1,4 @@
(import ../lib/container-test.nix) ({ ... }: {
(import ../lib/test-base.nix) ({ ... }: {
name = "borgbackup";
nodes.machine = { self, ... }: {
@@ -18,11 +18,20 @@
clanCore.clanDir = ./.;
clanCore.state.testState.folders = [ "/etc/state" ];
environment.etc.state.text = "hello world";
systemd.tmpfiles.settings = {
"ssh-key"."/root/.ssh/id_ed25519" = {
C.argument = "${../lib/ssh/privkey}";
z = {
mode = "0400";
user = "root";
};
};
};
clan.borgbackup = {
enable = true;
destinations.test = {
repo = "borg@localhost:.";
rsh = "ssh -i ${../lib/ssh/privkey} -o StrictHostKeyChecking=no";
rsh = "ssh -i /root/.ssh/id_ed25519 -o StrictHostKeyChecking=no";
};
};
}


@@ -5,6 +5,7 @@ let
directory = ../..;
machines = {
test_install_machine = {
clan.networking.targetHost = "test_install_machine";
imports = [ self.nixosModules.test_install_machine ];
};
};
@@ -12,7 +13,7 @@ let
in
{
flake.nixosConfigurations = { inherit (clan.nixosConfigurations) test_install_machine; };
flake.clanInternals.machines = clan.clanInternals.machines;
flake.clanInternals = clan.clanInternals;
flake.nixosModules = {
test_install_machine = { lib, modulesPath, ... }: {
imports = [
@@ -106,7 +107,7 @@ in
client.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../lib/ssh/privkey} /root/.ssh/id_ed25519")
client.wait_until_succeeds("ssh -o StrictHostKeyChecking=accept-new -v root@target hostname")
client.succeed("clan --flake ${../..} machines install test_install_machine root@target >&2")
client.succeed("clan --debug --flake ${../..} machines install test_install_machine root@target >&2")
try:
target.shutdown()
except BrokenPipeError:


@@ -14,5 +14,6 @@
xfce = ./xfce.nix;
zt-tcp-relay = ./zt-tcp-relay.nix;
localsend = ./localsend.nix;
waypipe = ./waypipe.nix;
};
}


@@ -63,15 +63,18 @@
];
# Activates inotify compatibility on syncthing
boot.kernel.sysctl."fs.inotify.max_user_watches" = 524288;
boot.kernel.sysctl."fs.inotify.max_user_watches" = lib.mkDefault 524288;
services.syncthing = {
enable = true;
configDir = "/var/lib/syncthing";
overrideFolders = true;
overrideDevices = true;
overrideFolders = lib.mkDefault (
if (config.clan.syncthing.introducer == null) then true else false
);
overrideDevices = lib.mkDefault (
if (config.clan.syncthing.introducer == null) then true else false
);
dataDir = lib.mkDefault "/home/user/";
@@ -79,10 +82,10 @@
key =
lib.mkDefault
config.clanCore.secrets.syncthing.secrets."syncthing.key".path or null;
config.clan.secrets.syncthing.secrets."syncthing.key".path or null;
cert =
lib.mkDefault
config.clanCore.secrets.syncthing.secrets."syncthing.cert".path or null;
config.clan.secrets.syncthing.secrets."syncthing.cert".path or null;
settings = {
options = {

clanModules/waypipe.nix (new file, 74 lines)

@@ -0,0 +1,74 @@
{ pkgs
, lib
, config
, ...
}:
{
options.clan.services.waypipe = {
enable = lib.mkEnableOption "waypipe";
user = lib.mkOption {
type = lib.types.str;
default = "user";
description = "User the program is run under";
};
flags = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [
"--vsock"
"-s"
"3049"
"server"
];
description = "Flags that will be passed to waypipe";
};
command = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ (lib.getExe pkgs.foot) ];
description = "Commands that waypipe should run";
};
};
config = lib.mkIf config.clan.services.waypipe.enable {
# Waypipe needs pipewire
services.pipewire = {
enable = lib.mkDefault true;
alsa.enable = lib.mkDefault true;
alsa.support32Bit = lib.mkDefault true;
pulse.enable = lib.mkDefault true;
};
# General default settings
fonts.enableDefaultPackages = lib.mkDefault true;
hardware.opengl.enable = lib.mkDefault true;
# Assume it is run inside a clan context
clan.virtualisation.waypipe = lib.mkDefault true;
# User account
services.getty.autologinUser = lib.mkDefault config.clan.services.waypipe.user;
security.sudo.wheelNeedsPassword = false;
users.users.user = lib.mkIf (config.clan.services.waypipe.user == "user") {
isNormalUser = true;
uid = 1000;
password = "";
extraGroups = [ "wheel" "video" ];
shell = "/run/current-system/sw/bin/bash";
};
systemd.user.services.waypipe = {
serviceConfig.PassEnvironment = "DISPLAY";
serviceConfig.Environment = ''
XDG_SESSION_TYPE=wayland \
NIXOS_OZONE_WL=1 \
GDK_BACKEND=wayland \
QT_QPA_PLATFORM=wayland \
CLUTTER_BACKEND = "wayland" \
SDL_VIDEODRIVER=wayland
'';
script = ''
${lib.getExe config.clanCore.clanPkgs.waypipe} \
${lib.escapeShellArgs config.clan.services.waypipe.flags} \
${lib.escapeShellArgs config.clan.services.waypipe.command}
'';
wantedBy = [ "default.target" ];
};
};
}


@@ -88,17 +88,18 @@ $ clan machines install my-machine <target_host>
## Update Your Machines
Clan CLI enables you to remotely update your machines over SSH. This requires setting up a deployment address for each target machine.
Clan CLI enables you to remotely update your machines over SSH. This requires setting up a target address for each target machine.
### Setting the Deployment Address
### Setting the Target Host
Replace `host_or_ip` with the actual hostname or IP address of your target machine:
```shellSession
$ clan config --machine my-machine clan.networking.deploymentAddress root@host_or_ip
$ clan config --machine my-machine clan.networking.targetHost root@host_or_ip
```
_Note: The use of `root@` in the deployment address implies SSH access as the root user. Ensure that the root login is secured and only used when necessary._
_Note: The use of `root@` in the target address implies SSH access as the root user.
Ensure that the root login is secured and only used when necessary._
### Updating Machine Configurations
@@ -113,3 +114,25 @@ You can also update all configured machines simultaneously by omitting the machi
```shellSession
$ clan machines update
```
### Setting a Build Host
If the machine does not have enough resources to run the NixOS evaluation or build itself,
it is also possible to specify a build host instead.
During an update, the cli will ssh into the build host and run `nixos-rebuild` from there.
```shellSession
$ clan config --machine my-machine clan.networking.buildHost root@host_or_ip
```
### Excluding a machine from `clan machine update`
To exclude machines from being updated when running `clan machines update` without any machines specified,
one can set the `clan.deployment.requireExplicitUpdate` option to true:
```shellSession
$ clan config --machine my-machine clan.deployment.requireExplicitUpdate true
```
This is useful for machines that are not always online or are not part of the regular update cycle.

flake.lock (generated)

@@ -7,11 +7,11 @@
]
},
"locked": {
"lastModified": 1706491084,
"narHash": "sha256-eaEv+orTmr2arXpoE4aFZQMVPOYXCBEbLgK22kOtkhs=",
"lastModified": 1707524024,
"narHash": "sha256-HmumZ8FuWAAYZrWUKm3N4G4h8nmZ5VUVX+vXLmCJNKM=",
"owner": "nix-community",
"repo": "disko",
"rev": "f67ba6552845ea5d7f596a24d57c33a8a9dc8de9",
"rev": "d07de570ba05cec2807d058daaa044f6955720c7",
"type": "github"
},
"original": {
@@ -27,11 +27,11 @@
]
},
"locked": {
"lastModified": 1704982712,
"narHash": "sha256-2Ptt+9h8dczgle2Oo6z5ni5rt/uLMG47UFTR1ry/wgg=",
"lastModified": 1706830856,
"narHash": "sha256-a0NYyp+h9hlb7ddVz4LUn1vT/PLwqfrWYcHMvFB1xYg=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "07f6395285469419cf9d078f59b5b49993198c00",
"rev": "b253292d9c0a5ead9bc98c4e9a26c6312e27d69f",
"type": "github"
},
"original": {
@@ -63,11 +63,11 @@
]
},
"locked": {
"lastModified": 1706085261,
"narHash": "sha256-7PgpHRHyShINcqgevPP1fJ6N8kM5ZSOJnk3QZBrOCQ0=",
"lastModified": 1707405218,
"narHash": "sha256-ZQ366Oo8WJbCqXAZET7N0Sz6RQ3G2IbqVtxQRSa3SXc=",
"owner": "nix-community",
"repo": "nixos-generators",
"rev": "896f6589db5b25023b812bbb6c1f5d3a499b1132",
"rev": "843e2f04c716092797ffa4ce14c446adce2f09ef",
"type": "github"
},
"original": {
@@ -78,11 +78,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1706440623,
"narHash": "sha256-MzqsevUkrIVpAbbN7Wn3mGlYklkm2geaozGTFxtnYgA=",
"lastModified": 1707639604,
"narHash": "sha256-J5ipSdfkbYcYaH3Js2dUf3Of94BWStapdmxpW5wwH1U=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "50071d87c75300c037e28439c5176c3933b9fce5",
"rev": "bdc57436da855500d44e9c1ce7450c0772e1cfa1",
"type": "github"
},
"original": {
@@ -110,11 +110,11 @@
"nixpkgs-stable": []
},
"locked": {
"lastModified": 1706410821,
"narHash": "sha256-iCfXspqUOPLwRobqQNAQeKzprEyVowLMn17QaRPQc+M=",
"lastModified": 1707620614,
"narHash": "sha256-gfAoB9dGzBu62NoAoM945aok7+6M+LFu+nvnGwAsTp4=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "73bf36912e31a6b21af6e0f39218e067283c67ef",
"rev": "2eb7c4ba3aa75e2660fd217eb1ab64d5b793608e",
"type": "github"
},
"original": {
@@ -130,11 +130,11 @@
]
},
"locked": {
"lastModified": 1706462057,
"narHash": "sha256-7dG1D4iqqt0bEbBqUWk6lZiSqqwwAO0Hd1L5opVyhNM=",
"lastModified": 1707300477,
"narHash": "sha256-qQF0fEkHlnxHcrKIMRzOETnRBksUK048MXkX0SOmxvA=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "c6153c2a3ff4c38d231e3ae99af29b87f1df5901",
"rev": "ac599dab59a66304eb511af07b3883114f061b9d",
"type": "github"
},
"original": {

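The `lastModified` fields in flake.lock are Unix timestamps; for illustration, decoding the new disko pin above:

```python
from datetime import datetime, timezone

# lastModified in flake.lock is a Unix timestamp (new disko pin above):
print(datetime.fromtimestamp(1707524024, tz=timezone.utc).isoformat())
# 2024-02-10T00:13:44+00:00
```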

@@ -49,6 +49,9 @@
machines = lib.mkOption {
type = lib.types.attrsOf (lib.types.attrsOf lib.types.unspecified);
};
machinesFunc = lib.mkOption {
type = lib.types.attrsOf (lib.types.attrsOf lib.types.unspecified);
};
};
};
};


@@ -3,7 +3,7 @@
, specialArgs ? { } # Extra arguments to pass to nixosSystem i.e. useful to make self available
, machines ? { } # allows to include machine-specific modules i.e. machines.${name} = { ... }
, clanName # Needs to be (globally) unique, as this determines the folder name where the flake gets downloaded to.
, clanIcon ? null # A path to an icon to be used for the clan
, clanIcon ? null # A path to an icon to be used for the clan, should be the same for all machines
}:
let
machinesDirs = lib.optionalAttrs (builtins.pathExists "${directory}/machines") (builtins.readDir (directory + /machines));
@@ -30,7 +30,7 @@ let
(machineSettings.clanImports or [ ]);
# TODO: remove default system once we have a hardware-config mechanism
nixosConfiguration = { system ? "x86_64-linux", name, forceSystem ? false }: nixpkgs.lib.nixosSystem {
nixosConfiguration = { system ? "x86_64-linux", name, pkgs ? null, extraConfig ? { } }: nixpkgs.lib.nixosSystem {
modules =
let
settings = machineSettings name;
@@ -39,20 +39,23 @@ let
++ [
settings
clan-core.nixosModules.clanCore
extraConfig
(machines.${name} or { })
{
clanCore.machineName = name;
({
clanCore.clanName = clanName;
clanCore.clanIcon = clanIcon;
clanCore.clanDir = directory;
nixpkgs.hostPlatform = if forceSystem then lib.mkForce system else lib.mkDefault system;
clanCore.machineName = name;
nixpkgs.hostPlatform = lib.mkDefault system;
# speeds up nix commands by using the nixpkgs from the host system (especially useful in VMs)
nix.registry.nixpkgs.to = {
type = "path";
path = lib.mkDefault nixpkgs;
};
}
} // lib.optionalAttrs (pkgs != null) {
nixpkgs.pkgs = lib.mkForce pkgs;
})
];
inherit specialArgs;
};
@@ -75,7 +78,13 @@ let
configsPerSystem = builtins.listToAttrs
(builtins.map
(system: lib.nameValuePair system
(lib.mapAttrs (name: _: nixosConfiguration { inherit name system; forceSystem = true; }) allMachines))
(lib.mapAttrs (name: _: nixosConfiguration { inherit name system; }) allMachines))
supportedSystems);
configsFuncPerSystem = builtins.listToAttrs
(builtins.map
(system: lib.nameValuePair system
(lib.mapAttrs (name: _: args: nixosConfiguration (args // { inherit name system; })) allMachines))
supportedSystems);
in
{
@@ -83,6 +92,7 @@ in
clanInternals = {
machines = configsPerSystem;
machinesFunc = configsFuncPerSystem;
all-machines-json = lib.mapAttrs
(system: configs: nixpkgs.legacyPackages.${system}.writers.writeJSON "machines.json" (lib.mapAttrs (_: m: m.config.system.clan.deployment.data) configs))
configsPerSystem;


@@ -6,6 +6,20 @@
the name of the clan
'';
};
machineIcon = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
description = ''
the location of the machine icon
'';
};
machineDescription = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = ''
the description of the machine
'';
};
clanDir = lib.mkOption {
type = lib.types.either lib.types.path lib.types.str;
description = ''


@@ -1,23 +1,56 @@
{ config, lib, ... }:
{
options.clan.networking = {
deploymentAddress = lib.mkOption {
description = ''
The target SSH node for deployment.
options.clan = {
networking = {
targetHost = lib.mkOption {
description = ''
The target SSH node for deployment.
By default, the node's attribute name will be used.
If set to null, only local deployment will be supported.
By default, the node's attribute name will be used.
If set to null, only local deployment will be supported.
format: user@host:port&SSH_OPTION=SSH_VALUE
examples:
- machine.example.com
- user@machine2.example.com
- root@example.com:2222&IdentityFile=/path/to/private/key
'';
type = lib.types.nullOr lib.types.str;
default = "root@${config.networking.hostName}";
format: user@host:port&SSH_OPTION=SSH_VALUE
examples:
- machine.example.com
- user@machine2.example.com
- root@example.com:2222&IdentityFile=/path/to/private/key
'';
default = null;
type = lib.types.nullOr lib.types.str;
};
buildHost = lib.mkOption {
description = ''
The build SSH node where nixos-rebuild will be executed.
If set to null, the targetHost will be used.
format: user@host:port&SSH_OPTION=SSH_VALUE
examples:
- machine.example.com
- user@machine2.example.com
- root@example.com:2222&IdentityFile=/path/to/private/key
'';
type = lib.types.nullOr lib.types.str;
default = null;
};
};
deployment = {
requireExplicitUpdate = lib.mkOption {
description = ''
Do not update this machine when running `clan machines update` without any machines specified.
This is useful for machines that are not always online or are not part of the regular update cycle.
'';
type = lib.types.bool;
default = false;
};
};
};
imports = [
(lib.mkRenamedOptionModule [ "clan" "networking" "deploymentAddress" ] [ "clan" "networking" "targetHost" ])
];
config = {
# conflicts with systemd-resolved
networking.useHostResolvConf = false;

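The address format documented in the options above (`user@host:port&SSH_OPTION=SSH_VALUE`) splits mechanically; a sketch of such a parser — a hypothetical helper, not the one clan_cli ships:

```python
def parse_target_host(address: str) -> dict:
    """Split the documented user@host:port&SSH_OPTION=SSH_VALUE format.

    Hypothetical helper for illustration only; clan_cli's real parser
    may behave differently.
    """
    host_part, _, opts_part = address.partition("&")
    ssh_options = dict(opt.split("=", 1) for opt in opts_part.split("&") if opt)
    user, _, host_port = host_part.rpartition("@")  # user is "" when absent
    host, _, port = host_port.partition(":")
    return {
        "user": user or None,
        "host": host,
        "port": int(port) if port else None,
        "ssh_options": ssh_options,
    }

print(parse_target_host("root@example.com:2222&IdentityFile=/path/to/private/key"))
# {'user': 'root', 'host': 'example.com', 'port': 2222,
#  'ssh_options': {'IdentityFile': '/path/to/private/key'}}
```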

@@ -19,18 +19,38 @@
the location of the deployment.json file
'';
};
deploymentAddress = lib.mkOption {
type = lib.types.str;
deployment.buildHost = lib.mkOption {
type = lib.types.nullOr lib.types.str;
description = ''
the address of the deployment server
the hostname of the build host where nixos-rebuild is run
'';
};
deployment.targetHost = lib.mkOption {
type = lib.types.nullOr lib.types.str;
description = ''
the hostname of the target host to be deployed to
'';
};
deployment.requireExplicitUpdate = lib.mkOption {
type = lib.types.bool;
description = ''
if true, the deployment will not be updated automatically.
'';
default = false;
};
secretsUploadDirectory = lib.mkOption {
type = lib.types.path;
description = ''
the directory on the deployment server where secrets are uploaded
'';
};
factsModule = lib.mkOption {
type = lib.types.str;
description = ''
the python import path to the facts module
'';
default = "clan_cli.facts.modules.in_repo";
};
secretsModule = lib.mkOption {
type = lib.types.str;
description = ''
@@ -56,6 +76,12 @@
json metadata about the vm
'';
};
iso = lib.mkOption {
type = lib.types.path;
description = ''
A generated iso of the machine for the flash command
'';
};
};
};
description = ''
@@ -65,11 +91,11 @@
# optimization for faster secret generate/upload and machines update
config = {
system.clan.deployment.data = {
inherit (config.system.clan) secretsModule secretsData;
inherit (config.clan.networking) deploymentAddress;
inherit (config.system.clan) factsModule secretsModule secretsData;
inherit (config.clan.networking) targetHost buildHost;
inherit (config.clan.deployment) requireExplicitUpdate;
inherit (config.clanCore) secretsUploadDirectory;
};
system.clan.deployment.file = pkgs.writeText "deployment.json" (builtins.toJSON config.system.clan.deployment.data);
};
}


@@ -1,7 +1,7 @@
{ config, lib, ... }:
{ config, lib, pkgs, ... }:
{
options.clanCore.secretStore = lib.mkOption {
type = lib.types.enum [ "sops" "password-store" "custom" ];
type = lib.types.enum [ "sops" "password-store" "vm" "custom" ];
default = "sops";
description = ''
method to store secrets
@@ -69,8 +69,18 @@
readOnly = true;
internal = true;
default = ''
export PATH="${lib.makeBinPath config.path}"
set -efu -o pipefail
set -eu -o pipefail
export PATH="${lib.makeBinPath config.path}:${pkgs.coreutils}/bin"
# prepare sandbox user
mkdir -p /etc
cp ${pkgs.runCommand "fake-etc" {} ''
export PATH="${pkgs.coreutils}/bin"
mkdir -p $out
cp /etc/* $out/
''}/* /etc/
${config.script}
'';
};
@@ -140,5 +150,6 @@
imports = [
./sops.nix
./password-store.nix
./vm.nix
];
}


@@ -0,0 +1,10 @@
{ config, lib, ... }:
{
config = lib.mkIf (config.clanCore.secretStore == "vm") {
clanCore.secretsDirectory = "/etc/secrets";
clanCore.secretsUploadDirectory = "/etc/secrets";
system.clan.secretsModule = "clan_cli.secrets.modules.vm";
system.clan.factsModule = "clan_cli.facts.modules.vm";
};
}


@@ -1,8 +1,9 @@
{ lib, ... }:
{
# defaults
# FIXME: currently broken, will be fixed soon
#config.clanCore.state.HOME.folders = [ "/home" ];
config.clanCore.state.HOME.folders = [
"/home"
];
# interface
options.clanCore.state = lib.mkOption {


@@ -1,17 +1,5 @@
{ lib, config, pkgs, options, extendModules, modulesPath, ... }:
let
# Generates a fileSystems entry for bind mounting a given state folder path
# It binds directories from /var/clanstate/{some-path} to /{some-path}.
# As a result, all state paths will be persisted across reboots, because
# the state folder is mounted from the host system.
mkBindMount = path: {
name = path;
value = {
device = "/var/clanstate/${path}";
options = [ "bind" ];
};
};
# Flatten the list of state folders into a single list
stateFolders = lib.flatten (
lib.mapAttrsToList
@@ -19,33 +7,74 @@ let
config.clanCore.state
);
# A module setting up bind mounts for all state folders
stateMounts = {
virtualisation.fileSystems =
lib.listToAttrs
(map mkBindMount stateFolders);
};
vmModule = {
imports = [
(modulesPath + "/virtualisation/qemu-vm.nix")
./serial.nix
stateMounts
];
virtualisation.fileSystems = {
${config.clanCore.secretsUploadDirectory} = lib.mkForce {
# required for issuing shell commands via qga
services.qemuGuest.enable = true;
# required to react to system_powerdown qmp command
# Some desktop managers like xfce override the poweroff signal and therefore
# make it impossible to handle it via 'logind' directly.
services.acpid.enable = true;
services.acpid.handlers.power.event = "button/power.*";
services.acpid.handlers.power.action = "poweroff";
boot.initrd.systemd.enable = true;
# currently needed for system.etc.overlay.enable
boot.kernelPackages = pkgs.linuxPackages_latest;
boot.initrd.systemd.storePaths = [ pkgs.util-linux pkgs.e2fsprogs ];
boot.initrd.systemd.emergencyAccess = true;
# sysusers is faster than nixos's perl scripts
# and doesn't require state.
systemd.sysusers.enable = true;
users.mutableUsers = false;
users.allowNoPasswordLogin = true;
boot.initrd.kernelModules = [ "virtiofs" ];
virtualisation.writableStore = false;
virtualisation.fileSystems = lib.mkForce ({
"/nix/store" = {
device = "nix-store";
options = [ "x-systemd.requires=systemd-modules-load.service" "ro" ];
fsType = "virtiofs";
};
"/" = {
device = "/dev/vda";
fsType = "ext4";
options = [ "defaults" "x-systemd.makefs" "nobarrier" "noatime" "nodiratime" "data=writeback" "discard" ];
};
"/vmstate" = {
device = "/dev/vdb";
options = [ "x-systemd.makefs" "noatime" "nodiratime" "discard" ];
noCheck = true;
fsType = "ext4";
};
${config.clanCore.secretsUploadDirectory} = {
device = "secrets";
fsType = "9p";
neededForBoot = true;
options = [ "trans=virtio" "version=9p2000.L" "cache=loose" ];
};
"/var/clanstate" = {
device = "state";
fsType = "9p";
options = [ "trans=virtio" "version=9p2000.L" "cache=loose" ];
};
};
boot.initrd.systemd.enable = true;
} // lib.listToAttrs (map
(folder:
lib.nameValuePair folder {
device = "/vmstate${folder}";
fsType = "none";
options = [ "bind" ];
})
stateFolders));
};
# We cannot simply merge the VM config into the current system config, because
@@ -53,7 +82,7 @@ let
# Instead we use extendModules to create a second instance of the current
# system configuration, and then merge the VM config into that.
vmConfig = extendModules {
modules = [ vmModule stateMounts ];
modules = [ vmModule ];
};
in
{
@@ -86,6 +115,14 @@ in
change the preferred console.
'';
};
waypipe = lib.mkOption {
type = lib.types.bool;
default = false;
description = lib.mdDoc ''
Whether to use waypipe for native wayland passthrough, or not.
'';
};
};
# All important VM config variables needed by the vm runner
# this is really just a remapping of values defined elsewhere
@@ -123,6 +160,38 @@ in
whether to enable graphics for the vm
'';
};
waypipe = lib.mkOption {
type = lib.types.bool;
internal = true;
readOnly = true;
description = ''
whether to enable native wayland window passthrough with waypipe for the vm
'';
};
machine_icon = lib.mkOption {
type = lib.types.nullOr lib.types.path;
internal = true;
readOnly = true;
description = ''
the location of the clan icon
'';
};
machine_name = lib.mkOption {
type = lib.types.str;
internal = true;
readOnly = true;
description = ''
the name of the vm
'';
};
machine_description = lib.mkOption {
type = lib.types.nullOr lib.types.str;
internal = true;
readOnly = true;
description = ''
the description of the vm
'';
};
};
};
@@ -130,8 +199,11 @@ in
# for clan vm inspect
clanCore.vm.inspect = {
clan_name = config.clanCore.clanName;
machine_icon = config.clanCore.machineIcon or config.clanCore.clanIcon;
machine_name = config.clanCore.machineName;
machine_description = config.clanCore.machineDescription;
memory_size = config.clan.virtualisation.memorySize;
inherit (config.clan.virtualisation) cores graphics;
inherit (config.clan.virtualisation) cores graphics waypipe;
};
# for clan vm create
system.clan.vm = {


@@ -6,46 +6,6 @@ let
install -Dm755 ${./genmoon.py} $out/bin/genmoon
patchShebangs $out/bin/genmoon
'';
networkConfig = {
authTokens = [
null
];
authorizationEndpoint = "";
capabilities = [ ];
clientId = "";
dns = [ ];
enableBroadcast = true;
id = cfg.networkId;
ipAssignmentPools = [ ];
mtu = 2800;
multicastLimit = 32;
name = cfg.name;
uwid = cfg.networkId;
objtype = "network";
private = !cfg.controller.public;
remoteTraceLevel = 0;
remoteTraceTarget = null;
revision = 1;
routes = [ ];
rules = [
{
not = false;
or = false;
type = "ACTION_ACCEPT";
}
];
rulesSource = "";
ssoEnabled = false;
tags = [ ];
v4AssignMode = {
zt = false;
};
v6AssignMode = {
"6plane" = false;
rfc4193 = true;
zt = false;
};
};
in
{
options.clan.networking.zerotier = {
@@ -114,6 +74,12 @@ in
'';
};
};
settings = lib.mkOption {
description = lib.mdDoc "override the network config in /var/lib/zerotier/bla/$network.json";
type = lib.types.submodule {
freeformType = (pkgs.formats.json { }).type;
};
};
};
config = lib.mkMerge [
({
@@ -147,7 +113,7 @@ in
${lib.optionalString (cfg.controller.enable) ''
mkdir -p /var/lib/zerotier-one/controller.d/network
ln -sfT ${pkgs.writeText "net.json" (builtins.toJSON networkConfig)} /var/lib/zerotier-one/controller.d/network/${cfg.networkId}.json
ln -sfT ${pkgs.writeText "net.json" (builtins.toJSON cfg.settings)} /var/lib/zerotier-one/controller.d/network/${cfg.networkId}.json
''}
${lib.optionalString (cfg.moon.stableEndpoints != []) ''
if [[ ! -f /var/lib/zerotier-one/moon.json ]]; then
@@ -220,11 +186,11 @@ in
--network-id "$facts/zerotier-network-id"
'';
};
# clanCore.state.zerotier.folders = [ "/var/lib/zerotier-one" ];
clanCore.state.zerotier.folders = [ "/var/lib/zerotier-one" ];
environment.systemPackages = [ config.clanCore.clanPkgs.zerotier-members ];
})
(lib.mkIf (config.clanCore.secretsUploadDirectory != null && !cfg.controller.enable && cfg.networkId != null) {
(lib.mkIf (!cfg.controller.enable && cfg.networkId != null) {
clanCore.secrets.zerotier = {
facts.zerotier-ip = { };
facts.zerotier-meshname = { };
@@ -241,6 +207,46 @@ in
})
(lib.mkIf (cfg.controller.enable && (facts.zerotier-network-id.value or null) != null) {
clan.networking.zerotier.networkId = facts.zerotier-network-id.value;
clan.networking.zerotier.settings = {
authTokens = [
null
];
authorizationEndpoint = "";
capabilities = [ ];
clientId = "";
dns = { };
enableBroadcast = true;
id = cfg.networkId;
ipAssignmentPools = [ ];
mtu = 2800;
multicastLimit = 32;
name = cfg.name;
uwid = cfg.networkId;
objtype = "network";
private = !cfg.controller.public;
remoteTraceLevel = 0;
remoteTraceTarget = null;
revision = 1;
routes = [ ];
rules = [
{
not = false;
or = false;
type = "ACTION_ACCEPT";
}
];
rulesSource = "";
ssoEnabled = false;
tags = [ ];
v4AssignMode = {
zt = false;
};
v6AssignMode = {
"6plane" = false;
rfc4193 = true;
zt = false;
};
};
environment.etc."zerotier/network-id".text = facts.zerotier-network-id.value;
systemd.services.zerotierone.serviceConfig.ExecStartPost = [
"+${pkgs.writeShellScript "whitelist-controller" ''


@@ -3,8 +3,11 @@ import base64
import contextlib
import ipaddress
import json
import os
import signal
import socket
import subprocess
import sys
import time
import urllib.request
from collections.abc import Iterator
@@ -115,7 +118,11 @@ def zerotier_controller() -> Iterator[ZerotierController]:
f"-p{controller_port}",
str(home),
]
with subprocess.Popen(cmd) as p:
with subprocess.Popen(
cmd,
preexec_fn=os.setsid,
) as p:
process_group = os.getpgid(p.pid)
try:
print(
f"wait for controller to be started on 127.0.0.1:{controller_port}...",
@@ -131,8 +138,7 @@ def zerotier_controller() -> Iterator[ZerotierController]:
yield ZerotierController(controller_port, home)
finally:
p.terminate()
p.wait()
os.killpg(process_group, signal.SIGKILL)
@dataclass
@@ -143,9 +149,15 @@ class NetworkController:
# TODO: allow merging more network configuration here
def create_network_controller() -> NetworkController:
with zerotier_controller() as controller:
network = controller.create_network()
return NetworkController(network["nwid"], controller.identity)
e = ClanError("Bug, should never happen")
for _ in range(10):
try:
with zerotier_controller() as controller:
network = controller.create_network()
return NetworkController(network["nwid"], controller.identity)
except ClanError: # probably failed to allocate port, so retry
print("failed to create network, retrying..., probabl", file=sys.stderr)
raise e
def create_identity() -> Identity:

View File

@@ -5,6 +5,7 @@
clanCore.imports = [
inputs.sops-nix.nixosModules.sops
./clanCore
./iso
({ pkgs, lib, ... }: {
clanCore.clanPkgs = lib.mkDefault self.packages.${pkgs.hostPlatform.system};
})


@@ -33,7 +33,7 @@
systemd.services.hidden-ssh-announce = {
description = "announce hidden ssh";
after = [ "tor.service" "network-online.target" ];
wants = [ "tor.service" ];
wants = [ "tor.service" "network-online.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
# ${pkgs.tor}/bin/torify


@@ -0,0 +1,90 @@
{ config, extendModules, lib, pkgs, ... }:
let
# Generates a fileSystems entry for bind mounting a given state folder path
# It binds directories from /var/clanstate/{some-path} to /{some-path}.
# As a result, all state paths will be persisted across reboots, because
# the state folder is mounted from the host system.
mkBindMount = path: {
name = path;
value = {
device = "/var/clanstate/${path}";
options = [ "bind" ];
};
};
# Flatten the list of state folders into a single list
stateFolders = lib.flatten (
lib.mapAttrsToList
(_item: attrs: attrs.folders)
config.clanCore.state
);
# A module setting up bind mounts for all state folders
stateMounts = {
fileSystems =
lib.listToAttrs
(map mkBindMount stateFolders);
};
isoModule = { config, ... }: {
imports = [
stateMounts
];
options.clan.iso.disko = lib.mkOption {
type = lib.types.submodule {
freeformType = (pkgs.formats.json { }).type;
};
default = {
disk = {
iso = {
type = "disk";
imageSize = "10G"; # TODO add auto image size in disko
content = {
type = "gpt";
partitions = {
boot = {
size = "1M";
type = "EF02"; # for grub MBR
};
ESP = {
size = "100M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};
};
};
};
};
config = {
disko.devices = lib.mkOverride 51 config.clan.iso.disko;
boot.loader.grub.enable = true;
boot.loader.grub.efiSupport = true;
boot.loader.grub.device = lib.mkForce "/dev/vda";
boot.loader.grub.efiInstallAsRemovable = true;
};
};
isoConfig = extendModules {
modules = [ isoModule ];
};
in
{
config = {
# for clan vm create
system.clan.iso = isoConfig.config.system.build.diskoImages;
};
}


@@ -6,7 +6,7 @@ from pathlib import Path
from types import ModuleType
from typing import Any
from . import backups, config, flakes, history, machines, secrets, vms
from . import backups, config, facts, flakes, flash, history, machines, secrets, vms
from .custom_logger import setup_logging
from .dirs import get_clan_flake_toplevel
from .errors import ClanCmdError, ClanError
@@ -91,6 +91,9 @@ def create_parser(prog: str | None = None) -> argparse.ArgumentParser:
parser_secrets = subparsers.add_parser("secrets", help="manage secrets")
secrets.register_parser(parser_secrets)
parser_facts = subparsers.add_parser("facts", help="manage facts")
facts.register_parser(parser_facts)
parser_machine = subparsers.add_parser(
"machines", help="Manage machines and their configuration"
)
@@ -102,6 +105,11 @@ def create_parser(prog: str | None = None) -> argparse.ArgumentParser:
parser_history = subparsers.add_parser("history", help="manage history")
history.register_parser(parser_history)
parser_flash = subparsers.add_parser(
"flash", help="flash machines to usb sticks or into isos"
)
flash.register_parser(parser_flash)
if argcomplete:
argcomplete.autocomplete(parser)
@@ -117,10 +125,10 @@ def main() -> None:
parser.print_help()
if args.debug:
setup_logging(logging.DEBUG)
setup_logging(logging.DEBUG, root_log_name=__name__.split(".")[0])
log.debug("Debug log activated")
else:
setup_logging(logging.INFO)
setup_logging(logging.INFO, root_log_name=__name__.split(".")[0])
if not hasattr(args, "func"):
return


@@ -13,7 +13,7 @@ def create_backup(machine: Machine, provider: str | None = None) -> None:
backup_scripts = json.loads(machine.eval_nix("config.clanCore.backups"))
if provider is None:
for provider in backup_scripts["providers"]:
proc = machine.host.run(
proc = machine.target_host.run(
["bash", "-c", backup_scripts["providers"][provider]["create"]],
)
if proc.returncode != 0:
@@ -23,7 +23,7 @@ def create_backup(machine: Machine, provider: str | None = None) -> None:
else:
if provider not in backup_scripts["providers"]:
raise ClanError(f"provider {provider} not found")
proc = machine.host.run(
proc = machine.target_host.run(
["bash", "-c", backup_scripts["providers"][provider]["create"]],
)
if proc.returncode != 0:


@@ -19,7 +19,7 @@ class Backup:
def list_provider(machine: Machine, provider: str) -> list[Backup]:
results = []
backup_metadata = json.loads(machine.eval_nix("config.clanCore.backups"))
proc = machine.host.run(
proc = machine.target_host.run(
["bash", "-c", backup_metadata["providers"][provider]["list"]],
stdout=subprocess.PIPE,
check=False,


@@ -20,7 +20,7 @@ def restore_service(
env["JOB"] = backup.job_name
env["FOLDERS"] = ":".join(folders)
proc = machine.host.run(
proc = machine.target_host.run(
[
"bash",
"-c",
@@ -34,7 +34,7 @@ def restore_service(
f"failed to run preRestoreScript: {backup_folders[service]['preRestoreScript']}, error was: {proc.stdout}"
)
proc = machine.host.run(
proc = machine.target_host.run(
[
"bash",
"-c",
@@ -48,7 +48,7 @@ def restore_service(
f"failed to restore backup: {backup_metadata['providers'][provider]['restore']}"
)
proc = machine.host.run(
proc = machine.target_host.run(
[
"bash",
"-c",


@@ -1,33 +0,0 @@
import json
from pathlib import Path
from clan_cli.nix import nix_eval
from .cmd import run
def get_clan_module_names(
flake_dir: Path,
) -> list[str]:
"""
Get the list of clan modules from the clan-core flake input
"""
proc = run(
nix_eval(
[
"--impure",
"--show-trace",
"--expr",
f"""
let
flake = builtins.getFlake (toString {flake_dir});
in
builtins.attrNames flake.inputs.clan-core.clanModules
""",
],
),
cwd=flake_dir,
)
module_names = json.loads(proc.stdout)
return module_names


@@ -116,10 +116,8 @@ class ClanURI:
def get_full_uri(self) -> str:
return self._full_uri
# TODO(@Qubasa): return a comparable id e.g. f"{url}#{attr}"
# This should be our standard.
def get_id(self) -> str:
return f"{self._components.path}#{self._components.fragment}"
return f"{self.get_internal()}#{self.params.flake_attr}"
@classmethod
def from_path(


@@ -66,9 +66,9 @@ def get_caller() -> str:
return ret
def setup_logging(level: Any) -> None:
def setup_logging(level: Any, root_log_name: str = __name__.split(".")[0]) -> None:
# Get the root logger and set its level
main_logger = logging.getLogger("clan_cli")
main_logger = logging.getLogger(root_log_name)
main_logger.setLevel(level)
# Create and add the default handler

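For clarity, the `__name__.split(".")[0]` idiom used by the callers above evaluates to the package name, so code inside the package configures the "clan_cli" root logger by default:

```python
# Within the clan_cli package, __name__ is e.g. "clan_cli.custom_logger",
# so the default root logger configured above is named "clan_cli":
assert "clan_cli.custom_logger".split(".")[0] == "clan_cli"
```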

@@ -15,9 +15,12 @@ def find_git_repo_root() -> Path | None:
return find_toplevel([".git"])
def clan_key_safe(clan_name: str, flake_url: str) -> str:
def clan_key_safe(flake_url: str) -> str:
"""
only embed the url in the path, not the clan name, as it would involve eval.
"""
quoted_url = urllib.parse.quote_plus(flake_url)
return f"{clan_name}-{quoted_url}"
return f"{quoted_url}"
def find_toplevel(top_level_files: list[str]) -> Path | None:
@@ -41,16 +44,38 @@ def user_config_dir() -> Path:
return Path(os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config")))
def user_data_dir() -> Path:
if sys.platform == "win32":
return Path(
os.getenv("LOCALAPPDATA", os.path.expanduser("~\\AppData\\Local\\"))
)
elif sys.platform == "darwin":
return Path(os.path.expanduser("~/Library/Application Support/"))
else:
return Path(os.getenv("XDG_DATA_HOME", os.path.expanduser("~/.local/share")))
def user_cache_dir() -> Path:
if sys.platform == "win32":
return Path(
os.getenv("LOCALAPPDATA", os.path.expanduser("~\\AppData\\Local\\"))
)
elif sys.platform == "darwin":
return Path(os.path.expanduser("~/Library/Caches/"))
else:
return Path(os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache")))
def user_gcroot_dir() -> Path:
p = user_config_dir() / "clan" / "gcroots"
p.mkdir(parents=True, exist_ok=True)
return p
def machine_gcroot(*, clan_name: str, flake_url: str) -> Path:
def machine_gcroot(flake_url: str) -> Path:
# Always build icon so that we can symlink it to the gcroot
gcroot_dir = user_gcroot_dir()
clan_gcroot = gcroot_dir / clan_key_safe(clan_name, flake_url)
clan_gcroot = gcroot_dir / clan_key_safe(flake_url)
clan_gcroot.mkdir(parents=True, exist_ok=True)
return clan_gcroot
@@ -59,9 +84,9 @@ def user_history_file() -> Path:
return user_config_dir() / "clan" / "history"
def vm_state_dir(clan_name: str, flake_url: str, vm_name: str) -> Path:
clan_key = clan_key_safe(clan_name, flake_url)
return user_config_dir() / "clan" / "vmstate" / clan_key / vm_name
def vm_state_dir(flake_url: str, vm_name: str) -> Path:
clan_key = clan_key_safe(flake_url)
return user_data_dir() / "clan" / "vmstate" / clan_key / vm_name
def machines_dir(flake_dir: Path) -> Path:

View File
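
A quick sketch of the new clan_key_safe behaviour from the dirs.py hunks above, assuming a typical flake URL; the key now derives from the URL alone, so no clan name (and no nix eval) is needed:

import urllib.parse

def clan_key_safe(flake_url: str) -> str:
    return urllib.parse.quote_plus(flake_url)

print(clan_key_safe("git+https://git.clan.lol/clan/clan-core"))
# git%2Bhttps%3A%2F%2Fgit.clan.lol%2Fclan%2Fclan-core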

@@ -0,0 +1,21 @@
#!/usr/bin/env python3
import argparse
from .check import register_check_parser
from .list import register_list_parser
# takes a (sub)parser and configures it
def register_parser(parser: argparse.ArgumentParser) -> None:
subparser = parser.add_subparsers(
title="command",
description="the command to run",
help="the command to run",
required=True,
)
check_parser = subparser.add_parser("check", help="check if facts are up to date")
register_check_parser(check_parser)
list_parser = subparser.add_parser("list", help="list all facts")
register_list_parser(list_parser)

View File
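
The new facts parser above follows the cli's usual dispatch pattern. A self-contained sketch of that pattern (names hypothetical), matching the hasattr(args, "func") check in the first hunk of this section:

import argparse

parser = argparse.ArgumentParser(prog="clan")
subparser = parser.add_subparsers(title="command", required=True)
check_parser = subparser.add_parser("check", help="check if facts are up to date")
check_parser.add_argument("machine")
check_parser.set_defaults(func=lambda args: print(f"checking facts for {args.machine}"))
args = parser.parse_args(["check", "web01"])
args.func(args)  # -> checking facts for web01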

@@ -0,0 +1,38 @@
import argparse
import importlib
import logging
from ..machines.machines import Machine
log = logging.getLogger(__name__)
def check_facts(machine: Machine) -> bool:
facts_module = importlib.import_module(machine.facts_module)
fact_store = facts_module.FactStore(machine=machine)
existing_facts = fact_store.get_all()
missing_facts = []
for service in machine.secrets_data:
for fact in machine.secrets_data[service]["facts"]:
if fact not in existing_facts.get(service, {}):
log.info(f"Fact {fact} for service {service} is missing")
missing_facts.append((service, fact))
if missing_facts:
return False
return True
def check_command(args: argparse.Namespace) -> None:
machine = Machine(name=args.machine, flake=args.flake)
if check_facts(machine):
print("All facts are present")
def register_check_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"machine",
help="The machine to check facts for",
)
parser.set_defaults(func=check_command)

View File

@@ -0,0 +1,36 @@
import argparse
import importlib
import json
import logging
from ..machines.machines import Machine
log = logging.getLogger(__name__)
def get_all_facts(machine: Machine) -> dict:
facts_module = importlib.import_module(machine.facts_module)
fact_store = facts_module.FactStore(machine=machine)
# for service in machine.secrets_data:
# facts[service] = {}
# for fact in machine.secrets_data[service]["facts"]:
# fact_content = fact_store.get(service, fact)
# if fact_content:
# facts[service][fact] = fact_content.decode()
# else:
# log.error(f"Fact {fact} for service {service} is missing")
return fact_store.get_all()
def get_command(args: argparse.Namespace) -> None:
machine = Machine(name=args.machine, flake=args.flake)
print(json.dumps(get_all_facts(machine), indent=4))
def register_list_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"machine",
help="The machine to print facts for",
)
parser.set_defaults(func=get_command)

View File

@@ -0,0 +1,47 @@
from pathlib import Path
from clan_cli.errors import ClanError
from clan_cli.machines.machines import Machine
class FactStore:
def __init__(self, machine: Machine) -> None:
self.machine = machine
self.works_remotely = False
def set(self, _service: str, name: str, value: bytes) -> Path | None:
if isinstance(self.machine.flake, Path):
fact_path = (
self.machine.flake / "machines" / self.machine.name / "facts" / name
)
fact_path.parent.mkdir(parents=True, exist_ok=True)
fact_path.touch()
fact_path.write_bytes(value)
return fact_path
else:
raise ClanError(
f"in_flake fact storage is only supported for local flakes: {self.machine.flake}"
)
def exists(self, _service: str, name: str) -> bool:
fact_path = (
self.machine.flake_dir / "machines" / self.machine.name / "facts" / name
)
return fact_path.exists()
# get a single fact
def get(self, _service: str, name: str) -> bytes:
fact_path = (
self.machine.flake_dir / "machines" / self.machine.name / "facts" / name
)
return fact_path.read_bytes()
# get all facts
def get_all(self) -> dict[str, dict[str, bytes]]:
facts_folder = self.machine.flake_dir / "machines" / self.machine.name / "facts"
facts: dict[str, dict[str, bytes]] = {}
facts["TODO"] = {}
if facts_folder.exists():
for fact_path in facts_folder.iterdir():
facts["TODO"][fact_path.name] = fact_path.read_bytes()
return facts

View File
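
A sketch of the on-disk layout the in_flake fact store above reads and writes, with hypothetical machine and fact names:

import tempfile
from pathlib import Path

flake = Path(tempfile.mkdtemp())  # stand-in for a local clan flake
fact = flake / "machines" / "web01" / "facts" / "zerotier-ip"
fact.parent.mkdir(parents=True, exist_ok=True)
fact.write_bytes(b"fd00::1")
print(fact.read_bytes())  # b'fd00::1'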

@@ -0,0 +1,44 @@
import logging
from pathlib import Path
from clan_cli.dirs import vm_state_dir
from clan_cli.errors import ClanError
from clan_cli.machines.machines import Machine
log = logging.getLogger(__name__)
class FactStore:
def __init__(self, machine: Machine) -> None:
self.machine = machine
self.works_remotely = False
self.dir = vm_state_dir(str(machine.flake), machine.name) / "facts"
log.debug(f"FactStore initialized with dir {self.dir}")
def exists(self, service: str, name: str) -> bool:
fact_path = self.dir / service / name
return fact_path.exists()
def set(self, service: str, name: str, value: bytes) -> Path | None:
fact_path = self.dir / service / name
fact_path.parent.mkdir(parents=True, exist_ok=True)
fact_path.write_bytes(value)
return None
# get a single fact
def get(self, service: str, name: str) -> bytes:
fact_path = self.dir / service / name
if fact_path.exists():
return fact_path.read_bytes()
raise ClanError(f"Fact {name} for service {service} not found")
# get all facts
def get_all(self) -> dict[str, dict[str, bytes]]:
facts: dict[str, dict[str, bytes]] = {}
if self.dir.exists():
for service in self.dir.iterdir():
facts[service.name] = {}
for fact in service.iterdir():
facts[service.name][fact.name] = fact.read_bytes()
return facts

View File
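
And the per-service layout the vm fact store walks in get_all(), again with hypothetical names:

import tempfile
from pathlib import Path

facts_dir = Path(tempfile.mkdtemp()) / "facts"  # stand-in for vm_state_dir(...) / "facts"
(facts_dir / "sshd").mkdir(parents=True)
(facts_dir / "sshd" / "ssh.id_ed25519.pub").write_bytes(b"ssh-ed25519 AAAA...")
facts = {
    service.name: {fact.name: fact.read_bytes() for fact in service.iterdir()}
    for service in facts_dir.iterdir()
}
print(facts)  # {'sshd': {'ssh.id_ed25519.pub': b'ssh-ed25519 AAAA...'}}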

@@ -7,7 +7,7 @@ from ..dirs import machine_gcroot
from ..errors import ClanError
from ..machines.list import list_machines
from ..machines.machines import Machine
from ..nix import nix_build, nix_config, nix_eval, nix_metadata
from ..nix import nix_add_to_gcroots, nix_build, nix_config, nix_eval, nix_metadata
from ..vms.inspect import VmConfig, inspect_vm
@@ -24,6 +24,10 @@ class FlakeConfig:
revision: str | None
vm: VmConfig
def __post_init__(self) -> None:
if isinstance(self.vm, dict):
self.vm = VmConfig(**self.vm)
def run_cmd(cmd: list[str]) -> str:
proc = run(cmd)
@@ -44,6 +48,11 @@ def inspect_flake(flake_url: str | Path, machine_name: str) -> FlakeConfig:
machine = Machine(machine_name, flake_url)
vm = inspect_vm(machine)
# Make symlink to gcroots from vm.machine_icon
if vm.machine_icon:
gcroot_icon: Path = machine_gcroot(flake_url=str(flake_url)) / vm.machine_name
nix_add_to_gcroots(vm.machine_icon, gcroot_icon)
# Get the cLAN name
cmd = nix_eval(
[
@@ -71,7 +80,7 @@ def inspect_flake(flake_url: str | Path, machine_name: str) -> FlakeConfig:
[
f'{flake_url}#clanInternals.machines."{system}"."{machine_name}".config.clanCore.clanIcon'
],
machine_gcroot(clan_name=clan_name, flake_url=str(flake_url)) / "clanIcon",
machine_gcroot(flake_url=str(flake_url)) / "clanIcon",
)
run_cmd(cmd)

View File
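
The __post_init__ added above coerces a json-decoded dict back into a VmConfig. A minimal standalone sketch of that coercion pattern (class names hypothetical):

from dataclasses import dataclass

@dataclass
class Vm:  # stand-in for VmConfig
    machine_name: str

@dataclass
class Config:  # stand-in for FlakeConfig
    vm: "Vm | dict"

    def __post_init__(self) -> None:
        if isinstance(self.vm, dict):
            self.vm = Vm(**self.vm)

print(Config(vm={"machine_name": "demo"}).vm)  # Vm(machine_name='demo')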

@@ -0,0 +1,62 @@
import argparse
import importlib
import logging
from dataclasses import dataclass
from pathlib import Path
from tempfile import TemporaryDirectory
from .machines.machines import Machine
from .secrets.generate import generate_secrets
log = logging.getLogger(__name__)
def flash_machine(machine: Machine, device: str | None = None) -> None:
secrets_module = importlib.import_module(machine.secrets_module)
secret_store = secrets_module.SecretStore(machine=machine)
generate_secrets(machine)
with TemporaryDirectory() as tmpdir_:
tmpdir = Path(tmpdir_)
upload_dir_ = machine.secrets_upload_directory
if upload_dir_.startswith("/"):
upload_dir_ = upload_dir_[1:]
upload_dir = tmpdir / upload_dir_
upload_dir.mkdir(parents=True)
secret_store.upload(upload_dir)
fs_image = machine.build_nix("config.system.clan.iso")
print(fs_image)
@dataclass
class FlashOptions:
flake: Path
machine: str
device: str | None
def flash_command(args: argparse.Namespace) -> None:
opts = FlashOptions(
flake=args.flake,
machine=args.machine,
device=args.device,
)
machine = Machine(opts.machine, flake=opts.flake)
flash_machine(machine, device=opts.device)
def register_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"machine",
type=str,
help="machine to install",
)
parser.add_argument(
"--device",
type=str,
help="device to flash the system to",
)
parser.set_defaults(func=flash_command)

View File
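
A sketch of how flash_machine above maps an absolute secrets upload directory into the temporary staging tree (directory name hypothetical):

from pathlib import Path
from tempfile import TemporaryDirectory

with TemporaryDirectory() as tmpdir_:
    upload_dir_ = "/var/lib/secrets"  # hypothetical machine.secrets_upload_directory
    if upload_dir_.startswith("/"):
        upload_dir_ = upload_dir_[1:]
    upload_dir = Path(tmpdir_) / upload_dir_
    upload_dir.mkdir(parents=True)
    print(upload_dir)  # <tmpdir>/var/lib/secrets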

@@ -7,29 +7,49 @@ from clan_cli.nix import nix_shell
from .cmd import Log, run
# generic vcs agnostic commit function
def commit_file(
file_path: Path,
repo_dir: Path,
commit_message: str | None = None,
) -> None:
"""Commit a file to a git repository.
:param file_path: The path to the file to commit.
:param repo_dir: The path to the git repository.
:param commit_message: The commit message.
:raises ClanError: If the file is not in the git repository.
"""
commit_files([file_path], repo_dir, commit_message)
# generic vcs agnostic commit function
def commit_files(
file_paths: list[Path],
repo_dir: Path,
commit_message: str | None = None,
) -> None:
# check that the file is in the git repository and exists
if not Path(file_path).resolve().is_relative_to(repo_dir.resolve()):
raise ClanError(f"File {file_path} is not in the git repository {repo_dir}")
if not file_path.exists():
raise ClanError(f"File {file_path} does not exist")
for file_path in file_paths:
if not Path(file_path).resolve().is_relative_to(repo_dir.resolve()):
raise ClanError(f"File {file_path} is not in the git repository {repo_dir}")
if not file_path.exists():
raise ClanError(f"File {file_path} does not exist")
# generate commit message if not provided
if commit_message is None:
# ensure that mentioned file path is relative to repo
commit_message = f"Add {file_path.relative_to(repo_dir)}"
commit_message = ""
for file_path in file_paths:
# ensure that mentioned file path is relative to repo
        commit_message += f"Add {file_path.relative_to(repo_dir)}\n"
# check if the repo is a git repo and commit
if (repo_dir / ".git").exists():
_commit_file_to_git(repo_dir, file_path, commit_message)
_commit_file_to_git(repo_dir, file_paths, commit_message)
else:
return
def _commit_file_to_git(repo_dir: Path, file_path: Path, commit_message: str) -> None:
def _commit_file_to_git(
repo_dir: Path, file_paths: list[Path], commit_message: str
) -> None:
"""Commit a file to a git repository.
:param repo_dir: The path to the git repository.
@@ -37,18 +57,20 @@ def _commit_file_to_git(repo_dir: Path, file_path: Path, commit_message: str) ->
:param commit_message: The commit message.
:raises ClanError: If the file is not in the git repository.
"""
cmd = nix_shell(
["nixpkgs#git"],
["git", "-C", str(repo_dir), "add", str(file_path)],
)
# add the file to the git index
for file_path in file_paths:
cmd = nix_shell(
["nixpkgs#git"],
["git", "-C", str(repo_dir), "add", str(file_path)],
)
# add the file to the git index
run(cmd, log=Log.BOTH, error_msg=f"Failed to add {file_path} file to git index")
run(cmd, log=Log.BOTH, error_msg=f"Failed to add {file_path} file to git index")
# check if there is a diff
cmd = nix_shell(
["nixpkgs#git"],
["git", "-C", str(repo_dir), "diff", "--cached", "--exit-code", str(file_path)],
["git", "-C", str(repo_dir), "diff", "--cached", "--exit-code"]
+ [str(file_path) for file_path in file_paths],
)
result = run(cmd, check=False, cwd=repo_dir)
# if there is no diff, return
@@ -65,8 +87,8 @@ def _commit_file_to_git(repo_dir: Path, file_path: Path, commit_message: str) ->
"commit",
"-m",
commit_message,
str(file_path.relative_to(repo_dir)),
],
]
+ [str(file_path) for file_path in file_paths],
)
run(cmd, error_msg=f"Failed to commit {file_path} to git repository {repo_dir}")
run(cmd, error_msg=f"Failed to commit {file_paths} to git repository {repo_dir}")

View File
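
Before staging anything, commit_files above checks that every path lies inside the repository. A worked example of that containment check (paths hypothetical):

from pathlib import Path

repo_dir = Path("/tmp/repo")
file_path = Path("/tmp/repo/machines/web01/facts/key")
print(file_path.resolve().is_relative_to(repo_dir.resolve()))  # True
print(Path("/etc/passwd").resolve().is_relative_to(repo_dir.resolve()))  # False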

@@ -7,6 +7,7 @@ import logging
from typing import Any
from clan_cli.flakes.inspect import FlakeConfig, inspect_flake
from clan_cli.machines.list import list_machines
from ..clan_uri import ClanURI
from ..dirs import user_history_file
@@ -66,8 +67,8 @@ def list_history() -> list[HistoryEntry]:
return logs
def new_history_entry(uri: ClanURI) -> HistoryEntry:
flake = inspect_flake(uri.get_internal(), uri.params.flake_attr)
def new_history_entry(url: str, machine: str) -> HistoryEntry:
flake = inspect_flake(url, machine)
flake.flake_url = str(flake.flake_url)
return HistoryEntry(
flake=flake,
@@ -75,12 +76,24 @@ def new_history_entry(uri: ClanURI) -> HistoryEntry:
)
def add_history(uri: ClanURI) -> list[HistoryEntry]:
def add_history(uri: ClanURI, *, all_machines: bool) -> list[HistoryEntry]:
user_history_file().parent.mkdir(parents=True, exist_ok=True)
logs = list_history()
history = list_history()
if not all_machines:
add_maschine_to_history(uri.get_internal(), uri.params.flake_attr, history)
if all_machines:
for machine in list_machines(uri.get_internal()):
add_maschine_to_history(uri.get_internal(), machine, history)
write_history_file(history)
return history
def add_maschine_to_history(
uri_path: str, uri_machine: str, logs: list[HistoryEntry]
) -> None:
found = False
uri_path = uri.get_internal()
uri_machine = uri.params.flake_attr
for entry in logs:
if (
@@ -91,16 +104,12 @@ def add_history(uri: ClanURI) -> list[HistoryEntry]:
entry.last_used = datetime.datetime.now().isoformat()
if not found:
history = new_history_entry(uri)
history = new_history_entry(uri_path, uri_machine)
logs.append(history)
write_history_file(logs)
return logs
def add_history_command(args: argparse.Namespace) -> None:
add_history(args.uri)
add_history(args.uri, all_machines=args.all)
# takes a (sub)parser and configures it
@@ -108,4 +117,7 @@ def register_add_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"uri", type=ClanURI.from_str, help="Path to the flake", default="."
)
parser.add_argument(
"--all", help="Add all machines", default=False, action="store_true"
)
parser.set_defaults(func=add_history_command)

View File

@@ -1,11 +1,14 @@
#!/usr/bin/env python3
import argparse
import datetime
from clan_cli.flakes.inspect import inspect_flake
from ..clan_uri import ClanParameters, ClanURI
from ..errors import ClanCmdError
from ..locked_open import write_history_file
from ..nix import nix_metadata
from .add import HistoryEntry, list_history, new_history_entry
from .add import HistoryEntry, list_history
def update_history() -> list[HistoryEntry]:
@@ -27,7 +30,11 @@ def update_history() -> list[HistoryEntry]:
url=str(entry.flake.flake_url),
params=ClanParameters(entry.flake.flake_attr),
)
entry = new_history_entry(uri)
flake = inspect_flake(uri.get_internal(), uri.params.flake_attr)
flake.flake_url = str(flake.flake_url)
entry = HistoryEntry(
flake=flake, last_used=datetime.datetime.now().isoformat()
)
write_history_file(logs)
return logs

View File

@@ -14,19 +14,15 @@ log = logging.getLogger(__name__)
def install_nixos(machine: Machine, kexec: str | None = None) -> None:
log.info(f"deployment address1: {machine.deployment_info['deploymentAddress']}")
secrets_module = importlib.import_module(machine.secrets_module)
log.info(f"installing {machine.name}")
log.info(f"using secret store: {secrets_module.SecretStore}")
secret_store = secrets_module.SecretStore(machine=machine)
h = machine.host
log.info(f"deployment address2: {machine.deployment_info['deploymentAddress']}")
h = machine.target_host
target_host = f"{h.user or 'root'}@{h.host}"
log.info(f"target host: {target_host}")
flake_attr = h.meta.get("flake_attr", "")
generate_secrets(machine)
with TemporaryDirectory() as tmpdir_:
@@ -42,7 +38,7 @@ def install_nixos(machine: Machine, kexec: str | None = None) -> None:
cmd = [
"nixos-anywhere",
"-f",
f"{machine.flake}#{flake_attr}",
f"{machine.flake}#{machine.name}",
"-t",
"--no-reboot",
"--extra-files",
@@ -77,10 +73,7 @@ def install_command(args: argparse.Namespace) -> None:
kexec=args.kexec,
)
machine = Machine(opts.machine, flake=opts.flake)
machine.get_deployment_info()
machine.deployment_info["deploymentAddress"] = opts.target_host
log.info(f"target host: {opts.target_host}")
log.info(f"deployment address: {machine.deployment_info['deploymentAddress']}")
machine.target_host_address = opts.target_host
install_nixos(machine, kexec=opts.kexec)

View File

@@ -1,14 +1,48 @@
import json
import logging
from collections.abc import Generator
from contextlib import contextmanager
from pathlib import Path
from tempfile import NamedTemporaryFile
from clan_cli.dirs import vm_state_dir
from qemu.qmp import QEMUMonitorProtocol
from ..cmd import run
from ..errors import ClanError
from ..nix import nix_build, nix_config, nix_eval, nix_metadata
from ..ssh import Host, parse_deployment_address
log = logging.getLogger(__name__)
class VMAttr:
def __init__(self, state_dir: Path) -> None:
        # These sockets are just symlinks to the real sockets created by
        # run.py. The indirection is needed because we would otherwise hit
        # unix socket path length limits on Linux. If no qemu process is
        # running, the symlinks are dangling.
self._qmp_socket: Path = state_dir / "qmp.sock"
self._qga_socket: Path = state_dir / "qga.sock"
self._qmp: QEMUMonitorProtocol | None = None
@contextmanager
def qmp_ctx(self) -> Generator[QEMUMonitorProtocol, None, None]:
if self._qmp is None:
log.debug(f"qmp_socket: {self._qmp_socket}")
rpath = self._qmp_socket.resolve()
if not rpath.exists():
raise ClanError(
f"qmp socket {rpath} does not exist. Is the VM running?"
)
self._qmp = QEMUMonitorProtocol(str(rpath))
self._qmp.connect()
try:
yield self._qmp
finally:
self._qmp.close()
class Machine:
def __init__(
self,
@@ -28,46 +62,63 @@ class Machine:
self.eval_cache: dict[str, str] = {}
self.build_cache: dict[str, Path] = {}
if deployment_info is not None:
self.deployment_info = deployment_info
self._deployment_info: None | dict[str, str] = deployment_info
def get_deployment_info(self) -> None:
self.deployment_info = json.loads(
self.build_nix("config.system.clan.deployment.file").read_text()
)
print(f"self_deployment_info: {self.deployment_info}")
state_dir = vm_state_dir(flake_url=str(self.flake), vm_name=self.name)
self.vm: VMAttr = VMAttr(state_dir)
def __str__(self) -> str:
return f"Machine(name={self.name}, flake={self.flake})"
def __repr__(self) -> str:
return str(self)
@property
def deployment_address(self) -> str:
if not hasattr(self, "deployment_info"):
self.get_deployment_info()
return self.deployment_info["deploymentAddress"]
def deployment_info(self) -> dict[str, str]:
if self._deployment_info is not None:
return self._deployment_info
self._deployment_info = json.loads(
self.build_nix("config.system.clan.deployment.file").read_text()
)
return self._deployment_info
@property
def target_host_address(self) -> str:
# deploymentAddress is deprecated.
val = self.deployment_info.get("targetHost") or self.deployment_info.get(
"deploymentAddress"
)
if val is None:
msg = f"the 'clan.networking.targetHost' nixos option is not set for machine '{self.name}'"
raise ClanError(msg)
return val
@target_host_address.setter
def target_host_address(self, value: str) -> None:
self.deployment_info["targetHost"] = value
@property
def secrets_module(self) -> str:
if not hasattr(self, "deployment_info"):
self.get_deployment_info()
print(f"self_deployment_info2: {self.deployment_info}")
return self.deployment_info["secretsModule"]
@property
def facts_module(self) -> str:
return self.deployment_info["factsModule"]
@property
def secrets_data(self) -> dict:
if not hasattr(self, "deployment_info"):
self.get_deployment_info()
if self.deployment_info["secretsData"]:
try:
return json.loads(Path(self.deployment_info["secretsData"]).read_text())
except json.JSONDecodeError:
log.error(
except json.JSONDecodeError as e:
raise ClanError(
f"Failed to parse secretsData for machine {self.name} as json"
)
return {}
) from e
return {}
@property
def secrets_upload_directory(self) -> str:
if not hasattr(self, "deployment_info"):
self.get_deployment_info()
return self.deployment_info["secretsUploadDirectory"]
@property
@@ -82,60 +133,152 @@ class Machine:
return Path(self.flake_path)
@property
def host(self) -> Host:
def target_host(self) -> Host:
return parse_deployment_address(
self.name, self.deployment_address, meta={"machine": self}
self.name, self.target_host_address, meta={"machine": self}
)
def eval_nix(self, attr: str, refresh: bool = False) -> str:
@property
def build_host(self) -> Host:
"""
eval a nix attribute of the machine
@attr: the attribute to get
The host where the machine is built and deployed from.
Can be the same as the target host.
"""
build_host = self.deployment_info.get("buildHost")
if build_host is None:
return self.target_host
# enable ssh agent forwarding to allow the build host to access the target host
return parse_deployment_address(
self.name,
build_host,
forward_agent=True,
meta={"machine": self, "target_host": self.target_host},
)
def nix(
self,
method: str,
attr: str,
extra_config: None | dict = None,
impure: bool = False,
nix_options: list[str] = [],
) -> str | Path:
"""
Build the machine and return the path to the result
accepts a secret store and a facts store # TODO
"""
config = nix_config()
system = config["system"]
attr = f'clanInternals.machines."{system}".{self.name}.{attr}'
print(f"attr: {attr}")
with NamedTemporaryFile(mode="w") as config_json:
if extra_config is not None:
json.dump(extra_config, config_json, indent=2)
else:
json.dump({}, config_json)
config_json.flush()
if attr in self.eval_cache and not refresh:
nar_hash = json.loads(
run(
nix_eval(
[
"--impure",
"--expr",
f'(builtins.fetchTree {{ type = "file"; url = "file://{config_json.name}"; }}).narHash',
]
)
).stdout.strip()
)
args = []
# get git commit from flake
if extra_config is not None:
metadata = nix_metadata(self.flake_dir)
url = metadata["url"]
if "dirtyRevision" in metadata:
# if not impure:
# raise ClanError(
# "The machine has a dirty revision, and impure mode is not allowed"
# )
# else:
# args += ["--impure"]
args += ["--impure"]
args += [
"--expr",
f"""
((builtins.getFlake "{url}").clanInternals.machinesFunc."{system}"."{self.name}" {{
extraConfig = builtins.fromJSON (builtins.readFile (builtins.fetchTree {{
type = "file";
url = if (builtins.compareVersions builtins.nixVersion "2.19") == -1 then "{config_json.name}" else "file:{config_json.name}";
narHash = "{nar_hash}";
}}));
}}).{attr}
""",
]
else:
if isinstance(self.flake, Path):
if (self.flake / ".git").exists():
flake = f"git+file://{self.flake}"
else:
flake = f"path:{self.flake}"
else:
flake = self.flake
args += [
f'{flake}#clanInternals.machines."{system}".{self.name}.{attr}',
*nix_options,
]
if method == "eval":
output = run(nix_eval(args)).stdout.strip()
return output
elif method == "build":
outpath = run(nix_build(args)).stdout.strip()
return Path(outpath)
else:
raise ValueError(f"Unknown method {method}")
def eval_nix(
self,
attr: str,
refresh: bool = False,
extra_config: None | dict = None,
impure: bool = False,
nix_options: list[str] = [],
) -> str:
"""
eval a nix attribute of the machine
@attr: the attribute to get
"""
if attr in self.eval_cache and not refresh and extra_config is None:
return self.eval_cache[attr]
if isinstance(self.flake, Path):
if (self.flake / ".git").exists():
flake = f"git+file://{self.flake}"
else:
flake = f"path:{self.flake}"
output = self.nix("eval", attr, extra_config, impure, nix_options)
if isinstance(output, str):
self.eval_cache[attr] = output
return output
else:
flake = self.flake
raise ClanError("eval_nix returned not a string")
print(f"evaluating {flake}#{attr}")
cmd = nix_eval([f"{flake}#{attr}"])
print(f"cmd: {cmd}")
output = run(cmd).stdout.strip()
self.eval_cache[attr] = output
return output
def build_nix(self, attr: str, refresh: bool = False) -> Path:
def build_nix(
self,
attr: str,
refresh: bool = False,
extra_config: None | dict = None,
impure: bool = False,
nix_options: list[str] = [],
) -> Path:
"""
build a nix attribute of the machine
@attr: the attribute to get
"""
config = nix_config()
system = config["system"]
attr = f'clanInternals.machines."{system}".{self.name}.{attr}'
if attr in self.build_cache and not refresh:
if attr in self.build_cache and not refresh and extra_config is None:
return self.build_cache[attr]
if isinstance(self.flake, Path):
flake = f"path:{self.flake}"
output = self.nix("build", attr, extra_config, impure, nix_options)
if isinstance(output, Path):
self.build_cache[attr] = output
return output
else:
flake = self.flake
log.info(f"building {flake}#{attr}")
outpath = run(nix_build([f"{flake}#{attr}"])).stdout.strip()
self.build_cache[attr] = Path(outpath)
return Path(outpath)
raise ClanError("build_nix returned not a Path")

View File
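
A sketch of how Machine.nix() above derives a buildable flake reference from self.flake; the git+file:// form keeps uncommitted changes visible to nix:

from pathlib import Path

def flake_ref(flake: "str | Path") -> str:
    if isinstance(flake, Path):
        if (flake / ".git").exists():
            return f"git+file://{flake}"
        return f"path:{flake}"
    return flake

print(flake_ref(Path("/tmp")))  # path:/tmp (no .git there)
print(flake_ref("git+https://git.clan.lol/clan/clan-core"))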

@@ -1,19 +1,92 @@
import argparse
import json
import logging
import os
import shlex
import subprocess
import sys
from pathlib import Path
from ..cmd import run
from ..errors import ClanError
from ..machines.machines import Machine
from ..nix import nix_build, nix_command, nix_config
from ..nix import nix_build, nix_command, nix_config, nix_metadata
from ..secrets.generate import generate_secrets
from ..secrets.upload import upload_secrets
from ..ssh import Host, HostGroup, HostKeyCheck, parse_deployment_address
log = logging.getLogger(__name__)
def deploy_nixos(hosts: HostGroup, clan_dir: Path) -> None:
def is_path_input(node: dict[str, dict[str, str]]) -> bool:
locked = node.get("locked")
if not locked:
return False
return locked["type"] == "path" or locked.get("url", "").startswith("file://")
def upload_sources(
flake_url: str, remote_url: str, always_upload_source: bool = False
) -> str:
if not always_upload_source:
flake_data = nix_metadata(flake_url)
url = flake_data["resolvedUrl"]
has_path_inputs = any(
is_path_input(node) for node in flake_data["locks"]["nodes"].values()
)
if not has_path_inputs and not is_path_input(flake_data):
# No need to upload sources, we can just build the flake url directly
# FIXME: this might fail for private repositories?
return url
if not has_path_inputs:
# Just copy the flake to the remote machine, we can substitute other inputs there.
path = flake_data["path"]
env = os.environ.copy()
# env["NIX_SSHOPTS"] = " ".join(opts.remote_ssh_options)
assert remote_url
cmd = nix_command(
[
"copy",
"--to",
f"ssh://{remote_url}",
"--no-check-sigs",
path,
]
)
proc = subprocess.run(cmd, stdout=subprocess.PIPE, env=env, check=False)
if proc.returncode != 0:
raise ClanError(
f"failed to upload sources: {shlex.join(cmd)} failed with {proc.returncode}"
)
return path
# Slow path: we need to upload all sources to the remote machine
assert remote_url
cmd = nix_command(
[
"flake",
"archive",
"--to",
f"ssh://{remote_url}",
"--json",
flake_url,
]
)
log.info("run %s", shlex.join(cmd))
proc = subprocess.run(cmd, stdout=subprocess.PIPE, check=False)
if proc.returncode != 0:
raise ClanError(
f"failed to upload sources: {shlex.join(cmd)} failed with {proc.returncode}"
)
try:
return json.loads(proc.stdout)["path"]
except (json.JSONDecodeError, OSError) as e:
raise ClanError(
f"failed to parse output of {shlex.join(cmd)}: {e}\nGot: {proc.stdout.decode('utf-8', 'replace')}"
)
def deploy_nixos(hosts: HostGroup) -> None:
"""
Deploy to all hosts in parallel
"""
@@ -23,14 +96,7 @@ def deploy_nixos(hosts: HostGroup, clan_dir: Path) -> None:
ssh_arg = f"-p {h.port}" if h.port else ""
env = os.environ.copy()
env["NIX_SSHOPTS"] = ssh_arg
res = h.run_local(
nix_command(["flake", "archive", "--to", f"ssh://{target}", "--json"]),
check=True,
stdout=subprocess.PIPE,
extra_env=env,
)
data = json.loads(res.stdout)
path = data["path"]
path = upload_sources(".", target)
if h.host_key_check != HostKeyCheck.STRICT:
ssh_arg += " -o StrictHostKeyChecking=no"
@@ -39,16 +105,11 @@ def deploy_nixos(hosts: HostGroup, clan_dir: Path) -> None:
ssh_arg += " -i " + h.key if h.key else ""
flake_attr = h.meta.get("flake_attr", "")
machine: Machine = h.meta["machine"]
generate_secrets(h.meta["machine"])
upload_secrets(h.meta["machine"])
generate_secrets(machine)
upload_secrets(machine)
target_host = h.meta.get("target_host")
if target_host:
target_user = h.meta.get("target_user")
if target_user:
target_host = f"{target_user}@{target_host}"
extra_args = h.meta.get("extra_args", [])
cmd = [
"nixos-rebuild",
@@ -64,9 +125,10 @@ def deploy_nixos(hosts: HostGroup, clan_dir: Path) -> None:
"--build-host",
"",
"--flake",
f"{path}#{flake_attr}",
f"{path}#{machine.name}",
]
if target_host:
if target_host := h.meta.get("target_host"):
target_host = f"{target_host.user or 'root'}@{target_host.host}"
cmd.extend(["--target-host", target_host])
ret = h.run(cmd, check=False)
    # retry the switch if the first attempt fails
@@ -87,18 +149,25 @@ def get_all_machines(clan_dir: Path) -> HostGroup:
machines = json.loads(Path(machines_json.rstrip()).read_text())
hosts = []
ignored_machines = []
for name, machine_data in machines.items():
# very hacky. would be better to do a MachinesGroup instead
host = parse_deployment_address(
name,
machine_data["deploymentAddress"],
meta={
"machine": Machine(
name=name, flake=clan_dir, deployment_info=machine_data
)
},
if machine_data.get("requireExplicitUpdate", False):
continue
machine = Machine(name=name, flake=clan_dir, deployment_info=machine_data)
try:
hosts.append(machine.build_host)
except ClanError:
ignored_machines.append(name)
continue
if not hosts and ignored_machines != []:
print(
"WARNING: No machines to update. The following defined machines were ignored because they do not have `clan.networking.targetHost` nixos option set:",
file=sys.stderr,
)
hosts.append(host)
for machine in ignored_machines:
print(machine, file=sys.stderr)
# very hacky. would be better to do a MachinesGroup instead
return HostGroup(hosts)
@@ -106,7 +175,7 @@ def get_selected_machines(machine_names: list[str], flake_dir: Path) -> HostGrou
hosts = []
for name in machine_names:
machine = Machine(name=name, flake=flake_dir)
hosts.append(machine.host)
hosts.append(machine.build_host)
return HostGroup(hosts)
@@ -116,7 +185,7 @@ def update(args: argparse.Namespace) -> None:
raise ClanError("Could not find clan flake toplevel directory")
if len(args.machines) == 1 and args.target_host is not None:
machine = Machine(name=args.machines[0], flake=args.flake)
machine.deployment_info["deploymentAddress"] = args.target_host
machine.target_host_address = args.target_host
host = parse_deployment_address(
args.machines[0],
args.target_host,
@@ -133,7 +202,7 @@ def update(args: argparse.Namespace) -> None:
else:
machines = get_selected_machines(args.machines, args.flake)
deploy_nixos(machines, args.flake)
deploy_nixos(machines)
def register_update_parser(parser: argparse.ArgumentParser) -> None:

View File
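
A worked example of the is_path_input() predicate above; nodes with type "path" or a file:// URL force uploading sources before a remote build:

def is_path_input(node: dict) -> bool:
    locked = node.get("locked")
    if not locked:
        return False
    return locked["type"] == "path" or locked.get("url", "").startswith("file://")

print(is_path_input({"locked": {"type": "path", "path": "/src/clan-core"}}))  # True
print(is_path_input({"locked": {"type": "github", "owner": "NixOS"}}))        # False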

@@ -53,6 +53,11 @@ def nix_build(flags: list[str], gcroot: Path | None = None) -> list[str]:
)
def nix_add_to_gcroots(nix_path: Path, dest: Path) -> None:
cmd = ["nix-store", "--realise", f"{nix_path}", "--add-root", f"{dest}"]
run(cmd)
def nix_config() -> dict[str, Any]:
cmd = nix_command(["show-config", "--json"])
proc = run(cmd)

View File
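
The command nix_add_to_gcroots above assembles, spelled out with hypothetical paths; --add-root registers dest as a GC root symlink so the store path is not collected:

from pathlib import Path

nix_path = Path("/nix/store/abc123-clan-icon.png")         # hypothetical
dest = Path.home() / ".config/clan/gcroots/demo/clanIcon"  # hypothetical
cmd = ["nix-store", "--realise", f"{nix_path}", "--add-root", f"{dest}"]
print(" ".join(cmd))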

@@ -1,6 +1,7 @@
#!/usr/bin/env python3
import argparse
from .check import register_check_parser
from .generate import register_generate_parser
from .groups import register_groups_parser
from .import_sops import register_import_sops_parser
@@ -32,6 +33,9 @@ def register_parser(parser: argparse.ArgumentParser) -> None:
import_sops_parser = subparser.add_parser("import-sops", help="import a sops file")
register_import_sops_parser(import_sops_parser)
check_parser = subparser.add_parser("check", help="check if secrets are up to date")
register_check_parser(check_parser)
parser_generate = subparser.add_parser(
"generate", help="generate secrets for machines if they don't exist yet"
)

View File

@@ -0,0 +1,46 @@
import argparse
import importlib
import logging
from ..machines.machines import Machine
log = logging.getLogger(__name__)
def check_secrets(machine: Machine) -> bool:
secrets_module = importlib.import_module(machine.secrets_module)
secret_store = secrets_module.SecretStore(machine=machine)
facts_module = importlib.import_module(machine.facts_module)
fact_store = facts_module.FactStore(machine=machine)
missing_secrets = []
missing_facts = []
for service in machine.secrets_data:
for secret in machine.secrets_data[service]["secrets"]:
if not secret_store.exists(service, secret):
log.info(f"Secret {secret} for service {service} is missing")
missing_secrets.append((service, secret))
for fact in machine.secrets_data[service]["facts"]:
if not fact_store.exists(service, fact):
log.info(f"Fact {fact} for service {service} is missing")
missing_facts.append((service, fact))
log.debug(f"missing_secrets: {missing_secrets}")
log.debug(f"missing_facts: {missing_facts}")
if missing_secrets or missing_facts:
return False
return True
def check_command(args: argparse.Namespace) -> None:
machine = Machine(name=args.machine, flake=args.flake)
check_secrets(machine)
def register_check_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"machine",
help="The machine to check secrets for",
)
parser.set_defaults(func=check_command)

View File

@@ -2,15 +2,16 @@ import argparse
import importlib
import logging
import os
import shutil
from pathlib import Path
from tempfile import TemporaryDirectory
from clan_cli.cmd import run
from ..errors import ClanError
from ..git import commit_files
from ..machines.machines import Machine
from ..nix import nix_shell
from .check import check_secrets
log = logging.getLogger(__name__)
@@ -19,22 +20,20 @@ def generate_secrets(machine: Machine) -> None:
secrets_module = importlib.import_module(machine.secrets_module)
secret_store = secrets_module.SecretStore(machine=machine)
facts_module = importlib.import_module(machine.facts_module)
fact_store = facts_module.FactStore(machine=machine)
with TemporaryDirectory() as d:
for service in machine.secrets_data:
print(service)
tmpdir = Path(d) / service
# check if all secrets exist and generate them if at least one is missing
needs_regeneration = any(
not secret_store.exists(service, secret)
for secret in machine.secrets_data[service]["secrets"]
) or any(
not (machine.flake / fact).exists()
for fact in machine.secrets_data[service]["facts"].values()
)
for fact in machine.secrets_data[service]["facts"].values():
if not (machine.flake / fact).exists():
print(f"fact {fact} is missing")
needs_regeneration = not check_secrets(machine)
log.debug(f"{service} needs_regeneration: {needs_regeneration}")
if needs_regeneration:
if not isinstance(machine.flake, Path):
msg = f"flake is not a Path: {machine.flake}"
msg += "fact/secret generation is only supported for local flakes"
env = os.environ.copy()
facts_dir = tmpdir / "facts"
facts_dir.mkdir(parents=True)
@@ -56,6 +55,8 @@ def generate_secrets(machine: Machine) -> None:
"--bind", str(facts_dir), str(facts_dir),
"--bind", str(secrets_dir), str(secrets_dir),
"--unshare-all",
"--unshare-user",
"--uid", "1000",
"--",
"bash", "-c", machine.secrets_data[service]["generator"]
],
@@ -65,6 +66,7 @@ def generate_secrets(machine: Machine) -> None:
cmd,
env=env,
)
files_to_commit = []
# store secrets
for secret in machine.secrets_data[service]["secrets"]:
secret_file = secrets_dir / secret
@@ -72,17 +74,27 @@ def generate_secrets(machine: Machine) -> None:
msg = f"did not generate a file for '{secret}' when running the following command:\n"
msg += machine.secrets_data[service]["generator"]
raise ClanError(msg)
secret_store.set(service, secret, secret_file.read_text())
secret_path = secret_store.set(
service, secret, secret_file.read_bytes()
)
if secret_path:
files_to_commit.append(secret_path)
# store facts
for name, fact_path in machine.secrets_data[service]["facts"].items():
for name in machine.secrets_data[service]["facts"]:
fact_file = facts_dir / name
if not fact_file.is_file():
msg = f"did not generate a file for '{name}' when running the following command:\n"
msg += machine.secrets_data[service]["generator"]
raise ClanError(msg)
fact_path = machine.flake / fact_path
fact_path.parent.mkdir(parents=True, exist_ok=True)
shutil.copyfile(fact_file, fact_path)
fact_file = fact_store.set(service, name, fact_file.read_bytes())
if fact_file:
files_to_commit.append(fact_file)
commit_files(
files_to_commit,
machine.flake_dir,
f"Update facts/secrets for service {service} in machine {machine.name}",
)
print("successfully generated secrets")

View File

@@ -10,17 +10,18 @@ class SecretStore:
def __init__(self, machine: Machine) -> None:
self.machine = machine
def set(self, service: str, name: str, value: str) -> None:
def set(self, _service: str, name: str, value: bytes) -> Path | None:
subprocess.run(
nix_shell(
["nixpkgs#pass"],
["pass", "insert", "-m", f"machines/{self.machine.name}/{name}"],
),
input=value.encode("utf-8"),
input=value,
check=True,
)
return None # we manage the files outside of the git repo
def get(self, service: str, name: str) -> bytes:
def get(self, _service: str, name: str) -> bytes:
return subprocess.run(
nix_shell(
["nixpkgs#pass"],
@@ -30,12 +31,11 @@ class SecretStore:
stdout=subprocess.PIPE,
).stdout
def exists(self, service: str, name: str) -> bool:
def exists(self, _service: str, name: str) -> bool:
password_store = os.environ.get(
"PASSWORD_STORE_DIR", f"{os.environ['HOME']}/.password-store"
)
secret_path = Path(password_store) / f"machines/{self.machine.name}/{name}.gpg"
print(f"checking {secret_path}")
return secret_path.exists()
def generate_hash(self) -> bytes:
@@ -86,7 +86,7 @@ class SecretStore:
def update_check(self) -> bool:
local_hash = self.generate_hash()
remote_hash = self.machine.host.run(
remote_hash = self.machine.target_host.run(
# TODO get the path to the secrets from the machine
["cat", f"{self.machine.secrets_upload_directory}/.pass_info"],
check=False,

View File

@@ -28,13 +28,17 @@ class SecretStore:
)
add_machine(self.machine.flake_dir, self.machine.name, pub_key, False)
def set(self, _service: str, name: str, value: str) -> None:
def set(self, _service: str, name: str, value: bytes) -> Path | None:
path = (
sops_secrets_folder(self.machine.flake_dir) / f"{self.machine.name}-{name}"
)
encrypt_secret(
self.machine.flake_dir,
sops_secrets_folder(self.machine.flake_dir) / f"{self.machine.name}-{name}",
value,
path,
value.decode(),
add_machines=[self.machine.name],
)
return path
def get(self, _service: str, _name: str) -> bytes:
raise NotImplementedError()

View File

@@ -0,0 +1,31 @@
import os
import shutil
from pathlib import Path
from clan_cli.dirs import vm_state_dir
from clan_cli.machines.machines import Machine
class SecretStore:
def __init__(self, machine: Machine) -> None:
self.machine = machine
self.dir = vm_state_dir(str(machine.flake), machine.name) / "secrets"
self.dir.mkdir(parents=True, exist_ok=True)
def set(self, service: str, name: str, value: bytes) -> Path | None:
secret_file = self.dir / service / name
secret_file.parent.mkdir(parents=True, exist_ok=True)
secret_file.write_bytes(value)
return None # we manage the files outside of the git repo
def get(self, service: str, name: str) -> bytes:
secret_file = self.dir / service / name
return secret_file.read_bytes()
def exists(self, service: str, name: str) -> bool:
return (self.dir / service / name).exists()
def upload(self, output_dir: Path) -> None:
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
shutil.copytree(self.dir, output_dir)

View File

@@ -22,7 +22,7 @@ def upload_secrets(machine: Machine) -> None:
return
with TemporaryDirectory() as tempdir:
secret_store.upload(Path(tempdir))
host = machine.host
host = machine.target_host
ssh_cmd = host.ssh_cmd()
run(

View File

@@ -16,14 +16,7 @@ from enum import Enum
from pathlib import Path
from shlex import quote
from threading import Thread
from typing import (
IO,
Any,
Generic,
Literal,
TypeVar,
overload,
)
from typing import IO, Any, Generic, TypeVar
# https://no-color.org
DISABLE_COLOR = not sys.stderr.isatty() or os.environ.get("NO_COLOR", "") != ""
@@ -755,7 +748,7 @@ class HostGroup:
def parse_deployment_address(
machine_name: str, host: str, meta: dict[str, Any] = {}
machine_name: str, host: str, forward_agent: bool = True, meta: dict[str, Any] = {}
) -> Host:
parts = host.split("@")
user: str | None = None
@@ -777,83 +770,12 @@ def parse_deployment_address(
hostname = result.hostname
port = result.port
meta = meta.copy()
meta["flake_attr"] = machine_name
return Host(
hostname,
user=user,
port=port,
command_prefix=machine_name,
forward_agent=forward_agent,
meta=meta,
ssh_options=options,
)
@overload
def run(
cmd: list[str] | str,
text: Literal[True] = ...,
stdout: FILE = ...,
stderr: FILE = ...,
extra_env: dict[str, str] = ...,
cwd: None | str | Path = ...,
check: bool = ...,
) -> subprocess.CompletedProcess[str]:
...
@overload
def run(
cmd: list[str] | str,
text: Literal[False],
stdout: FILE = ...,
stderr: FILE = ...,
extra_env: dict[str, str] = ...,
cwd: None | str | Path = ...,
check: bool = ...,
) -> subprocess.CompletedProcess[bytes]:
...
def run(
cmd: list[str] | str,
text: bool = True,
stdout: FILE = None,
stderr: FILE = None,
extra_env: dict[str, str] = {},
cwd: None | str | Path = None,
check: bool = True,
) -> subprocess.CompletedProcess[Any]:
"""
Run command locally
@cmd if this parameter is a string the command is interpreted as a shell command,
         otherwise if it is a list, then the first list element is the command
and the remaining list elements are passed as arguments to the
command.
@text when true, file objects for stdout and stderr are opened in text mode.
    @stdout if not None stdout of the command will be redirected to this file i.e. stdout=subprocess.PIPE
@stderr if not None stderr of the command will be redirected to this file i.e. stderr=subprocess.PIPE
    @extra_env environment variables to override when running the command
@cwd current working directory to run the process in
@check If check is true, and the process exits with a non-zero exit code, a
CalledProcessError exception will be raised. Attributes of that exception
hold the arguments, the exit code, and stdout and stderr if they were
captured.
"""
if isinstance(cmd, list):
info("$ " + " ".join(cmd))
else:
info(f"$ {cmd}")
env = os.environ.copy()
env.update(extra_env)
return subprocess.run(
cmd,
stdout=stdout,
stderr=stderr,
env=env,
cwd=cwd,
check=check,
shell=not isinstance(cmd, list),
text=text,
)

View File
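
A hedged sketch of the address parsing parse_deployment_address relies on: split an optional user@ prefix, then let urlsplit separate host and port:

from urllib.parse import urlsplit

addr = "admin@example.com:2222"  # hypothetical deployment address
user, _, rest = addr.rpartition("@")
result = urlsplit(f"ssh://{rest}")
print(user or None, result.hostname, result.port)  # admin example.com 2222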

@@ -9,18 +9,20 @@ from ..machines.machines import Machine
@dataclass
class VmConfig:
machine_name: str
machine_icon: Path
machine_description: str
flake_url: str | Path
clan_name: str
cores: int
memory_size: int
graphics: bool
wayland: bool = False
waypipe: bool = False
def inspect_vm(machine: Machine) -> VmConfig:
data = json.loads(machine.eval_nix("config.clanCore.vm.inspect"))
return VmConfig(machine_name=machine.name, flake_url=machine.flake, **data)
return VmConfig(flake_url=machine.flake, **data)
@dataclass

View File

@@ -0,0 +1,132 @@
import random
from dataclasses import dataclass
from pathlib import Path
from .inspect import VmConfig
@dataclass
class GraphicOptions:
args: list[str]
vsock_cid: int | None = None
def graphics_options(vm: VmConfig) -> GraphicOptions:
common = [
"-audio",
"driver=pa,model=virtio",
]
if vm.waypipe:
# FIXME: check for collisions
cid = random.randint(1, 2**32)
# fmt: off
return GraphicOptions([
*common,
"-nographic",
"-vga", "none",
"-device", f"vhost-vsock-pci,id=vhost-vsock-pci0,guest-cid={cid}",
# TODO: vgpu
#"-display", "egl-headless,gl=core",
#"-device", "virtio-vga,blob=true",
#"-device", "virtio-serial-pci",
#"-device", "vhost-user-vga,chardev=vgpu",
#"-chardev", "socket,id=vgpu,path=/tmp/vgpu.sock",
], cid)
# fmt: on
else:
# fmt: off
return GraphicOptions([
*common,
"-vga", "none",
"-display", "gtk,gl=on",
"-device", "virtio-gpu-gl",
"-display", "spice-app,gl=on",
"-device", "virtio-serial-pci",
"-chardev", "spicevmc,id=vdagent0,name=vdagent",
"-device", "virtserialport,chardev=vdagent0,name=com.redhat.spice.0",
"-device", "qemu-xhci,id=spicepass",
"-chardev", "spicevmc,id=usbredirchardev1,name=usbredir",
"-device", "usb-redir,chardev=usbredirchardev1,id=usbredirdev1",
"-chardev", "spicevmc,id=usbredirchardev2,name=usbredir",
"-device", "usb-redir,chardev=usbredirchardev2,id=usbredirdev2",
"-chardev", "spicevmc,id=usbredirchardev3,name=usbredir",
"-device", "usb-redir,chardev=usbredirchardev3,id=usbredirdev3",
"-device", "pci-ohci,id=smartpass",
"-device", "usb-ccid",
"-chardev", "spicevmc,id=ccid,name=smartcard",
], None)
# fmt: on
@dataclass
class QemuCommand:
args: list[str]
vsock_cid: int | None = None
def qemu_command(
vm: VmConfig,
nixos_config: dict[str, str],
secrets_dir: Path,
rootfs_img: Path,
state_img: Path,
virtiofsd_socket: Path,
qmp_socket_file: Path,
qga_socket_file: Path,
) -> QemuCommand:
kernel_cmdline = [
(Path(nixos_config["toplevel"]) / "kernel-params").read_text(),
f'init={nixos_config["toplevel"]}/init',
f'regInfo={nixos_config["regInfo"]}/registration',
"console=hvc0",
]
if not vm.waypipe:
kernel_cmdline.append("console=tty0")
# fmt: off
command = [
"qemu-kvm",
"-name", vm.machine_name,
"-m", f'{nixos_config["memorySize"]}M',
"-object", f"memory-backend-memfd,id=mem,size={nixos_config['memorySize']}M",
"-machine", "pc,memory-backend=mem,accel=kvm",
"-smp", str(nixos_config["cores"]),
"-cpu", "max",
"-enable-kvm",
        # speed up boot by not waiting for the boot menu
"-boot", "menu=off,strict=on",
"-device", "virtio-rng-pci",
"-netdev", "user,id=user.0",
"-device", "virtio-net-pci,netdev=user.0,romfile=",
"-chardev", f"socket,id=char1,path={virtiofsd_socket}",
"-device", "vhost-user-fs-pci,chardev=char1,tag=nix-store",
"-virtfs", f"local,path={secrets_dir},security_model=none,mount_tag=secrets",
"-drive", f"cache=writeback,file={rootfs_img},format=qcow2,id=drive1,if=none,index=1,werror=report",
"-device", "virtio-blk-pci,bootindex=1,drive=drive1,serial=root",
"-drive", f"cache=writeback,file={state_img},format=qcow2,id=state,if=none,index=2,werror=report",
"-device", "virtio-blk-pci,drive=state",
"-device", "virtio-keyboard",
"-usb", "-device", "usb-tablet,bus=usb-bus.0",
"-kernel", f'{nixos_config["toplevel"]}/kernel',
"-initrd", nixos_config["initrd"],
"-append", " ".join(kernel_cmdline),
# qmp & qga setup
"-qmp", f"unix:{qmp_socket_file},server,wait=off",
"-chardev", f"socket,path={qga_socket_file},server=on,wait=off,id=qga0",
"-device", "virtio-serial",
"-device", "virtserialport,chardev=qga0,name=org.qemu.guest_agent.0",
"-serial", "null",
"-chardev", "stdio,mux=on,id=char0,signal=off",
"-mon", "chardev=char0,mode=readline",
"-device", "virtconsole,chardev=char0,nr=0",
] # fmt: on
vsock_cid = None
if vm.graphics:
opts = graphics_options(vm)
vsock_cid = opts.vsock_cid
command.extend(opts.args)
else:
command.append("-nographic")
return QemuCommand(command, vsock_cid=vsock_cid)

View File
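
A sketch of how qemu_command above assembles the -append line (store path hypothetical, and the kernel-params file read is elided); console=tty0 is only added for VMs without waypipe:

from pathlib import Path

toplevel = Path("/nix/store/eeee-nixos-system-demo")  # hypothetical
kernel_cmdline = [f"init={toplevel}/init", "console=hvc0"]
waypipe = False
if not waypipe:
    kernel_cmdline.append("console=tty0")
print("-append", " ".join(kernel_cmdline))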

@@ -1,164 +1,59 @@
import argparse
import contextlib
import importlib
import json
import logging
import os
import random
import socket
import subprocess
import tempfile
import time
from collections.abc import Iterator
from dataclasses import dataclass, field
from pathlib import Path
from typing import IO
from tempfile import TemporaryDirectory
from ..cmd import Log, run
from ..dirs import machine_gcroot, module_root, vm_state_dir
from ..dirs import module_root, user_cache_dir, vm_state_dir
from ..errors import ClanError
from ..machines.machines import Machine
from ..nix import nix_build, nix_config, nix_shell
from ..nix import nix_shell
from ..secrets.generate import generate_secrets
from .inspect import VmConfig, inspect_vm
from .qemu import qemu_command
from .virtiofsd import start_virtiofsd
from .waypipe import start_waypipe
log = logging.getLogger(__name__)
@dataclass
class GraphicOptions:
args: list[str]
vsock_cid: int | None = None
def graphics_options(vm: VmConfig) -> GraphicOptions:
common = [
"-audio",
"driver=pa,model=virtio",
]
if vm.wayland:
# FIXME: check for collisions
cid = random.randint(1, 2**32)
# fmt: off
return GraphicOptions([
*common,
"-nographic",
"-vga", "none",
"-device", f"vhost-vsock-pci,id=vhost-vsock-pci0,guest-cid={cid}",
# TODO: vgpu
#"-display", "egl-headless,gl=core",
#"-device", "virtio-vga,blob=true",
#"-device", "virtio-serial-pci",
#"-device", "vhost-user-vga,chardev=vgpu",
#"-chardev", "socket,id=vgpu,path=/tmp/vgpu.sock",
], cid)
# fmt: on
else:
# fmt: off
return GraphicOptions([
*common,
"-vga", "none",
"-display", "gtk,gl=on",
"-device", "virtio-gpu-gl",
"-display", "spice-app,gl=on",
"-device", "virtio-serial-pci",
"-chardev", "spicevmc,id=vdagent0,name=vdagent",
"-device", "virtserialport,chardev=vdagent0,name=com.redhat.spice.0",
"-device", "qemu-xhci,id=spicepass",
"-chardev", "spicevmc,id=usbredirchardev1,name=usbredir",
"-device", "usb-redir,chardev=usbredirchardev1,id=usbredirdev1",
"-chardev", "spicevmc,id=usbredirchardev2,name=usbredir",
"-device", "usb-redir,chardev=usbredirchardev2,id=usbredirdev2",
"-chardev", "spicevmc,id=usbredirchardev3,name=usbredir",
"-device", "usb-redir,chardev=usbredirchardev3,id=usbredirdev3",
"-device", "pci-ohci,id=smartpass",
"-device", "usb-ccid",
"-chardev", "spicevmc,id=ccid,name=smartcard",
], None)
# fmt: on
@dataclass
class QemuCommand:
args: list[str]
vsock_cid: int | None = None
def qemu_command(
vm: VmConfig,
nixos_config: dict[str, str],
xchg_dir: Path,
secrets_dir: Path,
state_dir: Path,
disk_img: Path,
) -> QemuCommand:
kernel_cmdline = [
(Path(nixos_config["toplevel"]) / "kernel-params").read_text(),
f'init={nixos_config["toplevel"]}/init',
f'regInfo={nixos_config["regInfo"]}/registration',
"console=ttyS0,115200n8",
]
if not vm.wayland:
kernel_cmdline.append("console=tty0")
# fmt: off
command = [
"qemu-kvm",
"-name", vm.machine_name,
"-m", f'{nixos_config["memorySize"]}M',
"-object", f"memory-backend-memfd,id=mem,size={nixos_config['memorySize']}M",
"-machine", "pc,memory-backend=mem,accel=kvm",
"-smp", str(nixos_config["cores"]),
"-cpu", "max",
"-enable-kvm",
"-device", "virtio-rng-pci",
"-net", "nic,netdev=user.0,model=virtio",
"-netdev", "user,id=user.0",
"-virtfs", "local,path=/nix/store,security_model=none,mount_tag=nix-store",
"-virtfs", f"local,path={xchg_dir},security_model=none,mount_tag=shared",
"-virtfs", f"local,path={xchg_dir},security_model=none,mount_tag=xchg",
"-virtfs", f"local,path={secrets_dir},security_model=none,mount_tag=secrets",
"-virtfs", f"local,path={state_dir},security_model=none,mount_tag=state",
"-drive", f"cache=writeback,file={disk_img},format=raw,id=drive1,if=none,index=1,werror=report",
"-device", "virtio-blk-pci,bootindex=1,drive=drive1,serial=root",
"-device", "virtio-keyboard",
"-usb", "-device", "usb-tablet,bus=usb-bus.0",
"-kernel", f'{nixos_config["toplevel"]}/kernel',
"-initrd", nixos_config["initrd"],
"-append", " ".join(kernel_cmdline),
] # fmt: on
vsock_cid = None
if vm.graphics:
opts = graphics_options(vm)
vsock_cid = opts.vsock_cid
command.extend(opts.args)
else:
command.append("-nographic")
return QemuCommand(command, vsock_cid=vsock_cid)
def facts_to_nixos_config(facts: dict[str, dict[str, bytes]]) -> dict:
nixos_config: dict = {}
nixos_config["clanCore"] = {}
nixos_config["clanCore"]["secrets"] = {}
for service, service_facts in facts.items():
nixos_config["clanCore"]["secrets"][service] = {}
nixos_config["clanCore"]["secrets"][service]["facts"] = {}
for fact, value in service_facts.items():
nixos_config["clanCore"]["secrets"][service]["facts"][fact] = {
"value": value.decode()
}
return nixos_config
# TODO move this to the Machines class
def get_vm_create_info(
machine: Machine, vm: VmConfig, nix_options: list[str]
def build_vm(
machine: Machine, vm: VmConfig, tmpdir: Path, nix_options: list[str] = []
) -> dict[str, str]:
config = nix_config()
system = config["system"]
secrets_dir = get_secrets(machine, tmpdir)
clan_dir = machine.flake
cmd = nix_build(
[
f'{clan_dir}#clanInternals.machines."{system}"."{machine.name}".config.system.clan.vm.create',
*nix_options,
],
machine_gcroot(clan_name=vm.clan_name, flake_url=str(vm.flake_url))
/ f"vm-{machine.name}",
)
proc = run(
cmd, log=Log.BOTH, error_msg=f"Could not build vm config for {machine.name}"
facts_module = importlib.import_module(machine.facts_module)
fact_store = facts_module.FactStore(machine=machine)
facts = fact_store.get_all()
nixos_config_file = machine.build_nix(
"config.system.clan.vm.create",
extra_config=facts_to_nixos_config(facts),
nix_options=nix_options,
)
try:
return json.loads(Path(proc.stdout.strip()).read_text())
vm_data = json.loads(Path(nixos_config_file).read_text())
vm_data["secrets_dir"] = str(secrets_dir)
return vm_data
except json.JSONDecodeError as e:
raise ClanError(f"Failed to parse vm config: {e}")
@@ -168,32 +63,33 @@ def get_secrets(
tmpdir: Path,
) -> Path:
secrets_dir = tmpdir / "secrets"
secrets_dir.mkdir(exist_ok=True)
secrets_dir.mkdir(parents=True, exist_ok=True)
secrets_module = importlib.import_module(machine.secrets_module)
secret_store = secrets_module.SecretStore(machine=machine)
# Only generate secrets for local clans
if isinstance(machine.flake, Path) and machine.flake.is_dir():
generate_secrets(machine)
else:
log.warning("won't generate secrets for non local clan")
# TODO Only generate secrets for local clans
generate_secrets(machine)
secret_store.upload(secrets_dir)
return secrets_dir
def prepare_disk(tmpdir: Path, log_fd: IO[str] | None) -> Path:
disk_img = tmpdir / "disk.img"
def prepare_disk(
directory: Path,
size: str = "1024M",
file_name: str = "disk.img",
) -> Path:
disk_img = directory / file_name
cmd = nix_shell(
["nixpkgs#qemu"],
[
"qemu-img",
"create",
"-f",
"raw",
"qcow2",
str(disk_img),
"1024M",
size,
],
)
run(
@@ -202,107 +98,77 @@ def prepare_disk(tmpdir: Path, log_fd: IO[str] | None) -> Path:
error_msg=f"Could not create disk image at {disk_img}",
)
cmd = nix_shell(
["nixpkgs#e2fsprogs"],
[
"mkfs.ext4",
"-L",
"nixos",
str(disk_img),
],
)
run(
cmd,
log=Log.BOTH,
error_msg=f"Could not create ext4 filesystem at {disk_img}",
)
return disk_img
VMADDR_CID_HYPERVISOR = 2
def test_vsock_port(port: int) -> bool:
try:
s = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
s.connect((VMADDR_CID_HYPERVISOR, port))
s.close()
return True
except OSError:
return False
@contextlib.contextmanager
def start_waypipe(cid: int | None, title_prefix: str) -> Iterator[None]:
if cid is None:
yield
return
waypipe = nix_shell(
["git+https://git.clan.lol/clan/clan-core#waypipe"],
[
"waypipe",
"--vsock",
"--socket",
f"s{cid}:3049",
"--title-prefix",
title_prefix,
"client",
],
)
with subprocess.Popen(waypipe) as proc:
try:
while not test_vsock_port(3049):
time.sleep(0.1)
yield
finally:
proc.kill()
def run_vm(
vm: VmConfig,
nix_options: list[str] = [],
log_fd: IO[str] | None = None,
) -> None:
"""
log_fd can be used to stream the output of all commands to a UI
"""
def run_vm(vm: VmConfig, nix_options: list[str] = []) -> None:
machine = Machine(vm.machine_name, vm.flake_url)
log.debug(f"Creating VM for {machine}")
# TODO: We should get this from the vm argument
nixos_config = get_vm_create_info(machine, vm, nix_options)
# store the temporary rootfs inside XDG_CACHE_HOME on the host
# otherwise, when using /tmp, we risk running out of memory
cache = user_cache_dir() / "clan"
cache.mkdir(exist_ok=True)
with TemporaryDirectory(dir=cache) as cachedir, TemporaryDirectory() as sockets:
tmpdir = Path(cachedir)
with tempfile.TemporaryDirectory() as tmpdir_:
tmpdir = Path(tmpdir_)
xchg_dir = tmpdir / "xchg"
xchg_dir.mkdir(exist_ok=True)
# TODO: We should get this from the vm argument
nixos_config = build_vm(machine, vm, tmpdir, nix_options)
secrets_dir = get_secrets(machine, tmpdir)
disk_img = prepare_disk(tmpdir, log_fd)
state_dir = vm_state_dir(vm.clan_name, str(machine.flake), machine.name)
state_dir = vm_state_dir(str(vm.flake_url), machine.name)
state_dir.mkdir(parents=True, exist_ok=True)
# specify socket files for qmp and qga
qmp_socket_file = Path(sockets) / "qmp.sock"
qga_socket_file = Path(sockets) / "qga.sock"
# Create symlinks to the qmp/qga sockets to be able to find them later.
# This indirection is needed because we cannot put the sockets directly
# in the state_dir.
# The reason is, qemu has a length limit of 108 bytes for the qmp socket
# path which is violated easily.
qmp_link = state_dir / "qmp.sock"
if os.path.lexists(qmp_link):
qmp_link.unlink()
qmp_link.symlink_to(qmp_socket_file)
qga_link = state_dir / "qga.sock"
if os.path.lexists(qga_link):
qga_link.unlink()
qga_link.symlink_to(qga_socket_file)
rootfs_img = prepare_disk(tmpdir)
state_img = state_dir / "state.qcow2"
if not state_img.exists():
state_img = prepare_disk(
directory=state_dir,
file_name="state.qcow2",
size="50G",
)
virtiofsd_socket = Path(sockets) / "virtiofsd.sock"
qemu_cmd = qemu_command(
vm,
nixos_config,
xchg_dir=xchg_dir,
secrets_dir=secrets_dir,
state_dir=state_dir,
disk_img=disk_img,
secrets_dir=Path(nixos_config["secrets_dir"]),
rootfs_img=rootfs_img,
state_img=state_img,
virtiofsd_socket=virtiofsd_socket,
qmp_socket_file=qmp_socket_file,
qga_socket_file=qga_socket_file,
)
packages = ["nixpkgs#qemu"]
env = os.environ.copy()
if vm.graphics and not vm.wayland:
if vm.graphics and not vm.waypipe:
packages.append("nixpkgs#virt-viewer")
remote_viewer_mimetypes = module_root() / "vms" / "mimetypes"
env[
"XDG_DATA_DIRS"
] = f"{remote_viewer_mimetypes}:{env.get('XDG_DATA_DIRS', '')}"
with start_waypipe(qemu_cmd.vsock_cid, f"[{vm.machine_name}] "):
with start_waypipe(
qemu_cmd.vsock_cid, f"[{vm.machine_name}] "
), start_virtiofsd(virtiofsd_socket):
run(
nix_shell(packages, qemu_cmd.args),
env=env,
@@ -316,7 +182,7 @@ class RunOptions:
machine: str
flake: Path
nix_options: list[str] = field(default_factory=list)
wayland: bool = False
waypipe: bool = False
def run_command(args: argparse.Namespace) -> None:
@@ -324,14 +190,11 @@ def run_command(args: argparse.Namespace) -> None:
machine=args.machine,
flake=args.flake,
nix_options=args.option,
wayland=args.wayland,
)
machine = Machine(run_options.machine, run_options.flake)
vm = inspect_vm(machine=machine)
# TODO: allow to set this in the config
vm.wayland = run_options.wayland
run_vm(vm, run_options.nix_options)
@@ -339,5 +202,4 @@ def run_command(args: argparse.Namespace) -> None:
def register_run_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument("machine", type=str, help="machine in the flake to run")
parser.add_argument("--flake-url", type=str, help="flake url")
parser.add_argument("--wayland", action="store_true", help="use wayland")
parser.set_defaults(func=run_command)


@@ -0,0 +1,41 @@
import contextlib
import shutil
import subprocess
import time
from collections.abc import Iterator
from pathlib import Path
from ..errors import ClanError
from ..nix import nix_shell
@contextlib.contextmanager
def start_virtiofsd(socket_path: Path) -> Iterator[None]:
sandbox = "namespace"
if shutil.which("newuidmap") is None:
sandbox = "none"
virtiofsd = nix_shell(
["nixpkgs#virtiofsd"],
[
"virtiofsd",
"--socket-path",
str(socket_path),
"--cache",
"always",
"--sandbox",
sandbox,
"--shared-dir",
"/nix/store",
],
)
with subprocess.Popen(virtiofsd) as proc:
try:
while not socket_path.exists():
rc = proc.poll()
if rc is not None:
msg = f"virtiofsd exited unexpectedly with code {rc}"
raise ClanError(msg)
time.sleep(0.1)
yield
finally:
proc.kill()
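A minimal usage sketch of this context manager (the module path `clan_cli.vms.virtiofsd` and the socket directory are assumptions, not confirmed by the diff): entering the context blocks until virtiofsd has created its socket, so the body can start qemu immediately.

```python
# Hypothetical usage sketch; module path is an assumption based on the diff above.
from pathlib import Path
from tempfile import TemporaryDirectory

from clan_cli.vms.virtiofsd import start_virtiofsd  # assumed module path

with TemporaryDirectory() as sockets:
    socket_path = Path(sockets) / "virtiofsd.sock"
    # Blocks until the virtiofsd socket exists, raising ClanError if the
    # daemon exits first; the socket can then be passed to qemu.
    with start_virtiofsd(socket_path):
        ...  # start qemu with a vhost-user-fs chardev pointing at socket_path
```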


@@ -0,0 +1,50 @@
import contextlib
import socket
import subprocess
import time
from collections.abc import Iterator
from ..errors import ClanError
from ..nix import nix_shell
VMADDR_CID_HYPERVISOR = 2
def test_vsock_port(port: int) -> bool:
try:
s = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
s.connect((VMADDR_CID_HYPERVISOR, port))
s.close()
return True
except OSError:
return False
@contextlib.contextmanager
def start_waypipe(cid: int | None, title_prefix: str) -> Iterator[None]:
if cid is None:
yield
return
waypipe = nix_shell(
["git+https://git.clan.lol/clan/clan-core#waypipe"],
[
"waypipe",
"--vsock",
"--socket",
f"s{cid}:3049",
"--title-prefix",
title_prefix,
"client",
],
)
with subprocess.Popen(waypipe) as proc:
try:
while not test_vsock_port(3049):
rc = proc.poll()
if rc is not None:
msg = f"waypipe exited unexpectedly with code {rc}"
raise ClanError(msg)
time.sleep(0.1)
yield
finally:
proc.kill()
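Correspondingly for waypipe (module path again assumed): passing `cid=None` makes the context manager a no-op, which is how non-graphical VMs skip the client; with a cid, it waits until vsock port 3049 accepts connections.

```python
# Hypothetical usage sketch; module path is an assumption.
from clan_cli.vms.waypipe import start_waypipe  # assumed module path

# No-op when the VM has no vsock cid (e.g. graphics disabled):
with start_waypipe(cid=None, title_prefix="[vm1] "):
    pass

# With a cid, blocks until the waypipe client is reachable on port 3049:
with start_waypipe(cid=3, title_prefix="[vm1] "):
    ...  # run qemu here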


pkgs/clan-cli/qemu/qga.py (new file, 77 lines)

@@ -0,0 +1,77 @@
import base64
import json
import socket
from pathlib import Path
from time import sleep
# qga is almost like qmp, but not quite, because:
# - server doesn't send initial message
# - no need to initialize by asking for capabilities
# - results need to be base64 decoded
class QgaSession:
def __init__(self, socket_file: Path | str) -> None:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# try to reconnect a couple of times if connection refused
for _ in range(100):
try:
self.sock.connect(str(socket_file))
return
except ConnectionRefusedError:
sleep(0.1)
self.sock.connect(str(socket_file))
def get_response(self) -> dict:
result = self.sock.recv(9999999)
return json.loads(result)
# only execute, don't wait for response
def exec_cmd(self, cmd: str) -> None:
self.sock.send(
json.dumps(
{
"execute": "guest-exec",
"arguments": {
"path": "/bin/sh",
"arg": ["-l", "-c", cmd],
"capture-output": True,
},
}
).encode("utf-8")
)
# run, wait for result, return exitcode and output
def run(self, cmd: str) -> tuple[int, str, str]:
self.exec_cmd(cmd)
result_pid = self.get_response()
pid = result_pid["return"]["pid"]
# loop until exited=true
status_payload = json.dumps(
{
"execute": "guest-exec-status",
"arguments": {
"pid": pid,
},
}
).encode("utf-8")
while True:
self.sock.send(status_payload)
result = self.get_response()
if "error" in result and result["error"]["desc"].startswith("PID"):
raise Exception("PID could not be found")
if result["return"]["exited"]:
break
sleep(0.1)
exitcode = result["return"]["exitcode"]
stdout = (
""
if "out-data" not in result["return"]
else base64.b64decode(result["return"]["out-data"]).decode("utf-8")
)
stderr = (
""
if "err-data" not in result["return"]
else base64.b64decode(result["return"]["err-data"]).decode("utf-8")
)
return exitcode, stdout, stderr
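A usage sketch of `QgaSession`, assuming a guest agent is listening on the given unix socket (the socket path here is illustrative): `run()` handles the guest-exec polling loop and base64 decoding internally.

```python
from qemu.qga import QgaSession

qga = QgaSession("/tmp/qga.sock")  # hypothetical socket path
# Runs the command in the guest, waits for exited=true, decodes output.
exitcode, stdout, stderr = qga.run("cat /etc/hostname")
assert exitcode == 0, stderr
print(stdout, end="")
```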

pkgs/clan-cli/qemu/qmp.py (new file, 317 lines)

@@ -0,0 +1,317 @@
# mypy: ignore-errors
""" QEMU Monitor Protocol Python class """
# Copyright (C) 2009, 2010 Red Hat Inc.
#
# Authors:
# Luiz Capitulino <lcapitulino@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
import errno
import json
import logging
import socket
from typing import Any
class QMPError(Exception):
"""
QMP base exception
"""
class QMPConnectError(QMPError):
"""
QMP connection exception
"""
class QMPCapabilitiesError(QMPError):
"""
QMP negotiate capabilities exception
"""
class QMPTimeoutError(QMPError):
"""
QMP timeout exception
"""
class QEMUMonitorProtocol:
"""
Provide an API to connect to QEMU via QEMU Monitor Protocol (QMP) and then
allow to handle commands and events.
"""
#: Logger object for debugging messages
logger: logging.Logger = logging.getLogger("QMP")
def __init__(
self,
address: str | tuple[str, int],
server: bool = False,
nickname: str | None = None,
) -> None:
"""
Create a QEMUMonitorProtocol class.
@param address: QEMU address, can be either a unix socket path (string)
or a tuple in the form ( address, port ) for a TCP
connection
@param server: server mode listens on the socket (bool)
@raise OSError on socket connection errors
@note No connection is established, this is done by the connect() or
accept() methods
"""
self.__events: list[dict[str, Any]] = []
self.__address: str | tuple[str, int] = address
self.__sock: socket.socket = self.__get_sock()
self.__sockfile: socket.SocketIO | None = None
self._nickname: str | None = nickname
if self._nickname:
self.logger = logging.getLogger("QMP").getChild(self._nickname)
if server:
self.__sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.__sock.bind(self.__address)
self.__sock.listen(1)
def __get_sock(self) -> socket.socket:
if isinstance(self.__address, tuple):
family = socket.AF_INET
else:
family = socket.AF_UNIX
return socket.socket(family, socket.SOCK_STREAM)
def __negotiate_capabilities(self) -> dict[str, Any]:
greeting = self.__json_read()
if greeting is None or "QMP" not in greeting:
raise QMPConnectError
# Greeting seems ok, negotiate capabilities
resp = self.cmd("qmp_capabilities")
if resp and "return" in resp:
return greeting
raise QMPCapabilitiesError
def __json_read(self, only_event: bool = False) -> dict[str, Any] | None:
while True:
data = self.__sockfile.readline()
if not data:
return None
resp = json.loads(data)
if "event" in resp:
self.logger.debug("<<< %s", resp)
self.__events.append(resp)
if not only_event:
continue
return resp
def __get_events(self, wait: bool | float = False) -> None:
"""
Check for new events in the stream and cache them in __events.
@param wait (bool): block until an event is available.
@param wait (float): If wait is a float, treat it as a timeout value.
@raise QMPTimeoutError: If a timeout float is provided and the timeout
period elapses.
@raise QMPConnectError: If wait is True but no events could be
retrieved or if some other error occurred.
"""
# Check for new events regardless and pull them into the cache:
self.__sock.setblocking(0)
try:
self.__json_read()
except OSError as err:
if err.errno == errno.EAGAIN:
# No data available
pass
self.__sock.setblocking(1)
# Wait for new events, if needed.
# if wait is 0.0, this means "no wait" and is also implicitly false.
if not self.__events and wait:
if isinstance(wait, float):
self.__sock.settimeout(wait)
try:
ret = self.__json_read(only_event=True)
except socket.timeout:
raise QMPTimeoutError("Timeout waiting for event")
except Exception:
raise QMPConnectError("Error while reading from socket")
if ret is None:
raise QMPConnectError("Error while reading from socket")
self.__sock.settimeout(None)
def __enter__(self) -> "QEMUMonitorProtocol":
# Implement context manager enter function.
return self
def __exit__(self, exc_type: Any, exc_value: Any, exc_traceback: Any) -> bool:
# Implement context manager exit function.
self.close()
return False
def connect(self, negotiate: bool = True) -> dict[str, Any] | None:
"""
Connect to the QMP Monitor and perform capabilities negotiation.
@return QMP greeting dict, or None if negotiate is false
@raise OSError on socket connection errors
@raise QMPConnectError if the greeting is not received
@raise QMPCapabilitiesError if fails to negotiate capabilities
"""
self.__sock.connect(self.__address)
self.__sockfile = self.__sock.makefile()
if negotiate:
return self.__negotiate_capabilities()
return None
def accept(self, timeout: float | None = 15.0) -> dict[str, Any]:
"""
Await connection from QMP Monitor and perform capabilities negotiation.
@param timeout: timeout in seconds (nonnegative float number, or
None). The value passed will set the behavior of the
underneath QMP socket as described in [1]. Default value
is set to 15.0.
@return QMP greeting dict
@raise OSError on socket connection errors
@raise QMPConnectError if the greeting is not received
@raise QMPCapabilitiesError if fails to negotiate capabilities
[1]
https://docs.python.org/3/library/socket.html#socket.socket.settimeout
"""
self.__sock.settimeout(timeout)
self.__sock, _ = self.__sock.accept()
self.__sockfile = self.__sock.makefile()
return self.__negotiate_capabilities()
def cmd_obj(self, qmp_cmd: dict[str, Any]) -> dict[str, Any] | None:
"""
Send a QMP command to the QMP Monitor.
@param qmp_cmd: QMP command to be sent as a Python dict
@return QMP response as a Python dict or None if the connection has
been closed
"""
self.logger.debug(">>> %s", qmp_cmd)
try:
self.__sock.sendall(json.dumps(qmp_cmd).encode("utf-8"))
except OSError as err:
if err.errno == errno.EPIPE:
return None
raise err
resp = self.__json_read()
self.logger.debug("<<< %s", resp)
return resp
def cmd(
self,
name: str,
args: dict[str, Any] | None = None,
cmd_id: dict[str, Any] | list[Any] | str | int | None = None,
) -> dict[str, Any] | None:
"""
Build a QMP command and send it to the QMP Monitor.
@param name: command name (string)
@param args: command arguments (dict)
@param cmd_id: command id (dict, list, string or int)
"""
qmp_cmd: dict[str, Any] = {"execute": name}
if args:
qmp_cmd["arguments"] = args
if cmd_id:
qmp_cmd["id"] = cmd_id
return self.cmd_obj(qmp_cmd)
def command(self, cmd: str, **kwds: Any) -> Any:
"""
Build and send a QMP command to the monitor, report errors if any
"""
ret = self.cmd(cmd, kwds)
if "error" in ret:
raise Exception(ret["error"]["desc"])
return ret["return"]
def pull_event(self, wait: bool | float = False) -> dict[str, Any] | None:
"""
Pulls a single event.
@param wait (bool): block until an event is available.
@param wait (float): If wait is a float, treat it as a timeout value.
@raise QMPTimeoutError: If a timeout float is provided and the timeout
period elapses.
@raise QMPConnectError: If wait is True but no events could be
retrieved or if some other error occurred.
@return The first available QMP event, or None.
"""
self.__get_events(wait)
if self.__events:
return self.__events.pop(0)
return None
def get_events(self, wait: bool | float = False) -> list[dict[str, Any]]:
"""
Get a list of available QMP events.
@param wait (bool): block until an event is available.
@param wait (float): If wait is a float, treat it as a timeout value.
@raise QMPTimeoutError: If a timeout float is provided and the timeout
period elapses.
@raise QMPConnectError: If wait is True but no events could be
retrieved or if some other error occurred.
@return The list of available QMP events.
"""
self.__get_events(wait)
return self.__events
def clear_events(self) -> None:
"""
Clear current list of pending events.
"""
self.__events = []
def close(self) -> None:
"""
Close the socket and socket file.
"""
if self.__sock:
self.__sock.close()
if self.__sockfile:
self.__sockfile.close()
def settimeout(self, timeout: float | None) -> None:
"""
Set the socket timeout.
@param timeout (float): timeout in seconds, or None.
@note This is a wrap around socket.settimeout
"""
self.__sock.settimeout(timeout)
def get_sock_fd(self) -> int:
"""
Get the socket file descriptor.
@return The file descriptor number.
"""
return self.__sock.fileno()
def is_scm_available(self) -> bool:
"""
Check if the socket allows for SCM_RIGHTS.
@return True if SCM_RIGHTS is available, otherwise False.
"""
return self.__sock.family == socket.AF_UNIX
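A minimal sketch of driving this class against a running QEMU instance (the socket path is illustrative): `connect()` performs the greeting and capabilities handshake, after which `command()` returns the `return` payload directly.

```python
from qemu.qmp import QEMUMonitorProtocol

with QEMUMonitorProtocol("/tmp/qmp.sock") as qmp:  # hypothetical socket path
    qmp.connect()                    # greeting + qmp_capabilities negotiation
    status = qmp.command("query-status")
    print(status["status"])          # e.g. "running"
    qmp.command("system_powerdown")  # ask the guest to shut down cleanly
```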


@@ -41,7 +41,10 @@ class FlakeForTest(NamedTuple):
def generate_flake(
temporary_home: Path,
flake_template: Path,
substitutions: dict[str, str] = {},
substitutions: dict[str, str] = {
"__CHANGE_ME__": "_test_vm_persistence",
"git+https://git.clan.lol/clan/clan-core": "path://" + str(CLAN_CORE),
},
# define the machines directly including their config
machine_configs: dict[str, dict] = {},
) -> FlakeForTest:


@@ -1,5 +1,5 @@
{ lib, ... }: {
clan.networking.deploymentAddress = "__CLAN_DEPLOYMENT_ADDRESS__";
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__";
clanCore.secretsUploadDirectory = "__CLAN_SOPS_KEY_DIR__";


@@ -1,5 +1,5 @@
{ lib, ... }: {
clan.networking.deploymentAddress = "__CLAN_DEPLOYMENT_ADDRESS__";
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__";
clanCore.secretsUploadDirectory = "__CLAN_SOPS_KEY_DIR__";


@@ -1,5 +1,5 @@
{ lib, ... }: {
clan.networking.deploymentAddress = "__CLAN_DEPLOYMENT_ADDRESS__";
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
clan.virtualisation.graphics = false;


@@ -20,16 +20,16 @@ from clan_cli.dirs import clan_key_safe, vm_state_dir
def test_clan_key_safe() -> None:
assert clan_key_safe("clan1", "/foo/bar") == "clan1-%2Ffoo%2Fbar"
assert clan_key_safe("/foo/bar") == "%2Ffoo%2Fbar"
def test_vm_state_dir_identity() -> None:
dir1 = vm_state_dir("clan1", "https://some.clan", "vm1")
dir2 = vm_state_dir("clan1", "https://some.clan", "vm1")
dir1 = vm_state_dir("https://some.clan", "vm1")
dir2 = vm_state_dir("https://some.clan", "vm1")
assert str(dir1) == str(dir2)
def test_vm_state_dir_no_collision() -> None:
dir1 = vm_state_dir("clan1", "/foo/bar", "vm1")
dir2 = vm_state_dir("clan1", "https://some.clan", "vm1")
dir1 = vm_state_dir("/foo/bar", "vm1")
dir2 = vm_state_dir("https://some.clan", "vm1")
assert str(dir1) != str(dir2)


@@ -12,7 +12,7 @@
clanName = "test_flake_with_core";
machines = {
vm1 = { lib, ... }: {
clan.networking.deploymentAddress = "__CLAN_DEPLOYMENT_ADDRESS__";
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__";
clanCore.secretsUploadDirectory = "__CLAN_SOPS_KEY_DIR__";
@@ -32,7 +32,7 @@
};
};
vm2 = { lib, ... }: {
clan.networking.deploymentAddress = "__CLAN_DEPLOYMENT_ADDRESS__";
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__";
clanCore.secretsUploadDirectory = "__CLAN_SOPS_KEY_DIR__";


@@ -12,7 +12,7 @@
clanName = "test_flake_with_core_and_pass";
machines = {
vm1 = { lib, ... }: {
clan.networking.deploymentAddress = "__CLAN_DEPLOYMENT_ADDRESS__";
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
clanCore.secretStore = "password-store";
clanCore.secretsUploadDirectory = lib.mkForce "__CLAN_SOPS_KEY_DIR__/secrets";


@@ -26,7 +26,6 @@ def test_history_add(
"add",
str(uri),
]
cli.run(cmd)
history_file = user_history_file()


@@ -60,7 +60,7 @@ def test_upload_secret(
flake = test_flake_with_core_and_pass.path.joinpath("flake.nix")
host = host_group.hosts[0]
addr = f"{host.user}@{host.host}:{host.port}?StrictHostKeyChecking=no&UserKnownHostsFile=/dev/null&IdentityFile={host.key}"
new_text = flake.read_text().replace("__CLAN_DEPLOYMENT_ADDRESS__", addr)
new_text = flake.read_text().replace("__CLAN_TARGET_ADDRESS__", addr)
flake.write_text(new_text)
cli.run(["secrets", "upload", "vm1"])
zerotier_identity_secret = (


@@ -52,7 +52,7 @@ def test_secrets_upload(
flake = test_flake_with_core.path.joinpath("flake.nix")
host = host_group.hosts[0]
addr = f"{host.user}@{host.host}:{host.port}?StrictHostKeyChecking=no&UserKnownHostsFile=/dev/null&IdentityFile={host.key}"
new_text = flake.read_text().replace("__CLAN_DEPLOYMENT_ADDRESS__", addr)
new_text = flake.read_text().replace("__CLAN_TARGET_ADDRESS__", addr)
flake.write_text(new_text)
cli.run(["--flake", str(test_flake_with_core.path), "secrets", "upload", "vm1"])


@@ -1,32 +1,11 @@
import subprocess
from clan_cli.ssh import Host, HostGroup, run
def test_run() -> None:
p = run("echo hello")
assert p.stdout is None
def test_run_failure() -> None:
p = run("exit 1", check=False)
assert p.returncode == 1
try:
p = run("exit 1")
except Exception:
pass
else:
assert False, "Command should have raised an error"
from clan_cli.ssh import Host, HostGroup
hosts = HostGroup([Host("some_host")])
def test_run_environment() -> None:
p1 = run("echo $env_var", stdout=subprocess.PIPE, extra_env=dict(env_var="true"))
assert p1.stdout == "true\n"
p2 = hosts.run_local(
"echo $env_var", extra_env=dict(env_var="true"), stdout=subprocess.PIPE
)
@@ -38,17 +17,6 @@ def test_run_environment() -> None:
assert "env_var=true" in p3[0].result.stdout
def test_run_non_shell() -> None:
p = run(["echo", "$hello"], stdout=subprocess.PIPE)
assert p.stdout == "$hello\n"
def test_run_stderr_stdout() -> None:
p = run("echo 1; echo 2 >&2", stdout=subprocess.PIPE, stderr=subprocess.PIPE)
assert p.stdout == "1\n"
assert p.stderr == "2\n"
def test_run_local() -> None:
hosts.run_local("echo hello")


@@ -1,5 +1,9 @@
import os
import sys
import threading
import traceback
from pathlib import Path
from time import sleep
from typing import TYPE_CHECKING
import pytest
@@ -8,6 +12,8 @@ from fixtures_flakes import FlakeForTest, generate_flake
from root import CLAN_CORE
from clan_cli.dirs import vm_state_dir
from qemu.qga import QgaSession
from qemu.qmp import QEMUMonitorProtocol
if TYPE_CHECKING:
from age_keys import KeyPair
@@ -15,6 +21,66 @@ if TYPE_CHECKING:
no_kvm = not os.path.exists("/dev/kvm")
def run_vm_in_thread(machine_name: str) -> None:
# runs machine and prints exceptions
def run() -> None:
try:
Cli().run(["vms", "run", machine_name])
except Exception:
# print exception details
print(traceback.format_exc(), file=sys.stderr)
print(sys.exc_info()[2], file=sys.stderr)
# run the machine in a separate thread
t = threading.Thread(target=run, name="run")
t.daemon = True
t.start()
# wait for qmp socket to exist
def wait_vm_up(state_dir: Path) -> None:
socket_file = state_dir / "qga.sock"
timeout: float = 300
while True:
if timeout <= 0:
raise TimeoutError(
f"qga socket {socket_file} not found. Is the VM running?"
)
if socket_file.exists():
break
sleep(0.1)
timeout -= 0.1
# wait for vm to be down by checking if qga socket is down
def wait_vm_down(state_dir: Path) -> None:
socket_file = state_dir / "qga.sock"
timeout: float = 300
while socket_file.exists():
if timeout <= 0:
raise TimeoutError(
f"qga socket {socket_file} still exists. Is the VM down?"
)
sleep(0.1)
timeout -= 0.1
# wait for vm to be up then connect and return qmp instance
def qmp_connect(state_dir: Path) -> QEMUMonitorProtocol:
wait_vm_up(state_dir)
qmp = QEMUMonitorProtocol(
address=str(os.path.realpath(state_dir / "qmp.sock")),
)
qmp.connect()
return qmp
# wait for vm to be up then connect and return qga instance
def qga_connect(state_dir: Path) -> QgaSession:
wait_vm_up(state_dir)
return QgaSession(os.path.realpath(state_dir / "qga.sock"))
@pytest.mark.impure
def test_inspect(
test_flake_with_core: FlakeForTest, capsys: pytest.CaptureFixture
@@ -49,66 +115,184 @@ def test_run(
@pytest.mark.skipif(no_kvm, reason="Requires KVM")
@pytest.mark.impure
def test_vm_persistence(
def test_vm_qmp(
monkeypatch: pytest.MonkeyPatch,
temporary_home: Path,
age_keys: list["KeyPair"],
) -> None:
monkeypatch.setenv("SOPS_AGE_KEY", age_keys[0].privkey)
# set up a simple clan flake
flake = generate_flake(
temporary_home,
flake_template=CLAN_CORE / "templates" / "new-clan",
substitutions={
"__CHANGE_ME__": "_test_vm_persistence",
"git+https://git.clan.lol/clan/clan-core": "path://" + str(CLAN_CORE),
},
machine_configs=dict(
my_machine=dict(
clanCore=dict(state=dict(my_state=dict(folders=["/var/my-state"]))),
clan=dict(
virtualisation=dict(graphics=False),
networking=dict(targetHost="client"),
),
services=dict(getty=dict(autologinUser="root")),
)
),
)
# 'clan vms run' must be executed from within the flake
monkeypatch.chdir(flake.path)
# the state dir is a point of reference for qemu interactions as it links to the qga/qmp sockets
state_dir = vm_state_dir(str(flake.path), "my_machine")
# start the VM
run_vm_in_thread("my_machine")
# connect with qmp
qmp = qmp_connect(state_dir)
# verify that issuing a command works
# result = qmp.cmd_obj({"execute": "query-status"})
result = qmp.command("query-status")
assert result["status"] == "running", result
# shutdown machine (prevent zombie qemu processes)
qmp.command("system_powerdown")
@pytest.mark.skipif(no_kvm, reason="Requires KVM")
@pytest.mark.impure
def test_vm_persistence(
monkeypatch: pytest.MonkeyPatch,
temporary_home: Path,
) -> None:
# set up a clan flake with some systemd services to test persistence
flake = generate_flake(
temporary_home,
flake_template=CLAN_CORE / "templates" / "new-clan",
machine_configs=dict(
my_machine=dict(
services=dict(getty=dict(autologinUser="root")),
clanCore=dict(
state=dict(
my_state=dict(
folders=[
# to be owned by root
"/var/my-state",
# to be owned by user 'test'
"/var/user-state",
]
)
)
),
# create test user to test if state can be owned by user
users=dict(
users=dict(
test=dict(
password="test",
isNormalUser=True,
),
root=dict(password="root"),
)
),
# create a systemd service to create a file in the state folder
# and another to read it after reboot
systemd=dict(
services=dict(
poweroff=dict(
description="Poweroff the machine",
wantedBy=["multi-user.target"],
after=["my-state.service"],
script="""
echo "Powering off the machine"
poweroff
""",
),
my_state=dict(
create_state=dict(
description="Create a file in the state folder",
wantedBy=["multi-user.target"],
script="""
echo "Creating a file in the state folder"
echo "dream2nix" > /var/my-state/test
""",
serviceConfig=dict(Type="oneshot"),
if [ ! -f /var/my-state/root ]; then
echo "Creating a file in the state folder"
echo "dream2nix" > /var/my-state/root
# create /var/my-state/test owned by user test
echo "dream2nix" > /var/my-state/test
chown test /var/my-state/test
# make sure /var/user-state is owned by test
chown test /var/user-state
fi
""",
serviceConfig=dict(
Type="oneshot",
),
),
reboot=dict(
description="Reboot the machine",
wantedBy=["multi-user.target"],
after=["my-state.service"],
script="""
if [ ! -f /var/my-state/rebooting ]; then
echo "Rebooting the machine"
touch /var/my-state/rebooting
poweroff
else
touch /var/my-state/rebooted
fi
""",
),
read_after_reboot=dict(
description="Read a file in the state folder",
wantedBy=["multi-user.target"],
after=["reboot.service"],
# TODO: currently state folders itself cannot be owned by users
script="""
if ! cat /var/my-state/test; then
echo "cannot read from state file" > /var/my-state/error
# ensure root file is owned by root
elif [ "$(stat -c '%U' /var/my-state/root)" != "root" ]; then
echo "state file /var/my-state/root is not owned by user root" > /var/my-state/error
# ensure test file is owned by test
elif [ "$(stat -c '%U' /var/my-state/test)" != "test" ]; then
echo "state file /var/my-state/test is not owned by user test" > /var/my-state/error
# ensure /var/user-state is owned by test
elif [ "$(stat -c '%U' /var/user-state)" != "test" ]; then
echo "state folder /var/user-state is not owned by user test" > /var/my-state/error
fi
""",
serviceConfig=dict(
Type="oneshot",
),
),
)
),
clan=dict(virtualisation=dict(graphics=False)),
users=dict(users=dict(root=dict(password="root"))),
clan=dict(
virtualisation=dict(graphics=False),
networking=dict(targetHost="client"),
),
)
),
)
monkeypatch.chdir(flake.path)
cli = Cli()
cli.run(
[
"secrets",
"users",
"add",
"user1",
age_keys[0].pubkey,
]
# the state dir is a point of reference for qemu interactions as it links to the qga/qmp sockets
state_dir = vm_state_dir(str(flake.path), "my_machine")
run_vm_in_thread("my_machine")
# wait for the VM to start
wait_vm_up(state_dir)
# wait for socket to be down (systemd service 'poweroff' rebooting machine)
wait_vm_down(state_dir)
# start vm again
run_vm_in_thread("my_machine")
# connect second time
qga = qga_connect(state_dir)
# ensure that the file created by the service is still there and has the expected content
exitcode, out, err = qga.run("cat /var/my-state/test")
assert exitcode == 0, err
assert out == "dream2nix\n", out
# check for errors
exitcode, out, err = qga.run("cat /var/my-state/error")
assert exitcode == 1, out
# check all systemd services are OK, or print details
exitcode, out, err = qga.run(
"systemctl --failed | tee /tmp/yolo | grep -q '0 loaded units listed' || ( cat /tmp/yolo && false )"
)
cli.run(["vms", "run", "my_machine"])
test_file = (
vm_state_dir("_test_vm_persistence", str(flake.path), "my_machine")
/ "var"
/ "my-state"
/ "test"
)
assert test_file.exists()
assert test_file.read_text() == "dream2nix\n"
assert exitcode == 0, out
# use qmp to shutdown the machine (prevent zombie qemu processes)
qmp = qmp_connect(state_dir)
qmp.command("system_powerdown")


@@ -1,88 +1,39 @@
## Developing GTK3 Applications
## Developing GTK4 Applications
Here we document how to develop GTK3 application UIs in Python. First we set up
an example code base to look into, in this case gnome-music.
## Setup gnome-music as a code reference
gnome-music does not use Glade.
Clone gnome-music and check out the tag v40.0:
[gnome-music](https://github.com/GNOME/gnome-music/tree/40.0)
## Demos
Adw has a demo application showing all widgets. You can run it by executing:
```bash
git clone git@github.com:GNOME/gnome-music.git && cd gnome-music && git checkout 40.0
adwaita-1-demo
```
GTK4 has a demo application showing all widgets. You can run it by executing:
```bash
gtk4-widget-factory
```
Check out nixpkgs revision `468cb5980b56d348979488a74a9b5de638400160` for the correct gnome-music devshell, then execute:
To find available icons execute:
```bash
nix develop /home/username/Projects/nixpkgs#gnome.gnome-music
gtk4-icon-browser
```
Look into the file `gnome-music.in`, which bootstraps the application.
## Setup GnuCash as a reference
GnuCash uses Glade with a complex UI.
Set up GnuCash:
```bash
git clone git@github.com:Gnucash/gnucash.git
git checkout ed4921271c863c7f6e0c800e206b25ac6e9ba4da
cd nixpkgs
git checkout 015739d7bffa7da4e923978040a2f7cba6af3270
nix develop /home/username/Projects/nixpkgs#gnucash
mkdir build && cd build
cmake ..
cd ..
make
```
- They use the GTK Builder instead of templates.
## Look into virt-manager; it uses Python + spice-gtk
Look into `virtManager/details/viewers.py` to see how spice-gtk is being used:
```bash
git clone https://github.com/virt-manager/virt-manager
```
### Glade
Make sure to check the 'composite' box in Glade on the GtkApplicationWindow to be able to
import the Glade file through a GTK template.
## Links
- [Adw PyGobject Reference](http://lazka.github.io/pgi-docs/index.html#Adw-1)
- [GTK4 PyGobject Reference](http://lazka.github.io/pgi-docs/index.html#Gtk-4.0)
- [Adw Widget Gallery](https://gnome.pages.gitlab.gnome.org/libadwaita/doc/main/widget-gallery.html)
- [Python + GTK3 Tutorial](https://python-gtk-3-tutorial.readthedocs.io/en/latest/textview.html)
- Another python glade project [syncthing-gtk](https://github.com/kozec/syncthing-gtk)
- Other python glade project [linuxcnc](https://github.com/podarok/linuxcnc/tree/master)
- Install [Glade UI Toolbuilder](https://gitlab.gnome.org/GNOME/glade)
- To understand GTK3 Components look into the [Python GTK3 Tutorial](https://python-gtk-3-tutorial.readthedocs.io/en/latest/search.html?q=ApplicationWindow&check_keywords=yes&area=default)
- https://web.archive.org/web/20100706201447/http://www.pygtk.org/pygtk2reference/ (GTK2 Reference, many methods still exist in gtk3)
- Also look into [PyGObject](https://pygobject.readthedocs.io/en/latest/guide/gtk_template.html) to know more about threading and async etc.
- [GI Python API](https://lazka.github.io/pgi-docs/#Gtk-3.0)
- https://developer.gnome.org/documentation/tutorials/application.html
- [GTK3 Python](https://github.com/sam-m888/python-gtk3-tutorial/tree/master)
- https://gnome.pages.gitlab.gnome.org/libhandy/doc/1.8/index.html
- https://github.com/geigi/cozy
- https://github.com/lutris/lutris/blob/2e9bd115febe08694f5d42dabcf9da36a1065f1d/lutris/gui/widgets/cellrenderers.py#L92
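
To complement the references above, here is a minimal PyGObject + Adw application sketch (the application id and window title are placeholders):

```python
import sys

import gi

gi.require_version("Gtk", "4.0")
gi.require_version("Adw", "1")
from gi.repository import Adw, Gtk


class App(Adw.Application):
    def __init__(self) -> None:
        super().__init__(application_id="example.hello")  # placeholder app id
        self.connect("activate", self.on_activate)

    def on_activate(self, app: "App") -> None:
        # Build a window with a single label as its content.
        window = Adw.ApplicationWindow(application=app, title="Hello GTK4")
        window.set_content(Gtk.Label(label="Hello, Adw!"))
        window.present()


App().run(sys.argv)
```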
## Debugging Style and Layout
You can append the `--debug` flag to enable debug logging printed to the console.
```bash
# Enable the debugger
# Enable the GTK debugger
gsettings set org.gtk.Settings.Debug enable-inspector-keybinding true
# Start the application with the debugger attached
GTK_DEBUG=interactive ./bin/clan-vm-manager
GTK_DEBUG=interactive ./bin/clan-vm-manager --debug
```


@@ -9,6 +9,12 @@
{
"path": "../clan-cli/tests"
},
{
"path": "../../nixosModules"
},
{
"path": "../../lib/build-clan"
}
],
"settings": {
"python.linting.mypyEnabled": true,


@@ -1,49 +1,13 @@
import argparse
from clan_cli.clan_uri import ClanURI
from clan_vm_manager.models.interfaces import ClanConfig
import logging
import sys
from .app import MainApplication
log = logging.getLogger(__name__)
# TODO: Trayicon support
# https://github.com/nicotine-plus/nicotine-plus/blob/b08552584eb6f35782ad77da93ae4aae3362bf64/pynicotine/gtkgui/widgets/trayicon.py#L982
def main() -> None:
parser = argparse.ArgumentParser(description="clan-vm-manager")
# Add join subcommand
subparser = parser.add_subparsers(
title="command",
description="command to execute",
help="the command to execute",
)
register_join_parser(subparser.add_parser("join", help="join a clan"))
register_overview_parser(subparser.add_parser("overview", help="overview screen"))
# Executed when no command is given
parser.set_defaults(func=show_overview)
args = parser.parse_args()
args.func(args)
def show_join(args: argparse.Namespace) -> None:
app = MainApplication(
config=ClanConfig(url=args.clan_uri, initial_view="list"),
)
return app.run()
def register_join_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument("clan_uri", type=ClanURI, help="clan URI to join")
parser.set_defaults(func=show_join)
def show_overview(args: argparse.Namespace) -> None:
app = MainApplication(
config=ClanConfig(url=None, initial_view="list"),
)
return app.run()
def register_overview_parser(parser: argparse.ArgumentParser) -> None:
parser.set_defaults(func=show_overview)
app = MainApplication()
return app.run(sys.argv)


@@ -1,46 +1,111 @@
#!/usr/bin/env python3
from pathlib import Path
import logging
from typing import Any, ClassVar
import gi
from clan_vm_manager.models.use_join import Join
from clan_vm_manager import assets
gi.require_version("Gtk", "4.0")
gi.require_version("Adw", "1")
from clan_cli.custom_logger import setup_logging
from gi.repository import Adw, Gdk, Gio, Gtk
from clan_vm_manager.models.interfaces import ClanConfig
from clan_vm_manager.models.use_vms import VMS
from clan_vm_manager.models.use_join import GLib, GObject
from clan_vm_manager.models.use_vms import VMs
from .constants import constants
from .trayicon import TrayIcon
from .windows.main_window import MainWindow
log = logging.getLogger(__name__)
class MainApplication(Adw.Application):
def __init__(self, config: ClanConfig) -> None:
super().__init__(
application_id=constants["APPID"], flags=Gio.ApplicationFlags.FLAGS_NONE
)
self.config = config
self.connect("shutdown", self.on_shutdown)
__gsignals__: ClassVar = {
"join_request": (GObject.SignalFlags.RUN_FIRST, None, [str]),
}
if config.url:
Join.use().push(config.url)
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(
*args,
application_id="lol.clan.vm.manager",
flags=Gio.ApplicationFlags.HANDLES_COMMAND_LINE,
**kwargs,
)
self.tray_icon: TrayIcon | None = None
self.add_main_option(
"debug",
ord("d"),
GLib.OptionFlags.NONE,
GLib.OptionArg.NONE,
"enable debug mode",
None,
)
self.vms = VMs.use()
log.debug(f"VMS object: {self.vms}")
self.window: Adw.ApplicationWindow | None = None
self.connect("shutdown", self.on_shutdown)
self.connect("activate", self.show_window)
def do_command_line(self, command_line: Any) -> int:
options = command_line.get_options_dict()
# convert GVariantDict -> GVariant -> dict
options = options.end().unpack()
if "debug" in options:
setup_logging("DEBUG", root_log_name=__name__.split(".")[0])
setup_logging("DEBUG", root_log_name="clan_cli")
else:
setup_logging("INFO", root_log_name=__name__.split(".")[0])
log.debug("Debug logging enabled")
args = command_line.get_arguments()
self.activate()
if len(args) > 1:
log.debug(f"Join request: {args[1]}")
uri = args[1]
self.emit("join_request", uri)
return 0
def on_shutdown(self, app: Gtk.Application) -> None:
print("Shutting down")
VMS.use().kill_all()
log.debug("Shutting down")
self.vms.kill_all()
if self.tray_icon is not None:
self.tray_icon.destroy()
def on_window_hide_unhide(self, *_args: Any) -> None:
assert self.window is not None
if self.window.is_visible():
self.window.hide()
else:
self.window.present()
def dummy_menu_entry(self) -> None:
log.info("Dummy menu entry called")
def do_activate(self) -> None:
self.init_style()
window = MainWindow(config=self.config)
window.set_application(self)
window.present()
self.show_window()
def show_window(self, app: Any = None) -> None:
if not self.window:
self.init_style()
self.window = MainWindow(config=ClanConfig(initial_view="list"))
self.window.set_application(self)
self.tray_icon = TrayIcon(self)
self.window.present()
# TODO: For css styling
def init_style(self) -> None:
resource_path = Path(__file__).parent / "style.css"
resource_path = assets.loc / "style.css"
log.debug(f"Style css path: {resource_path}")
css_provider = Gtk.CssProvider()
css_provider.load_from_path(str(resource_path))
Gtk.StyleContext.add_provider_for_display(

(Four binary image files added; contents not shown: three images of roughly 106-108 KiB and one 3.1 KiB icon.)

@@ -12,10 +12,34 @@ avatar {
}
.trust {
padding-top: 25px;
padding-bottom: 25px;
padding-top: 25px;
padding-bottom: 25px;
}
.join-list {
margin-left: 2px;
margin-right: 2px;
}
.progress-bar {
margin-right: 25px;
min-width: 200px;
}
.group-list {
background-color: inherit;
}
.group-list > .activatable:hover {
background-color: unset;
}
.group-list > row {
margin-top: 12px;
border-bottom: unset;
}
.vm-list {
margin-top: 25px;
margin-bottom: 25px;
@@ -32,8 +56,3 @@ avatar {
searchbar {
margin-bottom: 25px;
}
/* TODO: Disable shadow for empty lists */
/* list:empty {
box-shadow: none;
} */


@@ -1,7 +0,0 @@
constants = {
"APPID": "clan.lol",
"APPVERSION": "2.0.0-beta",
"APPNAME": "clan-vm-manager",
"SYSNAME": "clan-manager",
"RESOURCEID": "/git.clan.lol/clan/clan-core",
}


@@ -1,5 +1,6 @@
#!/usr/bin/env python3
import logging
from typing import Literal
import gi
@@ -11,10 +12,12 @@ from gi.repository import Adw
Severity = Literal["Error"] | Literal["Warning"] | Literal["Info"] | str
log = logging.getLogger(__name__)
def show_error_dialog(error: ClanError, severity: Severity | None = "Error") -> None:
message = str(error)
dialog = Adw.MessageDialog(parent=None, heading=severity, body=message)
print("error:", message)
log.error(message)
dialog.add_response("ok", "ok")
dialog.choose()


@@ -1,3 +1,4 @@
import logging
import os
import signal
import sys
@@ -6,7 +7,6 @@ from pathlib import Path
from typing import Any
import gi
from clan_cli.errors import ClanError
gi.require_version("GdkPixbuf", "2.0")
@@ -14,6 +14,8 @@ import dataclasses
import multiprocessing as mp
from collections.abc import Callable
log = logging.getLogger(__name__)
# Kill the new process and all its children by sending a SIGTERM signal to the process group
def _kill_group(proc: mp.Process) -> None:
@@ -21,7 +23,7 @@ def _kill_group(proc: mp.Process) -> None:
if proc.is_alive() and pid:
os.killpg(pid, signal.SIGTERM)
else:
print(f"Process {proc.name} with pid {pid} is already dead", file=sys.stderr)
log.warning(f"Process '{proc.name}' with pid '{pid}' is already dead")
@dataclasses.dataclass(frozen=True)
@@ -99,7 +101,7 @@ def _init_proc(
def spawn(
*,
log_dir: Path,
out_file: Path,
on_except: Callable[[Exception, mp.process.BaseProcess], None] | None,
func: Callable,
**kwargs: Any,
@@ -108,13 +110,8 @@ def spawn(
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method(method="forkserver")
if not log_dir.is_dir():
raise ClanError(f"Log path {log_dir} is not a directory")
log_dir.mkdir(parents=True, exist_ok=True)
# Set names
proc_name = f"MPExec:{func.__name__}"
out_file = log_dir / "out.log"
# Start the process
proc = mp.Process(
@@ -127,7 +124,7 @@ def spawn(
# Print some information
cmd = f"tail -f {out_file}"
print(f"Connect to stdout with: {cmd}")
log.info(f"Connect to stdout with: {cmd}")
# Return the process
mp_proc = MPProcess(name=proc_name, proc=proc, out_file=out_file)


@@ -1,8 +1,6 @@
from dataclasses import dataclass
from enum import StrEnum
import gi
from clan_cli.clan_uri import ClanURI
gi.require_version("Gtk", "4.0")
@@ -10,9 +8,3 @@ gi.require_version("Gtk", "4.0")
@dataclass
class ClanConfig:
initial_view: str
url: ClanURI | None
class VMStatus(StrEnum):
RUNNING = "Running"
STOPPED = "Stopped"


@@ -1,27 +1,45 @@
import logging
import threading
from collections.abc import Callable
from typing import Any
from typing import Any, ClassVar
import gi
from clan_cli import ClanError
from clan_cli.clan_uri import ClanURI
from clan_cli.history.add import HistoryEntry, add_history
from clan_cli.history.add import add_history
from clan_vm_manager.errors.show_error import show_error_dialog
from clan_vm_manager.models.use_vms import Clans
gi.require_version("Gtk", "4.0")
gi.require_version("Adw", "1")
from gi.repository import Gio, GObject
from gi.repository import Gio, GLib, GObject
log = logging.getLogger(__name__)
class JoinValue(GObject.Object):
# TODO: custom signals for async join
# __gsignals__: ClassVar = {}
__gsignals__: ClassVar = {
"join_finished": (GObject.SignalFlags.RUN_FIRST, None, [GObject.Object]),
}
url: ClanURI
def __init__(self, url: ClanURI) -> None:
def __init__(
self, url: ClanURI, on_join: Callable[["JoinValue", Any], None]
) -> None:
super().__init__()
self.url = url
self.connect("join_finished", on_join)
def __join(self) -> None:
add_history(self.url, all_machines=False)
GLib.idle_add(lambda: self.emit("join_finished", self))
def join(self) -> None:
threading.Thread(target=self.__join).start()
class Join:
@@ -45,26 +63,31 @@ class Join:
return cls._instance
def push(self, url: ClanURI) -> None:
def push(self, url: ClanURI, on_join: Callable[[JoinValue], None]) -> None:
"""
Add a join request.
This method can be called repeatedly to queue multiple join requests.
"""
self.list_store.append(JoinValue(url))
def join(self, item: JoinValue, cb: Callable[[list[HistoryEntry]], None]) -> None:
# TODO: remove the item that was accepted join from this list
# and call the success function. (The caller is responsible for handling the success)
try:
print(f"trying to join: {item.url}")
if url.get_id() in [item.url.get_id() for item in self.list_store]:
log.info(f"Join request already exists: {url}")
return
history = add_history(item.url)
cb(history)
def after_join(item: JoinValue, _: Any) -> None:
self.discard(item)
Clans.use().refresh()
# VMS.use().refresh()
print("Refreshed list after join")
on_join(item)
self.list_store.append(JoinValue(url, after_join))
def join(self, item: JoinValue) -> None:
try:
log.info(f"trying to join: {item.url}")
item.join()
except ClanError as e:
show_error_dialog(e)
pass
def discard(self, item: JoinValue) -> None:
(has, idx) = self.list_store.find(item)


@@ -1,17 +1,19 @@
import os
import tempfile
import weakref
from datetime import datetime
from pathlib import Path
from typing import Any, ClassVar
from typing import IO, Any, ClassVar
import gi
from clan_cli import vms
from clan_cli.clan_uri import ClanScheme, ClanURI
from clan_cli.errors import ClanError
from clan_cli.history.add import HistoryEntry
from clan_cli.history.list import list_history
from clan_vm_manager import assets
from clan_vm_manager.errors.show_error import show_error_dialog
from clan_vm_manager.models.interfaces import VMStatus
from .executor import MPProcess, spawn
@@ -21,75 +23,212 @@ import multiprocessing as mp
import threading
from clan_cli.machines.machines import Machine
from gi.repository import Gio, GLib, GObject
from gi.repository import Gio, GLib, GObject, Gtk
log = logging.getLogger(__name__)
class ClanGroup(GObject.Object):
def __init__(self, url: str | Path, vms: list["VM"]) -> None:
super().__init__()
self.url = url
self.vms = vms
self.clan_name = vms[0].data.flake.clan_name
self.list_store = Gio.ListStore.new(VM)
for vm in vms:
self.list_store.append(vm)
def init_grp_store(list_store: Gio.ListStore) -> None:
groups: dict[str | Path, list["VM"]] = {}
for vm in get_saved_vms():
ll = groups.get(vm.data.flake.flake_url, [])
ll.append(vm)
groups[vm.data.flake.flake_url] = ll
for url, vm_list in groups.items():
grp = ClanGroup(url, vm_list)
list_store.append(grp)
class Clans:
list_store: Gio.ListStore
_instance: "None | ClanGroup" = None
# Make sure the VMS class is used as a singleton
def __init__(self) -> None:
raise RuntimeError("Call use() instead")
@classmethod
def use(cls: Any) -> "ClanGroup":
if cls._instance is None:
cls._instance = cls.__new__(cls)
cls.list_store = Gio.ListStore.new(ClanGroup)
init_grp_store(cls.list_store)
return cls._instance
def filter_by_name(self, text: str) -> None:
if text:
filtered_list = self.list_store
filtered_list.remove_all()
groups: dict[str | Path, list["VM"]] = {}
for vm in get_saved_vms():
ll = groups.get(vm.data.flake.flake_url, [])
print(text, vm.data.flake.vm.machine_name)
if text.lower() in vm.data.flake.vm.machine_name.lower():
ll.append(vm)
groups[vm.data.flake.flake_url] = ll
for url, vm_list in groups.items():
grp = ClanGroup(url, vm_list)
filtered_list.append(grp)
else:
self.refresh()
def refresh(self) -> None:
self.list_store.remove_all()
init_grp_store(self.list_store)
class VM(GObject.Object):
# Custom signals: "vm_status_changed" (carries the VM object) and "build_vm" (VM object plus a building flag)
__gsignals__: ClassVar = {
"vm_status_changed": (GObject.SignalFlags.RUN_FIRST, None, [GObject.Object]),
"build_vm": (GObject.SignalFlags.RUN_FIRST, None, [GObject.Object, bool]),
}
def __init__(
self,
icon: Path,
status: VMStatus,
data: HistoryEntry,
) -> None:
super().__init__()
self.KILL_TIMEOUT = 6 # seconds
self.data = data
self.process = MPProcess("dummy", mp.Process(), Path("./dummy"))
self._watcher_id: int = 0
self.status = status
self._last_liveness: bool = False
self._stop_watcher_id: int = 0
self._stop_timer_init: datetime | None = None
self._logs_id: int = 0
self._log_file: IO[str] | None = None
self.progress_bar: Gtk.ProgressBar = Gtk.ProgressBar()
self.progress_bar.hide()
self.progress_bar.set_hexpand(True) # Horizontally expand
self.prog_bar_id: int = 0
self.log_dir = tempfile.TemporaryDirectory(
prefix="clan_vm-", suffix=f"-{self.data.flake.flake_attr}"
)
self._finalizer = weakref.finalize(self, self.stop)
self._finalizer = weakref.finalize(self, self.kill)
self.connect("build_vm", self.build_vm)
uri = ClanURI.from_str(
url=self.data.flake.flake_url, flake_attr=self.data.flake.flake_attr
)
match uri.scheme:
case ClanScheme.LOCAL.value(path):
self.machine = Machine(
name=self.data.flake.flake_attr,
flake=path, # type: ignore
)
case ClanScheme.REMOTE.value(url):
self.machine = Machine(
name=self.data.flake.flake_attr,
flake=url, # type: ignore
)
def _pulse_progress_bar(self) -> bool:
self.progress_bar.pulse()
return GLib.SOURCE_CONTINUE
def build_vm(self, vm: "VM", _vm: "VM", building: bool) -> None:
if building:
log.info("Building VM")
self.progress_bar.show()
self.prog_bar_id = GLib.timeout_add(100, self._pulse_progress_bar)
if self.prog_bar_id == 0:
raise ClanError("Couldn't spawn a progess bar task")
else:
self.progress_bar.hide()
if not GLib.Source.remove(self.prog_bar_id):
log.error("Failed to remove progress bar task")
log.info("VM built")
def __start(self) -> None:
if self.is_running():
log.warning("VM is already running")
return
machine = Machine(
name=self.data.flake.flake_attr,
flake=Path(self.data.flake.flake_url),
)
vm = vms.run.inspect_vm(machine)
log.info(f"Starting VM {self.get_id()}")
vm = vms.run.inspect_vm(self.machine)
# GLib.idle_add(self.emit, "build_vm", self, True)
# self.process = spawn(
# on_except=None,
# log_dir=Path(str(self.log_dir.name)),
# func=vms.run.build_vm,
# machine=self.machine,
# vm=vm,
# )
# self.process.proc.join()
# GLib.idle_add(self.emit, "build_vm", self, False)
# if self.process.proc.exitcode != 0:
# log.error(f"Failed to build VM {self.get_id()}")
# return
self.process = spawn(
on_except=None,
log_dir=Path(str(self.log_dir.name)),
out_file=Path(str(self.log_dir.name)) / "vm.log",
func=vms.run.run_vm,
vm=vm,
)
log.debug(f"Started VM {self.get_id()}")
GLib.idle_add(self.emit, "vm_status_changed", self)
log.debug(f"Starting logs watcher on file: {self.process.out_file}")
self._logs_id = GLib.timeout_add(50, self._get_logs_task)
if self._logs_id == 0:
raise ClanError("Failed to add logs watcher")
log.debug(f"Starting VM watcher for: {self.machine.name}")
self._watcher_id = GLib.timeout_add(50, self._vm_watcher_task)
if self._watcher_id == 0:
raise ClanError("Failed to add watcher")
def start(self) -> None:
if self.is_running():
log.warning("VM is already running")
return
threading.Thread(target=self.__start).start()
if self._watcher_id == 0:
# Every 50ms check if the VM is still running
self._watcher_id = GLib.timeout_add(50, self._vm_watcher_task)
if self._watcher_id == 0:
log.error("Failed to add watcher")
raise ClanError("Failed to add watcher")
def _vm_watcher_task(self) -> bool:
if self.is_running() != self._last_liveness:
if not self.is_running():
self.emit("vm_status_changed", self)
prev_liveness = self._last_liveness
self._last_liveness = self.is_running()
log.debug("Removing VM watcher")
return GLib.SOURCE_REMOVE
# If the VM was running and now it is not, remove the watcher
if prev_liveness and not self.is_running():
return GLib.SOURCE_CONTINUE
def _get_logs_task(self) -> bool:
if not self.process.out_file.exists():
return GLib.SOURCE_CONTINUE
if not self._log_file:
try:
self._log_file = open(self.process.out_file)
except Exception as ex:
log.exception(ex)
self._log_file = None
return GLib.SOURCE_REMOVE
line = os.read(self._log_file.fileno(), 4096)
if len(line) != 0:
print(line.decode("utf-8"), end="", flush=True)
if not self.is_running():
log.debug("Removing logs watcher")
self._log_file = None
return GLib.SOURCE_REMOVE
return GLib.SOURCE_CONTINUE
def is_running(self) -> bool:
@@ -98,43 +237,62 @@ class VM(GObject.Object):
def get_id(self) -> str:
return f"{self.data.flake.flake_url}#{self.data.flake.flake_attr}"
def stop(self) -> None:
log.info("Stopping VM")
if not self.is_running():
log.error("VM already stopped")
return
def __shutdown_watchdog(self) -> None:
if self.is_running():
assert self._stop_timer_init is not None
diff = datetime.now() - self._stop_timer_init
if diff.seconds > self.KILL_TIMEOUT:
log.error(f"VM {self.get_id()} has not stopped. Killing it")
self.process.kill_group()
return GLib.SOURCE_CONTINUE
else:
log.info(f"VM {self.get_id()} has stopped")
return GLib.SOURCE_REMOVE
def __stop(self) -> None:
log.info(f"Stopping VM {self.get_id()}")
try:
with self.machine.vm.qmp_ctx() as qmp:
qmp.command("system_powerdown")
except ClanError as e:
log.debug(e)
self._stop_timer_init = datetime.now()
self._stop_watcher_id = GLib.timeout_add(100, self.__shutdown_watchdog)
if self._stop_watcher_id == 0:
raise ClanError("Failed to add stop watcher")
def shutdown(self) -> None:
if not self.is_running():
return
log.info(f"Stopping VM {self.get_id()}")
threading.Thread(target=self.__stop).start()
def kill(self) -> None:
if not self.is_running():
log.warning(f"Tried to kill VM {self.get_id()} is not running")
return
log.info(f"Killing VM {self.get_id()} now")
self.process.kill_group()
def read_log(self) -> str:
def read_whole_log(self) -> str:
if not self.process.out_file.exists():
log.error(f"Log file {self.process.out_file} does not exist")
return ""
return self.process.out_file.read_text()
class VMS:
"""
This is a singleton.
It is initialized with the first call of use()
Usage:
VMS.use().get_running_vms()
VMS.use() can also be called before the data is needed. e.g. to eliminate/reduce waiting time.
"""
class VMs:
list_store: Gio.ListStore
_instance: "None | VMS" = None
_instance: "None | VMs" = None
# Make sure the VMS class is used as a singleton
def __init__(self) -> None:
raise RuntimeError("Call use() instead")
@classmethod
def use(cls: Any) -> "VMS":
def use(cls: Any) -> "VMs":
if cls._instance is None:
cls._instance = cls.__new__(cls)
cls.list_store = Gio.ListStore.new(VM)
@@ -148,19 +306,29 @@ class VMS:
filtered_list = self.list_store
filtered_list.remove_all()
for vm in get_saved_vms():
if text.lower() in vm.data.flake.clan_name.lower():
if text.lower() in vm.data.flake.vm.machine_name.lower():
filtered_list.append(vm)
else:
self.refresh()
def get_by_id(self, ident: str) -> None | VM:
for vm in self.list_store:
if ident == vm.get_id():
return vm
return None
def get_running_vms(self) -> list[VM]:
return list(filter(lambda vm: vm.is_running(), self.list_store))
def kill_all(self) -> None:
log.debug(f"Running vms: {self.get_running_vms()}")
for vm in self.get_running_vms():
vm.stop()
vm.kill()
def refresh(self) -> None:
log.error("NEVER FUCKING DO THIS")
return
self.list_store.remove_all()
for vm in get_saved_vms():
self.list_store.append(vm)
@@ -168,7 +336,7 @@ class VMS:
def get_saved_vms() -> list[VM]:
vm_list = []
log.info("=====CREATING NEW VM OBJ====")
try:
# Execute `clan flakes add <path>` to democlan for this to work
for entry in list_history():
@@ -179,7 +347,6 @@ def get_saved_vms() -> list[VM]:
base = VM(
icon=Path(icon),
status=VMStatus.STOPPED,
data=entry,
)
vm_list.append(base)

File diff suppressed because it is too large.


@@ -1,16 +1,22 @@
import logging
from collections.abc import Callable
from functools import partial
from typing import Any
import gi
from clan_cli.history.add import HistoryEntry
from clan_cli import ClanError, history, machines
from clan_cli.clan_uri import ClanURI
from clan_vm_manager.models.interfaces import ClanConfig
from clan_vm_manager.models.use_join import Join, JoinValue
from clan_vm_manager.models.use_views import Views
gi.require_version("Adw", "1")
from gi.repository import Adw, Gdk, Gio, GObject, Gtk
from gi.repository import Adw, Gdk, Gio, GLib, GObject, Gtk
from clan_vm_manager.models.use_vms import VM, VMS
from clan_vm_manager.models.use_vms import VM, ClanGroup, Clans
log = logging.getLogger(__name__)
def create_boxed_list(
@@ -39,20 +45,26 @@ class ClanList(Gtk.Box):
# ------------------------#
"""
def __init__(self) -> None:
def __init__(self, config: ClanConfig) -> None:
super().__init__(orientation=Gtk.Orientation.VERTICAL)
vms = VMS.use()
self.app = Gio.Application.get_default()
self.app.connect("join_request", self.on_join_request)
groups = Clans.use()
join = Join.use()
self.log_label: Gtk.Label = Gtk.Label()
self.__init_machines = history.add.list_history()
self.join_boxed_list = create_boxed_list(
model=join.list_store, render_row=self.render_join_row
)
self.join_boxed_list.add_css_class("join-list")
self.vm_boxed_list = create_boxed_list(
model=vms.list_store, render_row=self.render_vm_row
self.group_list = create_boxed_list(
model=groups.list_store, render_row=self.render_group_row
)
self.vm_boxed_list.add_css_class("vm-list")
self.group_list.add_css_class("group-list")
search_bar = Gtk.SearchBar()
# This widget will typically be the top-level window
@@ -65,65 +77,158 @@ class ClanList(Gtk.Box):
self.append(search_bar)
self.append(self.join_boxed_list)
self.append(self.vm_boxed_list)
self.append(self.group_list)
def render_group_row(self, boxed_list: Gtk.ListBox, group: ClanGroup) -> Gtk.Widget:
# if boxed_list.has_css_class("no-shadow"):
# boxed_list.remove_css_class("no-shadow")
grp = Adw.PreferencesGroup()
grp.set_title(group.clan_name)
grp.set_description(group.url)
add_action = Gio.SimpleAction.new("add", GLib.VariantType.new("s"))
add_action.connect("activate", self.on_add)
app = Gio.Application.get_default()
app.add_action(add_action)
menu_model = Gio.Menu()
for vm in machines.list.list_machines(flake_url=group.url):
if vm not in [item.data.flake.flake_attr for item in group.list_store]:
menu_model.append(vm, f"app.add::{vm}")
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
box.set_valign(Gtk.Align.CENTER)
add_button = Gtk.MenuButton()
add_button.set_has_frame(False)
add_button.set_menu_model(menu_model)
add_button.set_label("Add machine")
box.append(add_button)
grp.set_header_suffix(box)
vm_list = create_boxed_list(
model=group.list_store, render_row=self.render_vm_row
)
grp.add(vm_list)
return grp
def on_add(self, action: Any, parameter: Any) -> None:
target = parameter.get_string()
print("Adding new machine", target)
def on_search_changed(self, entry: Gtk.SearchEntry) -> None:
VMS.use().filter_by_name(entry.get_text())
Clans.use().filter_by_name(entry.get_text())
# Disable the shadow if the list is empty
if not VMS.use().list_store.get_n_items():
self.vm_boxed_list.add_css_class("no-shadow")
if not self.app.vms.list_store.get_n_items():
self.group_list.add_css_class("no-shadow")
def render_vm_row(self, boxed_list: Gtk.ListBox, vm: VM) -> Gtk.Widget:
# Remove no-shadow class if attached
if boxed_list.has_css_class("no-shadow"):
boxed_list.remove_css_class("no-shadow")
flake = vm.data.flake
row = Adw.ActionRow()
-# Title
-row.set_title(flake.clan_name)
-row.set_title_lines(1)
-row.set_title_selectable(True)
-# Subtitle
-row.set_subtitle(vm.get_id())
-row.set_subtitle_lines(1)
-# # Avatar
+# ====== Display Avatar ======
avatar = Adw.Avatar()
-avatar.set_custom_image(Gdk.Texture.new_from_filename(flake.icon))
-avatar.set_text(flake.clan_name + " " + flake.flake_attr)
+machine_icon = flake.vm.machine_icon
+if machine_icon:
+avatar.set_custom_image(Gdk.Texture.new_from_filename(str(machine_icon)))
+elif flake.icon:
+avatar.set_custom_image(Gdk.Texture.new_from_filename(str(flake.icon)))
+else:
+avatar.set_text(flake.clan_name + " " + flake.flake_attr)
avatar.set_show_initials(True)
avatar.set_size(50)
row.add_prefix(avatar)
-# Switch
-switch = Gtk.Switch()
-box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
+# ====== Display Name And Url =====
+row.set_title(flake.flake_attr)
+row.set_title_lines(1)
+row.set_title_selectable(True)
+if flake.vm.machine_description:
+row.set_subtitle(flake.vm.machine_description)
+else:
+row.set_subtitle(flake.clan_name)
+row.set_subtitle_lines(1)
+# ==== Display build progress bar ====
+box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
+box.set_valign(Gtk.Align.CENTER)
-box.append(switch)
+box.append(vm.progress_bar)
+box.set_homogeneous(False)
+row.add_suffix(box) # This allows children to have different sizes
+# ==== Action buttons ====
+switch = Gtk.Switch()
+switch_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
+switch_box.set_valign(Gtk.Align.CENTER)
+switch_box.append(switch)
+box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
+box.set_valign(Gtk.Align.CENTER)
+open_action = Gio.SimpleAction.new("edit", GLib.VariantType.new("s"))
+open_action.connect("activate", self.on_edit)
+app = Gio.Application.get_default()
+app.add_action(open_action)
+menu_model = Gio.Menu()
+menu_model.append("Edit", f"app.edit::{vm.get_id()}")
+pref_button = Gtk.MenuButton()
+pref_button.set_icon_name("open-menu-symbolic")
+pref_button.set_menu_model(menu_model)
+box.append(switch_box)
+box.append(pref_button)
switch.connect("notify::active", partial(self.on_row_toggle, vm))
vm.connect("vm_status_changed", partial(self.vm_status_changed, switch))
# suffix.append(box)
row.add_suffix(box)
return row
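Both menus in this view lean on GTK's detailed action name syntax: in "app.edit::<id>" (and "app.add::<vm>" in render_group_row above) everything after "::" is delivered to the handler as a string target, which is why the actions are created with GLib.VariantType.new("s"). A condensed sketch of the pattern, with "machine1" as a hypothetical target and an illustrative application id:

from gi.repository import Gio, GLib

app = Gio.Application(application_id="org.example.Sketch")

# The "s" variant type means the part after "::" arrives as a string parameter.
edit = Gio.SimpleAction.new("edit", GLib.VariantType.new("s"))
edit.connect("activate", lambda _action, param: print("edit", param.get_string()))
app.add_action(edit)

# Activating this menu item invokes the handler above with "machine1".
menu = Gio.Menu()
menu.append("Edit", "app.edit::machine1")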
+def on_edit(self, action: Any, parameter: Any) -> None:
+target = parameter.get_string()
+vm = self.app.vms.get_by_id(target)
+if not vm:
+raise ClanError("Something went wrong. Please restart the app.")
+print("Editing settings for machine", vm)
def render_join_row(self, boxed_list: Gtk.ListBox, item: JoinValue) -> Gtk.Widget:
if boxed_list.has_css_class("no-shadow"):
boxed_list.remove_css_class("no-shadow")
row = Adw.ActionRow()
-row.set_title(item.url.get_internal())
+row.set_title(item.url.params.flake_attr)
+row.set_subtitle(item.url.get_internal())
row.add_css_class("trust")
-# TODO: figure out how to detect that
-if True:
-row.set_subtitle("Clan already exists. Joining again will update it")
+exist = self.app.vms.use().get_by_id(item.url.get_id())
+if exist:
+sub = row.get_subtitle()
+row.set_subtitle(
+sub + "\nClan already exists. Joining again will update it"
+)
avatar = Adw.Avatar()
-avatar.set_text(str(item.url))
+avatar.set_text(str(item.url.params.flake_attr))
avatar.set_show_initials(True)
avatar.set_size(50)
row.add_prefix(avatar)
@@ -131,6 +236,7 @@ class ClanList(Gtk.Box):
cancel_button = Gtk.Button(label="Cancel")
cancel_button.add_css_class("error")
cancel_button.connect("clicked", partial(self.on_discard_clicked, item))
+self.cancel_button = cancel_button
trust_button = Gtk.Button(label="Join")
trust_button.add_css_class("success")
@@ -148,27 +254,31 @@ class ClanList(Gtk.Box):
def show_error_dialog(self, error: str) -> None:
p = Views.use().main_window
# app = Gio.Application.get_default()
# p = Gtk.Application.get_active_window(app)
dialog = Adw.MessageDialog(heading="Error")
dialog.add_response("ok", "ok")
dialog.set_body(error)
dialog.set_transient_for(p) # set the parent window of the dialog
dialog.choose()
+def on_join_request(self, widget: Any, url: str) -> None:
+log.debug("Join request: %s", url)
+clan_uri = ClanURI.from_str(url)
+Join.use().push(clan_uri, self.after_join)
+def after_join(self, item: JoinValue) -> None:
+# If the join request list is empty disable the shadow artefact
+if not Join.use().list_store.get_n_items():
+self.join_boxed_list.add_css_class("no-shadow")
+print("after join in list")
def on_trust_clicked(self, item: JoinValue, widget: Gtk.Widget) -> None:
+def on_join(_history: list[HistoryEntry]) -> None:
+VMS.use().refresh()
widget.set_sensitive(False)
+self.cancel_button.set_sensitive(False)
# TODO(@hsjobeki): Confirm and edit details
# Views.use().view.set_visible_child_name("details")
+Join.use().join(item, cb=on_join)
-# If the join request list is empty disable the shadow artefact
-if not Join.use().list_store.get_n_items():
-self.join_boxed_list.add_css_class("no-shadow")
-Join.use().join(item)
def on_discard_clicked(self, item: JoinValue, widget: Gtk.Widget) -> None:
Join.use().discard(item)
@@ -176,21 +286,17 @@ class ClanList(Gtk.Box):
self.join_boxed_list.add_css_class("no-shadow")
def on_row_toggle(self, vm: VM, row: Adw.SwitchRow, state: bool) -> None:
print("Toggled", vm.data.flake.flake_attr, "active:", row.get_active())
if row.get_active():
row.set_state(False)
vm.start()
if not row.get_active():
row.set_state(True)
-vm.stop()
+vm.shutdown()
def vm_status_changed(self, switch: Gtk.Switch, vm: VM, _vm: VM) -> None:
switch.set_active(vm.is_running())
+switch.set_state(vm.is_running())
-if not vm.is_running() and vm.process.proc.exitcode != 0:
-print("VM exited with error. Exitcode:", vm.process.proc.exitcode)
-# print(vm.read_log())
-# self.show_error_dialog(vm.read_log())
+exitc = vm.process.proc.exitcode
+if not vm.is_running() and exitc != 0:
+log.error(f"VM exited with error. Exitcode: {exitc}")

View File

@@ -7,7 +7,7 @@ from clan_vm_manager.views.list import ClanList
gi.require_version("Adw", "1")
-from gi.repository import Adw
+from gi.repository import Adw, Gtk
class MainWindow(Adw.ApplicationWindow):
@@ -26,7 +26,12 @@ class MainWindow(Adw.ApplicationWindow):
stack_view = Views.use().view
Views.use().set_main_window(self)
stack_view.add_named(ClanList(), "list")
scroll = Gtk.ScrolledWindow()
scroll.set_propagate_natural_height(True)
scroll.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
scroll.set_child(ClanList(config))
stack_view.add_named(scroll, "list")
stack_view.add_named(Details(), "details")
stack_view.set_visible_child_name(config.initial_view)

View File

@@ -57,9 +57,10 @@ python3.pkgs.buildPythonApplication {
'';
desktopItems = [
(makeDesktopItem {
name = "clan-vm-manager";
exec = "clan-vm-manager join %u";
desktopName = "CLan VM Manager";
name = "lol.clan.vm.manager";
exec = "clan-vm-manager %u";
icon = ./clan_vm_manager/assets/clan_white.png;
desktopName = "cLAN Manager";
startupWMClass = "clan";
mimeTypes = [ "x-scheme-handler/clan" ];
})
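With this rename the generated entry is installed as lol.clan.vm.manager.desktop (makeDesktopItem names the file after the name attribute, matching DESKTOP_FILE_NAME in the shell hook below). Dropping the join subcommand from Exec presumably lets the app also be launched without a URL, while mimeTypes keeps it registered as the x-scheme-handler/clan handler and startupWMClass lets window managers group its windows.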

pkgs/clan-vm-manager/demo.sh Executable file
View File

@@ -0,0 +1,52 @@
#!/usr/bin/env bash
set -e -o pipefail
check_git_tag() {
local repo_path="$1"
local target_tag="$2"
# Change directory to the specified Git repository
pushd "$repo_path" > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo "Error: Failed to change directory to $repo_path"
return 1
fi
# Get the current Git tag
local current_tag=$(git describe --tags --exact-match 2>/dev/null)
# Restore the original directory
popd > /dev/null 2>&1
# Check that the current tag matches the target tag
if [ "$current_tag" = "$target_tag" ]; then
echo "Current Git tag in $repo_path is $target_tag"
else
echo "Error: Current Git tag in $repo_path is not $target_tag"
exit 1
fi
}
if [ -z "$1" ]; then
echo "Usage: $0 <democlan>"
exit 1
fi
democlan="$1"
check_git_tag "$democlan" "v2.2"
check_git_tag "." "v2.2"
rm -rf ~/.config/clan
clan history add "clan://$democlan#localsend-wayland1"
clear
cat << EOF
Open up this link in a browser:
"clan://$democlan#localsend-wayland2"
EOF
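Expected usage, assuming a democlan checkout at a hypothetical path: ./demo.sh ~/src/democlan. The script refuses to run unless both that checkout and the current repository sit exactly on the v2.2 tag, then wipes ~/.config/clan and pre-registers the first localsend machine, so every demo starts from the same clean state.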

View File

@@ -1,12 +1,17 @@
-{ clan-vm-manager, libadwaita, clan-cli, mkShell, ruff, desktop-file-utils, xdg-utils, mypy, python3Packages }:
+{ lib, stdenv, clan-vm-manager, gtk4, libadwaita, clan-cli, mkShell, ruff, desktop-file-utils, xdg-utils, mypy, python3Packages }:
mkShell {
inherit (clan-vm-manager) propagatedBuildInputs buildInputs;
+linuxOnlyPackages = lib.optionals stdenv.isLinux [
+xdg-utils
+];
nativeBuildInputs = [
ruff
desktop-file-utils
xdg-utils
mypy
python3Packages.ipdb
+gtk4.dev
+libadwaita.devdoc # has the demo called 'adwaita-1-demo'
] ++ clan-vm-manager.nativeBuildInputs;
@@ -18,19 +23,22 @@ mkShell {
# prepend clan-cli for development
export PYTHONPATH=../clan-cli:$PYTHONPATH
ln -snf ${clan-vm-manager} result
+if ! command -v xdg-mime &> /dev/null; then
+echo "Warning: 'xdg-mime' is not available. The desktop file cannot be installed."
+fi
# install desktop file
set -eou pipefail
-DESKTOP_DST=~/.local/share/applications/clan-vm-manager.desktop
-DESKTOP_SRC=${clan-vm-manager}/share/applications/clan-vm-manager.desktop
-UI_BIN=${clan-vm-manager}/bin/clan-vm-manager
+DESKTOP_FILE_NAME=lol.clan.vm.manager.desktop
+DESKTOP_DST=~/.local/share/applications/$DESKTOP_FILE_NAME
+DESKTOP_SRC=${clan-vm-manager}/share/applications/$DESKTOP_FILE_NAME
+UI_BIN="${clan-vm-manager}/bin/clan-vm-manager"
cp -f $DESKTOP_SRC $DESKTOP_DST
sleep 2
sed -i "s|Exec=.*clan-vm-manager|Exec=$UI_BIN|" $DESKTOP_DST
-xdg-mime default clan-vm-manager.desktop x-scheme-handler/clan
+xdg-mime default $DESKTOP_FILE_NAME x-scheme-handler/clan
echo "==== Validating desktop file installation ===="
set -x
desktop-file-validate $DESKTOP_DST
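Once the hook has run, the registration can be spot-checked with xdg-mime query default x-scheme-handler/clan, which should print lol.clan.vm.manager.desktop; opening any clan:// URL via xdg-open should then land in the freshly symlinked build.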

View File

@@ -28,6 +28,12 @@
hash = "sha256-dxz4AmeJAweffyPCayvykworQNntHtHeq6PXMXWsM5k=";
};
});
+# halalify zerotierone
+zerotierone = pkgs.zerotierone.overrideAttrs (_old: {
+meta = _old.meta // {
+license = lib.licenses.apsl20;
+};
+});
};
};
}
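The zerotierone override swaps only meta.license, presumably because ZeroTier's BSL is marked unfree in nixpkgs and relabeling it as apsl20 lets evaluation proceed without allowUnfree; the source and resulting binaries are unchanged.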