clan-vm-manager: Restore to known good version
This commit is contained in:
132
pkgs/clan-vm-manager/clan_vm_manager/components/executor.py
Normal file
132
pkgs/clan-vm-manager/clan_vm_manager/components/executor.py
Normal file
@@ -0,0 +1,132 @@
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import traceback
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import gi
|
||||
|
||||
gi.require_version("GdkPixbuf", "2.0")
|
||||
|
||||
import dataclasses
|
||||
import multiprocessing as mp
|
||||
from collections.abc import Callable
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Kill the new process and all its children by sending a SIGTERM signal to the process group
|
||||
def _kill_group(proc: mp.Process) -> None:
|
||||
pid = proc.pid
|
||||
if proc.is_alive() and pid:
|
||||
os.killpg(pid, signal.SIGTERM)
|
||||
else:
|
||||
log.warning(f"Process '{proc.name}' with pid '{pid}' is already dead")
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
class MPProcess:
    """Immutable handle for a spawned worker: the process object plus the
    log file its stdout/stderr are redirected into."""

    name: str
    proc: mp.Process
    out_file: Path

    def kill_group(self) -> None:
        """Send SIGTERM to the worker's whole process group."""
        _kill_group(self.proc)
|
||||
|
||||
|
||||
def _set_proc_name(name: str) -> None:
|
||||
if sys.platform != "linux":
|
||||
return
|
||||
import ctypes
|
||||
|
||||
# Define the prctl function with the appropriate arguments and return type
|
||||
libc = ctypes.CDLL("libc.so.6")
|
||||
prctl = libc.prctl
|
||||
prctl.argtypes = [
|
||||
ctypes.c_int,
|
||||
ctypes.c_char_p,
|
||||
ctypes.c_ulong,
|
||||
ctypes.c_ulong,
|
||||
ctypes.c_ulong,
|
||||
]
|
||||
prctl.restype = ctypes.c_int
|
||||
|
||||
# Set the process name to "my_process"
|
||||
prctl(15, name.encode(), 0, 0, 0)
|
||||
|
||||
|
||||
def _init_proc(
    func: Callable,
    out_file: Path,
    proc_name: str,
    on_except: Callable[[Exception, mp.process.BaseProcess], None] | None,
    **kwargs: Any,
) -> None:
    """Entry point executed inside the spawned child process.

    Detaches into its own process group, redirects stdout/stderr into
    *out_file*, renames the process to *proc_name*, closes stdin, then runs
    ``func(**kwargs)``.  If *func* raises, *on_except* (when given) is
    invoked, the whole process group is SIGTERMed and the child exits 1.
    """
    # Create a new process group
    os.setsid()

    # Open stdout and stderr
    with open(out_file, "w") as out_fd:
        os.dup2(out_fd.fileno(), sys.stdout.fileno())
        os.dup2(out_fd.fileno(), sys.stderr.fileno())

    # Print some information
    pid = os.getpid()
    gpid = os.getpgid(pid=pid)

    # Set the process name
    _set_proc_name(proc_name)

    # Close stdin
    sys.stdin.close()

    linebreak = "=" * 5
    # Execute the main function
    print(linebreak + f" {func.__name__}:{pid} " + linebreak, file=sys.stderr)
    try:
        func(**kwargs)
    except Exception as ex:
        traceback.print_exc()
        if on_except is not None:
            on_except(ex, mp.current_process())

        # Kill the new process and all its children by sending a SIGTERM signal to the process group
        pid = os.getpid()
        gpid = os.getpgid(pid=pid)
        print(f"Killing process group pid={pid} gpid={gpid}", file=sys.stderr)
        os.killpg(gpid, signal.SIGTERM)
        sys.exit(1)
    # Don't use a finally block here, because we want the exitcode to be set to
    # 0 if the function returns normally
|
||||
# Don't use a finally block here, because we want the exitcode to be set to
|
||||
# 0 if the function returns normally
|
||||
|
||||
|
||||
def spawn(
    *,
    out_file: Path,
    on_except: Callable[[Exception, mp.process.BaseProcess], None] | None,
    func: Callable,
    **kwargs: Any,
) -> MPProcess:
    """Run ``func(**kwargs)`` in a freshly spawned, detached child process.

    The child's stdout/stderr are redirected into *out_file*; *on_except*
    (if given) is invoked inside the child when *func* raises.  Returns an
    MPProcess handle wrapping the started process.
    """
    # Prefer the forkserver start method so children do not inherit the
    # whole parent state; only set it if nobody has picked a method yet.
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method(method="forkserver")

    proc_name = f"MPExec:{func.__name__}"

    child = mp.Process(
        target=_init_proc,
        args=(func, out_file, proc_name, on_except),
        name=proc_name,
        kwargs=kwargs,
    )
    child.start()

    return MPProcess(name=proc_name, proc=child, out_file=out_file)
|
||||
220
pkgs/clan-vm-manager/clan_vm_manager/components/gkvstore.py
Normal file
220
pkgs/clan-vm-manager/clan_vm_manager/components/gkvstore.py
Normal file
@@ -0,0 +1,220 @@
|
||||
import logging
|
||||
from collections.abc import Callable
|
||||
from typing import Any, Generic, TypeVar
|
||||
|
||||
import gi
|
||||
|
||||
gi.require_version("Gio", "2.0")
|
||||
from gi.repository import Gio, GObject
|
||||
|
||||
log = logging.getLogger(__name__)


# Define type variables for key and value types
K = TypeVar("K")  # Key type
V = TypeVar(
    "V", bound=GObject.Object
)  # Value type, bound to GObject.GObject or its subclasses


class GKVStore(GObject.GObject, Gio.ListModel, Generic[K, V]):
    """
    A simple key-value store that implements the Gio.ListModel interface, with generic types for keys and values.
    Only use self[key] and del self[key] for accessing the items for better performance.
    This class could be optimized by having the objects remember their position in the list.
    """

    def __init__(self, gtype: type[V], key_gen: Callable[[V], K]) -> None:
        super().__init__()
        # GObject type of stored values; reported via get_item_type().
        self.gtype = gtype
        # Derives the dictionary key from a value (used by append/insert).
        self.key_gen = key_gen
        # From Python 3.7 onwards dictionaries are ordered by default
        self._items: dict[K, V] = dict()

    ##################################
    #                                #
    #    Gio.ListStore Interface     #
    #                                #
    ##################################
    @classmethod
    def new(cls: Any, gtype: type[V]) -> "GKVStore":
        # NOTE(review): __new__ bypasses __init__, so the returned instance
        # has neither _items nor key_gen set — looks unusable as-is; confirm
        # whether any caller relies on this before changing it.
        return cls.__new__(cls, gtype)

    def append(self, item: V) -> None:
        """Add *item* under the key derived by key_gen."""
        key = self.key_gen(item)
        self[key] = item

    def find(self, item: V) -> tuple[bool, int]:
        """Linear scan for *item* by equality; returns (found, position)."""
        log.warning("Finding is O(n) in GKVStore. Better use indexing")
        for i, v in enumerate(self.values()):
            if v == item:
                return True, i
        return False, -1

    def find_with_equal_func(
        self, item: V, equal_func: Callable[[V, V], bool]
    ) -> tuple[bool, int]:
        """Linear scan using a custom equality predicate."""
        log.warning("Finding is O(n) in GKVStore. Better use indexing")
        for i, v in enumerate(self.values()):
            if equal_func(v, item):
                return True, i
        return False, -1

    def find_with_equal_func_full(
        self, item: V, equal_func: Callable[[V, V, Any], bool], user_data: Any
    ) -> tuple[bool, int]:
        """Linear scan using a custom equality predicate with user data."""
        log.warning("Finding is O(n) in GKVStore. Better use indexing")
        for i, v in enumerate(self.values()):
            if equal_func(v, item, user_data):
                return True, i
        return False, -1

    def insert(self, position: int, item: V) -> None:
        """Insert *item* at *position* by rebuilding the tail of the dict."""
        log.warning("Inserting is O(n) in GKVStore. Better use append")
        log.warning(
            "This functions may have incorrect items_changed signal behavior. Please test it"
        )
        key = self.key_gen(item)
        if key in self._items:
            raise ValueError("Key already exists in the dictionary")
        if position < 0 or position > len(self._items):
            raise IndexError("Index out of range")

        # Temporary storage for items to be reinserted
        temp_list = [(k, self._items[k]) for k in list(self.keys())[position:]]

        # Delete items from the original dict
        for k in list(self.keys())[position:]:
            del self._items[k]

        # Insert the new key-value pair
        self._items[key] = item

        # Reinsert the items (index is not needed, so no enumerate here)
        for k, v in temp_list:
            self._items[k] = v

        # Notify the model of the changes
        self.items_changed(position, 0, 1)

    def insert_sorted(
        self, item: V, compare_func: Callable[[V, V, Any], int], user_data: Any
    ) -> None:
        raise NotImplementedError("insert_sorted is not implemented in GKVStore")

    def remove(self, position: int) -> None:
        """Remove the item at *position*; out-of-range positions are ignored."""
        if position < 0 or position >= self.get_n_items():
            return
        key = self.keys()[position]
        # __delitem__ emits items_changed itself; emitting it again here
        # (as the previous version did) signalled the removal twice.
        del self[key]

    def remove_all(self) -> None:
        """Drop every item and signal one bulk removal."""
        # Capture the size before clearing: len(self._items) is 0 afterwards,
        # so signalling after clear would report zero removals to listeners.
        n_removed = len(self._items)
        self._items.clear()
        self.items_changed(0, n_removed, 0)

    def sort(self, compare_func: Callable[[V, V, Any], int], user_data: Any) -> None:
        raise NotImplementedError("sort is not implemented in GKVStore")

    def splice(self, position: int, n_removals: int, additions: list[V]) -> None:
        raise NotImplementedError("splice is not implemented in GKVStore")

    ##################################
    #                                #
    #    Gio.ListModel Interface     #
    #                                #
    ##################################
    def get_item(self, position: int) -> V | None:
        """Return the item at *position*, or None when out of range."""
        if position < 0 or position >= self.get_n_items():
            return None
        # Access items by index since OrderedDict does not support direct indexing
        key = list(self._items.keys())[position]
        return self._items[key]

    def do_get_item(self, position: int) -> V | None:
        return self.get_item(position)

    def get_item_type(self) -> Any:
        return self.gtype.__gtype__  # type: ignore[attr-defined]

    def do_get_item_type(self) -> GObject.GType:
        return self.get_item_type()

    def get_n_items(self) -> int:
        return len(self._items)

    def do_get_n_items(self) -> int:
        return self.get_n_items()

    ##################################
    #                                #
    #        Dict Interface          #
    #                                #
    ##################################
    def keys(self) -> list[K]:
        return list(self._items.keys())

    def values(self) -> list[V]:
        return list(self._items.values())

    def items(self) -> list[tuple[K, V]]:
        return list(self._items.items())

    def get(self, key: K, default: V | None = None) -> V | None:
        return self._items.get(key, default)

    # O(1) operation if the key does not exist, O(n) if it does
    def __setitem__(self, key: K, value: V) -> None:
        # If the key already exists, remove it O(n)
        if key in self._items:
            log.debug("Updating an existing key in GKVStore is O(n)")
            position = self.keys().index(key)
            self._items[key] = value
            self.items_changed(position, 1, 1)
        else:
            # Add the new key-value pair
            self._items[key] = value
            position = max(len(self._items) - 1, 0)
            self.items_changed(position, 0, 1)

    # O(n) operation
    def __delitem__(self, key: K) -> None:
        position = self.keys().index(key)
        del self._items[key]
        self.items_changed(position, 1, 0)

    def __len__(self) -> int:
        return len(self._items)

    # O(1) operation
    def __getitem__(self, key: K) -> V:  # type: ignore[override]
        return self._items[key]

    def __contains__(self, key: K) -> bool:  # type: ignore[override]
        return key in self._items

    def __str__(self) -> str:
        resp = "GKVStore(\n"
        for k, v in self._items.items():
            resp += f"{k}: {v}\n"
        resp += ")"
        return resp

    def __repr__(self) -> str:
        return self._items.__str__()

    ##################################
    #                                #
    #         Custom Methods         #
    #                                #
    ##################################
    def first(self) -> V:
        """Return the first value; raises IndexError when empty."""
        return self.values()[0]

    def last(self) -> V:
        """Return the last value; raises IndexError when empty."""
        return self.values()[-1]

    def register_on_change(
        self, callback: Callable[["GKVStore[K,V]", int, int, int], None]
    ) -> None:
        """Subscribe *callback* to the items-changed signal."""
        self.connect("items-changed", callback)
|
||||
@@ -0,0 +1,10 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
import gi
|
||||
|
||||
gi.require_version("Gtk", "4.0")
|
||||
|
||||
|
||||
@dataclass
class ClanConfig:
    """Top-level GUI configuration: which view the main window shows first."""

    # Name of the view presented on startup (looked up by the window's
    # view stack — presumably; confirm against the caller).
    initial_view: str
|
||||
@@ -0,0 +1,74 @@
|
||||
import logging
|
||||
from collections.abc import Callable
|
||||
from typing import TypeVar
|
||||
|
||||
import gi
|
||||
|
||||
from clan_vm_manager import assets
|
||||
|
||||
gi.require_version("Adw", "1")
|
||||
from gi.repository import Adw, GdkPixbuf, Gio, GObject, Gtk
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
ListItem = TypeVar("ListItem", bound=GObject.Object)
|
||||
CustomStore = TypeVar("CustomStore", bound=Gio.ListModel)
|
||||
|
||||
|
||||
class EmptySplash(Gtk.Box):
    """Splash widget shown when no clans exist yet: a logo above a URL
    entry and a "Join" button that forward the entered text to *on_join*."""

    def __init__(self, on_join: Callable[[str], None]) -> None:
        super().__init__(orientation=Gtk.Orientation.VERTICAL)
        # Callback invoked with the entered clan URL when the user joins.
        self.on_join = on_join

        vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
        clan_icon = self.load_image(str(assets.get_asset("clan_black_notext.png")))

        if clan_icon:
            image = Gtk.Image.new_from_pixbuf(clan_icon)
        else:
            # Fall back to the stock "missing image" icon if the asset fails to load.
            image = Gtk.Image.new_from_icon_name("image-missing")
        # same as the clamp
        image.set_pixel_size(400)
        image.set_opacity(0.5)
        image.set_margin_top(20)
        image.set_margin_bottom(10)

        vbox.append(image)

        empty_label = Gtk.Label(label="Welcome to Clan! Join your first clan.")
        join_entry = Gtk.Entry()
        join_entry.set_placeholder_text("clan://<url>")
        join_entry.set_hexpand(True)

        join_button = Gtk.Button(label="Join")
        join_button.connect("clicked", self._on_join, join_entry)

        # Pressing Enter inside the entry behaves like clicking "Join".
        join_entry.connect("activate", lambda e: self._on_join(join_button, e))

        clamp = Adw.Clamp()
        clamp.set_maximum_size(400)
        clamp.set_margin_bottom(40)
        vbox.append(empty_label)
        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6)
        hbox.append(join_entry)
        hbox.append(join_button)
        vbox.append(hbox)
        clamp.set_child(vbox)

        self.append(clamp)

    def load_image(self, file_path: str) -> GdkPixbuf.Pixbuf | None:
        """Load *file_path* into a Pixbuf; log and return None on failure."""
        try:
            pixbuf = GdkPixbuf.Pixbuf.new_from_file(file_path)
            return pixbuf
        except Exception as e:
            log.error(f"Failed to load image: {e}")
            return None

    def _on_join(self, button: Gtk.Button, entry: Gtk.Entry) -> None:
        """
        Callback for the join button
        Extracts the text from the entry and calls the on_join callback
        """
        log.info(f"Splash screen: Joining {entry.get_text()}")
        self.on_join(entry.get_text())
|
||||
1189
pkgs/clan-vm-manager/clan_vm_manager/components/trayicon.py
Normal file
1189
pkgs/clan-vm-manager/clan_vm_manager/components/trayicon.py
Normal file
File diff suppressed because it is too large
Load Diff
384
pkgs/clan-vm-manager/clan_vm_manager/components/vmobj.py
Normal file
384
pkgs/clan-vm-manager/clan_vm_manager/components/vmobj.py
Normal file
@@ -0,0 +1,384 @@
|
||||
import logging
|
||||
import multiprocessing as mp
|
||||
import os
|
||||
import tempfile
|
||||
import threading
|
||||
import time
|
||||
import weakref
|
||||
from collections.abc import Callable, Generator
|
||||
from contextlib import contextmanager
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import IO, ClassVar
|
||||
|
||||
import gi
|
||||
from clan_cli import vms
|
||||
from clan_cli.clan_uri import ClanURI
|
||||
from clan_cli.dirs import vm_state_dir
|
||||
from clan_cli.history.add import HistoryEntry
|
||||
from clan_cli.machines.machines import Machine
|
||||
from clan_cli.vms.qemu import QMPWrapper
|
||||
|
||||
from clan_vm_manager.components.executor import MPProcess, spawn
|
||||
from clan_vm_manager.singletons.toast import (
|
||||
InfoToast,
|
||||
SuccessToast,
|
||||
ToastOverlay,
|
||||
WarningToast,
|
||||
)
|
||||
|
||||
gi.require_version("GObject", "2.0")
|
||||
gi.require_version("Gtk", "4.0")
|
||||
from gi.repository import Gio, GLib, GObject, Gtk
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class VMObject(GObject.Object):
    """GObject wrapper around one history-entry VM.

    Builds and runs the VM in background processes, streams their logs,
    and owns the GTK widgets (switch, progress bar) reflecting VM state.
    """

    # Define a custom signal with the name "vm_stopped" and a string argument for the message
    __gsignals__: ClassVar = {
        "vm_status_changed": (GObject.SignalFlags.RUN_FIRST, None, []),
        "vm_build_notify": (GObject.SignalFlags.RUN_FIRST, None, [bool, bool]),
    }

    def __init__(
        self,
        icon: Path,
        data: HistoryEntry,
        build_log_cb: Callable[[Gio.File], None],
    ) -> None:
        super().__init__()

        # Store the data from the history entry
        self.data: HistoryEntry = data

        # Invoked with the build-log Gio.File whenever the log changes.
        self.build_log_cb = build_log_cb

        # Dummy placeholders so the process attributes are never None;
        # replaced by real handles once building/running starts.
        self.vm_process: MPProcess = MPProcess(
            "vm_dummy", mp.Process(), Path("./dummy")
        )
        self.build_process: MPProcess = MPProcess(
            "build_dummy", mp.Process(), Path("./dummy")
        )
        self._start_thread: threading.Thread = threading.Thread()
        self.machine: Machine | None = None
        self.qmp_wrap: QMPWrapper | None = None

        # Watcher to stop the VM
        self.KILL_TIMEOUT: int = 20  # seconds
        self._stop_thread: threading.Thread = threading.Thread()

        # Build progress bar vars
        self.progress_bar: Gtk.ProgressBar = Gtk.ProgressBar()
        self.progress_bar.hide()
        self.progress_bar.set_hexpand(True)  # Horizontally expand
        self.prog_bar_id: int = 0

        # Create a temporary directory to store the logs
        self.log_dir: tempfile.TemporaryDirectory = tempfile.TemporaryDirectory(
            prefix="clan_vm-", suffix=f"-{self.data.flake.flake_attr}"
        )
        self._logs_id: int = 0
        self._log_file: IO[str] | None = None

        # To be able to set the switch state programmatically
        # we need to store the handler id returned by the connect method
        # and block the signal while we change the state. This is cursed.
        self.switch: Gtk.Switch = Gtk.Switch()
        self.switch_handler_id: int = self.switch.connect(
            "notify::active", self._on_switch_toggle
        )
        self.connect("vm_status_changed", self._on_vm_status_changed)

        # Make sure the VM is killed when the reference to this object is dropped
        self._finalizer: weakref.finalize = weakref.finalize(self, self._kill_ref_drop)

    def _vm_status_changed_task(self) -> bool:
        """GLib idle task that re-emits vm_status_changed on the main loop."""
        self.emit("vm_status_changed")
        return GLib.SOURCE_REMOVE

    def update(self, data: HistoryEntry) -> None:
        """Replace the backing history entry."""
        self.data = data

    def _on_vm_status_changed(self, source: "VMObject") -> None:
        """Sync switch/toast UI with the current build/run state."""
        # Signal may be emitted multiple times
        self.emit("vm_build_notify", self.is_building(), self.is_running())

        prev_state = self.switch.get_state()
        next_state = self.is_running() and not self.is_building()

        self.switch.set_state(next_state)
        if prev_state is False and next_state is True:
            ToastOverlay.use().add_toast_unique(
                SuccessToast(f"{source.data.flake.flake_attr} started").toast,
                "success.vm.start",
            )

        if self.switch.get_sensitive() is False and not self.is_building():
            self.switch.set_sensitive(True)

        exit_vm = self.vm_process.proc.exitcode
        exit_build = self.build_process.proc.exitcode
        exitc = exit_vm or exit_build
        if not self.is_running() and exitc != 0:
            with self.switch.handler_block(self.switch_handler_id):
                self.switch.set_active(False)
            log.error(f"VM exited with error. Exitcode: {exitc}")
            ToastOverlay.use().add_toast_unique(
                WarningToast(f"VM exited with error. Exitcode: {exitc}").toast,
                "warning.vm.exit",
            )

    def _on_switch_toggle(self, switch: Gtk.Switch, user_state: bool) -> None:
        """Start or stop the VM when the user flips the switch."""
        if switch.get_active():
            switch.set_state(False)
            switch.set_sensitive(False)
            self.start()
        else:
            switch.set_state(True)
            self.shutdown()
            switch.set_sensitive(False)

    # We use a context manager to create the machine object
    # and make sure it is destroyed when the context is exited
    @contextmanager
    def _create_machine(self) -> Generator[Machine, None, None]:
        uri = ClanURI.from_str(
            url=str(self.data.flake.flake_url), machine_name=self.data.flake.flake_attr
        )
        if uri.flake.is_local():
            self.machine = Machine(
                name=self.data.flake.flake_attr,
                flake=uri.flake,
            )
        if uri.flake.is_remote():
            self.machine = Machine(
                name=self.data.flake.flake_attr,
                flake=uri.flake,
            )
        assert self.machine is not None
        state_dir = vm_state_dir(
            flake_url=str(self.machine.flake.url), vm_name=self.machine.name
        )
        self.qmp_wrap = QMPWrapper(state_dir)
        assert self.machine is not None
        yield self.machine
        self.machine = None

    def _pulse_progress_bar_task(self) -> bool:
        """Keep pulsing the progress bar while it is visible."""
        if self.progress_bar.is_visible():
            self.progress_bar.pulse()
            return GLib.SOURCE_CONTINUE
        else:
            return GLib.SOURCE_REMOVE

    def __start(self) -> None:
        """Worker-thread body: build the VM, then run it until it stops."""
        with self._create_machine() as machine:
            # Start building VM
            tstart = datetime.now()
            log.info(f"Building VM {self.get_id()}")
            log_dir = Path(str(self.log_dir.name))

            # Start the build process
            self.build_process = spawn(
                on_except=None,
                out_file=log_dir / "build.log",
                func=vms.run.build_vm,
                machine=machine,
                tmpdir=log_dir,
            )

            gfile = Gio.File.new_for_path(str(log_dir / "build.log"))
            # Gio documentation:
            # Obtains a file monitor for the given file.
            # If no file notification mechanism exists, then regular polling of the file is used.
            g_monitor = gfile.monitor_file(Gio.FileMonitorFlags.NONE, None)
            g_monitor.connect("changed", self.on_logs_changed)

            GLib.idle_add(self._vm_status_changed_task)
            self.switch.set_sensitive(True)
            # Start the logs watcher
            self._logs_id = GLib.timeout_add(
                50, self._get_logs_task, self.build_process
            )
            if self._logs_id == 0:
                log.error("Failed to start VM log watcher")
            log.debug(f"Starting logs watcher on file: {self.build_process.out_file}")

            # Start the progress bar and show it
            self.progress_bar.show()
            self.prog_bar_id = GLib.timeout_add(100, self._pulse_progress_bar_task)
            if self.prog_bar_id == 0:
                log.error("Couldn't spawn a progress bar task")

            # Wait for the build to finish then hide the progress bar
            self.build_process.proc.join()
            tend = datetime.now()
            log.info(f"VM {self.get_id()} build took {tend - tstart}s")
            self.progress_bar.hide()

            # Check if the VM was built successfully
            if self.build_process.proc.exitcode != 0:
                log.error(f"Failed to build VM {self.get_id()}")
                GLib.idle_add(self._vm_status_changed_task)
                return
            log.info(f"Successfully built VM {self.get_id()}")

            # Start the VM
            self.vm_process = spawn(
                on_except=None,
                out_file=Path(str(self.log_dir.name)) / "vm.log",
                func=vms.run.run_vm,
                vm=self.data.flake.vm,
                cachedir=log_dir,
                socketdir=log_dir,
            )
            log.debug(f"Started VM {self.get_id()}")
            GLib.idle_add(self._vm_status_changed_task)

            # Start the logs watcher
            self._logs_id = GLib.timeout_add(50, self._get_logs_task, self.vm_process)
            if self._logs_id == 0:
                log.error("Failed to start VM log watcher")
            log.debug(f"Starting logs watcher on file: {self.vm_process.out_file}")

            # Wait for the VM to stop
            self.vm_process.proc.join()
            log.debug(f"VM {self.get_id()} has stopped")
            GLib.idle_add(self._vm_status_changed_task)

    def on_logs_changed(
        self,
        monitor: Gio.FileMonitor,
        file: Gio.File,
        other_file: Gio.File,
        event_type: Gio.FileMonitorEvent,
    ) -> None:
        """Forward the build-log file to the UI once changes hit the disk."""
        if event_type == Gio.FileMonitorEvent.CHANGES_DONE_HINT:
            # File was changed and the changes were written to disk
            # wire up the callback for setting the logs
            self.build_log_cb(file)

    def start(self) -> None:
        """Kick off build+run in a background thread; no-op if already running."""
        if self.is_running():
            # log.warn is a deprecated alias of log.warning
            log.warning("VM is already running. Ignoring start request")
            # The signal is declared without parameters; the handler receives
            # the emitting object implicitly, so no extra argument is passed
            # (emitting with one raises TypeError in PyGObject).
            self.emit("vm_status_changed")
            return
        log.debug(f"VM state dir {self.log_dir.name}")
        self._start_thread = threading.Thread(target=self.__start)
        self._start_thread.start()

    def _get_logs_task(self, proc: MPProcess) -> bool:
        """GLib timeout task: tail *proc*'s log file to our stdout."""
        if not proc.out_file.exists():
            return GLib.SOURCE_CONTINUE

        if not self._log_file:
            try:
                self._log_file = open(proc.out_file)
            except Exception as ex:
                log.exception(ex)
                self._log_file = None
                return GLib.SOURCE_REMOVE

        line = os.read(self._log_file.fileno(), 4096)
        if len(line) != 0:
            print(line.decode("utf-8"), end="", flush=True)

        if not proc.proc.is_alive():
            log.debug("Removing logs watcher")
            # Close the handle before dropping the reference; the previous
            # version leaked the file descriptor here.
            if self._log_file is not None:
                self._log_file.close()
            self._log_file = None
            return GLib.SOURCE_REMOVE

        return GLib.SOURCE_CONTINUE

    def is_running(self) -> bool:
        return self._start_thread.is_alive()

    def is_building(self) -> bool:
        return self.build_process.proc.is_alive()

    def is_shutting_down(self) -> bool:
        return self._stop_thread.is_alive()

    def get_id(self) -> str:
        """Stable identifier: flake URL plus machine attribute."""
        return f"{self.data.flake.flake_url}#{self.data.flake.flake_attr}"

    def __stop(self) -> None:
        """Worker-thread body: gracefully stop the VM, killing it on timeout."""
        log.info(f"Stopping VM {self.get_id()}")

        start_time = datetime.now()
        while self.is_running():
            diff = datetime.now() - start_time
            if diff.seconds > self.KILL_TIMEOUT:
                log.error(
                    f"VM {self.get_id()} has not stopped after {self.KILL_TIMEOUT}s. Killing it"
                )
                self.vm_process.kill_group()
                break
            if self.is_building():
                log.info(f"VM {self.get_id()} is still building. Killing it")
                self.build_process.kill_group()
                break
            if not self.machine:
                log.error(f"Machine object is None. Killing VM {self.get_id()}")
                self.vm_process.kill_group()
                break

            # Try to shutdown the VM gracefully using QMP
            try:
                assert self.qmp_wrap is not None
                with self.qmp_wrap.qmp_ctx() as qmp:
                    qmp.command("system_powerdown")
            except Exception as ex:
                log.debug(f"QMP command 'system_powerdown' ignored. Error: {ex}")

            # Try 20 times to stop the VM
            time.sleep(self.KILL_TIMEOUT / 20)
        GLib.idle_add(self._vm_status_changed_task)
        log.debug(f"VM {self.get_id()} has stopped")

        ToastOverlay.use().add_toast_unique(
            InfoToast(f"Stopped {self.get_id()}").toast, "info.vm.exit"
        )

    def shutdown(self) -> None:
        """Request a graceful stop in a background thread; no-op if stopped."""
        if not self.is_running():
            log.warning("VM not running. Ignoring shutdown request.")
            # Signal declared without parameters — emit with no extra args.
            self.emit("vm_status_changed")
            return
        if self.is_shutting_down():
            log.warning("Shutdown already in progress")
            self.emit("vm_status_changed")
            return
        self._stop_thread = threading.Thread(target=self.__stop)
        self._stop_thread.start()

    def _kill_ref_drop(self) -> None:
        """weakref.finalize hook: hard-kill the VM when this object dies."""
        if self.is_running():
            log.warning("Killing VM due to reference drop")
            self.kill()

    def kill(self) -> None:
        """Hard-kill both the VM and build process groups."""
        if not self.is_running():
            log.warning(f"Tried to kill VM {self.get_id()} which is not running")
            return
        log.info(f"Killing VM {self.get_id()} now")

        if self.vm_process.proc.is_alive():
            self.vm_process.kill_group()

        if self.build_process.proc.is_alive():
            self.build_process.kill_group()

    def read_whole_log(self) -> str:
        """Return the full VM log, or "" when the log file is missing."""
        if not self.vm_process.out_file.exists():
            log.error(f"Log file {self.vm_process.out_file} does not exist")
            return ""
        return self.vm_process.out_file.read_text()

    def __str__(self) -> str:
        return f"VM({self.get_id()})"

    def __repr__(self) -> str:
        return self.__str__()
|
||||
Reference in New Issue
Block a user