From 63d28a407d208cbd27a55d985a1df155c368e488 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mathieu=20Dupr=C3=A9?= Date: Mon, 9 Mar 2026 10:43:25 +0100 Subject: [PATCH] vm_manager: add add-to-cluster command for cluster mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add add_to_cluster() to bring an existing libvirt VM under cluster management by retrieving its XML, stripping disk devices, and delegating to create() for Ceph/Pacemaker registration. Expose it via the add-to-cluster subcommand in vm_manager_cmd with support for --new-name, --disk-bus, --progress, --nostart and all standard create/clone options. Co-Authored-By: Claude Sonnet 4.6 Signed-off-by: Mathieu Dupré --- vm_manager/__init__.py | 1 + vm_manager/vm_manager_cluster.py | 65 ++++++++++++++++++++++++++++++++ vm_manager/vm_manager_cmd.py | 52 ++++++++++++++++++++++++- 3 files changed, 117 insertions(+), 1 deletion(-) diff --git a/vm_manager/__init__.py b/vm_manager/__init__.py index 5f0ee17..23f5aa2 100644 --- a/vm_manager/__init__.py +++ b/vm_manager/__init__.py @@ -33,6 +33,7 @@ add_colocation, remove_pacemaker_remote, add_pacemaker_remote, + add_to_cluster, ) else: from .vm_manager_libvirt import ( diff --git a/vm_manager/vm_manager_cluster.py b/vm_manager/vm_manager_cluster.py index e6d9b76..fd31d3f 100644 --- a/vm_manager/vm_manager_cluster.py +++ b/vm_manager/vm_manager_cluster.py @@ -372,6 +372,71 @@ def create(vm_options_with_nones): logger.info("VM " + vm_options["name"] + " created successfully") +def add_to_cluster(vm_options_with_nones): + """ + Add an existing libvirt VM to the cluster. + Retrieves the VM XML from libvirt, strips its disk devices, and calls + create() to register it in Ceph/Pacemaker. 
+ :param vm_options_with_nones: dict with keys: + - name: existing libvirt VM name (required) + - image: path to the disk image to import into Ceph (optional, + defaults to the disk path from the libvirt VM definition) + - new_name: optional new VM name (if omitted, keeps original name) + - plus all optional create() args (disable, force, metadata, ...) + """ + vm_options = { + k: v for k, v in vm_options_with_nones.items() if v is not None + } + src_name = vm_options["name"] + target_name = vm_options.get("new_name", src_name) + _check_name(target_name) + + with LibVirtManager() as lvm: + if src_name not in lvm.list(): + raise Exception("VM " + src_name + " does not exist in libvirt") + xml = lvm._conn.lookupByName(src_name).XMLDesc(0) + + # Strip existing disk devices so _create_xml can add the Ceph RBD disk + # and extract the disk path if not provided + xml_root = ElementTree.fromstring(xml) + devices = xml_root.find("devices") + if devices is not None: + disks = devices.findall("disk") + if len(disks) > 1: + raise Exception( + "VM " + + src_name + + " has more than one disk, which is not" + + " supported in cluster mode" + ) + for disk in disks: + if "image" not in vm_options: + source = disk.find("source") + if source is not None: + disk_path = source.get("file") or source.get("dev") + if disk_path: + vm_options["image"] = disk_path + devices.remove(disk) + if "image" not in vm_options: + raise Exception( + "Could not determine disk image path from VM " + src_name + ) + clean_xml = ElementTree.tostring(xml_root, encoding="unicode") + + # If keeping the same name, remove from libvirt first to avoid conflicts + if target_name == src_name: + with LibVirtManager() as lvm: + domain = lvm._conn.lookupByName(src_name) + if domain.isActive(): + domain.destroy() + lvm.undefine(src_name) + + vm_options["name"] = target_name + vm_options["base_xml"] = clean_xml + create(vm_options) + logger.info("VM " + src_name + " imported as " + target_name) + + def remove(vm_name): 
""" Remove a VM from cluster diff --git a/vm_manager/vm_manager_cmd.py b/vm_manager/vm_manager_cmd.py index 8cab7f3..18884de 100755 --- a/vm_manager/vm_manager_cmd.py +++ b/vm_manager/vm_manager_cmd.py @@ -117,6 +117,10 @@ def main(): "remove_pacemaker_remote", help="Remove a pacemaker-remote resource for the VM", ) + import_parser = subparsers.add_parser( + "add-to-cluster", + help="Add an existing libvirt VM to the cluster", + ) for name, subparser in subparsers.choices.items(): if name not in ("list", "console"): @@ -192,7 +196,7 @@ def main(): "must be a valid type recognized by libvirt (default virtio)", ) - for p in [create_parser, clone_parser]: + for p in [create_parser, clone_parser, import_parser]: p.add_argument( "--disable", action="store_true", @@ -353,6 +357,44 @@ def main(): "--xml", type=str, required=False, help="VM libvirt XML path" ) + import_parser.add_argument( + "-i", + "--image", + type=str, + required=False, + default=None, + help="VM image disk to import into Ceph (default: use the disk" + " from the libvirt VM definition)", + ) + import_parser.add_argument( + "-p", + "--progress", + action="store_true", + required=False, + help="Print disk import progress bar", + ) + import_parser.add_argument( + "--disk-bus", + type=str, + required=False, + default="virtio", + help="Set the image disk bus type (default virtio)", + ) + import_parser.add_argument( + "--new-name", + type=str, + required=False, + default=None, + help="New VM name (if omitted, keeps the original libvirt VM " + "name)", + ) + import_parser.add_argument( + "--nostart", + action="store_true", + required=False, + help="Do not start the VM after import", + ) + create_snap_parser.add_argument( "--snap_name", type=str, @@ -536,6 +578,14 @@ def main(): remote_node_port=args.remote_port, remote_node_timeout=args.remote_timeout, ) + elif args.command == "add-to-cluster": + args.live_migration = args.enable_live_migration + args.crm_config_cmd = args.add_crm_config_cmd + if "disable" in 
args and args.disable: + args.enable = not args.disable + else: + args.enable = True + vm_manager.add_to_cluster(vars(args)) elif args.command == "autostart": vm_manager.autostart(args.name, args.enable) elif args.command == "console":