Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions vm_manager/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@
add_colocation,
remove_pacemaker_remote,
add_pacemaker_remote,
add_to_cluster,
)
else:
from .vm_manager_libvirt import (
Expand Down
65 changes: 65 additions & 0 deletions vm_manager/vm_manager_cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -372,6 +372,71 @@
logger.info("VM " + vm_options["name"] + " created successfully")


def add_to_cluster(vm_options_with_nones):
    """
    Add an existing libvirt VM to the cluster.

    Retrieves the VM XML from libvirt, strips its disk devices, and calls
    create() to register it in Ceph/Pacemaker.

    :param vm_options_with_nones: dict with keys:
        - name: existing libvirt VM name (required)
        - image: path to the disk image to import into Ceph (optional,
          defaults to the disk path from the libvirt VM definition)
        - new_name: optional new VM name (if omitted, keeps original name)
        - plus all optional create() args (disable, force, metadata, ...)
    :raise ValueError: if the VM does not exist in libvirt, has more than
        one disk, or no disk image path can be determined
    """
    # Drop None-valued entries so .get() defaults and "in" tests work
    vm_options = {
        k: v for k, v in vm_options_with_nones.items() if v is not None
    }
    src_name = vm_options["name"]
    target_name = vm_options.get("new_name", src_name)
    _check_name(target_name)

    with LibVirtManager() as lvm:
        if src_name not in lvm.list():
            # ValueError instead of bare Exception (SonarCloud finding);
            # still caught by existing "except Exception" callers
            raise ValueError("VM " + src_name + " does not exist in libvirt")
        xml = lvm._conn.lookupByName(src_name).XMLDesc(0)

    # Strip existing disk devices so _create_xml can add the Ceph RBD disk
    # and extract the disk path if the caller did not provide one
    xml_root = ElementTree.fromstring(xml)
    _strip_disks_and_find_image(xml_root, vm_options, src_name)
    if "image" not in vm_options:
        # Checked unconditionally so a definition without any <devices>
        # element (and no --image) fails here with a clear message instead
        # of deeper inside create()
        raise ValueError(
            "Could not determine disk image path from VM " + src_name
        )
    clean_xml = ElementTree.tostring(xml_root, encoding="unicode")

    # If keeping the same name, remove from libvirt first to avoid conflicts.
    # NOTE(review): if create() fails after this point the source VM is
    # already undefined in libvirt — confirm this is acceptable, or keep
    # the original XML around for manual recovery
    if target_name == src_name:
        _remove_from_libvirt(src_name)

    vm_options["name"] = target_name
    vm_options["base_xml"] = clean_xml
    create(vm_options)
    logger.info("VM " + src_name + " imported as " + target_name)


def _strip_disks_and_find_image(xml_root, vm_options, src_name):
    """
    Remove every <disk> device from xml_root in place and, when
    vm_options has no "image" yet, record the first disk's source path
    under vm_options["image"].

    :param xml_root: parsed libvirt domain XML (ElementTree Element)
    :param vm_options: options dict, possibly updated with an "image" key
    :param src_name: source VM name, used in error messages only
    :raise ValueError: if the VM defines more than one disk
    """
    devices = xml_root.find("devices")
    if devices is None:
        return
    disks = devices.findall("disk")
    if len(disks) > 1:
        raise ValueError(
            "VM "
            + src_name
            + " has more than one disk, which is not"
            + " supported in cluster mode"
        )
    for disk in disks:
        if "image" not in vm_options:
            source = disk.find("source")
            if source is not None:
                # "file" attr for file-backed disks, "dev" for block devices
                disk_path = source.get("file") or source.get("dev")
                if disk_path:
                    vm_options["image"] = disk_path
        devices.remove(disk)


def _remove_from_libvirt(name):
    """Stop the domain *name* if it is running, then undefine it."""
    with LibVirtManager() as lvm:
        domain = lvm._conn.lookupByName(name)
        if domain.isActive():
            domain.destroy()
        lvm.undefine(name)


def remove(vm_name):
"""
Remove a VM from cluster
Expand Down
52 changes: 51 additions & 1 deletion vm_manager/vm_manager_cmd.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,10 @@ def main():
"remove_pacemaker_remote",
help="Remove a pacemaker-remote resource for the VM",
)
import_parser = subparsers.add_parser(
"add-to-cluster",
help="Add an existing libvirt VM to the cluster",
)

for name, subparser in subparsers.choices.items():
if name not in ("list", "console"):
Expand Down Expand Up @@ -192,7 +196,7 @@ def main():
"must be a valid type recognized by libvirt (default virtio)",
)

for p in [create_parser, clone_parser]:
for p in [create_parser, clone_parser, import_parser]:
p.add_argument(
"--disable",
action="store_true",
Expand Down Expand Up @@ -353,6 +357,44 @@ def main():
"--xml", type=str, required=False, help="VM libvirt XML path"
)

import_parser.add_argument(
"-i",
"--image",
type=str,
required=False,
default=None,
help="VM image disk to import into Ceph (default: use the disk"
" from the libvirt VM definition)",
)
import_parser.add_argument(
"-p",
"--progress",
action="store_true",
required=False,
help="Print disk import progress bar",
)
import_parser.add_argument(
"--disk-bus",
type=str,
required=False,
default="virtio",
help="Set the image disk bus type (default virtio)",
)
import_parser.add_argument(
"--new-name",
type=str,
required=False,
default=None,
help="New VM name (if omitted, keeps the original libvirt VM "
"name)",
)
import_parser.add_argument(
"--nostart",
action="store_true",
required=False,
help="Do not start the VM after import",
)

create_snap_parser.add_argument(
"--snap_name",
type=str,
Expand Down Expand Up @@ -536,6 +578,14 @@ def main():
remote_node_port=args.remote_port,
remote_node_timeout=args.remote_timeout,
)
elif args.command == "add-to-cluster":
args.live_migration = args.enable_live_migration
args.crm_config_cmd = args.add_crm_config_cmd
if "disable" in args and args.disable:
args.enable = not args.disable
else:
args.enable = True
vm_manager.add_to_cluster(vars(args))
elif args.command == "autostart":
vm_manager.autostart(args.name, args.enable)
elif args.command == "console":
Expand Down