diff --git a/.claude/skills/test-playbooks.md b/.claude/skills/test-playbooks.md new file mode 100644 index 000000000..69aae059d --- /dev/null +++ b/.claude/skills/test-playbooks.md @@ -0,0 +1,40 @@ +--- +name: test-playbooks +description: Test Ansible playbooks across Ubuntu versions on target machines +--- + +## Prerequisites +- Virtualenv activated (`source .venv/bin/activate` or `source /opt/deepops/env/bin/activate`) +- Target machines provisioned and accessible via SSH +- Inventory configured: either `config/inventory` (static) or `config/maas-inventory.yml` (MAAS dynamic) + +## Steps +1. Verify connectivity: `ansible -m ping all` +2. Run playbook: `ansible-playbook playbooks/<playbook>.yml` +3. Verify results (check playbook output, run smoke tests on targets) +4. To test another OS version: reprovision targets with the new OS, re-run playbook, verify again + +## Test Matrix +| Playbook | Inventory groups needed | Test on 24.04 | Test on 22.04 | +|----------|------------------------|---------------|---------------| +| k8s-cluster.yml | kube_control_plane, kube_node, etcd | yes | yes | +| slurm-cluster.yml | slurm-master, slurm-node | yes | yes | +| ngc-ready-server.yml | (any host group) | yes | yes | + +## MAAS Users +If using MAAS dynamic inventory (`scripts/maas_inventory.py`), the deploy script automates provisioning: +```bash +./scripts/maas_deploy.sh --status # check VM state +./scripts/maas_deploy.sh --os noble --profile k8s # deploy + tag for K8s +./scripts/maas_deploy.sh --os jammy --profile slurm # deploy + tag for Slurm +./scripts/maas_deploy.sh --profile k8s --tags-only # re-tag without redeploying +./scripts/maas_deploy.sh --release # release VMs +``` +Profiles assign MAAS tags that the dynamic inventory maps to Ansible groups: +- **k8s**: first machine = `kube_control_plane` + `etcd`, remaining = `kube_node` +- **slurm**: first machine = `slurm-master`, remaining = `slurm-node` + +## Group Naming +- K8s groups use underscores: `kube_control_plane`, 
`kube_node`, `k8s_cluster` +- Slurm groups use hyphens: `slurm-master`, `slurm-node`, `slurm-cluster` +- Old hyphenated K8s names (`kube-master`, `kube-node`) are accepted via TAG_ALIASES diff --git a/ansible.cfg b/ansible.cfg index 349edff5d..e11773f66 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -2,7 +2,7 @@ collections_paths = ./collections roles_path = ./roles/galaxy:./roles:./submodules/kubespray/roles library = ./submodules/kubespray/library -inventory = ./config/inventory +inventory = ./config/inventory,./scripts/maas_inventory.py host_key_checking = False gathering = smart fact_caching = jsonfile diff --git a/config.example/maas-inventory.yml b/config.example/maas-inventory.yml index 6f7d62498..1482875e3 100644 --- a/config.example/maas-inventory.yml +++ b/config.example/maas-inventory.yml @@ -26,3 +26,7 @@ ssh_user: "ubuntu" # Only needed if target machines are on a private network that requires # jumping through a bastion host. Leave commented out for direct access. #ssh_bastion: "user@bastion-host" + +# Machines to manage (hostnames as they appear in MAAS) +# Used by maas_deploy.sh for deploy/release operations +#machines: "maas-worker maas-worker-2 maas-worker-3" diff --git a/scripts/maas_deploy.sh b/scripts/maas_deploy.sh new file mode 100755 index 000000000..3e7e4a34a --- /dev/null +++ b/scripts/maas_deploy.sh @@ -0,0 +1,528 @@ +#!/usr/bin/env bash +# maas_deploy.sh — Deploy, tag, and manage MAAS VMs for testing +# +# Usage: +# ./scripts/maas_deploy.sh [OPTIONS] [distro_series] +# +# Options: +# --os <series> Ubuntu series to deploy (default: noble) +# --profile <k8s|slurm> Apply MAAS tags for inventory grouping after deploy +# --tags-only Just apply/update tags without redeploying +# --release Release all VMs back to MAAS +# --status Show current VM status and tags +# -h, --help Show this help +# +# Configuration: +# Reads config/maas-inventory.yml (same config as maas_inventory.py). 
+# Environment variables override config file values: +# MAAS_API_URL, MAAS_API_KEY, MAAS_MACHINES, MAAS_SSH_USER, MAAS_SSH_PROXY +# +# Examples: +# ./scripts/maas_deploy.sh --os noble --profile k8s +# ./scripts/maas_deploy.sh --os jammy --profile slurm +# ./scripts/maas_deploy.sh --profile k8s --tags-only +# ./scripts/maas_deploy.sh --status +# ./scripts/maas_deploy.sh --release +# ./scripts/maas_deploy.sh noble # backward compat + +set -euo pipefail + +# Find repo root (parent of scripts/) +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" + +# MAAS status codes +STATUS_READY=4 +STATUS_DEPLOYED=6 +STATUS_DEPLOYING=9 +STATUS_RELEASING=12 + +# Known test tags — cleared before applying a profile +KNOWN_TEST_TAGS=( + kube_control_plane kube_node etcd + slurm-master slurm-node slurm-nfs slurm-cache slurm-metric slurm-login +) + +# --- Configuration ------------------------------------------------------------ + +load_config() { + local config_file="${REPO_ROOT}/config/maas-inventory.yml" + + # Parse config file if it exists (simple key: value parsing) + if [[ -f "$config_file" ]]; then + local key value line + while IFS= read -r line; do + # Skip comments and empty lines + [[ "$line" =~ ^[[:space:]]*# ]] && continue + [[ "$line" =~ ^[[:space:]]*$ ]] && continue + [[ "$line" != *:* ]] && continue + + key="${line%%:*}" + key="${key// /}" + value="${line#*:}" + value="${value#"${value%%[![:space:]]*}"}" # ltrim + value="${value%"${value##*[![:space:]]}"}" # rtrim + value="${value#\"}" ; value="${value%\"}" # strip double quotes + value="${value#\'}" ; value="${value%\'}" # strip single quotes + + case "$key" in + api_url) [[ -z "${MAAS_API_URL:-}" ]] && MAAS_API_URL="$value" ;; + api_key) [[ -z "${MAAS_API_KEY:-}" ]] && MAAS_API_KEY="$value" ;; + ssh_user) [[ -z "${MAAS_SSH_USER:-}" ]] && MAAS_SSH_USER="$value" ;; + ssh_bastion) [[ -z "${MAAS_SSH_PROXY:-}" ]] && MAAS_SSH_PROXY="ssh -W %h:%p -q ${value}" ;; + network) 
[[ -z "${MAAS_NETWORK:-}" ]] && MAAS_NETWORK="$value" ;; + machines) [[ -z "${MAAS_MACHINES:-}" ]] && MAAS_MACHINES="$value" ;; + esac + done < "$config_file" + fi + + # Defaults for anything still unset + MAAS_API_URL="${MAAS_API_URL:-}" + MAAS_API_KEY="${MAAS_API_KEY:-}" + MAAS_MACHINES="${MAAS_MACHINES:-maas-worker maas-worker-2 maas-worker-3}" + MAAS_SSH_USER="${MAAS_SSH_USER:-ubuntu}" + MAAS_SSH_PROXY="${MAAS_SSH_PROXY:-}" + MAAS_NETWORK="${MAAS_NETWORK:-}" + + # Validate required fields + if [[ -z "$MAAS_API_URL" ]]; then + echo "ERROR: MAAS_API_URL not configured" + echo "Set it in config/maas-inventory.yml or as an environment variable" + exit 1 + fi + if [[ -z "$MAAS_API_KEY" ]]; then + echo "ERROR: MAAS_API_KEY not configured" + echo "Set it in config/maas-inventory.yml or as an environment variable" + exit 1 + fi +} + +# --- Argument Parsing --------------------------------------------------------- + +ACTION="deploy" +DISTRO_SERIES="" +PROFILE="" + +parse_args() { + while [[ $# -gt 0 ]]; do + case "$1" in + --os) + DISTRO_SERIES="$2"; shift 2 ;; + --profile) + PROFILE="$2"; shift 2 ;; + --tags-only) + ACTION="tags-only"; shift ;; + --release) + ACTION="release"; shift ;; + --status) + ACTION="status"; shift ;; + -h|--help) + # Print header comment as help + sed -n '2,/^[^#]/{ /^#/s/^# \{0,1\}//p; }' "${BASH_SOURCE[0]}" + exit 0 ;; + -*) + echo "Unknown option: $1"; exit 1 ;; + *) + # Backward compat: positional arg is distro series + DISTRO_SERIES="$1"; shift ;; + esac + done + + # Default OS for deploy action + if [[ "$ACTION" == "deploy" && -z "$DISTRO_SERIES" ]]; then + DISTRO_SERIES="noble" + fi + + # Validate: --tags-only requires --profile + if [[ "$ACTION" == "tags-only" && -z "$PROFILE" ]]; then + echo "ERROR: --tags-only requires --profile " + exit 1 + fi + + # Validate profile name if given + if [[ -n "$PROFILE" && "$PROFILE" != "k8s" && "$PROFILE" != "slurm" ]]; then + echo "ERROR: Unknown profile '${PROFILE}' (valid: k8s, slurm)" + exit 1 + 
fi +} + +# --- MAAS API Helpers --------------------------------------------------------- + +maas_auth_header() { + local consumer_key token_key token_secret + IFS=':' read -r consumer_key token_key token_secret <<< "$MAAS_API_KEY" + local nonce timestamp + nonce=$(python3 -c "import uuid; print(uuid.uuid4().hex)") + timestamp=$(date +%s) + echo "OAuth oauth_version=\"1.0\", oauth_signature_method=\"PLAINTEXT\", oauth_consumer_key=\"${consumer_key}\", oauth_token=\"${token_key}\", oauth_signature=\"&${token_secret}\", oauth_nonce=\"${nonce}\", oauth_timestamp=\"${timestamp}\"" +} + +maas_get() { + local endpoint="$1" + curl -s -H "Authorization: $(maas_auth_header)" "${MAAS_API_URL}${endpoint}" +} + +maas_post() { + local endpoint="$1" + shift + curl -s -H "Authorization: $(maas_auth_header)" -X POST "${MAAS_API_URL}${endpoint}" "$@" +} + +get_system_id() { + local hostname="$1" + maas_get "/machines/?hostname=${hostname}" | python3 -c " +import json, sys +machines = json.load(sys.stdin) +if machines: + print(machines[0]['system_id']) +else: + print('', end='') +" +} + +get_status() { + local system_id="$1" + maas_get "/machines/${system_id}/" | python3 -c " +import json, sys +m = json.load(sys.stdin) +print(m['status']) +" +} + +get_ip() { + local system_id="$1" + local network_filter="${MAAS_NETWORK:-}" + maas_get "/machines/${system_id}/" | python3 -c " +import json, sys +m = json.load(sys.stdin) +network = '${network_filter}' +for iface in m.get('interface_set', []): + for link in iface.get('links', []): + ip = link.get('ip_address', '') + if network and ip and ip.startswith(network): + print(ip) + sys.exit(0) +# Fallback: first IP +for iface in m.get('interface_set', []): + for link in iface.get('links', []): + ip = link.get('ip_address', '') + if ip: + print(ip) + sys.exit(0) +" +} + +get_machine_info() { + # Returns: status_name|os_display|tags + local system_id="$1" + maas_get "/machines/${system_id}/" | python3 -c " +import json, sys +m = 
json.load(sys.stdin) +tags = ', '.join(m.get('tag_names', [])) +series = m.get('distro_series', '') +series_map = {'noble': '24.04', 'jammy': '22.04', 'focal': '20.04', 'bionic': '18.04'} +if series and m.get('osystem') == 'ubuntu': + os_info = 'Ubuntu ' + series_map.get(series, series) +elif series: + os_info = m.get('osystem', '') + '/' + series +else: + os_info = '-' +print(f'{m.get(\"status_name\", \"Unknown\")}|{os_info}|{tags}') +" +} + +wait_for_status() { + local system_id="$1" + local target_status="$2" + local hostname="$3" + local max_wait=600 + local elapsed=0 + local interval=10 + + while [ $elapsed -lt $max_wait ]; do + local status + status=$(get_status "$system_id") + if [ "$status" = "$target_status" ]; then + return 0 + fi + printf "." + sleep $interval + elapsed=$((elapsed + interval)) + done + echo "" + echo "ERROR: ${hostname} did not reach status ${target_status} within ${max_wait}s (current: ${status})" + return 1 +} + +wait_for_ssh() { + local ip="$1" + local hostname="$2" + local max_wait=120 + local elapsed=0 + local ssh_opts=(-o StrictHostKeyChecking=no -o ConnectTimeout=5) + + if [[ -n "$MAAS_SSH_PROXY" ]]; then + ssh_opts+=(-o "ProxyCommand=${MAAS_SSH_PROXY}") + fi + + while [ $elapsed -lt $max_wait ]; do + if ssh "${ssh_opts[@]}" "${MAAS_SSH_USER}@${ip}" "true" 2>/dev/null; then + return 0 + fi + printf "." 
+ sleep 5 + elapsed=$((elapsed + 5)) + done + echo "" + echo "ERROR: SSH to ${hostname} (${ip}) not available within ${max_wait}s" + return 1 +} + +# --- Tag Management ----------------------------------------------------------- + +ensure_tag_exists() { + local tag="$1" + # Create tag (ignore error if it already exists) + maas_post "/tags/" -d "name=${tag}" -d "comment=DeepOps test tag" >/dev/null 2>&1 || true +} + +add_tag_to_machine() { + local tag="$1" + local system_id="$2" + ensure_tag_exists "$tag" + maas_post "/tags/${tag}/?op=update_nodes" -d "add=${system_id}" >/dev/null 2>&1 +} + +remove_tag_from_machine() { + local tag="$1" + local system_id="$2" + maas_post "/tags/${tag}/?op=update_nodes" -d "remove=${system_id}" >/dev/null 2>&1 || true +} + +clear_test_tags() { + echo " Clearing existing test tags..." + for tag in "${KNOWN_TEST_TAGS[@]}"; do + for i in "${!HOSTNAMES[@]}"; do + remove_tag_from_machine "$tag" "${SIDS[$i]}" + done + done +} + +do_apply_profile() { + if [[ -z "$PROFILE" ]]; then + return 0 + fi + + echo "" + echo "--- Applying profile: ${PROFILE} ---" + + # Clear existing test tags first (clean slate) + clear_test_tags + + local idx=0 + for i in "${!HOSTNAMES[@]}"; do + local hostname="${HOSTNAMES[$i]}" + local sid="${SIDS[$i]}" + local tags="" + + case "$PROFILE" in + k8s) + if [[ $idx -eq 0 ]]; then + tags="kube_control_plane etcd" + else + tags="kube_node" + fi + ;; + slurm) + if [[ $idx -eq 0 ]]; then + tags="slurm-master" + else + tags="slurm-node" + fi + ;; + esac + + echo " ${hostname} -> ${tags}" + for tag in $tags; do + add_tag_to_machine "$tag" "$sid" + done + + idx=$((idx + 1)) + done + echo " Tags applied." 
+} + +# --- Actions ------------------------------------------------------------------ + +resolve_machines() { + HOSTNAMES=() + SIDS=() + for hostname in $MAAS_MACHINES; do + local sid + sid=$(get_system_id "$hostname") + if [[ -z "$sid" ]]; then + echo "ERROR: Machine '${hostname}' not found in MAAS" + exit 1 + fi + HOSTNAMES+=("$hostname") + SIDS+=("$sid") + done +} + +do_status() { + echo "=== MAAS VM Status ===" + echo "" + printf "%-18s %-10s %-14s %-16s %s\n" "HOSTNAME" "STATUS" "IP" "OS" "TAGS" + printf "%-18s %-10s %-14s %-16s %s\n" "--------" "------" "--" "--" "----" + + for i in "${!HOSTNAMES[@]}"; do + local sid="${SIDS[$i]}" + local hostname="${HOSTNAMES[$i]}" + local info ip + info=$(get_machine_info "$sid") + ip=$(get_ip "$sid" 2>/dev/null || echo "n/a") + + local status_name os_info tags + IFS='|' read -r status_name os_info tags <<< "$info" + + printf "%-18s %-10s %-14s %-16s %s\n" \ + "$hostname" "$status_name" "${ip:-n/a}" "$os_info" "$tags" + done + echo "" +} + +do_release() { + echo "=== Releasing all machines ===" + echo "" + + for i in "${!HOSTNAMES[@]}"; do + local hostname="${HOSTNAMES[$i]}" + local sid="${SIDS[$i]}" + local status + status=$(get_status "$sid") + if [[ "$status" == "$STATUS_DEPLOYED" ]]; then + echo " Releasing ${hostname}..." + maas_post "/machines/${sid}/" -d "op=release" >/dev/null + elif [[ "$status" == "$STATUS_READY" ]]; then + echo " ${hostname} already ready" + else + echo " ${hostname} status=${status}, attempting release..." + maas_post "/machines/${sid}/" -d "op=release" >/dev/null 2>&1 || true + fi + done + + echo "" + echo "Waiting for Ready state..." + for i in "${!HOSTNAMES[@]}"; do + printf " Waiting for ${HOSTNAMES[$i]}" + wait_for_status "${SIDS[$i]}" "$STATUS_READY" "${HOSTNAMES[$i]}" + echo " Ready" + done + + echo "" + echo "All machines released." 
+} + +do_deploy() { + echo "=== MAAS VM Deploy ===" + echo "API: ${MAAS_API_URL}" + echo "Machines: ${MAAS_MACHINES}" + echo "OS: ${DISTRO_SERIES}" + [[ -n "$PROFILE" ]] && echo "Profile: ${PROFILE}" + echo "" + + for i in "${!HOSTNAMES[@]}"; do + echo " ${HOSTNAMES[$i]} -> ${SIDS[$i]}" + done + echo "" + + # Step 1: Release deployed machines + echo "--- Step 1: Releasing machines ---" + for i in "${!HOSTNAMES[@]}"; do + local hostname="${HOSTNAMES[$i]}" + local sid="${SIDS[$i]}" + local status + status=$(get_status "$sid") + if [[ "$status" == "$STATUS_DEPLOYED" ]]; then + echo " Releasing ${hostname}..." + maas_post "/machines/${sid}/" -d "op=release" >/dev/null + elif [[ "$status" == "$STATUS_READY" ]]; then + echo " ${hostname} already ready, skipping release" + else + echo " ${hostname} status=${status}, attempting release..." + maas_post "/machines/${sid}/" -d "op=release" >/dev/null 2>&1 || true + fi + done + + # Step 2: Wait for Ready + echo "" + echo "--- Step 2: Waiting for Ready state ---" + for i in "${!HOSTNAMES[@]}"; do + printf " Waiting for ${HOSTNAMES[$i]}" + wait_for_status "${SIDS[$i]}" "$STATUS_READY" "${HOSTNAMES[$i]}" + echo " Ready" + done + + # Step 3: Deploy + echo "" + echo "--- Step 3: Deploying ${DISTRO_SERIES} ---" + for i in "${!HOSTNAMES[@]}"; do + echo " Deploying ${HOSTNAMES[$i]} with ${DISTRO_SERIES}..." 
+ maas_post "/machines/${SIDS[$i]}/" -d "op=deploy" -d "distro_series=${DISTRO_SERIES}" >/dev/null + done + + # Step 4: Wait for Deployed + echo "" + echo "--- Step 4: Waiting for deployment (this takes a few minutes) ---" + for i in "${!HOSTNAMES[@]}"; do + printf " Waiting for ${HOSTNAMES[$i]}" + wait_for_status "${SIDS[$i]}" "$STATUS_DEPLOYED" "${HOSTNAMES[$i]}" + echo " Deployed" + done + + # Step 5: Verify SSH + echo "" + echo "--- Step 5: Verifying SSH connectivity ---" + local ssh_opts=(-o StrictHostKeyChecking=no) + if [[ -n "$MAAS_SSH_PROXY" ]]; then + ssh_opts+=(-o "ProxyCommand=${MAAS_SSH_PROXY}") + fi + for i in "${!HOSTNAMES[@]}"; do + local ip + ip=$(get_ip "${SIDS[$i]}") + printf " Waiting for SSH on ${HOSTNAMES[$i]} (${ip})" + wait_for_ssh "$ip" "${HOSTNAMES[$i]}" + local os_info + os_info=$(ssh "${ssh_opts[@]}" "${MAAS_SSH_USER}@${ip}" "lsb_release -ds" 2>/dev/null || echo "unknown") + echo " OK (${os_info})" + done + + # Step 6: Apply profile tags + do_apply_profile + + echo "" + echo "=== All machines deployed and accessible ===" + echo "" + echo "You can now run:" + echo " source .venv/bin/activate" + echo " ansible -m ping all" + [[ "$PROFILE" == "k8s" ]] && echo " ansible-playbook playbooks/k8s-cluster.yml" + [[ "$PROFILE" == "slurm" ]] && echo " ansible-playbook playbooks/slurm-cluster.yml" +} + +# --- Main --------------------------------------------------------------------- + +main() { + parse_args "$@" + load_config + resolve_machines + + case "$ACTION" in + status) do_status ;; + release) do_release ;; + tags-only) do_apply_profile ;; + deploy) do_deploy ;; + esac +} + +main "$@" diff --git a/scripts/maas_inventory.py b/scripts/maas_inventory.py index 3c6653b7d..6222c31dd 100755 --- a/scripts/maas_inventory.py +++ b/scripts/maas_inventory.py @@ -3,7 +3,7 @@ Queries a Canonical MAAS server's REST API and generates Ansible inventory based on machine tags. 
Machines tagged with Ansible group names (e.g., -"slurm-master", "kube-node") are placed into those groups automatically. +"slurm-master", "kube_node") are placed into those groups automatically. Only machines in the "Deployed" state (status=6) are included. @@ -38,10 +38,12 @@ from pathlib import Path # DeepOps group hierarchy: parent -> list of child groups. -# Tags in MAAS should match "leaf" group names (e.g., slurm-master, kube-node). -# These parent groups are auto-created using Ansible's "children" mechanism. +# Tags in MAAS should match "leaf" group names. Preferred K8s tags use +# underscores (kube_control_plane, kube_node); old hyphenated tags +# (kube-master, kube-node) are accepted via TAG_ALIASES below. +# Slurm groups retain hyphens (slurm-master, slurm-node). GROUP_CHILDREN = { - "k8s-cluster": ["kube-master", "kube-node"], + "k8s_cluster": ["kube_control_plane", "kube_node"], "slurm-cluster": [ "slurm-master", "slurm-node", "slurm-cache", "slurm-nfs", "slurm-metric", "slurm-login", @@ -52,6 +54,14 @@ "slurm-login": ["slurm-master"], } +# Backward-compatible tag aliases: old MAAS tag name -> canonical group name. +# Users can tag machines with either the old or new name. 
+TAG_ALIASES = { + "kube-master": "kube_control_plane", + "kube-node": "kube_node", + "k8s-cluster": "k8s_cluster", +} + def build_oauth1_header(api_key): """Build OAuth1 Authorization header for MAAS API.""" @@ -115,12 +125,17 @@ def load_config(): config[k] = v break - if not config["api_url"]: - print("Error: MAAS_API_URL not configured", file=sys.stderr) - sys.exit(1) - if not config["api_key"]: - print("Error: MAAS_API_KEY not configured", file=sys.stderr) - sys.exit(1) + # Detect unconfigured: empty values or placeholder templates from config.example + api_url = config["api_url"] + api_key = config["api_key"] + if (not api_url or not api_key + or "<" in api_url or "<" in api_key + or api_key == "CONSUMER_KEY:TOKEN_KEY:TOKEN_SECRET"): + # Return gracefully so ansible doesn't fail when MAAS isn't configured. + # This allows the dynamic inventory to coexist with static inventory + # in ansible.cfg without errors for users who don't use MAAS. + config["_unconfigured"] = True + return config # Defaults if not config["ssh_user"]: @@ -220,13 +235,14 @@ def build_inventory(config): inventory["_meta"]["hostvars"][hostname] = hostvars inventory["all"]["hosts"].append(hostname) - # Map tags to Ansible groups + # Map tags to Ansible groups (apply aliases for renamed K8s groups) for tag in tags: - if tag not in inventory: - inventory[tag] = {"hosts": [], "vars": {}} - elif "hosts" not in inventory[tag]: - inventory[tag]["hosts"] = [] - inventory[tag]["hosts"].append(hostname) + group = TAG_ALIASES.get(tag, tag) + if group not in inventory: + inventory[group] = {"hosts": [], "vars": {}} + elif "hosts" not in inventory[group]: + inventory[group]["hosts"] = [] + inventory[group]["hosts"].append(hostname) return inventory @@ -242,6 +258,14 @@ def main(): config = load_config() + # If MAAS is not configured, return empty inventory (no error) + if config.get("_unconfigured"): + if args.list: + print(json.dumps({"_meta": {"hostvars": {}}})) + else: + print(json.dumps({})) + 
return + if args.list: inventory = build_inventory(config) print(json.dumps(inventory, indent=2))